source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
SE_fg_grid_thrd_mex.c | #include "mex.h"
#include "SE_fgg.h"
#include "fgg_thrd.h"
void SE_FGG_MEX_params(SE_FGG_params*, const mxArray*, int);
#define X prhs[0]
#define Q prhs[1]
#define OPT prhs[2]
#define H_OUT plhs[0] // Output
#ifndef VERBOSE
#define VERBOSE 0
#endif
static inline
int get_idx0(const double x[3],
             const SE_FGG_params* params)
{
    /* Map a particle position to the row-major index of the first grid
     * point of its P-point Gaussian support, offset by P/2 into the
     * padded grid. Odd P centers on the nearest grid point (round),
     * even P on the cell to the left (floor). */
    const double h = params->h;
    const int p_half = params->P_half;
    const int odd = is_odd(params->P);

    int anchor[3];
    for (int d = 0; d < 3; d++)
    {
        const double s = x[d] / h;
        const int center = odd ? (int) round(s) : (int) floor(s);
        const int first  = odd ? (center - p_half)
                               : (center - (p_half - 1));
        /* shift into the padded grid */
        anchor[d] = first + p_half;
    }

    return __IDX3_RMAJ(anchor[0], anchor[1], anchor[2],
                       params->npdims[1], params->npdims[2]);
}
void mexFunction(int nlhs, mxArray *plhs[],
                 int nrhs, const mxArray *prhs[] )
{
    /* MEX gateway: spread N point charges onto the FGG grid (threaded).
     * Inputs:  X   (N-by-3 particle coordinates),
     *          Q   (N charges),
     *          OPT (parameter struct consumed by SE_FGG_MEX_params).
     * Output:  H_OUT, the 3-D gridded field (params.dims).
     * Fix: every '&params' argument below had been mangled to the
     * HTML entity sequence '&para;ms' by an encoding pass; restored. */
    const int N = mxGetM(X);
    double* restrict x = mxGetPr(X);
    double* restrict q = mxGetPr(Q);

    /* pack parameters */
    SE_FGG_params params;
    SE_FGG_MEX_params(&params, OPT, N);

    /* scratch arrays */
    SE_FGG_work work;
    SE_FGG_allocate_workspace(&work, &params, true, true);

    /* allocate output array and clear it */
    H_OUT = mxCreateNumericArray(3, params.dims, mxDOUBLE_CLASS, mxREAL);
    double* H_per = mxGetPr(H_OUT);
    SE_fp_set_zero(H_per, SE_prod3(params.dims));

    /* coordinates and charges */
    const SE_state st = {.x = x, .q = q};

    if(VERBOSE)
        mexPrintf("[SE%s FG(G) THRD] N=%d, P=%d\n",PER_STR,N,params.P);

    /* precompute the window and each particle's anchor grid index
     * (X is stored column-major: x, y, z are N apart) */
    SE_FGG_base_gaussian(&work, &params);
    for(int n=0; n < N; n++)
    {
        double xn[3];
        xn[0] = x[n];
        xn[1] = x[n+N];
        xn[2] = x[n+2*N];
        work.idx[n] = get_idx0(xn, &params);
    }

#if FGG_THRD
#pragma omp parallel
#else
#warning "Threading must be activated with -DFGG_THRD"
#endif
    {
        SE_FGG_grid(&work, &st, &params);
    }

    /* fold the padded working grid into the periodic output */
#ifdef THREE_PERIODIC
    SE_FGG_wrap_fcn(H_per, &work, &params);
#endif
#ifdef TWO_PERIODIC
    SE2P_FGG_wrap_fcn(H_per, &work, &params);
#endif
#ifdef ONE_PERIODIC
    SE1P_FGG_wrap_fcn(H_per, &work, &params);
#endif
#ifdef ZERO_PERIODIC
    SE0P_FGG_cpFromwork(H_per, &work, &params);
#endif

    /* done */
    SE_FGG_free_workspace(&work);
}
|
rose_matrixmultiply.c | /*
Naive matrix-matrix multiplication(mmm)
By C. Liao
*/
/* Square problem size: c (N x M) = a (N x K) * b (K x M). */
#define N 1000
#define M 1000
#define K 1000
/* Loop indices at file scope so OpenMP clauses can name them;
 * they are privatized in the parallel region below. */
int i;
int j;
int k;
/* Operand and result matrices (zero-initialized static storage). */
double a[1000][1000];
double b[1000][1000];
double c[1000][1000];
int mmm()
{
//#pragma omp parallel for private(i,j,k) shared(a,b,c)
#pragma omp parallel for private (i,j,k)
for (i = 0; i <= 999; i += 1) {
#pragma omp parallel for private (j,k)
for (j = 0; j <= 999; j += 1) {
for (k = 0; k <= 999; k += 1) {
c[i][j] = c[i][j] + a[i][k] * b[k][j];
}
}
}
return 0;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * Normalizes *y in place (callers must not rely on y afterwards).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds in y the other way. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization tv_usec is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Driver for the time-tiled order-2 3D 25-point stencil.
 * Usage: prog Nx Ny Nz Nt — interior sizes; 8 ghost cells are added. */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
/* NOTE(review): Nx/Ny/Nz/Nt remain uninitialized when fewer than
 * 4 arguments are given — UB below; confirm callers always pass all. */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* A[0]/A[1] double-buffer the field across time steps. */
double ****A = (double ****) malloc(sizeof(double***)*2);
/* NOTE(review): this first roc2 allocation is leaked — it is
 * overwritten by the malloc a few lines below. */
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
/* Tile sizes (time, z, y, x); -1 terminates the list. */
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 32;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
/* NOTE(review): initialization starts at index 1 and A[1] is never
 * seeded — boundary/ghost values are read uninitialized by the
 * stencil below; presumably benign for benchmarking, but confirm. */
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* Order-2 (radius-4) finite-difference coefficients. */
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
/* Each test run times the full Nt-step sweep; best time is kept. */
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* PLUTO/CLooG-generated diamond-tiled loop nest: t1 iterates over
 * time tiles, t2 (parallel) over z tiles, t3/t4 over y/x tiles,
 * t5 is the time step and t6/t7/t8 the z/y/x points. Do not
 * restructure by hand — regenerate from the tool instead. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32));t3<=min(min(min(floord(4*Nt+Ny-9,32),floord(12*t1+Ny+15,32)),floord(24*t2+Ny+11,32)),floord(24*t1-24*t2+Nz+Ny+13,32));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-6,8)),ceild(3*t1-14,16)),ceild(24*t2-Nz-51,64)),ceild(32*t3-Ny-51,64));t4<=min(min(min(min(floord(4*Nt+Nx-9,64),floord(12*t1+Nx+15,64)),floord(24*t2+Nx+11,64)),floord(32*t3+Nx+19,64)),floord(24*t1-24*t2+Nz+Nx+13,64));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(64*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),8*t3+6),16*t4+14);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
lbv=max(64*t4,4*t5+4);
ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
/* NOTE(review): the top-level A pointer and tile_size are never
 * freed (only A[0]/A[1] and their contents are) — minor leak at exit. */
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
strmm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/ztrmm.c, normal z -> s, Fri Sep 28 17:38:03 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_trmm
*
* Performs a triangular matrix-matrix multiply of the form
*
* \f[B = \alpha [op(A) \times B] \f], if side = PlasmaLeft or
* \f[B = \alpha [B \times op(A)] \f], if side = PlasmaRight
*
* where op( X ) is one of:
*
 *    - op(A) = A    or
 *    - op(A) = A^T  (transpose) or
 *    - op(A) = A^H  (conjugate transpose; identical to A^T for this real-valued routine)
*
* alpha is a scalar, B is an m-by-n matrix and A is a unit or non-unit, upper
* or lower triangular matrix.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
 *          - PlasmaNoTrans:   A is not transposed;
 *          - PlasmaTrans:     A is transposed;
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] m
* The number of rows of matrix B.
* m >= 0.
*
* @param[in] n
* The number of columns of matrix B.
* n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* The triangular matrix A of dimension lda-by-k, where k is m when
* side='L' or 'l' and k is n when when side='R' or 'r'. If uplo =
* PlasmaUpper, the leading k-by-k upper triangular part of the array
* A contains the upper triangular matrix, and the strictly lower
* triangular part of A is not referenced. If uplo = PlasmaLower, the
* leading k-by-k lower triangular part of the array A contains the
* lower triangular matrix, and the strictly upper triangular part of
* A is not referenced. If diag = PlasmaUnit, the diagonal elements of
* A are also not referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. When side='L' or 'l',
* lda >= max(1,m), when side='R' or 'r' then lda >= max(1,n).
*
* @param[in,out] pB
* On entry, the matrix B of dimension ldb-by-n.
* On exit, the result of a triangular matrix-matrix multiply
* ( alpha*op(A)*B ) or ( alpha*B*op(A) ).
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_strmm
* @sa plasma_ctrmm
* @sa plasma_dtrmm
* @sa plasma_strmm
*
******************************************************************************/
/* Triangular matrix-matrix multiply B = alpha*op(A)*B or alpha*B*op(A).
 * Validates arguments, converts A and B to tile layout, runs the async
 * tile algorithm, and converts B back. Returns PlasmaSuccess or a
 * negative argument index / PLASMA error code.
 * Fix: the return values of plasma_sequence_init() and
 * plasma_request_init() were assigned but never checked; on failure the
 * code proceeded with uninitialized sequence/request state. */
int plasma_strmm(plasma_enum_t side, plasma_enum_t uplo,
                 plasma_enum_t transa, plasma_enum_t diag,
                 int m, int n,
                 float alpha, float *pA, int lda,
                 float *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_error("illegal value of side");
        return -1;
    }
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if (transa != PlasmaConjTrans &&
        transa != PlasmaNoTrans &&
        transa != PlasmaTrans )
    {
        plasma_error("illegal value of transa");
        return -3;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        return -4;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -5;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -6;
    }
    // A is k-by-k where k depends on which side it multiplies from.
    int k = (side == PlasmaLeft) ? m : n;
    if (lda < imax(1, k)) {
        plasma_error("illegal value of lda");
        return -8;
    }
    if (ldb < imax(1, m)) {
        plasma_error("illegal value of ldb");
        return -10;
    }

    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trmm(plasma, PlasmaRealFloat, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_triangular_create(PlasmaRealFloat, uplo, nb, nb,
                                           k, k, 0, 0, k, k, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_triangular_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        m, n, 0, 0, m, n, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate matrices to tile layout.
        plasma_omp_str2desc(pA, lda, A, &sequence, &request);
        plasma_omp_sge2desc(pB, ldb, B, &sequence, &request);

        // Call tile async interface.
        plasma_omp_strmm(side, uplo, transa, diag,
                         alpha, A,
                                B,
                         &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_sdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    return sequence.status;
}
/***************************************************************************//**
*
* @ingroup plasma_trmm
*
* Performs triangular matrix multiplication. Non-blocking tile version of
* plasma_strmm(). May return before the computation is finished. Operates on
* matrices stored by tiles. All matrices are passed through descriptors. All
* dimensions are taken from the descriptors. Allows for pipelining of
* operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
 *          - PlasmaNoTrans:   A is not transposed;
 *          - PlasmaTrans:     A is transposed;
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of the triangular matrix A.
*
* @param[in,out] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_strmm
* @sa plasma_omp_ctrmm
* @sa plasma_omp_dtrmm
* @sa plasma_omp_strmm
*
******************************************************************************/
/* Non-blocking tile version of plasma_strmm(). Errors are reported via
 * sequence->status / request->status; returns void.
 * Fix: the NULL checks for sequence and request originally came *after*
 * several error paths that call plasma_request_fail(sequence, request, ...),
 * and the NULL-sequence branch itself passed the NULL sequence to
 * plasma_request_fail — a NULL dereference. The NULL checks now run first
 * and simply return, since there is no valid object to carry the status. */
void plasma_omp_strmm(plasma_enum_t side, plasma_enum_t uplo,
                      plasma_enum_t transa, plasma_enum_t diag,
                      float alpha, plasma_desc_t A,
                                   plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorNotInitialized);
        return;
    }

    // Check sequence and request before using them to report failures.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Check input arguments.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_error("illegal value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (transa != PlasmaConjTrans &&
        transa != PlasmaNoTrans &&
        transa != PlasmaTrans) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0 || A.n == 0 || B.m == 0 || B.n == 0)
        return;

    // alpha == 0 reduces to clearing B.
    if (alpha == 0.0) {
        float zzero = 0.0;
        plasma_pslaset(PlasmaGeneral, zzero, zzero, B, sequence, request);
        return;
    }

    // Call parallel function.
    plasma_pstrmm(side, uplo, transa, diag, alpha,
                  A, B,
                  sequence, request);
}
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define FOURCC_DX10 0x30315844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#define DDSEXT_DIMENSION_TEX2D 0x00000003
#define DDSEXTFLAGS_CUBEMAP 0x00000004
/* Mirror of Microsoft's DXGI_FORMAT enumeration used by DX10-header DDS
 * files. Values are implicit and sequential starting at 0, so the order
 * of enumerators below is load-bearing — do not insert or reorder. */
typedef enum DXGI_FORMAT
{
DXGI_FORMAT_UNKNOWN,
DXGI_FORMAT_R32G32B32A32_TYPELESS,
DXGI_FORMAT_R32G32B32A32_FLOAT,
DXGI_FORMAT_R32G32B32A32_UINT,
DXGI_FORMAT_R32G32B32A32_SINT,
DXGI_FORMAT_R32G32B32_TYPELESS,
DXGI_FORMAT_R32G32B32_FLOAT,
DXGI_FORMAT_R32G32B32_UINT,
DXGI_FORMAT_R32G32B32_SINT,
DXGI_FORMAT_R16G16B16A16_TYPELESS,
DXGI_FORMAT_R16G16B16A16_FLOAT,
DXGI_FORMAT_R16G16B16A16_UNORM,
DXGI_FORMAT_R16G16B16A16_UINT,
DXGI_FORMAT_R16G16B16A16_SNORM,
DXGI_FORMAT_R16G16B16A16_SINT,
DXGI_FORMAT_R32G32_TYPELESS,
DXGI_FORMAT_R32G32_FLOAT,
DXGI_FORMAT_R32G32_UINT,
DXGI_FORMAT_R32G32_SINT,
DXGI_FORMAT_R32G8X24_TYPELESS,
DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS,
DXGI_FORMAT_X32_TYPELESS_G8X24_UINT,
DXGI_FORMAT_R10G10B10A2_TYPELESS,
DXGI_FORMAT_R10G10B10A2_UNORM,
DXGI_FORMAT_R10G10B10A2_UINT,
DXGI_FORMAT_R11G11B10_FLOAT,
DXGI_FORMAT_R8G8B8A8_TYPELESS,
DXGI_FORMAT_R8G8B8A8_UNORM,
DXGI_FORMAT_R8G8B8A8_UNORM_SRGB,
DXGI_FORMAT_R8G8B8A8_UINT,
DXGI_FORMAT_R8G8B8A8_SNORM,
DXGI_FORMAT_R8G8B8A8_SINT,
DXGI_FORMAT_R16G16_TYPELESS,
DXGI_FORMAT_R16G16_FLOAT,
DXGI_FORMAT_R16G16_UNORM,
DXGI_FORMAT_R16G16_UINT,
DXGI_FORMAT_R16G16_SNORM,
DXGI_FORMAT_R16G16_SINT,
DXGI_FORMAT_R32_TYPELESS,
DXGI_FORMAT_D32_FLOAT,
DXGI_FORMAT_R32_FLOAT,
DXGI_FORMAT_R32_UINT,
DXGI_FORMAT_R32_SINT,
DXGI_FORMAT_R24G8_TYPELESS,
DXGI_FORMAT_D24_UNORM_S8_UINT,
DXGI_FORMAT_R24_UNORM_X8_TYPELESS,
DXGI_FORMAT_X24_TYPELESS_G8_UINT,
DXGI_FORMAT_R8G8_TYPELESS,
DXGI_FORMAT_R8G8_UNORM,
DXGI_FORMAT_R8G8_UINT,
DXGI_FORMAT_R8G8_SNORM,
DXGI_FORMAT_R8G8_SINT,
DXGI_FORMAT_R16_TYPELESS,
DXGI_FORMAT_R16_FLOAT,
DXGI_FORMAT_D16_UNORM,
DXGI_FORMAT_R16_UNORM,
DXGI_FORMAT_R16_UINT,
DXGI_FORMAT_R16_SNORM,
DXGI_FORMAT_R16_SINT,
DXGI_FORMAT_R8_TYPELESS,
DXGI_FORMAT_R8_UNORM,
DXGI_FORMAT_R8_UINT,
DXGI_FORMAT_R8_SNORM,
DXGI_FORMAT_R8_SINT,
DXGI_FORMAT_A8_UNORM,
DXGI_FORMAT_R1_UNORM,
DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
DXGI_FORMAT_R8G8_B8G8_UNORM,
DXGI_FORMAT_G8R8_G8B8_UNORM,
DXGI_FORMAT_BC1_TYPELESS,
DXGI_FORMAT_BC1_UNORM,
DXGI_FORMAT_BC1_UNORM_SRGB,
DXGI_FORMAT_BC2_TYPELESS,
DXGI_FORMAT_BC2_UNORM,
DXGI_FORMAT_BC2_UNORM_SRGB,
DXGI_FORMAT_BC3_TYPELESS,
DXGI_FORMAT_BC3_UNORM,
DXGI_FORMAT_BC3_UNORM_SRGB,
DXGI_FORMAT_BC4_TYPELESS,
DXGI_FORMAT_BC4_UNORM,
DXGI_FORMAT_BC4_SNORM,
DXGI_FORMAT_BC5_TYPELESS,
DXGI_FORMAT_BC5_UNORM,
DXGI_FORMAT_BC5_SNORM,
DXGI_FORMAT_B5G6R5_UNORM,
DXGI_FORMAT_B5G5R5A1_UNORM,
DXGI_FORMAT_B8G8R8A8_UNORM,
DXGI_FORMAT_B8G8R8X8_UNORM,
DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM,
DXGI_FORMAT_B8G8R8A8_TYPELESS,
DXGI_FORMAT_B8G8R8A8_UNORM_SRGB,
DXGI_FORMAT_B8G8R8X8_TYPELESS,
DXGI_FORMAT_B8G8R8X8_UNORM_SRGB,
DXGI_FORMAT_BC6H_TYPELESS,
DXGI_FORMAT_BC6H_UF16,
DXGI_FORMAT_BC6H_SF16,
DXGI_FORMAT_BC7_TYPELESS,
DXGI_FORMAT_BC7_UNORM,
DXGI_FORMAT_BC7_UNORM_SRGB,
DXGI_FORMAT_AYUV,
DXGI_FORMAT_Y410,
DXGI_FORMAT_Y416,
DXGI_FORMAT_NV12,
DXGI_FORMAT_P010,
DXGI_FORMAT_P016,
DXGI_FORMAT_420_OPAQUE,
DXGI_FORMAT_YUY2,
DXGI_FORMAT_Y210,
DXGI_FORMAT_Y216,
DXGI_FORMAT_NV11,
DXGI_FORMAT_AI44,
DXGI_FORMAT_IA44,
DXGI_FORMAT_P8,
DXGI_FORMAT_A8P8,
DXGI_FORMAT_B4G4R4A4_UNORM,
DXGI_FORMAT_P208,
DXGI_FORMAT_V208,
DXGI_FORMAT_V408,
DXGI_FORMAT_SAMPLER_FEEDBACK_MIN_MIP_OPAQUE,
DXGI_FORMAT_SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE,
DXGI_FORMAT_FORCE_UINT
} DXGI_FORMAT;
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
/* DDS_PIXELFORMAT section of the DDS header: capability flags (DDPF_*),
 * FourCC compression code, and the RGBA bit layout for uncompressed data. */
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
/* Parsed DDS_HEADER (plus the optional DX10 extension fields, 'ext*'):
 * surface dimensions, mipmap count, capability bits, and pixel format. */
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2,
extFormat,
extDimension,
extFlags,
extArraySize,
extFlags2;

/* Embedded DDS_PIXELFORMAT block. */
DDSPixelFormat
pixelformat;
} DDSInfo;
/* The four RGBA palette entries of a decoded DXT color block. */
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
/* 4-component float vector. */
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
/* 3-component float vector. */
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
/* One entry of the single-color lookup tables: endpoint pair and its
 * quantization error for encoding a solid color as a DXT block. */
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
/* Lookup row: the two candidate source blocks for a single color value. */
typedef struct _DDSSingleColorLookup
{
DDSSourceBlock sources[2];
} DDSSingleColorLookup;
/* Signature of a whole-image DDS decoder (per compression variant). */
typedef MagickBooleanType
DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
ExceptionInfo *);

/* Signature of a per-pixel-block decoder. */
typedef MagickBooleanType
DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *);
/*
  Single-color lookup table for a 5-bit channel: one entry per 8-bit input
  value, giving the best DDSSourceBlock endpoint pair for each of the two
  DXT interpolation modes.  Generated data — do not edit by hand.
*/
static const DDSSingleColorLookup DDSLookup_5_4[] =
{
  { { { 0, 0, 0 }, { 0, 0, 0 } } },
  { { { 0, 0, 1 }, { 0, 1, 1 } } },
  { { { 0, 0, 2 }, { 0, 1, 0 } } },
  { { { 0, 0, 3 }, { 0, 1, 1 } } },
  { { { 0, 0, 4 }, { 0, 2, 1 } } },
  { { { 1, 0, 3 }, { 0, 2, 0 } } },
  { { { 1, 0, 2 }, { 0, 2, 1 } } },
  { { { 1, 0, 1 }, { 0, 3, 1 } } },
  { { { 1, 0, 0 }, { 0, 3, 0 } } },
  { { { 1, 0, 1 }, { 1, 2, 1 } } },
  { { { 1, 0, 2 }, { 1, 2, 0 } } },
  { { { 1, 0, 3 }, { 0, 4, 0 } } },
  { { { 1, 0, 4 }, { 0, 5, 1 } } },
  { { { 2, 0, 3 }, { 0, 5, 0 } } },
  { { { 2, 0, 2 }, { 0, 5, 1 } } },
  { { { 2, 0, 1 }, { 0, 6, 1 } } },
  { { { 2, 0, 0 }, { 0, 6, 0 } } },
  { { { 2, 0, 1 }, { 2, 3, 1 } } },
  { { { 2, 0, 2 }, { 2, 3, 0 } } },
  { { { 2, 0, 3 }, { 0, 7, 0 } } },
  { { { 2, 0, 4 }, { 1, 6, 1 } } },
  { { { 3, 0, 3 }, { 1, 6, 0 } } },
  { { { 3, 0, 2 }, { 0, 8, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 0 }, { 0, 9, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 2 }, { 0, 10, 1 } } },
  { { { 3, 0, 3 }, { 0, 10, 0 } } },
  { { { 3, 0, 4 }, { 2, 7, 1 } } },
  { { { 4, 0, 4 }, { 2, 7, 0 } } },
  { { { 4, 0, 3 }, { 0, 11, 0 } } },
  { { { 4, 0, 2 }, { 1, 10, 1 } } },
  { { { 4, 0, 1 }, { 1, 10, 0 } } },
  { { { 4, 0, 0 }, { 0, 12, 0 } } },
  { { { 4, 0, 1 }, { 0, 13, 1 } } },
  { { { 4, 0, 2 }, { 0, 13, 0 } } },
  { { { 4, 0, 3 }, { 0, 13, 1 } } },
  { { { 4, 0, 4 }, { 0, 14, 1 } } },
  { { { 5, 0, 3 }, { 0, 14, 0 } } },
  { { { 5, 0, 2 }, { 2, 11, 1 } } },
  { { { 5, 0, 1 }, { 2, 11, 0 } } },
  { { { 5, 0, 0 }, { 0, 15, 0 } } },
  { { { 5, 0, 1 }, { 1, 14, 1 } } },
  { { { 5, 0, 2 }, { 1, 14, 0 } } },
  { { { 5, 0, 3 }, { 0, 16, 0 } } },
  { { { 5, 0, 4 }, { 0, 17, 1 } } },
  { { { 6, 0, 3 }, { 0, 17, 0 } } },
  { { { 6, 0, 2 }, { 0, 17, 1 } } },
  { { { 6, 0, 1 }, { 0, 18, 1 } } },
  { { { 6, 0, 0 }, { 0, 18, 0 } } },
  { { { 6, 0, 1 }, { 2, 15, 1 } } },
  { { { 6, 0, 2 }, { 2, 15, 0 } } },
  { { { 6, 0, 3 }, { 0, 19, 0 } } },
  { { { 6, 0, 4 }, { 1, 18, 1 } } },
  { { { 7, 0, 3 }, { 1, 18, 0 } } },
  { { { 7, 0, 2 }, { 0, 20, 0 } } },
  { { { 7, 0, 1 }, { 0, 21, 1 } } },
  { { { 7, 0, 0 }, { 0, 21, 0 } } },
  { { { 7, 0, 1 }, { 0, 21, 1 } } },
  { { { 7, 0, 2 }, { 0, 22, 1 } } },
  { { { 7, 0, 3 }, { 0, 22, 0 } } },
  { { { 7, 0, 4 }, { 2, 19, 1 } } },
  { { { 8, 0, 4 }, { 2, 19, 0 } } },
  { { { 8, 0, 3 }, { 0, 23, 0 } } },
  { { { 8, 0, 2 }, { 1, 22, 1 } } },
  { { { 8, 0, 1 }, { 1, 22, 0 } } },
  { { { 8, 0, 0 }, { 0, 24, 0 } } },
  { { { 8, 0, 1 }, { 0, 25, 1 } } },
  { { { 8, 0, 2 }, { 0, 25, 0 } } },
  { { { 8, 0, 3 }, { 0, 25, 1 } } },
  { { { 8, 0, 4 }, { 0, 26, 1 } } },
  { { { 9, 0, 3 }, { 0, 26, 0 } } },
  { { { 9, 0, 2 }, { 2, 23, 1 } } },
  { { { 9, 0, 1 }, { 2, 23, 0 } } },
  { { { 9, 0, 0 }, { 0, 27, 0 } } },
  { { { 9, 0, 1 }, { 1, 26, 1 } } },
  { { { 9, 0, 2 }, { 1, 26, 0 } } },
  { { { 9, 0, 3 }, { 0, 28, 0 } } },
  { { { 9, 0, 4 }, { 0, 29, 1 } } },
  { { { 10, 0, 3 }, { 0, 29, 0 } } },
  { { { 10, 0, 2 }, { 0, 29, 1 } } },
  { { { 10, 0, 1 }, { 0, 30, 1 } } },
  { { { 10, 0, 0 }, { 0, 30, 0 } } },
  { { { 10, 0, 1 }, { 2, 27, 1 } } },
  { { { 10, 0, 2 }, { 2, 27, 0 } } },
  { { { 10, 0, 3 }, { 0, 31, 0 } } },
  { { { 10, 0, 4 }, { 1, 30, 1 } } },
  { { { 11, 0, 3 }, { 1, 30, 0 } } },
  { { { 11, 0, 2 }, { 4, 24, 0 } } },
  { { { 11, 0, 1 }, { 1, 31, 1 } } },
  { { { 11, 0, 0 }, { 1, 31, 0 } } },
  { { { 11, 0, 1 }, { 1, 31, 1 } } },
  { { { 11, 0, 2 }, { 2, 30, 1 } } },
  { { { 11, 0, 3 }, { 2, 30, 0 } } },
  { { { 11, 0, 4 }, { 2, 31, 1 } } },
  { { { 12, 0, 4 }, { 2, 31, 0 } } },
  { { { 12, 0, 3 }, { 4, 27, 0 } } },
  { { { 12, 0, 2 }, { 3, 30, 1 } } },
  { { { 12, 0, 1 }, { 3, 30, 0 } } },
  { { { 12, 0, 0 }, { 4, 28, 0 } } },
  { { { 12, 0, 1 }, { 3, 31, 1 } } },
  { { { 12, 0, 2 }, { 3, 31, 0 } } },
  { { { 12, 0, 3 }, { 3, 31, 1 } } },
  { { { 12, 0, 4 }, { 4, 30, 1 } } },
  { { { 13, 0, 3 }, { 4, 30, 0 } } },
  { { { 13, 0, 2 }, { 6, 27, 1 } } },
  { { { 13, 0, 1 }, { 6, 27, 0 } } },
  { { { 13, 0, 0 }, { 4, 31, 0 } } },
  { { { 13, 0, 1 }, { 5, 30, 1 } } },
  { { { 13, 0, 2 }, { 5, 30, 0 } } },
  { { { 13, 0, 3 }, { 8, 24, 0 } } },
  { { { 13, 0, 4 }, { 5, 31, 1 } } },
  { { { 14, 0, 3 }, { 5, 31, 0 } } },
  { { { 14, 0, 2 }, { 5, 31, 1 } } },
  { { { 14, 0, 1 }, { 6, 30, 1 } } },
  { { { 14, 0, 0 }, { 6, 30, 0 } } },
  { { { 14, 0, 1 }, { 6, 31, 1 } } },
  { { { 14, 0, 2 }, { 6, 31, 0 } } },
  { { { 14, 0, 3 }, { 8, 27, 0 } } },
  { { { 14, 0, 4 }, { 7, 30, 1 } } },
  { { { 15, 0, 3 }, { 7, 30, 0 } } },
  { { { 15, 0, 2 }, { 8, 28, 0 } } },
  { { { 15, 0, 1 }, { 7, 31, 1 } } },
  { { { 15, 0, 0 }, { 7, 31, 0 } } },
  { { { 15, 0, 1 }, { 7, 31, 1 } } },
  { { { 15, 0, 2 }, { 8, 30, 1 } } },
  { { { 15, 0, 3 }, { 8, 30, 0 } } },
  { { { 15, 0, 4 }, { 10, 27, 1 } } },
  { { { 16, 0, 4 }, { 10, 27, 0 } } },
  { { { 16, 0, 3 }, { 8, 31, 0 } } },
  { { { 16, 0, 2 }, { 9, 30, 1 } } },
  { { { 16, 0, 1 }, { 9, 30, 0 } } },
  { { { 16, 0, 0 }, { 12, 24, 0 } } },
  { { { 16, 0, 1 }, { 9, 31, 1 } } },
  { { { 16, 0, 2 }, { 9, 31, 0 } } },
  { { { 16, 0, 3 }, { 9, 31, 1 } } },
  { { { 16, 0, 4 }, { 10, 30, 1 } } },
  { { { 17, 0, 3 }, { 10, 30, 0 } } },
  { { { 17, 0, 2 }, { 10, 31, 1 } } },
  { { { 17, 0, 1 }, { 10, 31, 0 } } },
  { { { 17, 0, 0 }, { 12, 27, 0 } } },
  { { { 17, 0, 1 }, { 11, 30, 1 } } },
  { { { 17, 0, 2 }, { 11, 30, 0 } } },
  { { { 17, 0, 3 }, { 12, 28, 0 } } },
  { { { 17, 0, 4 }, { 11, 31, 1 } } },
  { { { 18, 0, 3 }, { 11, 31, 0 } } },
  { { { 18, 0, 2 }, { 11, 31, 1 } } },
  { { { 18, 0, 1 }, { 12, 30, 1 } } },
  { { { 18, 0, 0 }, { 12, 30, 0 } } },
  { { { 18, 0, 1 }, { 14, 27, 1 } } },
  { { { 18, 0, 2 }, { 14, 27, 0 } } },
  { { { 18, 0, 3 }, { 12, 31, 0 } } },
  { { { 18, 0, 4 }, { 13, 30, 1 } } },
  { { { 19, 0, 3 }, { 13, 30, 0 } } },
  { { { 19, 0, 2 }, { 16, 24, 0 } } },
  { { { 19, 0, 1 }, { 13, 31, 1 } } },
  { { { 19, 0, 0 }, { 13, 31, 0 } } },
  { { { 19, 0, 1 }, { 13, 31, 1 } } },
  { { { 19, 0, 2 }, { 14, 30, 1 } } },
  { { { 19, 0, 3 }, { 14, 30, 0 } } },
  { { { 19, 0, 4 }, { 14, 31, 1 } } },
  { { { 20, 0, 4 }, { 14, 31, 0 } } },
  { { { 20, 0, 3 }, { 16, 27, 0 } } },
  { { { 20, 0, 2 }, { 15, 30, 1 } } },
  { { { 20, 0, 1 }, { 15, 30, 0 } } },
  { { { 20, 0, 0 }, { 16, 28, 0 } } },
  { { { 20, 0, 1 }, { 15, 31, 1 } } },
  { { { 20, 0, 2 }, { 15, 31, 0 } } },
  { { { 20, 0, 3 }, { 15, 31, 1 } } },
  { { { 20, 0, 4 }, { 16, 30, 1 } } },
  { { { 21, 0, 3 }, { 16, 30, 0 } } },
  { { { 21, 0, 2 }, { 18, 27, 1 } } },
  { { { 21, 0, 1 }, { 18, 27, 0 } } },
  { { { 21, 0, 0 }, { 16, 31, 0 } } },
  { { { 21, 0, 1 }, { 17, 30, 1 } } },
  { { { 21, 0, 2 }, { 17, 30, 0 } } },
  { { { 21, 0, 3 }, { 20, 24, 0 } } },
  { { { 21, 0, 4 }, { 17, 31, 1 } } },
  { { { 22, 0, 3 }, { 17, 31, 0 } } },
  { { { 22, 0, 2 }, { 17, 31, 1 } } },
  { { { 22, 0, 1 }, { 18, 30, 1 } } },
  { { { 22, 0, 0 }, { 18, 30, 0 } } },
  { { { 22, 0, 1 }, { 18, 31, 1 } } },
  { { { 22, 0, 2 }, { 18, 31, 0 } } },
  { { { 22, 0, 3 }, { 20, 27, 0 } } },
  { { { 22, 0, 4 }, { 19, 30, 1 } } },
  { { { 23, 0, 3 }, { 19, 30, 0 } } },
  { { { 23, 0, 2 }, { 20, 28, 0 } } },
  { { { 23, 0, 1 }, { 19, 31, 1 } } },
  { { { 23, 0, 0 }, { 19, 31, 0 } } },
  { { { 23, 0, 1 }, { 19, 31, 1 } } },
  { { { 23, 0, 2 }, { 20, 30, 1 } } },
  { { { 23, 0, 3 }, { 20, 30, 0 } } },
  { { { 23, 0, 4 }, { 22, 27, 1 } } },
  { { { 24, 0, 4 }, { 22, 27, 0 } } },
  { { { 24, 0, 3 }, { 20, 31, 0 } } },
  { { { 24, 0, 2 }, { 21, 30, 1 } } },
  { { { 24, 0, 1 }, { 21, 30, 0 } } },
  { { { 24, 0, 0 }, { 24, 24, 0 } } },
  { { { 24, 0, 1 }, { 21, 31, 1 } } },
  { { { 24, 0, 2 }, { 21, 31, 0 } } },
  { { { 24, 0, 3 }, { 21, 31, 1 } } },
  { { { 24, 0, 4 }, { 22, 30, 1 } } },
  { { { 25, 0, 3 }, { 22, 30, 0 } } },
  { { { 25, 0, 2 }, { 22, 31, 1 } } },
  { { { 25, 0, 1 }, { 22, 31, 0 } } },
  { { { 25, 0, 0 }, { 24, 27, 0 } } },
  { { { 25, 0, 1 }, { 23, 30, 1 } } },
  { { { 25, 0, 2 }, { 23, 30, 0 } } },
  { { { 25, 0, 3 }, { 24, 28, 0 } } },
  { { { 25, 0, 4 }, { 23, 31, 1 } } },
  { { { 26, 0, 3 }, { 23, 31, 0 } } },
  { { { 26, 0, 2 }, { 23, 31, 1 } } },
  { { { 26, 0, 1 }, { 24, 30, 1 } } },
  { { { 26, 0, 0 }, { 24, 30, 0 } } },
  { { { 26, 0, 1 }, { 26, 27, 1 } } },
  { { { 26, 0, 2 }, { 26, 27, 0 } } },
  { { { 26, 0, 3 }, { 24, 31, 0 } } },
  { { { 26, 0, 4 }, { 25, 30, 1 } } },
  { { { 27, 0, 3 }, { 25, 30, 0 } } },
  { { { 27, 0, 2 }, { 28, 24, 0 } } },
  { { { 27, 0, 1 }, { 25, 31, 1 } } },
  { { { 27, 0, 0 }, { 25, 31, 0 } } },
  { { { 27, 0, 1 }, { 25, 31, 1 } } },
  { { { 27, 0, 2 }, { 26, 30, 1 } } },
  { { { 27, 0, 3 }, { 26, 30, 0 } } },
  { { { 27, 0, 4 }, { 26, 31, 1 } } },
  { { { 28, 0, 4 }, { 26, 31, 0 } } },
  { { { 28, 0, 3 }, { 28, 27, 0 } } },
  { { { 28, 0, 2 }, { 27, 30, 1 } } },
  { { { 28, 0, 1 }, { 27, 30, 0 } } },
  { { { 28, 0, 0 }, { 28, 28, 0 } } },
  { { { 28, 0, 1 }, { 27, 31, 1 } } },
  { { { 28, 0, 2 }, { 27, 31, 0 } } },
  { { { 28, 0, 3 }, { 27, 31, 1 } } },
  { { { 28, 0, 4 }, { 28, 30, 1 } } },
  { { { 29, 0, 3 }, { 28, 30, 0 } } },
  { { { 29, 0, 2 }, { 30, 27, 1 } } },
  { { { 29, 0, 1 }, { 30, 27, 0 } } },
  { { { 29, 0, 0 }, { 28, 31, 0 } } },
  { { { 29, 0, 1 }, { 29, 30, 1 } } },
  { { { 29, 0, 2 }, { 29, 30, 0 } } },
  { { { 29, 0, 3 }, { 29, 30, 1 } } },
  { { { 29, 0, 4 }, { 29, 31, 1 } } },
  { { { 30, 0, 3 }, { 29, 31, 0 } } },
  { { { 30, 0, 2 }, { 29, 31, 1 } } },
  { { { 30, 0, 1 }, { 30, 30, 1 } } },
  { { { 30, 0, 0 }, { 30, 30, 0 } } },
  { { { 30, 0, 1 }, { 30, 31, 1 } } },
  { { { 30, 0, 2 }, { 30, 31, 0 } } },
  { { { 30, 0, 3 }, { 30, 31, 1 } } },
  { { { 30, 0, 4 }, { 31, 30, 1 } } },
  { { { 31, 0, 3 }, { 31, 30, 0 } } },
  { { { 31, 0, 2 }, { 31, 30, 1 } } },
  { { { 31, 0, 1 }, { 31, 31, 1 } } },
  { { { 31, 0, 0 }, { 31, 31, 0 } } }
};
/*
  Single-color lookup table for a 6-bit channel (green in 5:6:5): one
  entry per 8-bit input value, giving the best DDSSourceBlock endpoint
  pair for each of the two DXT interpolation modes.  Generated data —
  do not edit by hand.
*/
static const DDSSingleColorLookup DDSLookup_6_4[] =
{
  { { { 0, 0, 0 }, { 0, 0, 0 } } },
  { { { 0, 0, 1 }, { 0, 1, 0 } } },
  { { { 0, 0, 2 }, { 0, 2, 0 } } },
  { { { 1, 0, 1 }, { 0, 3, 1 } } },
  { { { 1, 0, 0 }, { 0, 3, 0 } } },
  { { { 1, 0, 1 }, { 0, 4, 0 } } },
  { { { 1, 0, 2 }, { 0, 5, 0 } } },
  { { { 2, 0, 1 }, { 0, 6, 1 } } },
  { { { 2, 0, 0 }, { 0, 6, 0 } } },
  { { { 2, 0, 1 }, { 0, 7, 0 } } },
  { { { 2, 0, 2 }, { 0, 8, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 0 }, { 0, 9, 0 } } },
  { { { 3, 0, 1 }, { 0, 10, 0 } } },
  { { { 3, 0, 2 }, { 0, 11, 0 } } },
  { { { 4, 0, 1 }, { 0, 12, 1 } } },
  { { { 4, 0, 0 }, { 0, 12, 0 } } },
  { { { 4, 0, 1 }, { 0, 13, 0 } } },
  { { { 4, 0, 2 }, { 0, 14, 0 } } },
  { { { 5, 0, 1 }, { 0, 15, 1 } } },
  { { { 5, 0, 0 }, { 0, 15, 0 } } },
  { { { 5, 0, 1 }, { 0, 16, 0 } } },
  { { { 5, 0, 2 }, { 1, 15, 0 } } },
  { { { 6, 0, 1 }, { 0, 17, 0 } } },
  { { { 6, 0, 0 }, { 0, 18, 0 } } },
  { { { 6, 0, 1 }, { 0, 19, 0 } } },
  { { { 6, 0, 2 }, { 3, 14, 0 } } },
  { { { 7, 0, 1 }, { 0, 20, 0 } } },
  { { { 7, 0, 0 }, { 0, 21, 0 } } },
  { { { 7, 0, 1 }, { 0, 22, 0 } } },
  { { { 7, 0, 2 }, { 4, 15, 0 } } },
  { { { 8, 0, 1 }, { 0, 23, 0 } } },
  { { { 8, 0, 0 }, { 0, 24, 0 } } },
  { { { 8, 0, 1 }, { 0, 25, 0 } } },
  { { { 8, 0, 2 }, { 6, 14, 0 } } },
  { { { 9, 0, 1 }, { 0, 26, 0 } } },
  { { { 9, 0, 0 }, { 0, 27, 0 } } },
  { { { 9, 0, 1 }, { 0, 28, 0 } } },
  { { { 9, 0, 2 }, { 7, 15, 0 } } },
  { { { 10, 0, 1 }, { 0, 29, 0 } } },
  { { { 10, 0, 0 }, { 0, 30, 0 } } },
  { { { 10, 0, 1 }, { 0, 31, 0 } } },
  { { { 10, 0, 2 }, { 9, 14, 0 } } },
  { { { 11, 0, 1 }, { 0, 32, 0 } } },
  { { { 11, 0, 0 }, { 0, 33, 0 } } },
  { { { 11, 0, 1 }, { 2, 30, 0 } } },
  { { { 11, 0, 2 }, { 0, 34, 0 } } },
  { { { 12, 0, 1 }, { 0, 35, 0 } } },
  { { { 12, 0, 0 }, { 0, 36, 0 } } },
  { { { 12, 0, 1 }, { 3, 31, 0 } } },
  { { { 12, 0, 2 }, { 0, 37, 0 } } },
  { { { 13, 0, 1 }, { 0, 38, 0 } } },
  { { { 13, 0, 0 }, { 0, 39, 0 } } },
  { { { 13, 0, 1 }, { 5, 30, 0 } } },
  { { { 13, 0, 2 }, { 0, 40, 0 } } },
  { { { 14, 0, 1 }, { 0, 41, 0 } } },
  { { { 14, 0, 0 }, { 0, 42, 0 } } },
  { { { 14, 0, 1 }, { 6, 31, 0 } } },
  { { { 14, 0, 2 }, { 0, 43, 0 } } },
  { { { 15, 0, 1 }, { 0, 44, 0 } } },
  { { { 15, 0, 0 }, { 0, 45, 0 } } },
  { { { 15, 0, 1 }, { 8, 30, 0 } } },
  { { { 15, 0, 2 }, { 0, 46, 0 } } },
  { { { 16, 0, 2 }, { 0, 47, 0 } } },
  { { { 16, 0, 1 }, { 1, 46, 0 } } },
  { { { 16, 0, 0 }, { 0, 48, 0 } } },
  { { { 16, 0, 1 }, { 0, 49, 0 } } },
  { { { 16, 0, 2 }, { 0, 50, 0 } } },
  { { { 17, 0, 1 }, { 2, 47, 0 } } },
  { { { 17, 0, 0 }, { 0, 51, 0 } } },
  { { { 17, 0, 1 }, { 0, 52, 0 } } },
  { { { 17, 0, 2 }, { 0, 53, 0 } } },
  { { { 18, 0, 1 }, { 4, 46, 0 } } },
  { { { 18, 0, 0 }, { 0, 54, 0 } } },
  { { { 18, 0, 1 }, { 0, 55, 0 } } },
  { { { 18, 0, 2 }, { 0, 56, 0 } } },
  { { { 19, 0, 1 }, { 5, 47, 0 } } },
  { { { 19, 0, 0 }, { 0, 57, 0 } } },
  { { { 19, 0, 1 }, { 0, 58, 0 } } },
  { { { 19, 0, 2 }, { 0, 59, 0 } } },
  { { { 20, 0, 1 }, { 7, 46, 0 } } },
  { { { 20, 0, 0 }, { 0, 60, 0 } } },
  { { { 20, 0, 1 }, { 0, 61, 0 } } },
  { { { 20, 0, 2 }, { 0, 62, 0 } } },
  { { { 21, 0, 1 }, { 8, 47, 0 } } },
  { { { 21, 0, 0 }, { 0, 63, 0 } } },
  { { { 21, 0, 1 }, { 1, 62, 0 } } },
  { { { 21, 0, 2 }, { 1, 63, 0 } } },
  { { { 22, 0, 1 }, { 10, 46, 0 } } },
  { { { 22, 0, 0 }, { 2, 62, 0 } } },
  { { { 22, 0, 1 }, { 2, 63, 0 } } },
  { { { 22, 0, 2 }, { 3, 62, 0 } } },
  { { { 23, 0, 1 }, { 11, 47, 0 } } },
  { { { 23, 0, 0 }, { 3, 63, 0 } } },
  { { { 23, 0, 1 }, { 4, 62, 0 } } },
  { { { 23, 0, 2 }, { 4, 63, 0 } } },
  { { { 24, 0, 1 }, { 13, 46, 0 } } },
  { { { 24, 0, 0 }, { 5, 62, 0 } } },
  { { { 24, 0, 1 }, { 5, 63, 0 } } },
  { { { 24, 0, 2 }, { 6, 62, 0 } } },
  { { { 25, 0, 1 }, { 14, 47, 0 } } },
  { { { 25, 0, 0 }, { 6, 63, 0 } } },
  { { { 25, 0, 1 }, { 7, 62, 0 } } },
  { { { 25, 0, 2 }, { 7, 63, 0 } } },
  { { { 26, 0, 1 }, { 16, 45, 0 } } },
  { { { 26, 0, 0 }, { 8, 62, 0 } } },
  { { { 26, 0, 1 }, { 8, 63, 0 } } },
  { { { 26, 0, 2 }, { 9, 62, 0 } } },
  { { { 27, 0, 1 }, { 16, 48, 0 } } },
  { { { 27, 0, 0 }, { 9, 63, 0 } } },
  { { { 27, 0, 1 }, { 10, 62, 0 } } },
  { { { 27, 0, 2 }, { 10, 63, 0 } } },
  { { { 28, 0, 1 }, { 16, 51, 0 } } },
  { { { 28, 0, 0 }, { 11, 62, 0 } } },
  { { { 28, 0, 1 }, { 11, 63, 0 } } },
  { { { 28, 0, 2 }, { 12, 62, 0 } } },
  { { { 29, 0, 1 }, { 16, 54, 0 } } },
  { { { 29, 0, 0 }, { 12, 63, 0 } } },
  { { { 29, 0, 1 }, { 13, 62, 0 } } },
  { { { 29, 0, 2 }, { 13, 63, 0 } } },
  { { { 30, 0, 1 }, { 16, 57, 0 } } },
  { { { 30, 0, 0 }, { 14, 62, 0 } } },
  { { { 30, 0, 1 }, { 14, 63, 0 } } },
  { { { 30, 0, 2 }, { 15, 62, 0 } } },
  { { { 31, 0, 1 }, { 16, 60, 0 } } },
  { { { 31, 0, 0 }, { 15, 63, 0 } } },
  { { { 31, 0, 1 }, { 24, 46, 0 } } },
  { { { 31, 0, 2 }, { 16, 62, 0 } } },
  { { { 32, 0, 2 }, { 16, 63, 0 } } },
  { { { 32, 0, 1 }, { 17, 62, 0 } } },
  { { { 32, 0, 0 }, { 25, 47, 0 } } },
  { { { 32, 0, 1 }, { 17, 63, 0 } } },
  { { { 32, 0, 2 }, { 18, 62, 0 } } },
  { { { 33, 0, 1 }, { 18, 63, 0 } } },
  { { { 33, 0, 0 }, { 27, 46, 0 } } },
  { { { 33, 0, 1 }, { 19, 62, 0 } } },
  { { { 33, 0, 2 }, { 19, 63, 0 } } },
  { { { 34, 0, 1 }, { 20, 62, 0 } } },
  { { { 34, 0, 0 }, { 28, 47, 0 } } },
  { { { 34, 0, 1 }, { 20, 63, 0 } } },
  { { { 34, 0, 2 }, { 21, 62, 0 } } },
  { { { 35, 0, 1 }, { 21, 63, 0 } } },
  { { { 35, 0, 0 }, { 30, 46, 0 } } },
  { { { 35, 0, 1 }, { 22, 62, 0 } } },
  { { { 35, 0, 2 }, { 22, 63, 0 } } },
  { { { 36, 0, 1 }, { 23, 62, 0 } } },
  { { { 36, 0, 0 }, { 31, 47, 0 } } },
  { { { 36, 0, 1 }, { 23, 63, 0 } } },
  { { { 36, 0, 2 }, { 24, 62, 0 } } },
  { { { 37, 0, 1 }, { 24, 63, 0 } } },
  { { { 37, 0, 0 }, { 32, 47, 0 } } },
  { { { 37, 0, 1 }, { 25, 62, 0 } } },
  { { { 37, 0, 2 }, { 25, 63, 0 } } },
  { { { 38, 0, 1 }, { 26, 62, 0 } } },
  { { { 38, 0, 0 }, { 32, 50, 0 } } },
  { { { 38, 0, 1 }, { 26, 63, 0 } } },
  { { { 38, 0, 2 }, { 27, 62, 0 } } },
  { { { 39, 0, 1 }, { 27, 63, 0 } } },
  { { { 39, 0, 0 }, { 32, 53, 0 } } },
  { { { 39, 0, 1 }, { 28, 62, 0 } } },
  { { { 39, 0, 2 }, { 28, 63, 0 } } },
  { { { 40, 0, 1 }, { 29, 62, 0 } } },
  { { { 40, 0, 0 }, { 32, 56, 0 } } },
  { { { 40, 0, 1 }, { 29, 63, 0 } } },
  { { { 40, 0, 2 }, { 30, 62, 0 } } },
  { { { 41, 0, 1 }, { 30, 63, 0 } } },
  { { { 41, 0, 0 }, { 32, 59, 0 } } },
  { { { 41, 0, 1 }, { 31, 62, 0 } } },
  { { { 41, 0, 2 }, { 31, 63, 0 } } },
  { { { 42, 0, 1 }, { 32, 61, 0 } } },
  { { { 42, 0, 0 }, { 32, 62, 0 } } },
  { { { 42, 0, 1 }, { 32, 63, 0 } } },
  { { { 42, 0, 2 }, { 41, 46, 0 } } },
  { { { 43, 0, 1 }, { 33, 62, 0 } } },
  { { { 43, 0, 0 }, { 33, 63, 0 } } },
  { { { 43, 0, 1 }, { 34, 62, 0 } } },
  { { { 43, 0, 2 }, { 42, 47, 0 } } },
  { { { 44, 0, 1 }, { 34, 63, 0 } } },
  { { { 44, 0, 0 }, { 35, 62, 0 } } },
  { { { 44, 0, 1 }, { 35, 63, 0 } } },
  { { { 44, 0, 2 }, { 44, 46, 0 } } },
  { { { 45, 0, 1 }, { 36, 62, 0 } } },
  { { { 45, 0, 0 }, { 36, 63, 0 } } },
  { { { 45, 0, 1 }, { 37, 62, 0 } } },
  { { { 45, 0, 2 }, { 45, 47, 0 } } },
  { { { 46, 0, 1 }, { 37, 63, 0 } } },
  { { { 46, 0, 0 }, { 38, 62, 0 } } },
  { { { 46, 0, 1 }, { 38, 63, 0 } } },
  { { { 46, 0, 2 }, { 47, 46, 0 } } },
  { { { 47, 0, 1 }, { 39, 62, 0 } } },
  { { { 47, 0, 0 }, { 39, 63, 0 } } },
  { { { 47, 0, 1 }, { 40, 62, 0 } } },
  { { { 47, 0, 2 }, { 48, 46, 0 } } },
  { { { 48, 0, 2 }, { 40, 63, 0 } } },
  { { { 48, 0, 1 }, { 41, 62, 0 } } },
  { { { 48, 0, 0 }, { 41, 63, 0 } } },
  { { { 48, 0, 1 }, { 48, 49, 0 } } },
  { { { 48, 0, 2 }, { 42, 62, 0 } } },
  { { { 49, 0, 1 }, { 42, 63, 0 } } },
  { { { 49, 0, 0 }, { 43, 62, 0 } } },
  { { { 49, 0, 1 }, { 48, 52, 0 } } },
  { { { 49, 0, 2 }, { 43, 63, 0 } } },
  { { { 50, 0, 1 }, { 44, 62, 0 } } },
  { { { 50, 0, 0 }, { 44, 63, 0 } } },
  { { { 50, 0, 1 }, { 48, 55, 0 } } },
  { { { 50, 0, 2 }, { 45, 62, 0 } } },
  { { { 51, 0, 1 }, { 45, 63, 0 } } },
  { { { 51, 0, 0 }, { 46, 62, 0 } } },
  { { { 51, 0, 1 }, { 48, 58, 0 } } },
  { { { 51, 0, 2 }, { 46, 63, 0 } } },
  { { { 52, 0, 1 }, { 47, 62, 0 } } },
  { { { 52, 0, 0 }, { 47, 63, 0 } } },
  { { { 52, 0, 1 }, { 48, 61, 0 } } },
  { { { 52, 0, 2 }, { 48, 62, 0 } } },
  { { { 53, 0, 1 }, { 56, 47, 0 } } },
  { { { 53, 0, 0 }, { 48, 63, 0 } } },
  { { { 53, 0, 1 }, { 49, 62, 0 } } },
  { { { 53, 0, 2 }, { 49, 63, 0 } } },
  { { { 54, 0, 1 }, { 58, 46, 0 } } },
  { { { 54, 0, 0 }, { 50, 62, 0 } } },
  { { { 54, 0, 1 }, { 50, 63, 0 } } },
  { { { 54, 0, 2 }, { 51, 62, 0 } } },
  { { { 55, 0, 1 }, { 59, 47, 0 } } },
  { { { 55, 0, 0 }, { 51, 63, 0 } } },
  { { { 55, 0, 1 }, { 52, 62, 0 } } },
  { { { 55, 0, 2 }, { 52, 63, 0 } } },
  { { { 56, 0, 1 }, { 61, 46, 0 } } },
  { { { 56, 0, 0 }, { 53, 62, 0 } } },
  { { { 56, 0, 1 }, { 53, 63, 0 } } },
  { { { 56, 0, 2 }, { 54, 62, 0 } } },
  { { { 57, 0, 1 }, { 62, 47, 0 } } },
  { { { 57, 0, 0 }, { 54, 63, 0 } } },
  { { { 57, 0, 1 }, { 55, 62, 0 } } },
  { { { 57, 0, 2 }, { 55, 63, 0 } } },
  { { { 58, 0, 1 }, { 56, 62, 1 } } },
  { { { 58, 0, 0 }, { 56, 62, 0 } } },
  { { { 58, 0, 1 }, { 56, 63, 0 } } },
  { { { 58, 0, 2 }, { 57, 62, 0 } } },
  { { { 59, 0, 1 }, { 57, 63, 1 } } },
  { { { 59, 0, 0 }, { 57, 63, 0 } } },
  { { { 59, 0, 1 }, { 58, 62, 0 } } },
  { { { 59, 0, 2 }, { 58, 63, 0 } } },
  { { { 60, 0, 1 }, { 59, 62, 1 } } },
  { { { 60, 0, 0 }, { 59, 62, 0 } } },
  { { { 60, 0, 1 }, { 59, 63, 0 } } },
  { { { 60, 0, 2 }, { 60, 62, 0 } } },
  { { { 61, 0, 1 }, { 60, 63, 1 } } },
  { { { 61, 0, 0 }, { 60, 63, 0 } } },
  { { { 61, 0, 1 }, { 61, 62, 0 } } },
  { { { 61, 0, 2 }, { 61, 63, 0 } } },
  { { { 62, 0, 1 }, { 62, 62, 1 } } },
  { { { 62, 0, 0 }, { 62, 62, 0 } } },
  { { { 62, 0, 1 }, { 62, 63, 0 } } },
  { { { 62, 0, 2 }, { 63, 62, 0 } } },
  { { { 63, 0, 1 }, { 63, 63, 1 } } },
  { { { 63, 0, 0 }, { 63, 63, 0 } } }
};
/*
  Per-channel single-color tables.  NOTE(review): presumably indexed by
  channel (red, green, blue) so that the 5-bit table serves red and blue
  and the 6-bit table serves green — confirm against the code that
  indexes DDS_LOOKUP.
*/
static const DDSSingleColorLookup*
  DDS_LOOKUP[] =
{
  DDSLookup_5_4,
  DDSLookup_6_4,
  DDSLookup_5_4
};
/*
  Macros
*/
/* Extract the raw 5/6/5-bit channel fields from a 16-bit 5:6:5 color. */
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
/* Expand the 5/6-bit fields to 8 bits by replicating the high bits. */
#define C565_red(x)   ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x)  ( (C565_b(x) << 3 | C565_b(x) >> 2))
/* Halve a mipmap dimension, never going below 1. */
#define DIV2(x)  ((x) > 1 ? ((x) >> 1) : 1)
/*
  Widen [min,max] so it spans at least `steps` levels within 0..255.
  Wrapped in do/while(0) so this multi-statement macro binds correctly
  when used as a single statement (e.g. in an unbraced if/else).
*/
#define FixRange(min, max, steps) \
do \
{ \
  if (min > max) \
    min = max; \
  if ((ssize_t) max - min < steps) \
    max = MagickMin(min + steps, 255); \
  if ((ssize_t) max - min < steps) \
    min = MagickMax(0, (ssize_t) max - steps); \
} while(0)
/*
  Dot product of two 3-vectors.  The expansion is fully parenthesized so
  the macro composes safely inside larger expressions (the previous
  unparenthesized form broke under e.g. `Dot(a,b)*2`).
*/
#define Dot(left, right) ((left.x*right.x) + (left.y*right.y) + (left.z*right.z))

#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
  = value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value

#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
  g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
Forward declarations
*/
static MagickBooleanType
WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *);
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  /* Component-wise sum: *destination = left + right. */
  DDSVector4
    sum;

  sum.x = right.x + left.x;
  sum.y = right.y + left.y;
  sum.z = right.z + left.z;
  sum.w = right.w + left.w;
  *destination = sum;
}
static inline void VectorClamp(DDSVector4 *value)
{
  /* Clamp every component of *value into the unit interval [0,1]. */
  if (value->x < 0.0f)
    value->x = 0.0f;
  else if (value->x > 1.0f)
    value->x = 1.0f;
  if (value->y < 0.0f)
    value->y = 0.0f;
  else if (value->y > 1.0f)
    value->y = 1.0f;
  if (value->z < 0.0f)
    value->z = 0.0f;
  else if (value->z > 1.0f)
    value->z = 1.0f;
  if (value->w < 0.0f)
    value->w = 0.0f;
  else if (value->w > 1.0f)
    value->w = 1.0f;
}
static inline void VectorClamp3(DDSVector3 *value)
{
  /* Clamp each of the three components of *value into [0,1]. */
  if (value->x < 0.0f)
    value->x = 0.0f;
  else if (value->x > 1.0f)
    value->x = 1.0f;
  if (value->y < 0.0f)
    value->y = 0.0f;
  else if (value->y > 1.0f)
    value->y = 1.0f;
  if (value->z < 0.0f)
    value->z = 0.0f;
  else if (value->z > 1.0f)
    value->z = 1.0f;
}
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  /* Copy x/y/z into the 3-vector, dropping the w component. */
  DDSVector3
    xyz;

  xyz.x = source.x;
  xyz.y = source.y;
  xyz.z = source.z;
  *destination = xyz;
}
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  /* Copy all four components of source into *destination. */
  DDSVector4
    copy;

  copy.w = source.w;
  copy.z = source.z;
  copy.y = source.y;
  copy.x = source.x;
  *destination = copy;
}
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  /* Component-wise fused form: *destination = c - a*b. */
  DDSVector4
    result;

  result.x = c.x - (b.x * a.x);
  result.y = c.y - (b.y * a.y);
  result.z = c.z - (b.z * a.z);
  result.w = c.w - (b.w * a.w);
  *destination = result;
}
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  /* Component-wise product: *destination = left * right. */
  DDSVector4
    product;

  product.x = right.x * left.x;
  product.y = right.y * left.y;
  product.z = right.z * left.z;
  product.w = right.w * left.w;
  *destination = product;
}
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  /* Component-wise product of two 3-vectors. */
  DDSVector3
    product;

  product.x = right.x * left.x;
  product.y = right.y * left.y;
  product.z = right.z * left.z;
  *destination = product;
}
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  /* Component-wise multiply-add: *destination = a*b + c. */
  DDSVector4
    result;

  result.x = c.x + (a.x * b.x);
  result.y = c.y + (a.y * b.y);
  result.z = c.z + (a.z * b.z);
  result.w = c.w + (a.w * b.w);
  *destination = result;
}
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  /* Component-wise multiply-add for 3-vectors: *destination = a*b + c. */
  DDSVector3
    result;

  result.x = c.x + (a.x * b.x);
  result.y = c.y + (a.y * b.y);
  result.z = c.z + (a.z * b.z);
  *destination = result;
}
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  /* Component-wise reciprocal (no zero check, as in the original). */
  DDSVector4
    inverse;

  inverse.x = 1.0f / value.x;
  inverse.y = 1.0f / value.y;
  inverse.z = 1.0f / value.z;
  inverse.w = 1.0f / value.w;
  *destination = inverse;
}
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  /* Component-wise difference: *destination = left - right. */
  DDSVector4
    difference;

  difference.x = left.x - right.x;
  difference.y = left.y - right.y;
  difference.z = left.z - right.z;
  difference.w = left.w - right.w;
  *destination = difference;
}
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  /* Component-wise difference of two 3-vectors. */
  DDSVector3
    difference;

  difference.x = left.x - right.x;
  difference.y = left.y - right.y;
  difference.z = left.z - right.z;
  *destination = difference;
}
static inline void VectorTruncate(DDSVector4 *value)
{
  /* Round each component toward zero (floor for positive, ceil otherwise). */
  if (value->x > 0.0f)
    value->x = floor(value->x);
  else
    value->x = ceil(value->x);
  if (value->y > 0.0f)
    value->y = floor(value->y);
  else
    value->y = ceil(value->y);
  if (value->z > 0.0f)
    value->z = floor(value->z);
  else
    value->z = ceil(value->z);
  if (value->w > 0.0f)
    value->w = floor(value->w);
  else
    value->w = ceil(value->w);
}
static inline void VectorTruncate3(DDSVector3 *value)
{
  /* Round each of the three components toward zero. */
  if (value->x > 0.0f)
    value->x = floor(value->x);
  else
    value->x = ceil(value->x);
  if (value->y > 0.0f)
    value->y = floor(value->y);
  else
    value->y = ceil(value->y);
  if (value->z > 0.0f)
    value->z = floor(value->z);
  else
    value->z = ceil(value->z);
}
static inline size_t ClampToLimit(const float value, const size_t limit)
{
  /*
    Round `value` to the nearest integer and clamp it into [0, limit].
    The sign check must happen in floating point *before* the integer
    conversion: the previous code converted first, so a negative input
    wrapped to a huge unsigned value and was clamped to `limit` instead
    of 0, and its `result < 0.0f` test on an unsigned could never fire.
    (Converting a negative float to size_t is also undefined behavior.)
  */
  size_t
    result;

  if (value <= 0.0f)
    return(0);
  result=(size_t) (value+0.5f);
  if (result > limit)
    return(limit);
  return(result);
}
static inline size_t ColorTo565(const DDSVector3 point)
{
  /* Pack a unit RGB vector into a 16-bit 5:6:5 color value. */
  size_t
    blue,
    green,
    red;

  red = ClampToLimit(31.0f*point.x,31);
  green = ClampToLimit(63.0f*point.y,63);
  blue = ClampToLimit(31.0f*point.z,31);
  return((red << 11) | (green << 5) | blue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  /* A DDS file begins with the four-byte magic "DDS " (with a space). */
  if ((length >= 4) &&
      (LocaleNCompare((char *) magick,"DDS ", 4) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  /*
    Parse the DDS header that follows the 4-byte magic (plus the optional
    DX10 extension header) into *dds_info.  Returns MagickFalse when a
    size field or a required flag does not match the DDS layout.
  */
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);
  /* Check header field: the header structure is always 124 bytes */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;
  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);
  /* Check required flags */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;
  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
  /* Read pixel format structure (its size field is always 32) */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;
  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
  /* Read optional DX10 header if available */
  if ((dds_info->pixelformat.flags & DDPF_FOURCC) &&
      (dds_info->pixelformat.fourcc == FOURCC_DX10))
    {
      dds_info->extFormat = ReadBlobLSBLong(image);
      dds_info->extDimension = ReadBlobLSBLong(image);
      dds_info->extFlags = ReadBlobLSBLong(image);
      dds_info->extArraySize = ReadBlobLSBLong(image);
      dds_info->extFlags2 = ReadBlobLSBLong(image);
    }
  else
    {
      /* No DX10 header: zero the extension fields so they read as unset */
      dds_info->extFormat = 0;
      dds_info->extDimension = 0;
      dds_info->extFlags = 0;
      dds_info->extArraySize = 0;
      dds_info->extFlags2 = 0;
    }
  return MagickTrue;
}
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  /*
    Write one decoded 4x4 DXT1 block into the pixel patch at (x,y); each
    texel's 2-bit code selects one of the four palette entries.  Returns
    MagickFalse when a translucent texel is found while the image's alpha
    trait is still undefined, so the caller can enable alpha and retry.
  */
  ssize_t
    bx,
    by;

  for (by = 0; by < 4; by++)
  {
    for (bx = 0; bx < 4; bx++)
    {
      unsigned char
        code;

      /* Skip texels that fall outside the image on the right/bottom edge. */
      if (((x + bx) >= (ssize_t) image->columns) ||
          ((y + by) >= (ssize_t) image->rows))
        continue;
      code=(unsigned char) ((bits >> ((by*4+bx)*2)) & 0x3);
      SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
      SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
      SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
      SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
      if ((colors.a[code] != 0) &&
          (image->alpha_trait == UndefinedPixelTrait))
        return(MagickFalse);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  /*
    Decode the mipmap levels that follow the main image, appending one
    image per level using `decoder`.  Mipmaps are only present for
    textures and cube maps.
  */
  MagickBooleanType
    status;

  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      ssize_t
        i;
      size_t
        h,
        w;

      /* Each level is half the previous size, never dropping below 1. */
      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (image->next == (Image *) NULL)
          return(MagickFalse);
        image->next->alpha_trait=image->alpha_trait;
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
        status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        if ((w == 1) && (h == 1))
          break;
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  /*
    Expand the two 5:6:5 endpoint colors of a DXT block into the block's
    four-entry palette.  When c0 > c1 (or alpha handling is disabled, as
    for DXT3/DXT5 color data) entries 2 and 3 are the 1/3 and 2/3
    interpolations; otherwise entry 2 is the midpoint and entry 3 is
    transparent black.
  */
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if ((ignoreAlpha == MagickFalse) && (c0 <= c1))
    {
      /* Three-color mode: midpoint plus a transparent fourth entry. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);
      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
      return;
    }
  /* Four-color mode: 1/3 and 2/3 interpolations between the endpoints. */
  c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
  c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
  c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);
  c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
  c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
  c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
}
static MagickBooleanType ReadDXT1Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  /*
    Decode DXT1 (BC1) compressed pixels: the stream is a sequence of
    8-byte blocks, each encoding a 4x4 texel patch as two 5:6:5 endpoint
    colors followed by 32 bits of 2-bit palette indices.
  */
  DDSColors
    colors;
  Quantum
    *q;
  ssize_t
    x;
  size_t
    bits;
  ssize_t
    y;
  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);
      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /*
            A translucent texel was found but alpha is not enabled yet:
            enable alpha for the whole image and decode the block again.
          */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Skip the mipmap images for compressed (DXTn) dds files
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /*
    Advance the blob position past the mipmap chain without decoding it.
    `texel_size` is the byte size of one compressed 4x4 block (8 for
    DXT1 as used by ReadDXT1; confirm other callers' sizes).
  */
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;
      ssize_t
        i;
      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Each level holds ceil(w/4)*ceil(h/4) compressed blocks. */
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
  Decode the base DXT1 (BC1) image, then either decode the mipmap chain or
  seek past it (8 bytes per 4x4 texel block).
*/
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,8,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
}
/*
  Decode DXT3 (BC2) pixel data: 16-byte 4x4 blocks, each holding 8 bytes of
  explicit 4-bit alpha followed by a DXT1-style color block.  Returns
  MagickFalse on pixel-cache failure or premature end of file.
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes) */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      /* MagickTrue: BC2 always decodes colors in 4-color (opaque) mode. */
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          /* Skip texels of a partial block that fall outside the image. */
          if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows)
          {
            /* Two bits per texel select one of the four palette colors. */
            code = (bits >> ((4*j+i)*2)) & 0x3;
            SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
            SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
            SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
            /*
              Extract alpha value: multiply 0..15 by 17 to get range 0..255
            */
            if (j < 2)
              alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
            else
              alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
            SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
            q+=GetPixelChannels(image);
          }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the base DXT3 (BC2) image, then either decode the mipmap chain or
  seek past it (16 bytes per 4x4 texel block).
*/
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
}
/*
  Decode DXT5 (BC3) pixel data: 16-byte 4x4 blocks, each holding two alpha
  endpoints plus 48 bits of 3-bit interpolated-alpha codes, followed by a
  DXT1-style color block.  Returns MagickFalse on pixel-cache failure or
  premature end of file.
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes) */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);
      /* Assemble the 48 bits of 3-bit alpha codes (little-endian). */
      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      /* MagickTrue: BC3 always decodes colors in 4-color (opaque) mode. */
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          /* Skip texels of a partial block that fall outside the image. */
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
          {
            code = (bits >> ((4*j+i)*2)) & 0x3;
            SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
            SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
            SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
            /* Extract alpha value */
            alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
            /* Codes 0/1 are the endpoints; the rest are interpolated.
               a0 > a1 selects 8-alpha mode, otherwise 6-alpha mode with
               explicit transparent (6) and opaque (7) codes. */
            if (alpha_code == 0)
              alpha = a0;
            else if (alpha_code == 1)
              alpha = a1;
            else if (a0 > a1)
              alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
            else if (alpha_code == 6)
              alpha = 0;
            else if (alpha_code == 7)
              alpha = 255;
            else
              alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
            SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
            q+=GetPixelChannels(image);
          }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode the base DXT5 (BC3) image, then either decode the mipmap chain or
  seek past it (16 bytes per 4x4 texel block).
*/
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
}
/*
  Decode uncompressed (non-alpha) pixel data row by row.  Supported layouts:
  8-bit grayscale, 16-bit R5G6B5, 24-bit BGR and 32-bit BGRX (pad byte
  discarded).  Returns MagickFalse on pixel-cache failure or premature EOF.
*/
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    x, y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8 ||
          dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G6R5_UNORM)
        {
          /* R5G6B5: isolate each field by shifting, then rescale to 0..255. */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* 24/32-bit data is stored in BGR order. */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          /* Discard the unused X byte of BGRX. */
          if (dds_info->pixelformat.rgb_bitcount == 32 ||
              dds_info->extFormat == DXGI_FORMAT_B8G8R8X8_UNORM)
            (void) ReadBlobByte(image);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
/*
  Seek past the mipmap chain of an uncompressed surface without decoding it.
  pixel_size is the byte size of one pixel.  Returns MagickFalse only on a
  premature end of file.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      ssize_t
        i;

      size_t
        h,
        w;

      /* Mipmap level 1 is half the base dimensions. */
      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Each uncompressed level stores w*h pixels of pixel_size bytes. */
        offset=(MagickOffsetType)w*h*pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
  Decode an uncompressed RGB (or grayscale) surface, then read or skip the
  mipmap chain.  Rejects 16-bit layouts other than R5G6B5.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (dds_info->pixelformat.rgb_bitcount == 8 ||
      dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
    dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);
  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
      exception));
  else
    /* NOTE(review): pixel size 3 assumes 24-bit data; for 8-, 16- and
       32-bit surfaces the skipped byte count looks off -- confirm whether
       mipmap skipping is intended to be approximate here. */
    return(SkipRGBMipmaps(image,dds_info,3,exception));
}
/*
  Decode uncompressed pixel data with alpha, row by row.  16-bit layouts are
  distinguished by their bit masks: A1R5G5B5 (alphaBits 1), 8-bit luminance
  plus 8-bit alpha (alphaBits 2) and A4R4G4B4 (alphaBits 4).  32-bit data is
  RGBA when the mask (or DX10 format) says so, otherwise BGRA.  Returns
  MagickFalse on pixel-cache failure or premature EOF.
*/
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  /* Classify the 16-bit layout once, up front. */
  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  if (dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
    alphaBits=1;
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* A1R5G5B5: top bit is a binary alpha flag. */
              SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* Luminance 8 + alpha 8. */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* A4R4G4B4: each 4-bit field rescaled to 0..255. */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else if (dds_info->extFormat == DXGI_FORMAT_R8G8B8A8_UNORM ||
          IsBitMask(dds_info->pixelformat,0x000000ff,0x0000ff00,0x00ff0000,0xff000000))
        {
          /* Bytes arrive in R, G, B, A order. */
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      else
        {
          /* Bytes arrive in B, G, R, A order. */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode an uncompressed RGBA surface, then either decode the mipmap chain
  or seek past it (4 bytes per pixel).
*/
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipRGBMipmaps(image,dds_info,4,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
    exception));
}
/*
  ReadDDSImage() reads a Microsoft DirectDraw Surface file: parses the
  header, selects a decoder for the pixel format (legacy FourCC, DX10
  extended header, or uncompressed masks), and decodes one frame per cube
  map face, volume slice or texture-array element.

  Fix: num_images is now initialized once, before pixel-format detection,
  so the DX10 texture-array size (extArraySize) is no longer clobbered by
  the unconditional `num_images = 1` that previously followed the format
  switch.
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse;
  volume=MagickFalse;
  read_mipmaps=MagickFalse;
  /*
    Default to a single frame; the DX10 array size, cube map faces or
    volume depth below may override this.
  */
  num_images=1;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;
  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;
  /*
    Determine pixel format
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        case FOURCC_DX10:
        {
          /* The DX10 extended header carries the real format. */
          if (dds_info.extDimension != DDSEXT_DIMENSION_TEX2D)
            {
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          switch (dds_info.extFormat)
          {
            case DXGI_FORMAT_R8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G6R5_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G5R5A1_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_R8G8B8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8X8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_BC1_UNORM:
            {
              alpha_trait = UndefinedPixelTrait;
              compression = DXT1Compression;
              decoder = ReadDXT1;
              break;
            }
            case DXGI_FORMAT_BC2_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT3Compression;
              decoder = ReadDXT3;
              break;
            }
            case DXGI_FORMAT_BC3_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT5Compression;
              decoder = ReadDXT5;
              break;
            }
            default:
            {
              /* Unknown format */
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          }
          if (dds_info.extFlags & DDSEXTFLAGS_CUBEMAP)
            cubemap = MagickTrue;
          /* Texture arrays store extArraySize frames back to back. */
          num_images = dds_info.extArraySize;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }
  if (volume)
    num_images = dds_info.depth;
  /* Sanity checks: a frame count of zero, or more frames than bytes in the
     file, indicates a corrupt header. */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");
  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;
  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }
    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        /* Ping mode: header information is enough, skip pixel decoding. */
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        /* Keep any frames decoded so far, unless the very first failed. */
        (void) CloseBlob(image);
        if (n == 0)
          return(DestroyImageList(image));
        return(GetFirstImageInList(image));
      }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  static const char
    *aliases[] = { "DDS", "DXT1", "DXT5" };

  MagickInfo
    *entry;

  ssize_t
    n;

  /*
    Register the DDS coder under each of its format aliases; all share the
    same reader, writer and magic-byte detector.
  */
  for (n=0; n < (ssize_t) (sizeof(aliases)/sizeof(aliases[0])); n++)
  {
    entry=AcquireMagickInfo("DDS",aliases[n],"Microsoft DirectDraw Surface");
    entry->decoder=(DecodeImageHandler *) ReadDDSImage;
    entry->encoder=(EncodeImageHandler *) WriteDDSImage;
    entry->magick=(IsImageFormatHandler *) IsDDS;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
  Scatter the 16 block indices in 'source' into 'target' through the pixel
  map; slots mapped to -1 (unused texels) receive index 3.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    n;

  for (n=0; n < 16; n++)
    target[n]=(map[n] == -1) ? 3 : source[map[n]];
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
/*
  Quantize the 16 alpha samples of a block against the 8-entry DXT5 alpha
  palette built from endpoints 'min'/'max' with 'steps' interpolation steps
  (5 or 7).  Writes the chosen palette index per sample into 'indices'
  (unused samples, marked -1, get index 0) and returns the total squared
  quantization error.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  size_t
    total;

  ssize_t
    k,
    n;

  unsigned char
    palette[8];

  /*
    Build the palette: the two endpoints, then (for the 5-step mode, where
    the loop below leaves them intact) the constant 0 and 255 codes, then
    the interpolated steps.
  */
  palette[0]=(unsigned char) min;
  palette[1]=(unsigned char) max;
  palette[6]=0;
  palette[7]=255;
  for (k=1; k < (ssize_t) steps; k++)
    palette[k+1]=(unsigned char) (((steps-k)*min+k*max)/steps);
  total=0;
  for (n=0; n < 16; n++)
  {
    size_t
      best_dist,
      best_index,
      delta,
      m,
      sample;

    if (alphas[n] == -1)
      {
        indices[n]=0;
        continue;
      }
    /* Pick the palette entry with the smallest squared distance; ties go
       to the lowest index.  Unsigned wrap-around in the difference is
       harmless because (-d)^2 == d^2 modulo 2^N. */
    sample=(size_t) alphas[n];
    best_dist=SIZE_MAX;
    best_index=0;
    for (m=0; m < 8; m++)
    {
      delta=sample-(size_t) palette[m];
      delta*=delta;
      if (delta < best_dist)
        {
          best_dist=delta;
          best_index=m;
        }
    }
    indices[n]=(unsigned char) best_index;
    total+=best_dist;
  }
  return total;
}
/*
  Sort the block's points by their projection onto 'axis' and record the
  resulting permutation in order[16*iteration].  Returns MagickFalse if the
  permutation duplicates one produced by an earlier iteration (no progress);
  otherwise fills pointsWeights with the weighted points in sorted order,
  accumulates their sum into xSumwSum, and returns MagickTrue.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  o = order + (16*iteration);
  /* Project each point onto the axis; start with the identity order. */
  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }
  /* Insertion-sort projections and permutation together (ascending). */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;
      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }
  /* Bail out if this ordering was already tried in a previous iteration. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;
    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
      {
        same = MagickFalse;
        break;
      }
    }
    if (same != MagickFalse)
      return MagickFalse;
  }
  /* Emit weighted points in sorted order and accumulate their sum. */
  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;
  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];
    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;
    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }
  return MagickTrue;
}
/*
  Cluster-fit DXT color compression: exhaustively partitions the (sorted)
  points into the four palette clusters (start, 2/3-1/3 mix, 1/3-2/3 mix,
  end), solves a weighted least-squares problem for the best endpoints of
  each partition, and keeps the pair with the lowest perceptual error.  Up
  to 8 re-sort iterations along the refined axis are attempted.  Outputs
  the quantized endpoints in *start/*end and per-texel indices.
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3* end,
  unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  /* Constants for the 1/3 and 2/3 interpolation weights; the .w lanes hold
     the squared weights used by the least-squares accumulation. */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);
  /* R5G6B5 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;
  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;
  /* Initial ordering along the principal axis. */
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    /* i, j, k are the partition boundaries: [0,i) -> start cluster,
       [i,j) -> 2/3 cluster, [j,k) -> 1/3 cluster, [k,count) -> end. */
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);
      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }
        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          /* part3 = sum of the remaining (end-cluster) weighted points. */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);
          /* Closed-form 2x2 least-squares solve for endpoints a and b. */
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);
          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);
          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);
          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);
          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);
          /* Snap both endpoints to the R5G6B5 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);
          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);
          /* Evaluate the squared error of this partition, per channel. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);
          error = e2.x + e2.y + e2.z;
          /* Double-checked update of the global best under a critical
             section (cheap pre-test outside, authoritative test inside). */
          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }
          if (k == count)
            break;
          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }
        if (j == count)
          break;
        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }
    /* Stop when this iteration produced no improvement. */
    if (bestIteration != iterationIndex)
      break;
    iterationIndex++;
    if (iterationIndex == 8)
      break;
    /* Re-sort along the refined endpoint axis and try again. */
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
        iterationIndex) == MagickFalse)
      break;
  }
  /* Translate the winning partition back into per-texel palette indices:
     0 = start, 2 = 2/3 mix, 3 = 1/3 mix, 1 = end. */
  o = order + (16*bestIteration);
  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;
  RemapIndices(map,unordered,indices);
}
/*
  Range-fit DXT color compression: take the extreme points along the
  principal axis as endpoints, snap them to the R5G6B5 grid, build the
  4-color palette, and assign each point to its nearest palette entry under
  the perceptual metric.  Faster but coarser than cluster fit.
*/
static void CompressRangeFit(const size_t count,
  const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);
  /* R5G6B5 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  if (count > 0)
    {
      /* Endpoints are the min/max projections onto the principal axis. */
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);
      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }
  /* Snap both endpoints to the R5G6B5 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);
  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);
  /* 4-color palette: endpoints plus the two 1/3-2/3 interpolants. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
  /* Assign each point to the metric-weighted nearest palette entry. */
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;
      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }
    closest[i] = (unsigned char) bestj;
  }
  RemapIndices(map, closest, indices);
}
/*
  Look up the best DXT endpoints for a single-color block via the
  precomputed per-channel tables.  Tries both table variants (i = 0, 1) and
  keeps the one with the lower summed squared error; *index reports which
  variant won (0 or 2).
*/
static void ComputeEndPoints(const DDSSingleColorLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  ssize_t
    i;

  size_t
    c,
    /* Despite the name, maxError tracks the smallest error seen so far;
       it starts at SIZE_MAX so the first candidate is always accepted. */
    maxError = SIZE_MAX;

  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

      size_t
        error = 0;

    /* Per-channel lookup (R, G, B); accumulate squared channel errors. */
    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }
    if (error > maxError)
      continue;
    /* Convert 5-bit (R, B) and 6-bit (G) table values to [0, 1] floats. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;
    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;
    *index = (unsigned char) (2*i);
    maxError = error;
  }
}
/*
  Estimate the principal eigenvector of the symmetric 3x3 covariance matrix
  (packed as 6 unique floats) by 8 rounds of power iteration, normalizing
  by the largest component each round.  Result in *principle.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;

  ssize_t
    i;

  /* Expand the packed symmetric matrix into three rows. */
  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;
  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;
  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;
  VectorInit(v,1.0f);
  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = M * v (matrix-vector product, expanded per component). */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;
    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;
    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;
    /* Normalize by the largest of x/y/z to keep the iteration stable. */
    a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));
    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }
  VectorCopy43(v,principle);
}
/*
  Compute the weighted 3x3 covariance of the points (weight in each point's
  .w lane) about their weighted centroid.  Output is the 6 unique entries
  of the symmetric matrix: xx, xy, xz, yy, yz, zz.
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    centroid;

  float
    total;

  size_t
    i;

  /* Weighted centroid of the point set. */
  total = 0.0f;
  VectorInit3(centroid,0.0f);
  for (i=0; i < count; i++)
  {
    total += points[i].w;
    centroid.x += (points[i].x * points[i].w);
    centroid.y += (points[i].y * points[i].w);
    centroid.z += (points[i].z * points[i].w);
  }
  /* Guard against division by a near-zero total weight. */
  if( total > 1.192092896e-07F)
    {
      centroid.x /= total;
      centroid.y /= total;
      centroid.z /= total;
    }
  for (i=0; i < 6; i++)
    covariance[i] = 0.0f;
  /* Accumulate the weighted outer products of the centered points. */
  for (i = 0; i < count; i++)
  {
    DDSVector3
      a,
      b;

    a.x = points[i].x - centroid.x;
    a.y = points[i].y - centroid.y;
    a.z = points[i].z - centroid.z;
    b.x = points[i].w * a.x;
    b.y = points[i].w * a.y;
    b.z = points[i].w * a.z;
    covariance[0] += a.x*b.x;
    covariance[1] += a.x*b.y;
    covariance[2] += a.x*b.z;
    covariance[3] += a.y*b.y;
    covariance[4] += a.y*b.z;
    covariance[5] += a.z*b.z;
  }
}
/*
  Write the 8-byte DXT5 alpha block for the 16 samples in 'alphas'.
  Compresses with both the 5-step and 7-step palettes, picks whichever has
  the lower error (remapping 7-mode indices and swapping the endpoints to
  select 5-mode encoding on disk), then emits the two endpoint bytes and
  the 16 packed 3-bit indices (6 bytes).
*/
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  /* Quantize with both palette modes and compare their total errors. */
  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);
  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);
  if (err7 < err5)
    {
      /* 7-mode won: translate its indices into the 5-mode index layout
         (endpoints swap, interpolants reverse) and swap the endpoints so
         the decoder selects the intended palette. */
      for (i=0; i < 16; i++)
      {
        unsigned char
          index;

        index = indices7[i];
        if( index == 0 )
          indices5[i] = 1;
        else if (index == 1)
          indices5[i] = 0;
        else
          indices5[i] = 9 - index;
      }
      min5 = max7;
      max5 = min7;
    }
  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);
  /* Pack 16 3-bit indices into two little-endian 24-bit groups. */
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }
    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}
/*
  Write one 8-byte DXT1 color block: two little-endian RGB565 endpoints
  followed by sixteen 2-bit palette indices (four per byte).

  In DXT1 the numeric order of the stored endpoint words selects the
  decoder mode, so when a < b the endpoints are swapped and each index
  has its low bit flipped (0<->1 and 2<->3) to keep the same palette
  selection; identical endpoints force every index to 0.
*/
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char *indices)
{
  ssize_t
    i;

  size_t
    a,
    b;

  unsigned char
    remapped[16];

  const unsigned char
    *ind;

  /* Quantize both endpoints to 16-bit RGB565. */
  a = ColorTo565(start);
  b = ColorTo565(end);
  for (i=0; i<16; i++)
  {
    if( a < b )
      remapped[i] = (indices[i] ^ 0x1) & 0x3;  /* endpoint swap: 0<->1, 2<->3 */
    else if( a == b )
      remapped[i] = 0;                         /* degenerate palette */
    else
      remapped[i] = indices[i];
  }
  if( a < b )
    Swap(a,b);
  /* Endpoint words, little-endian. */
  (void) WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));
  /* Four 2-bit indices per byte, texel 0 in the low bits. */
  for (i=0; i<4; i++)
  {
    ind = remapped + 4*i;
    (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
      (ind[3] << 6));
  }
}
/*
  Compress one 4x4 tile's color points and write the resulting DXT1
  color block.  Endpoints are fit along the principal axis of the
  weighted covariance, using either the fast range fit or the slower
  cluster fit.
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  DDSVector3
    axis,
    first,
    last;

  DDSVector4
    channel_weights;

  float
    cov[16];

  unsigned char
    codes[16];

  /* Uniform channel weighting; endpoints start at the origin. */
  VectorInit(channel_weights,1.0f);
  VectorInit3(first,0.0f);
  VectorInit3(last,0.0f);
  /* Principal axis of the weighted color distribution guides the fit. */
  ComputeWeightedCovariance(count,points,cov);
  ComputePrincipleComponent(cov,&axis);
  if ((clusterFit == MagickFalse) || (count == 0))
    CompressRangeFit(count,points,map,axis,channel_weights,&first,&last,
      codes);
  else
    CompressClusterFit(count,points,map,axis,channel_weights,&first,&last,
      codes);
  WriteIndices(image,first,last,codes);
}
/*
  Encode a 4x4 tile whose pixels all share a single color.  The color is
  quantized to 8 bits per channel and resolved through the precomputed
  single-color lookup table into the best RGB565 endpoint pair plus one
  palette index, which is replicated for all sixteen texels.
*/
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
  const ssize_t *map)
{
  DDSVector3
    start,
    end;

  ssize_t
    i;

  unsigned char
    color[3],
    index,
    indexes[16],
    indices[16];

  /* Quantize the (normalized 0..1) color to 8-bit channels. */
  color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
  /* Table lookup picks optimal endpoints and the shared palette index. */
  index=0;
  ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);
  for (i=0; i< 16; i++)
    indexes[i]=index;
  /* Map per-point indices back to texel order before writing. */
  RemapIndices(map,indexes,indices);
  WriteIndices(image,start,end,indices);
}
/*
  Block-compress the whole image as DXT1 or DXT5 (selected by
  `compression') and write the result to the blob.

  The image is walked in 4x4 tiles.  For each tile the distinct colors
  are collected (duplicates merge, accumulating weight), alpha extrema
  are tracked for the DXT5 alpha block, and the tile is then emitted via
  WriteAlphas/WriteSingleColorFit/WriteCompressed.
*/
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  ssize_t
    x;

  ssize_t
    i,
    y,
    bx,
    by;

  const Quantum
    *p;

  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t) image->columns; x+=4)
    {
      MagickBooleanType
        match;

      DDSVector4
        point,
        points[16];

      size_t
        count = 0,
        max5 = 0,
        max7 = 0,
        min5 = 255,
        min7 = 255,
        columns = 4,
        rows = 4;

      ssize_t
        alphas[16],
        map[16];

      unsigned char
        alpha;

      /* Clamp the tile at the right/bottom image edges. */
      if (x + columns >= image->columns)
        columns = image->columns - x;
      if (y + rows >= image->rows)
        rows = image->rows - y;
      p=GetVirtualPixels(image,x,y,columns,rows,exception);
      if (p == (const Quantum *) NULL)
        break;
      /* -1 marks texels outside a clamped tile / unset alpha slots. */
      for (i=0; i<16; i++)
      {
        map[i] = -1;
        alphas[i] = -1;
      }
      for (by=0; by < (ssize_t) rows; by++)
      {
        for (bx=0; bx < (ssize_t) columns; bx++)
        {
          if (compression == FOURCC_DXT5)
            alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
          else
            alpha = 255;
          if (compression == FOURCC_DXT5)
          {
            /*
              Track alpha extrema for both DXT5 alpha encodings: the
              7-interpolant range uses the true min/max, the
              5-interpolant range excludes 0 and 255 (those have
              explicit codes).
            */
            if (alpha < min7)
              min7 = alpha;
            if (alpha > max7)
              max7 = alpha;
            if (alpha != 0 && alpha < min5)
              min5 = alpha;
            if (alpha != 255 && alpha > max5)
              max5 = alpha;
          }
          alphas[4*by + bx] = (size_t)alpha;
          /* Normalize the color to 0..1; weight by alpha if requested. */
          point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
          point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
          point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
          point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
          p+=GetPixelChannels(image);
          /*
            Merge with an existing identical color, accumulating its
            weight.  NOTE(review): for DXT1 a merge additionally requires
            alpha >= 128; mostly-transparent texels always start a new
            entry -- confirm against the intended DXT1 punch-through
            handling.
          */
          match = MagickFalse;
          for (i=0; i < (ssize_t) count; i++)
          {
            if ((points[i].x == point.x) &&
                (points[i].y == point.y) &&
                (points[i].z == point.z) &&
                (alpha >= 128 || compression == FOURCC_DXT5))
              {
                points[i].w += point.w;
                map[4*by + bx] = i;
                match = MagickTrue;
                break;
              }
          }
          if (match != MagickFalse)
            continue;
          /* New distinct color for this tile. */
          points[count].x = point.x;
          points[count].y = point.y;
          points[count].z = point.z;
          points[count].w = point.w;
          map[4*by + bx] = count;
          count++;
        }
      }
      /* Compress the dynamic range of the accumulated weights. */
      for (i=0; i < (ssize_t) count; i++)
        points[i].w = sqrt(points[i].w);
      if (compression == FOURCC_DXT5)
        WriteAlphas(image,alphas,min5,max5,min7,max7);
      if (count == 1)
        WriteSingleColorFit(image,points,map);
      else
        WriteCompressed(image,count,points,map,clusterFit);
    }
  }
}
/*
  Write the image as raw uncompressed pixels, one scanline at a time,
  in BGR byte order with a trailing alpha byte when the image has an
  alpha channel.  Stops early if a scanline cannot be fetched.
*/
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  const Quantum
    *q;

  ssize_t
    column,
    row;

  for (row=0; row < (ssize_t) image->rows; row++)
  {
    q=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      /* DDS stores uncompressed pixels blue-first. */
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,q)));
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,q)));
      q+=GetPixelChannels(image);
    }
  }
}
/*
  Dispatch pixel writing on the pixel format: DDPF_FOURCC selects block
  compression (DXT1/DXT5), anything else is written as raw pixels.
*/
static void WriteImageData(Image *image, const size_t pixelFormat,
  const size_t compression,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
  if (pixelFormat != DDPF_FOURCC)
    {
      WriteUncompressed(image,exception);
      return;
    }
  WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
}
/*
  Write `mipmaps' successively halved levels after the base image.

  Levels are either resized on the fly from the base image (the default)
  or taken from the trailing images of the list when fromlist is set, in
  which case each list image must match the expected level dimensions.
  Each level's blob is redirected to the base image's blob so all levels
  land in one output stream.  With "dds:fast-mipmaps" each level is
  resized from the previous level instead of from the base image.

  Returns MagickTrue on success, MagickFalse if a resize fails (or the
  exception thrown by ThrowBinaryException on a size mismatch).
*/
static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info,
  const size_t pixelFormat,const size_t compression,const size_t mipmaps,
  const MagickBooleanType fromlist,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha,ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *mipmap_image,
    *resize_image;

  MagickBooleanType
    fast_mipmaps,
    status;

  ssize_t
    i;

  size_t
    columns,
    rows;

  columns=DIV2(image->columns);
  rows=DIV2(image->rows);
  option=GetImageOption(image_info,"dds:fast-mipmaps");
  fast_mipmaps=IsStringTrue(option);
  mipmap_image=image;
  resize_image=image;
  status=MagickTrue;
  for (i=0; i < (ssize_t) mipmaps; i++)
  {
    if (fromlist == MagickFalse)
      {
        mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter,
          exception);
        if (mipmap_image == (Image *) NULL)
          {
            status=MagickFalse;
            break;
          }
      }
    else
      {
        /* Caller-supplied levels must already have the right geometry. */
        mipmap_image=mipmap_image->next;
        if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows))
          ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported",
            image->filename);
      }
    /* Redirect this level's writes into the base image's blob. */
    DestroyBlob(mipmap_image);
    mipmap_image->blob=ReferenceBlob(image->blob);
    /*
      Bug fix: clusterFit and weightByAlpha were previously passed in
      swapped order; WriteImageData() takes clusterFit before
      weightByAlpha, and both are MagickBooleanType so the compiler
      could not catch the transposition.
    */
    WriteImageData(mipmap_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);
    if (fromlist == MagickFalse)
      {
        if (fast_mipmaps == MagickFalse)
          mipmap_image=DestroyImage(mipmap_image);
        else
          {
            /* Keep this level as the source for the next resize. */
            if (resize_image != image)
              resize_image=DestroyImage(resize_image);
            resize_image=mipmap_image;
          }
      }
    columns=DIV2(columns);
    rows=DIV2(rows);
  }
  if (resize_image != image)
    resize_image=DestroyImage(resize_image);
  return(status);
}
/*
  Write the 128-byte DDS file header: the "DDS " magic followed by a
  124-byte DDS_HEADER (flags, dimensions, pitch/linear size, mipmap
  count, pixel format, caps).  All multi-byte fields are little-endian.
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MagickPathExtent];

  ssize_t
    i;

  unsigned int
    format,
    caps,
    flags;

  /* Mandatory header flags plus the texture capability bit. */
  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;

  if (format == DDPF_FOURCC)
    flags=flags | DDSD_LINEARSIZE;  /* compressed: total size of level 0 */
  else
    flags=flags | DDSD_PITCH;       /* uncompressed: byte pitch of a row */

  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }

  if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
    format=format | DDPF_ALPHAPIXELS;

  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  (void) WriteBlobLSBLong(image,124);  /* dwSize of DDS_HEADER */
  (void) WriteBlobLSBLong(image,flags);
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);     /* height */
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns);  /* width */

  if (pixelFormat == DDPF_FOURCC)
    {
      /* Compressed DDS requires linear compressed size of first image */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /* Uncompressed DDS requires byte pitch of first image */
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
      else
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
    }

  (void) WriteBlobLSBLong(image,0x00);  /* dwDepth (volume textures only) */
  (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);  /* incl. base */
  /* 44-byte dwReserved1 region; carries the writer's name, zero padded. */
  (void) memset(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
  (void) WriteBlob(image,44,(unsigned char *) software);
  (void) WriteBlobLSBLong(image,32);  /* DDS_PIXELFORMAT dwSize */
  (void) WriteBlobLSBLong(image,format);

  if (pixelFormat == DDPF_FOURCC)
    {
      (void) WriteBlobLSBLong(image,(unsigned int) compression);
      for(i=0;i < 5;i++) /* bitcount / masks */
        (void) WriteBlobLSBLong(image,0x00);
    }
  else
    {
      (void) WriteBlobLSBLong(image,0x00);  /* no FourCC */
      /* RGB bit count plus per-channel bit masks (BGR(A) layout). */
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          (void) WriteBlobLSBLong(image,32);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0xff000000);
        }
      else
        {
          (void) WriteBlobLSBLong(image,24);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0x00);
        }
    }

  (void) WriteBlobLSBLong(image,caps);
  for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
    (void) WriteBlobLSBLong(image,0x00);
}
/*
  Write `image' as a DDS file.

  Defaults to FourCC/DXT5 (DXT1 when the image has no alpha channel);
  options "dds:compression" (dxt1|none), the "dxt1" magick, and
  image_info->compression can override this.  "dds:cluster-fit" enables
  the slower cluster fit, and (for DXT5) "dds:weight-by-alpha" weights
  colors by their alpha.  "dds:mipmaps" either gives a mipmap count,
  or "fromlist" to take levels from the trailing images of the list;
  automatic mipmap generation only happens for power-of-two dimensions.
  "dds:raw" suppresses the DDS header (and mipmaps).

  Returns MagickTrue on success.
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image, ExceptionInfo *exception)
{
  const char
    *option;

  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;

  MagickBooleanType
    clusterFit,
    fromlist,
    status,
    weightByAlpha;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /* Pick the output format: DXT5 by default, DXT1 without alpha. */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;

  if (image->alpha_trait == UndefinedPixelTrait)
    compression=FOURCC_DXT1;

  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;

  if (image_info->compression == DXT1Compression)
    compression=FOURCC_DXT1;
  else if (image_info->compression == NoCompression)
    pixelFormat=DDPF_RGB;

  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }

  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;

  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          /* Alpha weighting only applies to formats that keep alpha. */
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }

  mipmaps=0;
  fromlist=MagickFalse;
  option=GetImageOption(image_info,"dds:mipmaps");
  if (option != (char *) NULL)
    {
      /* "fromlist": each subsequent image in the list is one level. */
      if (LocaleNCompare(option,"fromlist",8) == 0)
        {
          Image
            *next;

          fromlist=MagickTrue;
          next=image->next;
          while(next != (Image *) NULL)
          {
            mipmaps++;
            next=next->next;
          }
        }
    }

  /* Auto mipmaps only when both dimensions are powers of two. */
  if ((mipmaps == 0) &&
      ((image->columns & (image->columns - 1)) == 0) &&
      ((image->rows & (image->rows - 1)) == 0))
    {
      maxMipmaps=SIZE_MAX;
      /* NOTE: `option' still holds the "dds:mipmaps" value here, so a
         numeric option caps the generated level count. */
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);

      if (maxMipmaps != 0)
        {
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }

  option=GetImageOption(image_info,"dds:raw");
  if (IsStringTrue(option) == MagickFalse)
    WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  else
    mipmaps=0;

  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    exception);

  if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression,
       mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
    return(MagickFalse);

  (void) CloseBlob(image);
  return(MagickTrue);
}
|
matflip.c | #include "matrix.h"
/*
  Return a copy of A with the column order reversed (left-right mirror).
  If result is NULL a fresh matrix of the same shape is allocated; on
  allocation failure mat_error(MAT_MALLOC) is returned.  Rows are
  processed in parallel with OpenMP.
*/
MATRIX mat_fliplr(MATRIX A, MATRIX result)
{
    int row, col, ncols, nrows;

    ncols = MatCol(A);
    nrows = MatRow(A);
    if (result == NULL)
    {
        result = mat_creat(MatRow(A), MatCol(A), UNDEFINED);
        if (result == NULL)
            return mat_error(MAT_MALLOC);
    }
    #pragma omp parallel for private(col)
    for (row = 0; row < nrows; ++row)
        for (col = 0; col < ncols; ++col)
            result[row][col] = A[row][ncols - 1 - col];
    return result;
}
/*
  Return a copy of A with the row order reversed (up-down mirror).
  If result is NULL a fresh matrix of the same shape is allocated; on
  allocation failure mat_error(MAT_MALLOC) is returned.  Rows are
  processed in parallel with OpenMP.
*/
MATRIX mat_flipud(MATRIX A, MATRIX result)
{
    int row, col, ncols, nrows;

    ncols = MatCol(A);
    nrows = MatRow(A);
    if (result == NULL)
    {
        result = mat_creat(MatRow(A), MatCol(A), UNDEFINED);
        if (result == NULL)
            return mat_error(MAT_MALLOC);
    }
    #pragma omp parallel for private(col)
    for (row = 0; row < nrows; ++row)
        for (col = 0; col < ncols; ++col)
            result[row][col] = A[nrows - 1 - row][col];
    return result;
}
|
entrega.c | #include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
/*
  C = A + B for little-endian base-10 digit arrays of length N (least
  significant digit first).  A carry out of the top digit is reported
  as overflow on stdout.  C may alias A or B.
*/
void add (int A[], int B[], int C[], int N) {
  int pos, digit, carry = 0;

  for (pos = 0; pos < N; pos++) {
    digit = A[pos] + B[pos] + carry;
    if (digit >= 10) {
      carry = 1;
      digit -= 10;
    } else
      carry = 0;
    C[pos] = digit;
  }
  if (carry) printf ("overflow in addition!\n");
}
/*
  B = n * A for a little-endian base-10 digit array of length N.  Each
  product keeps its low decimal digit and propagates the rest as carry;
  a carry out of the top digit is reported as overflow on stdout.
*/
void multiply_one_digit (int A[], int B[], int n, int N) {
  int pos, carry = 0;

  for (pos = 0; pos < N; pos++) {
    B[pos] = n * A[pos];
    B[pos] += carry;
    carry = (B[pos] >= 10) ? B[pos] / 10 : 0;
    if (carry)
      B[pos] %= 10;
  }
  if (carry) printf ("overflow in multiplication!\n");
}
/*
  Shift the little-endian digit array A (length N) up by n positions,
  i.e. multiply the represented number by 10^n.  The vacated low digits
  become zero; the top n digits are discarded.
*/
void shift_left (int A[], int n, int N) {
  int pos;

  for (pos = N - 1; pos >= n; pos--)
    A[pos] = A[pos - n];
  for (; pos >= 0; pos--)
    A[pos] = 0;
}
/*
  C += A * B by schoolbook long multiplication on little-endian digit
  arrays of length N.  The caller must pass C zero-initialized, since
  partial products are accumulated into it.
*/
void multiply (int A[], int B[], int C[], int N) {
  int digit, partial[N];

  for (digit = 0; digit < N; digit++) {
    multiply_one_digit (B, partial, A[digit], N);  /* partial = A[digit] * B */
    shift_left (partial, digit, N);                /* align with digit position */
    add (C, partial, C, N);                        /* accumulate running sum */
  }
}
/*
  Multiply two decimal numbers given as command-line digit strings.
  The product is computed twice: serially with multiply(), and in
  parallel, where each OpenMP thread accumulates the partial products of
  its strided digit positions into its own row of D, after which thread 0
  folds all rows into E.  Digits are stored little-endian (least
  significant first); output is printed most significant first.

  Fixes vs. the original: `main' now has an explicit int return type and
  returns 0; the strcpy targets are len+1 bytes (the terminating NUL
  previously overflowed them); the parallel initialization no longer
  writes E[0..N*nthreads-1] (overflowing E, which has N slots) nor races
  on shared elements of D; missing arguments are diagnosed.
*/
int main(int argc, char **argv)
{
    if (argc < 3) {
        fprintf(stderr, "usage: %s <number> <number>\n", argv[0]);
        return 1;
    }
    int len1 = strlen(argv[1]);
    printf("%d\n", len1);
    int len2 = strlen(argv[2]);
    int N = len1 + len2;   /* the product has at most len1+len2 digits */
    int A[N], B[N], C[N];
    for (int i = 0; i < N; i++) {
        A[i] = 0;
        B[i] = 0;
        C[i] = 0;
    }
    /* len+1: strcpy also copies the terminating NUL. */
    char k[len1 + 1];
    strcpy(k, argv[1]);
    for (int i = 0; i < len1; i++) {
        A[i] = k[len1 - 1 - i] - '0';   /* reverse into little-endian digits */
    }
    char l[len2 + 1];
    strcpy(l, argv[2]);
    for (int i = 0; i < len2; i++) {
        B[i] = l[len2 - 1 - i] - '0';
    }
    /* ---- serial reference ---- */
    multiply(A, B, C, N);
    printf("---SECUENCIAL---\n");
    printf("A [ ");
    for (int loop = N - 1; loop >= 0; loop--)
        printf("%d ", A[loop]);
    printf("]\n");
    printf("B [ ");
    for (int loop = N - 1; loop >= 0; loop--)
        printf("%d ", B[loop]);
    printf("]\nC [ ");
    for (int loop = N - 1; loop >= 0; loop--)
        printf("%d ", C[loop]);
    printf("]\n");
    /* ---- parallel version ---- */
    int E[N];
    for (int i = 0; i < N; i++)
        E[i] = 0;
    printf("---PARALELO---\n");
    printf("A [ ");
    for (int loop = N - 1; loop >= 0; loop--)
        printf("%d ", A[loop]);
    printf("]\n");
    printf("B [ ");
    for (int loop = N - 1; loop >= 0; loop--)
        printf("%d ", B[loop]);
    printf("]\n");
    omp_set_dynamic(0);
    omp_set_num_threads(4);
    int D[4 * N];   /* one N-digit accumulator row per requested thread */
    int n, i, carry, j, sum, P[N], tid, nthreads;
    #pragma omp parallel private(i, n, carry, j, sum, P, tid)
    {
        nthreads = omp_get_num_threads();  /* every thread stores the same value */
        tid = omp_get_thread_num();
        /* Each thread zeroes only its own row of D; E was zeroed above.
           (The original loop also wrote E[0..N*nthreads-1], overflowing
           E, and had every thread write the whole of D.) */
        for (i = tid * N; i < (tid + 1) * N; i++)
            D[i] = 0;
        #pragma omp barrier
        for (i = tid; i < N; i = i + nthreads) {
            n = A[i];
            /* Start from defined values so the debug dump below does not
               read uninitialized memory. */
            for (j = 0; j < N; j++)
                P[j] = 0;
            if (tid == 0) {
                printf("\nthread %d i %d n %d\n", tid, i, n);
                printf("Pbefore [ ");
                for (int loop = N - 1; loop >= 0; loop--)
                    printf("%d ", P[loop]);
                printf("]\n");
            }
            /* P = n * B (single-digit multiply, as in multiply_one_digit) */
            carry = 0;
            for (j = 0; j < N; j++) {
                P[j] = n * B[j];
                if (tid == 0)
                    printf("\nPJ %d n %d BJ %d\n", P[j], n, B[j]);
                P[j] += carry;
                if (P[j] >= 10) {
                    carry = P[j] / 10;
                    P[j] %= 10;
                } else
                    carry = 0;
            }
            if (carry) printf ("overflow in multiplication!\n");
            if (tid == 0) {
                printf("\nthread %d i %d n %d\n", tid, i, n);
                printf("PSH0 [ ");
                for (int loop = N - 1; loop >= 0; loop--)
                    printf("%d ", P[loop]);
                printf("]\n");
            }
            /* Shift the partial product left i digit positions. */
            for (j = N - 1; j >= i; j--) P[j] = P[j - i];
            while (j >= 0) P[j--] = 0;
            if (tid == 0) {
                printf("\nthread %d i %d n %d\n", tid, i, n);
                printf("P0 [ ");
                for (int loop = N - 1; loop >= 0; loop--)
                    printf("%d ", P[loop]);
                printf("]\n");
            }
            /* Add P into this thread's private row of D. */
            carry = 0;
            sum = 0;
            for (j = 0; j < N; j++) {
                sum = D[tid * N + j] + P[j] + carry;
                if (sum >= 10) {
                    carry = 1;
                    sum -= 10;
                } else
                    carry = 0;
                D[tid * N + j] = sum;
            }
            if (carry) printf ("overflow in addition!\n");
        }
        #pragma omp barrier
        /* Thread 0 folds the per-thread rows into E and prints the result. */
        if (tid == 0) {
            printf("D [ ");
            for (int loop = N * nthreads - 1; loop >= 0; loop--)
                printf("%d ", D[loop]);
            printf("]\n");
            for (int r = 0; r < nthreads; r++) {
                carry = 0;
                sum = 0;
                for (j = 0; j < N; j++) {
                    sum = E[j] + D[r * N + j] + carry;
                    if (sum >= 10) {
                        carry = 1;
                        sum -= 10;
                    } else
                        carry = 0;
                    E[j] = sum;
                }
                if (carry) printf ("overflow in addition!\n");
            }
            printf("E [ ");
            for (int loop = N - 1; loop >= 0; loop--)
                printf("%d ", E[loop]);
            printf("]\n");
        }
    }
    return 0;
}
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/*
  Compute *result = *x - *y for struct timeval values, normalizing the
  microsecond field so that result->tv_usec is non-negative.  The
  normalization borrows seconds into *y, so *y is modified as a side
  effect.  Returns 1 when the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Push any excess microseconds in the difference back into y. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int excess = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * excess;
        y->tv_sec -= excess;
    }
    /* tv_usec is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
  Driver for the time-tiled order-2 3D 25-point stencil: allocates two
  time planes of the field A plus the coefficient grid roc2, runs the
  PLUTO/CLooG-generated diamond-tiled sweep TESTS times, and reports the
  best wall-clock time.  Usage: Nx Ny Nz [Nt] on the command line.
*/
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz (and Nt) remain uninitialized when too few
     arguments are supplied; they are used unconditionally below. */
  if (argc > 3) {
    /* +8 adds the four-deep halo on each side of every axis. */
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Two time planes of the field plus the coefficient grid. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**));

  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  /* NOTE(review): this reassignment leaks the one-element block that
     roc2 was given at its declaration above. */
  roc2 = (double ***) malloc(sizeof(double**)*Nz);

  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  /* Tile sizes for the (t, z, y, x) dimensions; -1 terminates the list. */
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 32;
  tile_size[3] = 512;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  /* NOTE(review): loops start at index 1, so plane/row/column 0 is never
     initialized even though the stencil can read index 0 at the low
     boundary (center index 4 minus radius 4). */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* Order-2 (radius-4) 25-point stencil coefficients. */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.
   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.
   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0. */
/* We do not support C11 <threads.h>. */

    /* Diamond/hybrid-tiled time-space loop nest generated by PLUTO/CLooG:
       t1 iterates time tiles, t2 (parallelized across threads) z-tiles,
       t3 y-tiles, t4 x-tiles, t5 the time steps inside a tile, and
       t6/t7/t8 the z/y/x points; A is double-buffered via t5 % 2. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=floord(Nt-1,2);t1++) {
    lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
    ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32));t3<=min(min(min(floord(4*Nt+Ny-9,32),floord(8*t1+Ny+7,32)),floord(16*t2+Ny+3,32)),floord(16*t1-16*t2+Nz+Ny+5,32));t3++) {
        for (t4=max(max(max(0,ceild(t1-63,64)),ceild(16*t2-Nz-499,512)),ceild(32*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(8*t1+Nx+7,512)),floord(16*t2+Nx+3,512)),floord(32*t3+Nx+19,512)),floord(16*t1-16*t2+Nz+Nx+5,512));t4++) {
          for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),8*t3+6),128*t4+126);t5++) {
            for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) {
                lbv=max(512*t4,4*t5+4);
                ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);

    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);

  return 0;
}
|
jacobi-task.c | # include "poisson.h"
/* #pragma omp task/taskwait version of SWEEP: runs itnew-itold Jacobi
   iterations of the Poisson solve on an nx-by-ny grid, spawning one
   task per grid row and synchronizing between the copy and the update
   phases.  NOTE(review): block_size is accepted but never used in this
   variant. */
void sweep_task (int nx, int ny, double dx, double dy, double *f_,
                 int itold, int itnew, double *u_, double *unew_, int block_size)
{
    int i;
    int it;
    int j;
    /* Reinterpret the flat buffers as nx-by-ny 2-D arrays (C99 VLA types). */
    double (*f)[nx][ny] = (double (*)[nx][ny])f_;
    double (*u)[nx][ny] = (double (*)[nx][ny])u_;
    double (*unew)[nx][ny] = (double (*)[nx][ny])unew_;

    /* One thread generates tasks; the rest of the team executes them. */
    #pragma omp parallel shared (f, u, unew) private (i, it, j) firstprivate(nx, ny, dx, dy, itold, itnew)
    #pragma omp single
    {
        for (it = itold + 1; it <= itnew; it++) {
            // Save the current estimate.
            for (i = 0; i < nx; i++) {
                #pragma omp task firstprivate(i, ny) private(j) shared(u, unew)
                for (j = 0; j < ny; j++) {
                    (*u)[i][j] = (*unew)[i][j];
                }
            }

            /* All row copies must finish before the update reads u. */
            #pragma omp taskwait

            // Compute a new estimate.
            for (i = 0; i < nx; i++) {
                #pragma omp task firstprivate(i, dx, dy, nx, ny) private(j) shared(u, unew, f)
                for (j = 0; j < ny; j++) {
                    if (i == 0 || j == 0 || i == nx - 1 || j == ny - 1) {
                        /* Boundary points take the right-hand-side value. */
                        (*unew)[i][j] = (*f)[i][j];
                    } else {
                        /* Interior: average of four neighbors plus source term. */
                        (*unew)[i][j] = 0.25 * ((*u)[i-1][j] + (*u)[i][j+1]
                                              + (*u)[i][j-1] + (*u)[i+1][j]
                                              + (*f)[i][j] * dx * dy);
                    }
                }
            }

            #pragma omp taskwait
        }
    }
}
|
mandelbrot.c | /*
To compile:
gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
module load gcc
make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
./mandelbrot 4096 4096 1
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"
// Q2a: add include for OpenMP header file here:
#include <omp.h>
#define MXITER 1000
typedef struct {
double r;
double i;
}complex_t;
// Iterate z -> z^2 + c starting from z = c and return the number of
// iterations (capped at MXITER) performed before |z|^2 exceeds 4,
// i.e. before the orbit provably escapes the Mandelbrot set.
int testpoint(complex_t c){
  complex_t z;
  int n;

  z = c;
  for(n = 0; n < MXITER; n++){
    // next.r uses the pre-update z.r, so compute it into a scratch first
    double next_r = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = 2.*z.r*z.i + c.i;
    z.r = next_r;
    if((z.r*z.r + z.i*z.i) > 4.0){
      return n;
    }
  }
  return n;
}
// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  int n,m;

  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);;

  // Q2c: split the outer loop amongst threads.
  // Bug fix: `c` was previously declared at function scope and therefore
  // shared by all threads (only `m` was privatized), a data race that
  // corrupts the computed grid.  Declaring it inside the loop body makes
  // it private to each iteration/thread.
#pragma omp parallel for private(m)
  for(n=0;n<Nim;++n){
    for(m=0;m<Nre;++m){
      complex_t c;
      c.r = cmin.r + dr*m;
      c.i = cmin.i + di*n;
      count[m+n*Nre] = testpoint(c);
    }
  }
}
// Render the Mandelbrot set to mandelbrot.png.
// usage: ./mandelbrot <Nre> <Nim> <Nthreads>, e.g. ./mandelbrot 4096 4096 1
int main(int argc, char **argv){
  // Fixes vs. the original: missing-argument and allocation/fopen failures
  // are now diagnosed instead of crashing, the bogus `atoi(argv[0])`
  // (parsing the program name) is gone, and the output file is closed and
  // the pixel buffer freed before a single `return 0`.
  if(argc < 4){
    fprintf(stderr, "usage: %s Nre Nim Nthreads\n", argv[0]);
    return 1;
  }
  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[3]);

  // Q2b: set the number of OpenMP threads to be Nthreads here:
  omp_set_num_threads(Nthreads);

  // storage for the iteration counts
  float *count = (float*) malloc(Nre*Nim*sizeof(float));
  if(count == NULL){
    fprintf(stderr, "failed to allocate %d x %d count array\n", Nre, Nim);
    return 1;
  }

  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm= .125547;
  const float diam  = 0.151579;

  complex_t cmin;
  complex_t cmax;
  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;

  // Q2d: read time before calling mandelbrot with OpenMP wall clock time
  double start = omp_get_wtime();

  // compute mandelbrot set
  mandelbrot(Nre, Nim, cmin, cmax, count);

  // Q2d: read time after calling mandelbrot using OpenMP wall clock time
  double end = omp_get_wtime();

  // print elapsed time
  printf("elapsed = %g\n", end-start);

  // output mandelbrot to png format image
  FILE *fp = fopen("mandelbrot.png", "w");
  if(fp == NULL){
    fprintf(stderr, "failed to open mandelbrot.png for writing\n");
    free(count);
    return 1;
  }
  write_hot_png(fp, Nre, Nim, count, 0, 80);
  fclose(fp);
  free(count);

  return 0;
}
|
pi_par.c | /*
This program will numerically compute the integral of
4/(1+x*x)
from 0 to 1. The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.
The program was parallelized using OpenMP by adding just
four lines
(1) A line to include omp.h -- the include file that
contains OpenMP's function prototypes and constants.
(2) A pragma that tells OpenMP to create a team of threads
(3) A pragma to cause one of the threads to print the
number of threads being used by the program.
(4) A pragma to split up loop iterations among the team
of threads. This pragma includes 2 clauses to (1) create a
private variable and (2) to cause the threads to compute their
sums locally and then combine their local sums into a
single global value.
History: Written by Tim Mattson, 11/99.
#---------------------------------------------------------------
Modified by JGG to use threads equal to the number of processors.
SoC 2015.
#-----------------------------------------------------------------
*/
#include <stdio.h>
#include <omp.h>
static long num_steps = 100000000;
double step;
int main ()
{
int i, nprocs;
double x, pi, sum = 0.0;
double start_time, run_time;
step = 1.0/(double) num_steps;
/* Use double of system processors (threads) */
nprocs=2*omp_get_num_procs();
/*Computes pi for each number of threads*/
for (i=1;i<=nprocs;i++){
sum = 0.0;
omp_set_num_threads(i);
start_time = omp_get_wtime();
#pragma omp parallel
{
#pragma omp single
printf(" num_threads = %d",omp_get_num_threads());
#pragma omp for reduction(+:sum) private(x)
for (i=1;i<= num_steps; i++){
x = (i-0.5)*step;
sum = sum + 4.0/(1.0+x*x);
}
}
pi = step * sum;
run_time = omp_get_wtime() - start_time;
printf("\n pi is %f in %f seconds and %d threads\n",pi,run_time,i);
}
}
|
declare_reduction_codegen.c | // RUN: %clang_cc1 -verify -fopenmp -x c -emit-llvm %s -triple %itanium_abi_triple -o - -femit-all-decls -disable-llvm-passes | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes
// RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix=CHECK-LOAD %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK: [[SSS_INT:.+]] = type { i32 }
// CHECK-LOAD: [[SSS_INT:.+]] = type { i32 }
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias)
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: store i32 [[MUL]], i32*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias)
// CHECK-LOAD: [[MUL:%.+]] = mul nsw i32
// CHECK-LOAD-NEXT: store i32 [[MUL]], i32*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias, i8* noalias)
// CHECK: sext i8
// CHECK: sext i8
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-NEXT: store i8 [[TRUNC]], i8*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias, i8* noalias)
// CHECK-LOAD: sext i8
// CHECK-LOAD: sext i8
// CHECK-LOAD: [[MUL:%.+]] = mul nsw i32
// CHECK-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-LOAD-NEXT: store i8 [[TRUNC]], i8*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
#pragma omp declare reduction(fun : float : omp_out += omp_in) initializer(omp_priv = 15 + omp_orig)
// CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias, float* noalias)
// CHECK: [[ADD:%.+]] = fadd float
// CHECK-NEXT: store float [[ADD]], float*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias, float* noalias)
// CHECK: [[ADD:%.+]] = fadd float 1.5
// CHECK-NEXT: store float [[ADD]], float*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias, float* noalias)
// CHECK-LOAD: [[ADD:%.+]] = fadd float
// CHECK-LOAD-NEXT: store float [[ADD]], float*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias, float* noalias)
// CHECK-LOAD: [[ADD:%.+]] = fadd float 1.5
// CHECK-LOAD-NEXT: store float [[ADD]], float*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// Aggregate used to exercise '#pragma omp declare reduction' at class scope;
// the member-scope pragma redeclares the int/char '+' reduction, and the
// CHECK lines below pin the combiner functions codegen emits for it.
struct SSS {
  int field;
#pragma omp declare reduction(+ : int, char : omp_out *= omp_in)
// CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias)
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: store i32 [[MUL]], i32*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias, i8* noalias)
// CHECK: sext i8
// CHECK: sext i8
// CHECK: [[MUL:%.+]] = mul nsw i32
// CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-NEXT: store i8 [[TRUNC]], i8*
// CHECK-NEXT: ret void
// CHECK-NEXT: }
};
void init(struct SSS *priv, struct SSS orig);
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias)
// CHECK: call void @llvm.memcpy
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias)
// CHECK: call void @init(
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias)
// CHECK-LOAD: call void @llvm.memcpy
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias)
// CHECK-LOAD: call void @init(
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LABEL: @main
// CHECK-LOAD-LABEL: @main
int main() {
  // Redeclaration of 'fun' for struct SSS at function scope; codegen must
  // emit a fresh combiner/initializer pair for this scope as well.
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias)
// CHECK: call void @llvm.memcpy
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias)
// CHECK: call void @init(
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias)
// CHECK-LOAD: call void @llvm.memcpy
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias)
// CHECK-LOAD: call void @init(
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
  {
    // And once more in a nested block scope: each scope gets its own copy.
#pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig))
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias)
// CHECK: call void @llvm.memcpy
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias)
// CHECK: call void @init(
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias)
// CHECK-LOAD: call void @llvm.memcpy
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias)
// CHECK-LOAD: call void @init(
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
  }
  return 0;
}
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias)
// CHECK-LOAD: [[MUL:%.+]] = mul nsw i32
// CHECK-LOAD-NEXT: store i32 [[MUL]], i32*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
// CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias, i8* noalias)
// CHECK-LOAD: sext i8
// CHECK-LOAD: sext i8
// CHECK-LOAD: [[MUL:%.+]] = mul nsw i32
// CHECK-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8
// CHECK-LOAD-NEXT: store i8 [[TRUNC]], i8*
// CHECK-LOAD-NEXT: ret void
// CHECK-LOAD-NEXT: }
#endif
|
thr_omp.h | /* -*- c++ -*- -------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------
Contributing author: Axel Kohlmeyer (Temple U)
------------------------------------------------------------------------- */
#ifndef LMP_THR_OMP_H
#define LMP_THR_OMP_H
#include "pointers.h"
#include "error.h"
#include "fix_omp.h"
#include "thr_data.h"
namespace LAMMPS_NS {
// forward declarations
class Pair;
class Bond;
class Angle;
class Dihedral;
class Improper;
class KSpace;
class Fix;
// Mix-in base class for OpenMP-threaded LAMMPS styles: holds a back-reference
// to the owning LAMMPS instance and its FixOMP, and provides the shared
// thread-synchronization, error-signalling, and per-thread tally helpers.
class ThrOMP {

 protected:
  LAMMPS *lmp; // reference to base lammps object.
  FixOMP *fix; // pointer to fix_omp;

  const int thr_style;  // THR_* category of the derived style (see enum below)
  int thr_error;        // becomes > 0 once any thread signals an error

 public:
  ThrOMP(LAMMPS *, int);
  virtual ~ThrOMP();

  double memory_usage_thr();

  // Barrier for all threads of the team (no-op in a serial build).
  inline void sync_threads() {
#if defined(_OPENMP)
#pragma omp barrier
#endif
    { ; }
  };

  // style categories, stored as individual bits so they can be combined
  enum {THR_NONE=0,THR_PAIR=1,THR_BOND=1<<1,THR_ANGLE=1<<2,
        THR_DIHEDRAL=1<<3,THR_IMPROPER=1<<4,THR_KSPACE=1<<5,
        THR_CHARMM=1<<6, /*THR_PROXY=1<<7,THR_HYBRID=1<<8, */
        THR_FIX=1<<9,THR_INTGR=1<<10};

 protected:
  // extra ev_tally setup work for threaded styles
  void ev_setup_thr(int, int, int, double *, double **, ThrData *);

  // compute global per thread virial contribution from per-thread force
  void virial_fdotr_compute_thr(double * const, const double * const * const,
                                const double * const * const,
                                const int, const int, const int);

  // reduce per thread data as needed
  void reduce_thr(void * const style, const int eflag, const int vflag,
                  ThrData * const thr);

  // thread safe variant error abort support.
  // signals an error condition in any thread by making
  // thr_error > 0, if condition "cond" is true.
  // will abort from thread 0 if thr_error is > 0
  // otherwise return true.
  // returns false if no error found on any thread.
  // use return value to jump/return to end of threaded region.
  bool check_error_thr(const bool cond, const int tid, const char *fname,
                       const int line, const char *errmsg) {
    if (cond) {
#if defined(_OPENMP)
#pragma omp atomic
      ++thr_error;
#endif
      // only thread 0 may abort; other threads just report back
      if (tid > 0) return true;
      else lmp->error->one(fname,line,errmsg);
    } else {
      if (thr_error > 0) {
        if (tid == 0) lmp->error->one(fname,line,errmsg);
        else return true;
      } else return false;
    }
    return false;
  };

 protected:
  // threading adapted versions of the ev_tally infrastructure
  // style specific versions (need access to style class flags)

  // Pair
  void e_tally_thr(Pair * const, const int, const int, const int,
                   const int, const double, const double, ThrData * const);
  void v_tally_thr(Pair * const, const int, const int, const int,
                   const int, const double * const, ThrData * const);

  void ev_tally_thr(Pair * const, const int, const int, const int, const int,
                    const double, const double, const double, const double,
                    const double, const double, ThrData * const);
  void ev_tally_xyz_thr(Pair * const, const int, const int, const int,
                        const int, const double, const double, const double,
                        const double, const double, const double,
                        const double, const double, ThrData * const);
  void ev_tally_xyz_full_thr(Pair * const, const int, const double, const double,
                             const double, const double, const double,
                             const double, const double, const double, ThrData * const);
  void ev_tally3_thr(Pair * const, const int, const int, const int, const double,
                     const double, const double * const, const double * const,
                     const double * const, const double * const, ThrData * const);
  void ev_tally4_thr(Pair * const, const int, const int, const int, const int,
                     const double, const double * const, const double * const,
                     const double * const, const double * const, const double * const,
                     const double * const, ThrData * const);

  // Bond
  void ev_tally_thr(Bond * const, const int, const int, const int, const int,
                    const double, const double, const double, const double,
                    const double, ThrData * const);

  // Angle
  void ev_tally_thr(Angle * const, const int, const int, const int, const int, const int,
                    const double, const double * const, const double * const,
                    const double, const double, const double, const double, const double,
                    const double, ThrData * const thr);
  void ev_tally13_thr(Angle * const, const int, const int, const int, const int,
                      const double, const double, const double, const double,
                      const double, ThrData * const thr);

  // Dihedral
  void ev_tally_thr(Dihedral * const, const int, const int, const int, const int, const int,
                    const int, const double, const double * const, const double * const,
                    const double * const, const double, const double, const double,
                    const double, const double, const double, const double, const double,
                    const double, ThrData * const);

  // Improper
  void ev_tally_thr(Improper * const, const int, const int, const int, const int, const int,
                    const int, const double, const double * const, const double * const,
                    const double * const, const double, const double, const double,
                    const double, const double, const double, const double, const double,
                    const double, ThrData * const);

  // style independent versions
  void v_tally2_thr(const int, const int, const double, const double * const, ThrData * const);
  void v_tally3_thr(const int, const int, const int, const double * const, const double * const,
                    const double * const, const double * const, ThrData * const);
  void v_tally4_thr(const int, const int, const int, const int, const double * const,
                    const double * const, const double * const, const double * const,
                    const double * const, const double * const, ThrData * const);
  void ev_tally_list_thr(Pair * const, const int, const int * const,
                         const double * const, const double, const double,
                         ThrData * const);

};
// set loop range thread id, and force array offset for threaded runs.
// Compute this thread's id and its [ifrom, ito) slice of `inum` iterations.
// Each thread owns one fixed contiguous chunk; in a serial build the single
// "thread" covers the whole range.
static inline void loop_setup_thr(int &ifrom, int &ito, int &tid,
                                  int inum, int nthreads)
{
#if defined(_OPENMP)
  tid = omp_get_thread_num();

  const int chunk = inum / nthreads + 1;
  ifrom = tid * chunk;
  ito = ifrom + chunk;
  if (ito > inum) ito = inum;   // last chunk may be short
#else
  tid = 0;
  ifrom = 0;
  ito = inum;
#endif
}
// helpful definitions to help compilers optimizing code better
// (plain-old-data tuple layouts; presumably used to reinterpret contiguous
//  per-atom arrays as packed records -- TODO confirm against call sites)
typedef struct { double x,y,z; } dbl3_t;
typedef struct { double x,y,z,w; } dbl4_t;
typedef struct { int a,b,t; } int3_t;
typedef struct { int a,b,c,t; } int4_t;
typedef struct { int a,b,c,d,t; } int5_t;
}
#endif
|
symmetry.c | /* Copyright (C) 2008 Atsushi Togo */
/* All rights reserved. */
/* This file is part of spglib. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cell.h"
#include "delaunay.h"
#include "mathfunc.h"
#include "symmetry.h"
#include "debug.h"
#define NUM_ATOMS_CRITERION_FOR_OPENMP 1000
#define ANGLE_REDUCE_RATE 0.95
#define NUM_ATTEMPT 100
#define PI 3.14159265358979323846
/* Tolerance of angle between lattice vectors in degrees */
/* Negative value invokes converter from symprec. */
static double angle_tolerance = -1.0;
static int relative_axes[][3] = {
{ 1, 0, 0},
{ 0, 1, 0},
{ 0, 0, 1},
{-1, 0, 0},
{ 0,-1, 0}, /* 5 */
{ 0, 0,-1},
{ 0, 1, 1},
{ 1, 0, 1},
{ 1, 1, 0},
{ 0,-1,-1}, /* 10 */
{-1, 0,-1},
{-1,-1, 0},
{ 0, 1,-1},
{-1, 0, 1},
{ 1,-1, 0}, /* 15 */
{ 0,-1, 1},
{ 1, 0,-1},
{-1, 1, 0},
{ 1, 1, 1},
{-1,-1,-1}, /* 20 */
{-1, 1, 1},
{ 1,-1, 1},
{ 1, 1,-1},
{ 1,-1,-1},
{-1, 1,-1}, /* 25 */
{-1,-1, 1},
};
static int identity[3][3] = {{1, 0, 0},
{0, 1, 0},
{0, 0, 1}};
static int get_index_with_least_atoms(const Cell *cell);
static VecDBL * get_translation(SPGCONST int rot[3][3],
SPGCONST Cell *cell,
const double symprec,
const int is_identity);
static Symmetry * get_operations(SPGCONST Cell *primitive,
const double symprec,
const double angle_symprec);
static Symmetry * reduce_operation(SPGCONST Cell * primitive,
SPGCONST Symmetry * symmetry,
const double symprec,
const double angle_symprec);
static int search_translation_part(int lat_point_atoms[],
SPGCONST Cell * cell,
SPGCONST int rot[3][3],
const int min_atom_index,
const double origin[3],
const double symprec,
const int is_identity);
static int is_overlap_all_atoms(const double test_trans[3],
SPGCONST int rot[3][3],
SPGCONST Cell * cell,
const double symprec,
const int is_identity);
static PointSymmetry
transform_pointsymmetry(SPGCONST PointSymmetry * point_sym_prim,
SPGCONST double new_lattice[3][3],
SPGCONST double original_lattice[3][3]);
static Symmetry *
get_space_group_operations(SPGCONST PointSymmetry *lattice_sym,
SPGCONST Cell *primitive,
const double symprec);
static void set_axes(int axes[3][3],
const int a1, const int a2, const int a3);
static PointSymmetry get_lattice_symmetry(SPGCONST double cell_lattice[3][3],
const double symprec,
const double angle_symprec);
static int is_identity_metric(SPGCONST double metric_rotated[3][3],
SPGCONST double metric_orig[3][3],
const double symprec,
const double angle_symprec);
static double get_angle(SPGCONST double metric[3][3],
const int i,
const int j);
/* Return NULL if failed */
/* Allocate a Symmetry object with room for `size` (rotation, translation) */
/* pairs. The caller owns the result and releases it with                  */
/* sym_free_symmetry(). Return NULL if failed.                             */
Symmetry * sym_alloc_symmetry(const int size)
{
  Symmetry *symmetry = NULL;

  if (size < 1) {
    return NULL;
  }

  symmetry = (Symmetry*) malloc(sizeof(Symmetry));
  if (symmetry == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    return NULL;
  }

  symmetry->size = size;
  symmetry->rot = NULL;
  symmetry->trans = NULL;

  symmetry->rot = (int (*)[3][3]) malloc(sizeof(int[3][3]) * size);
  if (symmetry->rot == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
    goto fail_rot;
  }

  symmetry->trans = (double (*)[3]) malloc(sizeof(double[3]) * size);
  if (symmetry->trans == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
    goto fail_trans;
  }

  return symmetry;

  /* unwind partial allocation on failure */
fail_trans:
  free(symmetry->rot);
  symmetry->rot = NULL;
fail_rot:
  free(symmetry);
  return NULL;
}
/* Release a Symmetry object created by sym_alloc_symmetry().        */
/* Robustness fix: accept NULL (like free()) instead of dereferencing */
/* it, so error paths can call this unconditionally.                  */
void sym_free_symmetry(Symmetry *symmetry)
{
  if (symmetry == NULL) {
    return;
  }
  if (symmetry->size > 0) {
    free(symmetry->rot);
    symmetry->rot = NULL;
    free(symmetry->trans);
    symmetry->trans = NULL;
  }
  free(symmetry);
}
/* Return NULL if failed */
/* Public wrapper: find the space-group operations of `primitive`, */
/* forwarding the file-static angle_tolerance setting.             */
Symmetry * sym_get_operation(SPGCONST Cell * primitive,
                             const double symprec)
{
  debug_print("sym_get_operations:\n");

  return get_operations(primitive, symprec, angle_tolerance);
}

/* Return NULL if failed */
/* Public wrapper around reduce_operation(), again forwarding the */
/* file-static angle_tolerance.                                   */
Symmetry * sym_reduce_operation(SPGCONST Cell * primitive,
                                SPGCONST Symmetry * symmetry,
                                const double symprec)
{
  return reduce_operation(primitive, symmetry, symprec, angle_tolerance);
}
/* Return NULL if failed */
/* Find all pure translations of `cell`, i.e. the translations compatible */
/* with the identity rotation. Caller owns the returned VecDBL.           */
/* Fixes: stray empty statement removed; divisibility test rewritten with */
/* the modulo operator instead of (size/multi)*multi == size.             */
VecDBL * sym_get_pure_translation(SPGCONST Cell *cell,
                                  const double symprec)
{
  int multi;
  VecDBL * pure_trans;

  debug_print("sym_get_pure_translation (tolerance = %f):\n", symprec);

  multi = 0;
  pure_trans = NULL;

  if ((pure_trans = get_translation(identity, cell, symprec, 1)) == NULL) {
    warning_print("spglib: get_translation failed (line %d, %s).\n",
                  __LINE__, __FILE__);
    return NULL;
  }

  multi = pure_trans->size;
  /* the number of pure translations must divide the cell size evenly */
  if (cell->size % multi == 0) {
    debug_print(" sym_get_pure_translation: pure_trans->size = %d\n", multi);
  } else {
    warning_print("spglib: Finding pure translation failed (line %d, %s).\n", __LINE__, __FILE__);
    warning_print(" cell->size %d, multi %d\n", cell->size, multi);
  }

  return pure_trans;
}
/* Return NULL if failed */
/* Reduce `pure_trans` to the translations consistent with the symmetry of */
/* `cell`, by temporarily wrapping each one as an identity-rotation        */
/* operation and running it through reduce_operation(). Caller owns the    */
/* returned VecDBL.                                                        */
VecDBL * sym_reduce_pure_translation(SPGCONST Cell * cell,
                                     const VecDBL * pure_trans,
                                     const double symprec)
{
  int i, multi;
  Symmetry *symmetry, *symmetry_reduced;
  VecDBL * pure_trans_reduced;

  symmetry = NULL;
  symmetry_reduced = NULL;
  pure_trans_reduced = NULL;

  multi = pure_trans->size;

  /* Wrap each pure translation as {identity, translation}. */
  if ((symmetry = sym_alloc_symmetry(multi)) == NULL) {
    return NULL;
  }
  for (i = 0; i < multi; i++) {
    mat_copy_matrix_i3(symmetry->rot[i], identity);
    mat_copy_vector_d3(symmetry->trans[i], pure_trans->vec[i]);
  }

  if ((symmetry_reduced =
       reduce_operation(cell, symmetry, symprec, angle_tolerance)) == NULL) {
    sym_free_symmetry(symmetry);
    symmetry = NULL;
    return NULL;
  }
  /* the temporary wrapper is no longer needed */
  sym_free_symmetry(symmetry);
  symmetry = NULL;

  /* Unwrap the surviving operations back into a plain translation list. */
  multi = symmetry_reduced->size;
  if ((pure_trans_reduced = mat_alloc_VecDBL(multi)) == NULL) {
    sym_free_symmetry(symmetry_reduced);
    symmetry_reduced = NULL;
    return NULL;
  }
  for (i = 0; i < multi; i++) {
    mat_copy_vector_d3(pure_trans_reduced->vec[i], symmetry_reduced->trans[i]);
  }
  sym_free_symmetry(symmetry_reduced);
  symmetry_reduced = NULL;

  return pure_trans_reduced;
}
/* Set the global tolerance (in degrees) used when comparing lattice-vector */
/* angles; a negative value switches to the criterion derived from symprec  */
/* (see is_identity_metric()).                                              */
void sym_set_angle_tolerance(double tolerance)
{
  angle_tolerance = tolerance;
}

/* Return the current global angle tolerance (negative = derived mode). */
double sym_get_angle_tolerance(void)
{
  return angle_tolerance;
}
/* 1) Pointgroup operations of the primitive cell are obtained. */
/* These are constrained by the input cell lattice pointgroup, */
/* i.e., even if the lattice of the primitive cell has higher */
/* symmetry than that of the input cell, it is not considered. */
/* 2) Spacegroup operations are searched for the primitive cell */
/* using the constrained point group operations. */
/* 3) The spacegroup operations for the primitive cell are */
/* transformed to those of original input cells, if the input cell */
/* was not a primitive cell. */
/* Find the space-group operations of the primitive cell: first the   */
/* lattice point group, then the compatible translations per rotation. */
/* Return NULL if failed.                                              */
static Symmetry * get_operations(SPGCONST Cell *primitive,
                                 const double symprec,
                                 const double angle_symprec)
{
  PointSymmetry lattice_sym;

  debug_print("get_operations:\n");

  lattice_sym = get_lattice_symmetry(primitive->lattice,
                                     symprec,
                                     angle_symprec);
  if (lattice_sym.size == 0) {
    return NULL;          /* no lattice point group found */
  }

  /* NULL propagates to the caller on failure */
  return get_space_group_operations(&lattice_sym, primitive, symprec);
}
/* Return NULL if failed */
/* Keep only the operations of `symmetry` whose rotation part belongs to  */
/* the lattice point group of `primitive` and whose full operation maps   */
/* every atom onto an equivalent atom.                                    */
static Symmetry * reduce_operation(SPGCONST Cell * primitive,
                                   SPGCONST Symmetry * symmetry,
                                   const double symprec,
                                   const double angle_symprec)
{
  int i, j, num_sym;
  Symmetry * sym_reduced;
  PointSymmetry point_symmetry;
  MatINT *rot;
  VecDBL *trans;

  debug_print("reduce_operation:\n");

  sym_reduced = NULL;
  rot = NULL;
  trans = NULL;

  /* The lattice point group constrains which rotations are admissible. */
  point_symmetry = get_lattice_symmetry(primitive->lattice,
                                        symprec,
                                        angle_symprec);
  if (point_symmetry.size == 0) {
    return NULL;
  }

  /* Scratch buffers sized for the worst case (every operation kept). */
  if ((rot = mat_alloc_MatINT(symmetry->size)) == NULL) {
    return NULL;
  }
  if ((trans = mat_alloc_VecDBL(symmetry->size)) == NULL) {
    mat_free_MatINT(rot);
    rot = NULL;
    return NULL;
  }

  num_sym = 0;
  for (i = 0; i < point_symmetry.size; i++) {
    for (j = 0; j < symmetry->size; j++) {
      /* keep operation j only if its rotation matches a point-group */
      /* rotation and the operation maps all atoms onto equivalents  */
      if (mat_check_identity_matrix_i3(point_symmetry.rot[i],
                                       symmetry->rot[j])) {
        if (is_overlap_all_atoms(symmetry->trans[j],
                                 symmetry->rot[j],
                                 primitive,
                                 symprec,
                                 0)) {
          mat_copy_matrix_i3(rot->mat[num_sym], symmetry->rot[j]);
          mat_copy_vector_d3(trans->vec[num_sym], symmetry->trans[j]);
          num_sym++;
        }
      }
    }
  }

  /* Copy the survivors into a right-sized Symmetry object. */
  if ((sym_reduced = sym_alloc_symmetry(num_sym)) != NULL) {
    for (i = 0; i < num_sym; i++) {
      mat_copy_matrix_i3(sym_reduced->rot[i], rot->mat[i]);
      mat_copy_vector_d3(sym_reduced->trans[i], trans->vec[i]);
    }
  }

  mat_free_MatINT(rot);
  rot = NULL;
  mat_free_VecDBL(trans);
  trans = NULL;

  return sym_reduced;
}
/* Look for the translations which satisfy the input symmetry operation. */
/* This function is heaviest in this code. */
/* Return NULL if failed */
static VecDBL * get_translation(SPGCONST int rot[3][3],
                                SPGCONST Cell *cell,
                                const double symprec,
                                const int is_identity)
{
  int i, j, k, min_atom_index, num_trans;
  int *is_found;
  double origin[3];
  VecDBL *trans;

  debug_print("get_translation (tolerance = %f):\n", symprec);

  num_trans = 0;
  is_found = NULL;
  trans = NULL;

#ifdef _OPENMP
  /* extra state used only by the parallel path below */
  int num_min_type_atoms;
  int *min_type_atoms;
  double vec[3];
  min_type_atoms = NULL;
#endif

  /* is_found[i] == 1 marks atom i as generating a valid translation. */
  if ((is_found = (int*) malloc(sizeof(int)*cell->size)) == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    return NULL;
  }

  for (i = 0; i < cell->size; i++) {
    is_found[i] = 0;
  }

  /* Look for the atom index with least number of atoms within same type */
  /* (smallest candidate set gives the fewest translation trials).       */
  min_atom_index = get_index_with_least_atoms(cell);
  if (min_atom_index == -1) {
    debug_print("spglib: get_index_with_least_atoms failed.\n");
    goto ret;
  }

  /* Set min_atom_index as the origin to measure the distance between atoms. */
  mat_multiply_matrix_vector_id3(origin, rot, cell->position[min_atom_index]);

#ifdef _OPENMP
  if (cell->size < NUM_ATOMS_CRITERION_FOR_OPENMP) {
    /* small cell: threading overhead is not worthwhile, search serially */
    num_trans = search_translation_part(is_found,
                                        cell,
                                        rot,
                                        min_atom_index,
                                        origin,
                                        symprec,
                                        is_identity);
    if (num_trans == 0) {
      goto ret;
    }
  } else {
    /* Collect indices of atoms with the type where the minimum number */
    /* of atoms belong. */
    if ((min_type_atoms = (int*) malloc(sizeof(int)*cell->size)) == NULL) {
      warning_print("spglib: Memory could not be allocated ");
      goto ret;
    }
    num_min_type_atoms = 0;
    for (i = 0; i < cell->size; i++) {
      if (cell->types[i] == cell->types[min_atom_index]) {
        min_type_atoms[num_min_type_atoms] = i;
        num_min_type_atoms++;
      }
    }
    /* each iteration writes a distinct is_found element, so no locking */
#pragma omp parallel for private(j, vec)
    for (i = 0; i < num_min_type_atoms; i++) {
      for (j = 0; j < 3; j++) {
        vec[j] = cell->position[min_type_atoms[i]][j] - origin[j];
      }
      if (is_overlap_all_atoms(vec,
                               rot,
                               cell,
                               symprec,
                               is_identity)) {
        is_found[min_type_atoms[i]] = 1;
      }
    }
    free(min_type_atoms);
    min_type_atoms = NULL;
    /* count the successes after the parallel region completes */
    for (i = 0; i < cell->size; i++) {
      num_trans += is_found[i];
    }
  }
#else
  num_trans = search_translation_part(is_found,
                                      cell,
                                      rot,
                                      min_atom_index,
                                      origin,
                                      symprec,
                                      is_identity);
  if (num_trans == 0) {
    goto ret;
  }
#endif

  /* Copy out the found translations, reduced by the nearest lattice vector. */
  if ((trans = mat_alloc_VecDBL(num_trans)) == NULL) {
    goto ret;
  }
  k = 0;
  for (i = 0; i < cell->size; i++) {
    if (is_found[i]) {
      for (j = 0; j < 3; j++) {
        trans->vec[k][j] = cell->position[i][j] - origin[j];
        trans->vec[k][j] -= mat_Nint(trans->vec[k][j]);
      }
      k++;
    }
  }

ret:
  free(is_found);
  is_found = NULL;

  /* NULL means failure or no translation found */
  return trans;
}
/* Serial translation search: try the vector from the origin to every atom  */
/* of the reference type; mark matches in lat_point_atoms[] and return how  */
/* many were found.                                                         */
static int search_translation_part(int lat_point_atoms[],
                                   SPGCONST Cell * cell,
                                   SPGCONST int rot[3][3],
                                   const int min_atom_index,
                                   const double origin[3],
                                   const double symprec,
                                   const int is_identity)
{
  int i, j;
  int num_trans = 0;
  double vec[3];

  for (i = 0; i < cell->size; i++) {
    /* only atoms of the same type as the reference atom are candidates */
    if (cell->types[i] != cell->types[min_atom_index]) {
      continue;
    }

    for (j = 0; j < 3; j++) {
      vec[j] = cell->position[i][j] - origin[j];
    }

    if (is_overlap_all_atoms(vec, rot, cell, symprec, is_identity)) {
      lat_point_atoms[i] = 1;
      num_trans++;
    }
  }

  return num_trans;
}
/* Check whether the operation (rot, trans) maps every atom onto an atom  */
/* of the same type within `symprec` (Cartesian distance). Returns 1 when */
/* all atoms overlap, 0 otherwise.                                        */
/* Idiom fix: the goto/label pair is replaced by a direct `return 0`.     */
static int is_overlap_all_atoms(const double trans[3],
                                SPGCONST int rot[3][3],
                                SPGCONST Cell * cell,
                                const double symprec,
                                const int is_identity)
{
  int i, j, k, is_found;
  double symprec2;
  double pos_rot[3], d_frac[3], d[3];

  /* compare squared distances to avoid a sqrt per atom pair */
  symprec2 = symprec * symprec;

  for (i = 0; i < cell->size; i++) {
    if (is_identity) { /* Identity matrix is treated as special for speed-up. */
      for (j = 0; j < 3; j++) {
        pos_rot[j] = cell->position[i][j] + trans[j];
      }
    } else {
      mat_multiply_matrix_vector_id3(pos_rot,
                                     rot,
                                     cell->position[i]);
      for (j = 0; j < 3; j++) {
        pos_rot[j] += trans[j];
      }
    }

    is_found = 0;
    for (j = 0; j < cell->size; j++) {
      if (cell->types[i] == cell->types[j]) {
        /* here cel_is_overlap can be used, but for the tuning */
        /* purpose, write it again */
        for (k = 0; k < 3; k++) {
          d_frac[k] = pos_rot[k] - cell->position[j][k];
          d_frac[k] -= mat_Nint(d_frac[k]);  /* nearest-image reduction */
        }
        mat_multiply_matrix_vector_d3(d, cell->lattice, d_frac);
        if (d[0] * d[0] + d[1] * d[1] + d[2] * d[2] < symprec2) {
          is_found = 1;
          break;
        }
      }
    }

    if (! is_found) {
      return 0; /* atom i has no image: not a symmetry operation */
    }
  }

  return 1; /* every atom found an overlapping partner */
}
/* Return the index of the first atom belonging to the least-populated   */
/* atom type (its count is credited to the first atom of each type), or  */
/* -1 on allocation failure.                                             */
static int get_index_with_least_atoms(const Cell *cell)
{
  int i, j, min_count, min_index;
  int *type_count = NULL;

  type_count = (int *) malloc(sizeof(int) * cell->size);
  if (type_count == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    return -1;
  }

  for (i = 0; i < cell->size; i++) {
    type_count[i] = 0;
  }

  /* credit every atom to the first atom sharing its type */
  for (i = 0; i < cell->size; i++) {
    for (j = 0; j < cell->size; j++) {
      if (cell->types[i] == cell->types[j]) {
        type_count[j]++;
        break;
      }
    }
  }

  /* pick the representative (count > 0) with the smallest population */
  min_count = type_count[0];
  min_index = 0;
  for (i = 0; i < cell->size; i++) {
    if (type_count[i] > 0 && type_count[i] < min_count) {
      min_count = type_count[i];
      min_index = i;
    }
  }

  free(type_count);
  return min_index;
}
/* Return NULL if failed */
/* For every lattice point-group rotation, search the compatible        */
/* translations and flatten the resulting (rotation, translation) pairs */
/* into one Symmetry object.                                            */
static Symmetry *
get_space_group_operations(SPGCONST PointSymmetry *lattice_sym,
                           SPGCONST Cell *primitive,
                           const double symprec)
{
  int i, j, num_sym, total_num_sym;
  VecDBL **trans;
  Symmetry *symmetry;

  debug_print("get_space_group_operations (tolerance = %f):\n", symprec);

  trans = NULL;
  symmetry = NULL;

  /* one translation list per candidate rotation (NULL when none found) */
  if ((trans = (VecDBL**) malloc(sizeof(VecDBL*) * lattice_sym->size))
      == NULL) {
    warning_print("spglib: Memory could not be allocated ");
    return NULL;
  }

  for (i = 0; i < lattice_sym->size; i++) {
    trans[i] = NULL;
  }

  /* first pass: find translations and count the total operations */
  total_num_sym = 0;
  for (i = 0; i < lattice_sym->size; i++) {
    if ((trans[i] = get_translation(lattice_sym->rot[i], primitive, symprec, 0))
        != NULL) {
      debug_print(" match translation %d/%d; tolerance = %f\n",
                  i + 1, lattice_sym->size, symprec);
      total_num_sym += trans[i]->size;
    }
  }

  if ((symmetry = sym_alloc_symmetry(total_num_sym)) == NULL) {
    goto ret;
  }

  /* second pass: flatten the pairs into `symmetry` */
  num_sym = 0;
  for (i = 0; i < lattice_sym->size; i++) {
    if (trans[i] == NULL) {
      continue;
    }
    for (j = 0; j < trans[i]->size; j++) {
      mat_copy_vector_d3(symmetry->trans[num_sym + j], trans[i]->vec[j]);
      mat_copy_matrix_i3(symmetry->rot[num_sym + j], lattice_sym->rot[i]);
    }
    num_sym += trans[i]->size;
  }

ret:
  /* free the per-rotation translation lists in all cases */
  for (i = 0; i < lattice_sym->size; i++) {
    if (trans[i] != NULL) {
      mat_free_VecDBL(trans[i]);
      trans[i] = NULL;
    }
  }
  free(trans);
  trans = NULL;

  return symmetry;
}
/* Enumerate the point-group operations of the lattice by testing all     */
/* unimodular matrices built from the 26 candidate axis directions. A     */
/* result with size == 0 signals failure.                                 */
static PointSymmetry get_lattice_symmetry(SPGCONST double cell_lattice[3][3],
                                          const double symprec,
                                          const double angle_symprec)
{
  int i, j, k, attempt, num_sym;
  double angle_tol;
  int axes[3][3];
  double lattice[3][3], min_lattice[3][3];
  double metric[3][3], metric_orig[3][3];
  PointSymmetry lattice_sym;

  debug_print("get_lattice_symmetry:\n");

  lattice_sym.size = 0;

  /* work on the Delaunay-reduced lattice for numerical stability */
  if (! del_delaunay_reduce(min_lattice, cell_lattice, symprec)) {
    goto err;
  }

  mat_get_metric(metric_orig, min_lattice);
  angle_tol = angle_symprec;

  /* A lattice point group has at most 48 operations; when more are found */
  /* the angle tolerance is too loose and is tightened for a retry.       */
  for (attempt = 0; attempt < NUM_ATTEMPT; attempt++) {
    num_sym = 0;
    for (i = 0; i < 26; i++) {
      for (j = 0; j < 26; j++) {
        for (k = 0; k < 26; k++) {
          set_axes(axes, i, j, k);
          /* only unimodular (det = +/-1) matrices map the lattice onto itself */
          if (! ((mat_get_determinant_i3(axes) == 1) ||
                 (mat_get_determinant_i3(axes) == -1))) {
            continue;
          }
          mat_multiply_matrix_di3(lattice, min_lattice, axes);
          mat_get_metric(metric, lattice);

          /* a rotation is a symmetry iff it leaves the metric tensor invariant */
          if (is_identity_metric(metric, metric_orig, symprec, angle_tol)) {
            if (num_sym > 47) {
              angle_tol *= ANGLE_REDUCE_RATE;
              warning_print("spglib: Too many lattice symmetries was found.\n");
              warning_print(" Reduce angle tolerance to %f", angle_tol);
              warning_print(" (line %d, %s).\n", __LINE__, __FILE__);
              goto next_attempt;
            }
            mat_copy_matrix_i3(lattice_sym.rot[num_sym], axes);
            num_sym++;
          }
        }
      }
    }

    if (num_sym < 49 || angle_tol < 0) {
      lattice_sym.size = num_sym;
      /* map the operations back from the reduced lattice to the input one */
      return transform_pointsymmetry(&lattice_sym, cell_lattice, min_lattice);
    }

  next_attempt:
    ;
  }

err:
  debug_print("get_lattice_symmetry failed.\n");
  /* lattice_sym.size stays 0 to signal failure */
  return lattice_sym;
}
/* Compare a rotated metric tensor against the original: basis-vector   */
/* lengths must agree within symprec and the three inter-axial angles   */
/* within the angle criterion. Returns 1 when identical, 0 otherwise.   */
static int is_identity_metric(SPGCONST double metric_rotated[3][3],
                              SPGCONST double metric_orig[3][3],
                              const double symprec,
                              const double angle_symprec)
{
  int i, j, k;
  /* index pairs of the three inter-axial angles (ab, ac, bc) */
  int elem_sets[3][2] = {{0, 1},
                         {0, 2},
                         {1, 2}};
  double cos1, cos2, x, length_ave2, sin_dtheta2;
  double length_orig[3], length_rot[3];

  /* basis-vector lengths are the sqrt of the metric diagonal */
  for (i = 0; i < 3; i++) {
    length_orig[i] = sqrt(metric_orig[i][i]);
    length_rot[i] = sqrt(metric_rotated[i][i]);
    if (mat_Dabs(length_orig[i] - length_rot[i]) > symprec) {
      goto fail;
    }
  }

  for (i = 0; i < 3; i++) {
    j = elem_sets[i][0];
    k = elem_sets[i][1];
    if (angle_symprec > 0) {
      /* explicit angle tolerance: compare the angles (degrees) directly */
      if (mat_Dabs(get_angle(metric_orig, j, k) -
                   get_angle(metric_rotated, j, k)) > angle_symprec) {
        goto fail;
      }
    } else {
      /* no explicit tolerance: convert the angle deviation into a */
      /* length deviation and compare it against symprec           */
      /* dtheta = arccos(cos(theta1) - arccos(cos(theta2))) */
      /* = arccos(c1) - arccos(c2) */
      /* = arccos(c1c2 + sqrt((1-c1^2)(1-c2^2))) */
      /* sin(dtheta) = sin(arccos(x)) = sqrt(1 - x^2) */
      cos1 = metric_orig[j][k] / length_orig[j] / length_orig[k];
      cos2 = metric_rotated[j][k] / length_rot[j] / length_rot[k];
      /* NOTE(review): if rounding pushes |cos1| or |cos2| slightly above 1, */
      /* the sqrt arguments go negative and x becomes NaN; both comparisons  */
      /* below are then false and the check passes vacuously -- confirm the  */
      /* metrics stay well-conditioned, or consider clamping the cosines.    */
      x = cos1 * cos2 + sqrt(1 - cos1 * cos1) * sqrt(1 - cos2 * cos2);
      sin_dtheta2 = 1 - x * x;
      length_ave2 = ((length_orig[j] + length_rot[j]) *
                     (length_orig[k] + length_rot[k])) / 4;
      if (sin_dtheta2 > 1e-12) {
        if (sin_dtheta2 * length_ave2 > symprec * symprec) {
          goto fail;
        }
      }
    }
  }

  return 1;

fail:
  return 0;
}
/* Angle (in degrees) between basis vectors i and j, computed from the */
/* metric tensor: cos(theta) = g_ij / (|a_i| |a_j|).                   */
static double get_angle(SPGCONST double metric[3][3],
                        const int i,
                        const int j)
{
  const double len_i = sqrt(metric[i][i]);
  const double len_j = sqrt(metric[j][j]);

  return acos(metric[i][j] / len_i / len_j) / PI * 180;
}
/* Re-express the point-symmetry operations of original_lattice in the
 * basis of new_lattice.  Operations that do not map to integer matrices
 * in the new basis are dropped; on a non-unimodular result the returned
 * set has size 0 (failure). */
static PointSymmetry
transform_pointsymmetry(SPGCONST PointSymmetry * lat_sym_orig,
                        SPGCONST double new_lattice[3][3],
                        SPGCONST double original_lattice[3][3])
{
  int i, num_ops;
  double trans_mat[3][3], inv_orig[3][3], rot_d[3][3];
  PointSymmetry lat_sym_new;

  lat_sym_new.size = 0;

  /* trans_mat = original_lattice^-1 * new_lattice */
  mat_inverse_matrix_d3(inv_orig, original_lattice, 0);
  mat_multiply_matrix_d3(trans_mat, inv_orig, new_lattice);

  num_ops = 0;
  for (i = 0; i < lat_sym_orig->size; i++) {
    mat_cast_matrix_3i_to_3d(rot_d, lat_sym_orig->rot[i]);
    mat_get_similar_matrix_d3(rot_d, rot_d, trans_mat, 0);
    /* new_lattice may have lower point symmetry than original_lattice.*/
    /* The operations that have non-integer elements are not counted. */
    if (! mat_is_int_matrix(rot_d,
                            mat_Dabs(mat_get_determinant_d3(trans_mat)) / 10)) {
      continue;
    }
    mat_cast_matrix_3d_to_3i(lat_sym_new.rot[num_ops], rot_d);
    if (abs(mat_get_determinant_i3(lat_sym_new.rot[num_ops])) != 1) {
      warning_print("spglib: A point symmetry operation is not unimodular.");
      warning_print("(line %d, %s).\n", __LINE__, __FILE__);
      /* lat_sym_new.size is still 0: callers see failure. */
      return lat_sym_new;
    }
    num_ops++;
  }

#ifdef SPGWARNING
  if (lat_sym_orig->size != num_ops) {
    warning_print("spglib: Some of point symmetry operations were dropped.");
    warning_print("(line %d, %s).\n", __LINE__, __FILE__);
  }
#endif

  lat_sym_new.size = num_ops;
  return lat_sym_new;
}
/* Fill the columns of axes with the relative axes selected by the
 * indices a1, a2, a3 (column c gets relative_axes[ac]). */
static void set_axes(int axes[3][3],
                     const int a1, const int a2, const int a3)
{
  int i;
  for (i = 0; i < 3; i++) {
    axes[i][0] = relative_axes[a1][i];
    axes[i][1] = relative_axes[a2][i];
    axes[i][2] = relative_axes[a3][i];
  }
}
|
GB_unaryop__ainv_fp32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp32_int16
// op(A') function: GB_tran__ainv_fp32_int16
// C type: float
// A type: int16_t
// cast: float cij = (float) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_fp32_int16  // Cx [p] = - (float) Ax [p], for all p
(
float *restrict Cx,                // output array, pre-allocated, length anz
const int16_t *restrict Ax,        // input array, length anz
int64_t anz,                       // number of entries in Ax and Cx
int nthreads                       // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_DISABLE above);
// the caller falls back to the generic implementation
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = - (float) Ax [p], via the GB_CAST_OP / GB_OP macros above
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_fp32_int16  // C = -(float) A', transpose + typecast + unop
(
GrB_Matrix C,                      // output matrix
const GrB_Matrix A,                // input matrix, transposed into C
int64_t *restrict *Rowcounts,      // per-slice row counts (phase-1 result)
GBI_single_iterator Iter,          // iterator over the vectors of A
const int64_t *restrict A_slice,   // partition of A across slices
int naslice                        // number of slices of A
)
{
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
// the transpose loop itself lives in the shared template below, which
// expands using the GB_* macros defined at the top of this file
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
radix_sort.h |
#pragma once
#include <omp.h>
#include <cstdint>
#include <utility>
namespace torch_ipex {
namespace cpu {
template <typename T>
using Key_Value_Weight_Tuple = std::tuple<T, T, float>;
// histogram size per thread
const int HIST_SIZE = 256;
// Parallel LSD (least-significant-digit) radix sort of (key, value, weight)
// tuples by key, 8 bits per pass, using OpenMP.
//
// inp_buf        - input tuples; also used as ping-pong scratch
// tmp_buf        - scratch buffer of the same length
// elements_count - number of tuples
// max_value      - maximum key value (must be >= every key; 0 => no-op)
//
// Returns a pointer to whichever of the two buffers holds the sorted data
// (depends on the parity of the number of passes).
template <typename T>
Key_Value_Weight_Tuple<T>* radix_sort_parallel(
    Key_Value_Weight_Tuple<T>* inp_buf,
    Key_Value_Weight_Tuple<T>* tmp_buf,
    int64_t elements_count,
    int64_t max_value) {
  RECORD_FUNCTION(__FUNCTION__, std::vector<c10::IValue>({}));
  int maxthreads = omp_get_max_threads();
  alignas(64) int histogram[HIST_SIZE * maxthreads],
      histogram_ps[HIST_SIZE * maxthreads + 1];
  if (max_value == 0)
    return inp_buf;
  // Number of significant bits in max_value.  Use the 64-bit builtin:
  // __builtin_clz truncates its argument to unsigned int, which both
  // miscounts for max_value >= 2^32 (too few passes => wrong sort) and
  // over-counts for 64-bit T with small max_value (wasted passes).
  // Note: __builtin_clzll(0) is undefined; the max_value == 0 early
  // return above guarantees the argument is non-zero.
  int num_bits = 64 - __builtin_clzll((unsigned long long)max_value);
  int num_passes = (num_bits + 7) / 8;
#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    int nthreads = omp_get_num_threads();
    int* local_histogram = &histogram[HIST_SIZE * tid];
    int* local_histogram_ps = &histogram_ps[HIST_SIZE * tid];
    // int64_t: an int here would truncate for more than 2^31 elements
    int64_t elements_count_4 = elements_count / 4 * 4;
    Key_Value_Weight_Tuple<T>* input = inp_buf;
    Key_Value_Weight_Tuple<T>* output = tmp_buf;
    for (int pass = 0; pass < num_passes; pass++) {
      /* Step 1: compute histogram
         Reset histogram */
      for (int i = 0; i < HIST_SIZE; i++)
        local_histogram[i] = 0;
#pragma omp for schedule(static)
      for (int64_t i = 0; i < elements_count_4; i += 4) {
        T val_1 = std::get<0>(input[i]);
        T val_2 = std::get<0>(input[i + 1]);
        T val_3 = std::get<0>(input[i + 2]);
        T val_4 = std::get<0>(input[i + 3]);
        local_histogram[(val_1 >> (pass * 8)) & 0xFF]++;
        local_histogram[(val_2 >> (pass * 8)) & 0xFF]++;
        local_histogram[(val_3 >> (pass * 8)) & 0xFF]++;
        local_histogram[(val_4 >> (pass * 8)) & 0xFF]++;
      }
      // the last thread also counts the <4 leftover elements
      if (tid == (nthreads - 1)) {
        for (int64_t i = elements_count_4; i < elements_count; i++) {
          T val = std::get<0>(input[i]);
          local_histogram[(val >> (pass * 8)) & 0xFF]++;
        }
      }
#pragma omp barrier
      /* Step 2: prefix sum (single thread; bins-major, then thread) */
      if (tid == 0) {
        int sum = 0, prev_sum = 0;
        for (int bins = 0; bins < HIST_SIZE; bins++)
          for (int t = 0; t < nthreads; t++) {
            sum += histogram[t * HIST_SIZE + bins];
            histogram_ps[t * HIST_SIZE + bins] = prev_sum;
            prev_sum = sum;
          }
        histogram_ps[HIST_SIZE * nthreads] = prev_sum;
        if (prev_sum != elements_count) {
          /* printf("Error1!\n"); exit(123); */
        }
      }
#pragma omp barrier
      /* Step 3: scatter each tuple to its prefix-sum position */
#pragma omp for schedule(static)
      for (int64_t i = 0; i < elements_count_4; i += 4) {
        T val_1 = std::get<0>(input[i]);
        T val_2 = std::get<0>(input[i + 1]);
        T val_3 = std::get<0>(input[i + 2]);
        T val_4 = std::get<0>(input[i + 3]);
        T bin_1 = (val_1 >> (pass * 8)) & 0xFF;
        T bin_2 = (val_2 >> (pass * 8)) & 0xFF;
        T bin_3 = (val_3 >> (pass * 8)) & 0xFF;
        T bin_4 = (val_4 >> (pass * 8)) & 0xFF;
        int pos;
        pos = local_histogram_ps[bin_1]++;
        output[pos] = input[i];
        pos = local_histogram_ps[bin_2]++;
        output[pos] = input[i + 1];
        pos = local_histogram_ps[bin_3]++;
        output[pos] = input[i + 2];
        pos = local_histogram_ps[bin_4]++;
        output[pos] = input[i + 3];
      }
      if (tid == (nthreads - 1)) {
        for (int64_t i = elements_count_4; i < elements_count; i++) {
          T val = std::get<0>(input[i]);
          int pos = local_histogram_ps[(val >> (pass * 8)) & 0xFF]++;
          output[pos] = input[i];
        }
      }
      // ping-pong the buffers for the next pass
      Key_Value_Weight_Tuple<T>* temp = input;
      input = output;
      output = temp;
#pragma omp barrier
    }
  }
  return (num_passes % 2 == 0 ? inp_buf : tmp_buf);
}
} // namespace cpu
} // namespace torch_ipex
|
mlp_mpi_ovlp_example_f32.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Evangelos Georganas (Intel Corp.)
******************************************************************************/
#include <libxsmm_dnn.h>
#include <dnn_common.h>
#include <mpi.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
#define DETAILED_PROFILE
#define N_PROF_THREADS 128
/* Fill buf with size floats: all ones when initOne is set, positive random
 * values in [0,1) when initPos is set, otherwise small values around zero
 * in (-0.05, 0.05].  Exactly one RNG draw per element in the random cases. */
LIBXSMM_INLINE void my_init_buf_mlp(float* buf, size_t size, int initPos, int initOne)
{
  int i;
  zero_buf(buf, size);
  for (i = 0; i < (int)size; ++i) {
    if (initOne != 0) {
      buf[i] = (float)1.0;
    } else if (initPos != 0) {
      buf[i] = (float)libxsmm_rng_f64();
    } else {
      buf[i] = (float)(0.05 - libxsmm_rng_f64()/10.0);
    }
  }
}
int main(int argc, char* argv[])
{
/* Initialize the MPI environment */
int provided;
MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
if(provided < MPI_THREAD_MULTIPLE) {
printf("The threading support level is lesser than that demanded.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
float **act_libxsmm, **ref_act_libxsmm, **fil_libxsmm, **delact_libxsmm, **ref_delact_libxsmm, **delfil_libxsmm;
float **bias_libxsmm, **delbias_libxsmm;
unsigned char **relumask_libxsmm;
int *label_libxsmm;
void* scratch = NULL;
size_t scratch_size = 0;
libxsmm_matdiff_info norms;
libxsmm_matdiff_clear(&norms);
/* some parameters we can overwrite via cli,
default is some inner layer of overfeat */
int n_procs = 4, n_comm_threads = 2, n_comp_threads;;
int iters = 10; /* repetitions of benchmark */
int MB = 32; /* mini-batch size, "N" */
int global_MB = 32;
int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu and elementwise fused */
char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP */
int bn = 64;
int bk = 64;
int bc = 64;
int *C; /* number of input feature maps, "C" */
int num_layers = 0;
int idx;
const char *const env_check = getenv("CHECK");
const double check = LIBXSMM_ABS(0 == env_check ? 1 : atof(env_check));
#if defined(_OPENMP)
int nThreads = omp_get_max_threads(); /* number of threads */
#else
int nThreads = 1; /* number of threads */
#endif
unsigned long long l_start, l_end;
double l_total = 0.0;
double l_fwd_fc[N_PROF_THREADS];
double l_bwdupd_fc[N_PROF_THREADS];
double l_allreduce[N_PROF_THREADS];
double l_optimizer[N_PROF_THREADS];
double l_fwd_loss[N_PROF_THREADS];
double l_bwd_loss[N_PROF_THREADS];
double first_bwdupd_compute = 0.0;
double gflop = 0.0;
int i, j, rank;
double fil_size = 0.0;
double act_size = 0.0;
float lr = 0.2f;
float loss_weight = 0.1f;
libxsmm_datatype in_dt, out_dt, comp_dt;
libxsmm_dnn_fc_eltw_fuse my_fuse;
libxsmm_dnn_fc_fwd_config* libxsmm_dnn_fc_fwd;
libxsmm_dnn_fc_bwd_config* libxsmm_dnn_fc_bwd;
libxsmm_dnn_opt_config* libxsmm_dnn_opt;
libxsmm_dnn_smax_fwd_config libxsmm_dnn_smax_fwd;
libxsmm_dnn_smax_bwd_config libxsmm_dnn_smax_bwd;
for (i = 0; i < N_PROF_THREADS; i++) {
l_fwd_fc[i] = 0.0;
l_bwdupd_fc[i] = 0.0;
l_allreduce[i] = 0.0;
l_optimizer[i] = 0.0;
l_fwd_loss[i] = 0.0;
l_bwd_loss[i] = 0.0;
}
if (argc > 1 && !strncmp(argv[1], "-h", 3)) {
printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
return 0;
}
libxsmm_rng_set_seed(1);
/* reading new values from cli */
i = 1;
num_layers = argc - 10;
if (argc > i) n_comm_threads = atoi(argv[i++]);
if (argc > i) iters = atoi(argv[i++]);
if (argc > i) global_MB = atoi(argv[i++]);
if (argc > i) fuse_type = atoi(argv[i++]);
if (argc > i) type = *(argv[i++]);
if (argc > i) bn = atoi(argv[i++]);
if (argc > i) bk = atoi(argv[i++]);
if (argc > i) bc = atoi(argv[i++]);
n_comp_threads = nThreads - n_comm_threads;
MPI_Request request[n_comm_threads][2];
/* Get the rank of the process */
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &n_procs);
MB = global_MB / n_procs;
/* Setup communicators for overlapping threads */
MPI_Comm comms[n_comm_threads];
for (idx = 0; idx < n_comm_threads; idx++) {
MPI_Comm_dup(MPI_COMM_WORLD, &comms[idx]);
}
/* allocate the number of channles buffer */
if ( num_layers < 1 ) {
printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
return 0;
}
C = (int*)malloc((num_layers+2)*sizeof(int));
for (j = 0 ; i < argc; ++i, ++j ) {
C[j] = atoi(argv[i]);
}
/* handle softmax config */
C[num_layers+1] = C[num_layers];
if (type != 'A' && type != 'F' && type != 'B') {
printf("type needs to be 'A' (All), 'F' (FP only), 'B' (BP only)\n");
return -1;
}
if ( (fuse_type < 0) || (fuse_type > 5) ) {
printf("fuse type needs to be 0 (None), 1 (Bias), 2 (ReLU), 3 (Sigmoid), 4 (Bias+ReLU), 5 (Bias+Sigmoid)\n");
return -1;
}
#if defined(__SSE3__)
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
#endif
in_dt = LIBXSMM_DATATYPE_F32;
out_dt = LIBXSMM_DATATYPE_F32;
comp_dt = LIBXSMM_DATATYPE_F32;
/* print some summary */
if (rank == 0 ) {
printf("##########################################\n");
printf("# Setting Up (Common) #\n");
printf("##########################################\n");
printf("PARAMS: N:%d\n", global_MB);
printf("PARAMS: Layers: %d\n", num_layers);
printf("PARAMS: ITERS:%d", iters); if (LIBXSMM_FEQ(0, check)) printf(" Threads:%d\n", nThreads); else printf("\n");
for (i = 0; i < num_layers; ++i ) {
if (i == 0) {
act_size += (double)(global_MB*C[i]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, global_MB, C[i], (double)(global_MB*C[i]*sizeof(float))/(1024.0*1024.0) );
}
act_size += (double)(global_MB*C[i+1]*sizeof(float))/(1024.0*1024.0);
fil_size += (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0) );
printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, global_MB, C[i+1], (double)(global_MB*C[i+1]*sizeof(float))/(1024.0*1024.0) );
}
act_size += (double)(global_MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", global_MB, C[num_layers+1], (double)(global_MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0) );
printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size );
printf("TOTAL SIZE Filter: %10.2f MiB\n", fil_size );
printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size );
printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size );
printf("TOTAL SIZE MLP: %10.2f MiB\n", (2.0*fil_size) + (2.0*act_size) );
}
/* allocate data */
/* +2 because of the softwax layer */
act_libxsmm = (float**)malloc( (num_layers+2)*sizeof(float*) );
ref_act_libxsmm = (float**)malloc( (num_layers+2)*sizeof(float*) );
delact_libxsmm = (float**)malloc( (num_layers+1)*sizeof(float*) );
ref_delact_libxsmm = (float**)malloc( (num_layers+1)*sizeof(float*) );
for ( i = 0 ; i < num_layers+2; ++i ) {
act_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
/* softmax has no incoming gradients */
if ( i < num_layers+1 ) {
delact_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
}
}
fil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
delfil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
for ( i = 0 ; i < num_layers; ++i ) {
fil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
delfil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
}
bias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
delbias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
for ( i = 0 ; i < num_layers; ++i ) {
bias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
delbias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
}
relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) );
for ( i = 0 ; i < num_layers; ++i ) {
relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152);
}
label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152);
/* init data on every node for numa awarness */
for ( i = 0 ; i < num_layers+2; ++i ) {
my_init_buf_mlp( act_libxsmm[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf_mlp( fil_libxsmm[i], C[i]*C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf_mlp( bias_libxsmm[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers+1; ++i ) {
my_init_buf_mlp( delact_libxsmm[i], MB*C[i], 0, 0 );
}
/* Serial initialization of data on proc 0 */
if (rank == 0) {
for ( i = 0 ; i < num_layers+2; ++i ) {
ref_act_libxsmm[i] = (float*)libxsmm_aligned_malloc( global_MB*C[i]*sizeof(float), 2097152);
/* softmax has no incoming gradients */
if ( i < num_layers+1 ) {
ref_delact_libxsmm[i] = (float*)libxsmm_aligned_malloc( global_MB*C[i]*sizeof(float), 2097152);
}
}
/* init data */
for ( i = 0 ; i < num_layers+2; ++i ) {
my_init_buf_mlp( ref_act_libxsmm[i], global_MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf_mlp( fil_libxsmm[i], C[i]*C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf_mlp( bias_libxsmm[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers+1; ++i ) {
my_init_buf_mlp( ref_delact_libxsmm[i], global_MB*C[i], 0, 0 );
}
}
/* Scatter the activations to all processes */
for ( i = 0 ; i < num_layers+2; ++i ) {
MPI_Scatter(ref_act_libxsmm[i], MB * C[i], MPI_FLOAT, act_libxsmm[i], MB * C[i], MPI_FLOAT, 0, MPI_COMM_WORLD);
}
/* Scatter the del_activations to all processes */
for ( i = 0 ; i < num_layers+1; ++i ) {
MPI_Scatter(ref_delact_libxsmm[i], MB * C[i], MPI_FLOAT, delact_libxsmm[i], MB * C[i], MPI_FLOAT, 0, MPI_COMM_WORLD);
}
/* Now broadcast weights tensors */
for ( i = 0 ; i < num_layers; ++i ) {
MPI_Bcast(fil_libxsmm[i], C[i]*C[i+1], MPI_FLOAT, 0, MPI_COMM_WORLD);
}
/* Now broadcast bias tensors */
for ( i = 0 ; i < num_layers; ++i ) {
MPI_Bcast(bias_libxsmm[i], C[i], MPI_FLOAT, 0, MPI_COMM_WORLD);
}
if (rank == 0) {
printf("\n");
printf("##########################################\n");
printf("# Setting Up (custom-Storage) #\n");
printf("##########################################\n");
}
if ( fuse_type == 0 ) {
my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_NONE;
} else if ( fuse_type == 1 ) {
my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_BIAS;
} else if ( fuse_type == 2 ) {
my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_RELU_WITH_MASK;
} else if ( fuse_type == 3 ) {
my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_BIAS_RELU_WITH_MASK;
} else {
my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_NONE;
}
/* allocating handles */
libxsmm_dnn_fc_fwd = (libxsmm_dnn_fc_fwd_config*) malloc( num_layers*sizeof(libxsmm_dnn_fc_fwd_config) );
libxsmm_dnn_fc_bwd = (libxsmm_dnn_fc_bwd_config*) malloc( num_layers*sizeof(libxsmm_dnn_fc_bwd_config) );
libxsmm_dnn_opt = (libxsmm_dnn_opt_config*) malloc( num_layers*sizeof(libxsmm_dnn_opt_config) );
/* setting up handles + scratch */
for ( i = 0; i < num_layers; ++i ) {
libxsmm_dnn_fc_fwd[i] = setup_libxsmm_dnn_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
(C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, my_fuse, in_dt, out_dt, comp_dt );
libxsmm_dnn_fc_bwd[i] = setup_libxsmm_dnn_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
(C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
n_comp_threads, my_fuse, in_dt, out_dt, comp_dt );
libxsmm_dnn_opt[i] = setup_libxsmm_dnn_opt( C[i], C[i+1], (C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
n_comm_threads, lr, in_dt, out_dt, comp_dt );
/* let's allocate and bind scratch */
if ( libxsmm_dnn_fc_fwd[i].scratch_size > 0 || libxsmm_dnn_fc_bwd[i].scratch_size > 0 || libxsmm_dnn_opt[i].scratch_size > 0 ) {
size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( libxsmm_dnn_fc_fwd[i].scratch_size, libxsmm_dnn_fc_bwd[i].scratch_size), libxsmm_dnn_opt[i].scratch_size );
if ( alloc_size > scratch_size ) {
if ( scratch != NULL ) libxsmm_free( scratch );
scratch_size = alloc_size;
scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
my_init_buf_mlp( (float*)(scratch), (scratch_size)/4, 0, 0 );
}
}
}
/* softmax+loss is treated as N+! layer */
libxsmm_dnn_smax_fwd = setup_libxsmm_dnn_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
(C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
nThreads, in_dt, out_dt, comp_dt );
libxsmm_dnn_smax_bwd = setup_libxsmm_dnn_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
(C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
nThreads, loss_weight, in_dt, out_dt, comp_dt );
if ( libxsmm_dnn_smax_fwd.scratch_size > 0 || libxsmm_dnn_smax_bwd.scratch_size > 0 ) {
size_t alloc_size = LIBXSMM_MAX( libxsmm_dnn_smax_fwd.scratch_size, libxsmm_dnn_smax_bwd.scratch_size );
if ( alloc_size > scratch_size ) {
if ( scratch != NULL ) libxsmm_free( scratch );
scratch_size = alloc_size;
scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
my_init_buf_mlp( (float*)(scratch), (scratch_size)/4, 0, 0 );
}
}
if (type == 'F') {
if (rank == 0) {
printf("##########################################\n");
printf("# Performance - FWD (custom-Storage) #\n");
printf("##########################################\n");
}
MPI_Barrier(MPI_COMM_WORLD);
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
for (j = 0; j < iters; ++j) {
for ( i = 0; i < num_layers; ++i) {
libxsmm_dnn_fc_fwd_exec_f32( libxsmm_dnn_fc_fwd[i], fil_libxsmm[i], act_libxsmm[i], act_libxsmm[i+1],
bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch );
}
libxsmm_dnn_smax_fwd_exec_f32( libxsmm_dnn_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss_weight,
0, tid, scratch );
}
}
MPI_Barrier(MPI_COMM_WORLD);
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
gflop = 0.0;
for ( i = 0; i < num_layers; ++i) {
gflop += (2.0*(double)global_MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
}
if (rank == 0) {
printf("GFLOP = %.5g\n", gflop/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,FP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
}
}
if (type == 'B') {
if (rank == 0) {
printf("##########################################\n");
printf("# Performance - BWD (custom-Storage) #\n");
printf("##########################################\n");
}
MPI_Barrier(MPI_COMM_WORLD);
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
int tid_comm = tid - n_comp_threads;
for (j = 0; j < iters; ++j) {
libxsmm_dnn_smax_bwd_exec_f32( libxsmm_dnn_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm,
0, tid, scratch );
for ( i = num_layers-1; i > 0; --i) {
if (tid < n_comp_threads) {
libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i],
act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], LIBXSMM_DNN_FC_PASS_BWD, 0, tid, scratch );
}
#pragma omp barrier
if (tid >= n_comp_threads) {
int n_elts = (C[i]*C[i+1])/n_comm_threads;
MPI_Allreduce(MPI_IN_PLACE, (float*)delfil_libxsmm[i]+tid_comm*n_elts, n_elts, MPI_FLOAT, MPI_SUM, comms[tid_comm]);
libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[i], fil_libxsmm[i], delfil_libxsmm[i], 0, tid_comm, scratch );
}
}
/* Only UPD pass for first layer */
if (tid < n_comp_threads) {
libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0],
act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], LIBXSMM_DNN_FC_PASS_BWD_W, 0, tid, scratch );
}
#pragma omp barrier
if (tid >= n_comp_threads) {
int n_elts = (C[0]*C[1])/n_comm_threads;
MPI_Allreduce(MPI_IN_PLACE, (float*)delfil_libxsmm[0]+tid_comm*n_elts, n_elts, MPI_FLOAT, MPI_SUM, comms[tid_comm]);
libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[0], fil_libxsmm[0], delfil_libxsmm[0], 0, tid_comm, scratch );
}
#pragma omp barrier
}
}
MPI_Barrier(MPI_COMM_WORLD);
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
gflop = 0.0;
for ( i = num_layers-1; i > 0; --i) {
gflop += (4.0*(double)global_MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
}
gflop += (2.0*(double)global_MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0);
if (rank == 0) {
printf("GFLOP = %.5g\n", gflop/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
}
MPI_Barrier(MPI_COMM_WORLD);
#if 1
if (rank == n_procs - 1) {
for ( i = 0 ; i < num_layers; ++i ) {
libxsmm_matdiff(&norms, LIBXSMM_DATATYPE_F32, C[i]*C[i+1], 1, delfil_libxsmm[i], delfil_libxsmm[i], 0, 0);
printf("L1 of layer's %d dweights after training : %.25g\n", i, norms.l1_ref);
libxsmm_matdiff_clear(&norms);
}
}
#endif
}
if (type == 'A') {
if (rank == 0) {
printf("##########################################\n");
printf("# Performance - FWD-BWD (custom-Storage) #\n");
printf("##########################################\n");
}
MPI_Barrier(MPI_COMM_WORLD);
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
int tid_comm = tid - n_comp_threads;
unsigned long long t0, t1;
for (j = 0; j < iters; ++j) {
#ifdef DETAILED_PROFILE
if (tid == 0) {
t0 = libxsmm_timer_tick();
}
#endif
for ( i = 0; i < num_layers; ++i) {
libxsmm_dnn_fc_fwd_exec_f32( libxsmm_dnn_fc_fwd[i], fil_libxsmm[i], act_libxsmm[i], act_libxsmm[i+1],
bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch );
}
#ifdef DETAILED_PROFILE
if (tid == 0) {
t1 = libxsmm_timer_tick();
l_fwd_fc[0] += libxsmm_timer_duration(t0, t1);
t0 = libxsmm_timer_tick();
}
#endif
libxsmm_dnn_smax_fwd_exec_f32( libxsmm_dnn_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss_weight,
0, tid, scratch );
#ifdef DETAILED_PROFILE
if (tid == 0) {
t1 = libxsmm_timer_tick();
l_fwd_loss[0] += libxsmm_timer_duration(t0, t1);
t0 = libxsmm_timer_tick();
}
#endif
libxsmm_dnn_smax_bwd_exec_f32( libxsmm_dnn_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm,
0, tid, scratch );
#ifdef DETAILED_PROFILE
if (tid == 0) {
t1 = libxsmm_timer_tick();
l_bwd_loss[0] += libxsmm_timer_duration(t0, t1);
}
#endif
for ( i = num_layers-1; i > 0; --i) {
if (tid < n_comp_threads) {
#ifdef DETAILED_PROFILE
if (tid == 0) {
t0 = libxsmm_timer_tick();
}
#endif
libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i],
act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], LIBXSMM_DNN_FC_PASS_BWD, 0, tid, scratch );
#ifdef DETAILED_PROFILE
if (tid == 0) {
t1 = libxsmm_timer_tick();
l_bwdupd_fc[0] += libxsmm_timer_duration(t0, t1);
if (i == num_layers-1) {
first_bwdupd_compute += libxsmm_timer_duration(t0, t1);
}
}
#endif
}
#pragma omp barrier
if (tid >= n_comp_threads) {
#ifdef DETAILED_PROFILE
if (tid == n_comp_threads) {
t0 = libxsmm_timer_tick();
}
#endif
int n_elts = (C[i]*C[i+1])/n_comm_threads;
MPI_Allreduce(MPI_IN_PLACE, (float*)delfil_libxsmm[i]+tid_comm*n_elts, n_elts, MPI_FLOAT, MPI_SUM, comms[tid_comm]);
#ifdef DETAILED_PROFILE
if (tid == n_comp_threads) {
t1 = libxsmm_timer_tick();
l_allreduce[0] += libxsmm_timer_duration(t0, t1);
t0 = libxsmm_timer_tick();
}
#endif
libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[i], fil_libxsmm[i], delfil_libxsmm[i], 0, tid_comm, scratch );
#ifdef DETAILED_PROFILE
if (tid == n_comp_threads) {
t1 = libxsmm_timer_tick();
l_optimizer[0] += libxsmm_timer_duration(t0, t1);
}
#endif
}
}
/* Only UPD pass for first layer */
if (tid < n_comp_threads) {
#ifdef DETAILED_PROFILE
if (tid == 0) {
t0 = libxsmm_timer_tick();
}
#endif
libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0],
act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], LIBXSMM_DNN_FC_PASS_BWD_W, 0, tid, scratch );
#ifdef DETAILED_PROFILE
if (tid == 0) {
t1 = libxsmm_timer_tick();
l_bwdupd_fc[0] += libxsmm_timer_duration(t0, t1);
}
#endif
}
#pragma omp barrier
if (tid >= n_comp_threads) {
#ifdef DETAILED_PROFILE
if (tid == n_comp_threads) {
t0 = libxsmm_timer_tick();
}
#endif
int n_elts = (C[0]*C[1])/n_comm_threads;
MPI_Allreduce(MPI_IN_PLACE, (float*)delfil_libxsmm[0]+tid_comm*n_elts, n_elts, MPI_FLOAT, MPI_SUM, comms[tid_comm]);
#ifdef DETAILED_PROFILE
if (tid == n_comp_threads) {
t1 = libxsmm_timer_tick();
l_allreduce[1] += libxsmm_timer_duration(t0, t1);
t0 = libxsmm_timer_tick();
}
#endif
libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[0], fil_libxsmm[0], delfil_libxsmm[0], 0, tid_comm, scratch );
#ifdef DETAILED_PROFILE
if (tid == n_comp_threads) {
t1 = libxsmm_timer_tick();
l_optimizer[1] += libxsmm_timer_duration(t0, t1);
}
#endif
}
#pragma omp barrier
}
}
MPI_Barrier(MPI_COMM_WORLD);
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
gflop = 0.0;
for ( i = num_layers-1; i > 0; --i) {
gflop += (6.0*(double)global_MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
}
gflop += (4.0*(double)global_MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0);
if (rank == 0) {
printf("GFLOP = %.5g\n", gflop/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
#ifdef DETAILED_PROFILE
double tot = /*l_allreduce[0] + l_optimizer[0] +*/ l_fwd_fc[0] + l_bwdupd_fc[0] + l_fwd_loss[0] + l_bwd_loss[0];
printf("FC time compute/loss = %.5g\n", ((double)(tot/iters)));
tot = l_allreduce[0] + l_optimizer[0];
printf("All-reduce + optimizer time overlaped = %.5g\n", ((double)(tot/iters)));
printf("Bwdupd compute time overlaped = %.5g\n", ((double)((l_bwdupd_fc[0]-first_bwdupd_compute)/iters)));
tot = l_optimizer[0] ;
printf("Optimizer time= %.5g\n", ((double)(tot/iters)));
tot = l_fwd_fc[0] + LIBXSMM_MAX( l_bwdupd_fc[0] - first_bwdupd_compute, l_allreduce[0] + l_optimizer[0]) + first_bwdupd_compute + l_fwd_loss[0] + l_bwd_loss[0] + l_allreduce[1] + l_optimizer[1];
printf("Total time on critical path = %.5g (exposed all_reduce + optimizer = %.5g) \n", ((double)(tot/iters)), (double)((l_allreduce[1] + l_optimizer[1])/iters));
#endif
}
MPI_Barrier(MPI_COMM_WORLD);
#if 0
if (rank == n_procs - 1) {
for ( i = 0 ; i < num_layers; ++i ) {
libxsmm_matdiff(&norms, LIBXSMM_DATATYPE_F32, C[i]*C[i+1], 1, delfil_libxsmm[i], delfil_libxsmm[i], 0, 0);
printf("L1 of layer's %d dweights after training : %.25g\n", i, norms.l1_ref);
libxsmm_matdiff_clear(&norms);
}
}
#endif
}
/* deallocate data */
if ( scratch != NULL ) {
libxsmm_free(scratch);
}
for ( i = 0; i < num_layers; ++i ) {
if ( i == 0 ) {
libxsmm_free(act_libxsmm[i]);
libxsmm_free(delact_libxsmm[i]);
}
libxsmm_free(act_libxsmm[i+1]);
libxsmm_free(delact_libxsmm[i+1]);
libxsmm_free(fil_libxsmm[i]);
libxsmm_free(delfil_libxsmm[i]);
libxsmm_free(bias_libxsmm[i]);
libxsmm_free(delbias_libxsmm[i]);
libxsmm_free(relumask_libxsmm[i]);
}
libxsmm_free(act_libxsmm[num_layers+1]);
libxsmm_free(label_libxsmm);
free( act_libxsmm );
free( delact_libxsmm );
free( fil_libxsmm );
free( delfil_libxsmm );
free( bias_libxsmm );
free( delbias_libxsmm );
free( relumask_libxsmm );
free( libxsmm_dnn_opt );
free( libxsmm_dnn_fc_fwd );
free( libxsmm_dnn_fc_bwd );
free( C );
if (rank == 0) {
for ( i = 0 ; i < num_layers+2; ++i ) {
libxsmm_free(ref_act_libxsmm[i]);
}
free(ref_act_libxsmm);
for ( i = 0 ; i < num_layers+1; ++i ) {
libxsmm_free(ref_delact_libxsmm[i]);
}
free(ref_delact_libxsmm);
}
/* Finalize the MPI environment */
MPI_Finalize();
return 0;
}
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file utils.h
* \brief Basic utilility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/imperative.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/storage.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>
#include "../operator/mxnet_op.h"
#if MXNET_USE_ONEDNN == 1
#include "../operator/nn/dnnl/dnnl_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
/*! \brief Return the id of the current process (Windows implementation). */
inline size_t current_process_id() {
  return ::GetCurrentProcessId();
}
#else
/*! \brief Return the id of the current process (POSIX implementation). */
inline size_t current_process_id() {
  return getpid();
}
#endif
/*!
* \brief IndPtr should be non-negative, in non-decreasing order, start with 0
* and end with value equal with size of indices.
*/
struct csr_indptr_check {
  // Per-row kernel: i ranges over [0, end) where end == (indptr length - 1).
  // Writes kCSRIndPtrErr to *out when entry i+1 is negative, the pair
  // (indptr[i], indptr[i+1]) is decreasing, the first entry is not 0, or the
  // last entry does not equal the number of stored indices (idx_size).
  template <typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i,
                                  DType* out,
                                  const IType* indptr,
                                  const nnvm::dim_t end,
                                  const nnvm::dim_t idx_size) {
    if (indptr[i + 1] < 0 || indptr[i + 1] < indptr[i] || (i == 0 && indptr[i] != 0) ||
        (i == end - 1 && indptr[end] != idx_size))
      *out = kCSRIndPtrErr;
  }
};
/*!
* \brief Indices should be non-negative, less than the number of columns
* and in ascending order per row.
*/
struct csr_idx_check {
  // Per-row kernel: flags kCSRIdxErr when any column index of row i is
  // outside [0, ncols) or the indices within the row are not strictly
  // ascending. Stops scanning the row at the first violation.
  template <typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i,
                                  DType* out,
                                  const IType* idx,
                                  const RType* indptr,
                                  const nnvm::dim_t ncols) {
    for (RType j = indptr[i]; j < indptr[i + 1]; j++) {
      if (idx[j] >= ncols || idx[j] < 0 || (j < indptr[i + 1] - 1 && idx[j] >= idx[j + 1])) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};
/*!
* \brief Indices of RSPNDArray should be non-negative,
* less than the size of first dimension and in ascending order
*/
struct rsp_idx_check {
  // Per-entry kernel over the row-index array (end == length - 1 guards the
  // idx[i+1] access). Flags kRSPIdxErr when idx[i] is negative, >= nrows, or
  // not strictly less than its successor.
  template <typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i,
                                  DType* out,
                                  const IType* idx,
                                  const nnvm::dim_t end,
                                  const nnvm::dim_t nrows) {
    if ((i < end && idx[i + 1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows)
      *out = kRSPIdxErr;
  }
};
template <typename xpu>
void CheckFormatWrapper(const RunContext& rctx,
const NDArray& input,
const TBlob& err_cpu,
const bool full_check);
/*!
* \brief Check the validity of CSRNDArray.
* \param rctx Execution context.
* \param input Input NDArray of CSRStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template <typename xpu>
void CheckFormatCSRImpl(const RunContext& rctx,
                        const NDArray& input,
                        const TBlob& err_cpu,
                        const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage) << "CheckFormatCSRImpl is for CSRNDArray";
  const mxnet::TShape shape = input.shape();
  const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
  const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const mxnet::TShape storage_shape = input.storage_shape();
  // O(1) structural check: a valid CSR array is 2-D, has 1-D aux/data arrays,
  // an indptr of length rows+1, and as many indices as stored values.
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) || (idx_shape[0] != storage_shape[0])) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kCSRShapeErr;
    });
    return;
  }
  if (full_check) {
    // O(N) content check, executed on the device that owns the array.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu>* s = rctx.get_stream<xpu>();
          // Device-side scratch flag, initialized to "no error".
          NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          // Validate the row-pointer array (non-negative, non-decreasing,
          // starts at 0, ends at idx_shape[0]).
          Kernel<csr_indptr_check, xpu>::Launch(s,
                                                indptr_shape[0] - 1,
                                                val_xpu.dptr<DType>(),
                                                input.aux_data(csr::kIndPtr).dptr<RType>(),
                                                indptr_shape[0] - 1,
                                                idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            Kernel<csr_idx_check, xpu>::Launch(s,
                                               indptr_shape[0] - 1,
                                               val_xpu.dptr<DType>(),
                                               input.aux_data(csr::kIdx).dptr<IType>(),
                                               input.aux_data(csr::kIndPtr).dptr<RType>(),
                                               shape[1]);
          }
          // Copy the (possibly updated) error flag back to the host blob.
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}
/*!
* \brief Check the validity of RowSparseNDArray.
* \param rctx Execution context.
* \param input Input NDArray of RowSparseStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template <typename xpu>
void CheckFormatRSPImpl(const RunContext& rctx,
                        const NDArray& input,
                        const TBlob& err_cpu,
                        const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage) << "CheckFormatRSPImpl is for RSPNDArray";
  const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  // O(1) structural check: one row index per stored row.
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kRSPShapeErr;
    });
    return;
  }
  // No stored rows: nothing further to validate.
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    // O(N) content check on the device that owns the array.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu>* s = rctx.get_stream<xpu>();
        // Device-side scratch flag, initialized to "no error".
        NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu = ret_xpu.data();
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
        // Row indices must be in [0, nrows) and strictly ascending.
        Kernel<rsp_idx_check, xpu>::Launch(s,
                                           idx_shape[0],
                                           val_xpu.dptr<DType>(),
                                           input.aux_data(rowsparse::kIdx).dptr<IType>(),
                                           idx_shape[0] - 1,
                                           input.shape()[0]);
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}
/*!
 * \brief Dispatch sparse-format validation based on the storage type of the input.
 *        Dense arrays need no validation; unknown storage types are fatal.
 */
template <typename xpu>
void CheckFormatImpl(const RunContext& rctx,
                     const NDArray& input,
                     const TBlob& err_cpu,
                     const bool full_check) {
  const int stype = input.storage_type();
  switch (stype) {
    case kCSRStorage:
      CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kRowSparseStorage:
      CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kDefaultStorage:
      // no-op for default storage
      break;
    default:
      LOG(FATAL) << "Unknown storage type " << stype;
  }
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template <typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu>* s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/* \brief Casts tensor storage type to the new type.
*/
template <typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) {
  // An empty vector never "contains only" the target type.
  if (vstorage.empty()) {
    return false;
  }
  for (const auto& st : vstorage) {
    if (st != stype) {
      return false;
    }
  }
  return true;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
 *         or `stype2`. Sets boolean if both found.
 *         false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool* has_both) {
  if (has_both != nullptr) {
    *has_both = false;
  }
  if (vstorage.empty()) {
    return false;
  }
  uint8_t seen = 0;  // bit 0: stype1 encountered, bit 1: stype2 encountered
  for (const auto st : vstorage) {
    if (st == stype1) {
      seen |= 1;
    } else if (st == stype2) {
      seen |= 2;
    } else {
      return false;  // some other storage type is present
    }
  }
  if (has_both != nullptr) {
    *has_both = (seen == 3);
  }
  return true;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() != stype) {
return false;
}
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool* has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if storage type of any array in `ndarrays`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() == stype) {
return true;
}
}
}
return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<int>& ndstypes, const NDArrayStorageType stype) {
  // any_of returns false for an empty range, matching the documented contract.
  return std::any_of(ndstypes.begin(), ndstypes.end(), [stype](const int ndstype) {
    return ndstype == stype;
  });
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
  // Map each dispatch mode to its canonical lowercase name.
  if (x == DispatchMode::kFCompute)
    return "fcompute";
  if (x == DispatchMode::kFComputeEx)
    return "fcompute_ex";
  if (x == DispatchMode::kFComputeFallback)
    return "fcompute_fallback";
  if (x == DispatchMode::kVariable)
    return "variable";
  if (x == DispatchMode::kUndefined)
    return "undefined";
  return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
  // Map each known storage type enum value to its canonical name.
  if (x == kDefaultStorage)
    return "default";
  if (x == kCSRStorage)
    return "csr";
  if (x == kRowSparseStorage)
    return "row_sparse";
  return "unknown";
}
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
  // Map each known device type enum value to its canonical name.
  if (dev_type == Context::kCPU)
    return "cpu";
  if (dev_type == Context::kGPU)
    return "gpu";
  if (dev_type == Context::kCPUPinned)
    return "cpu_pinned";
  if (dev_type == Context::kCPUShared)
    return "cpu_shared";
  return "unknown";
}
/*!
 * \brief Look up \p attr_name in the node's attribute dictionary,
 *        returning \p default_val when the key is absent.
 *
 * Performs a single map lookup; the original implementation did a
 * find() followed by a redundant at() on the same key.
 */
inline std::string attr_value_string(const nnvm::NodeAttrs& attrs,
                                     const std::string& attr_name,
                                     std::string default_val = "") {
  const auto it = attrs.dict.find(attr_name);
  return it == attrs.dict.end() ? default_val : it->second;
}
/*! \brief get string representation of the operator stypes */
/*!
 * \brief Build a human-readable summary of an operator invocation:
 *        name, input/output storage types, parameter dict, and device type.
 */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         const std::vector<int>& in_attrs,
                                         const std::vector<int>& out_attrs) {
  std::ostringstream os;
  os << "operator = " << attrs.op->name << "\ninput storage types = [";
  for (const int attr : in_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "output storage types = [";
  for (const int attr : out_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "params = {";
  // Iterate by const reference: the original copied each key/value pair.
  for (const auto& kv : attrs.dict) {
    os << "\"" << kv.first << "\" : " << kv.second << ", ";
  }
  os << "}\n"
     << "context.dev_mask = " << dev_type_string(dev_mask);
  return os.str();
}
/*! \brief get string representation of the operator */
/*!
 * \brief Build a human-readable summary of an operator call from its runtime
 *        inputs/outputs. \p req is part of the public signature but unused.
 */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  // Take the NDArray by const reference: the original lambda copied every
  // array by value just to read its storage type.
  auto xform = [](const NDArray& arr) -> int { return arr.storage_type(); };
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
  return operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
}
/*! \brief log message once. Intended for storage fallback warning messages. */
inline void LogOnce(const std::string& message) {
  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
  auto log_store = LogStore::Get();
  // Thread-local dedup set: each distinct message is logged at most once
  // per thread (different threads may each log it once).
  if (log_store->find(message) == log_store->end()) {
    LOG(INFO) << message;
    log_store->insert(message);
  }
}
/*! \brief log storage fallback event
*/
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  // Opt-out switch: MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0 silences the warning.
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log)
    return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  // The warning text varies with whether oneDNN support was compiled in.
  const char* warning =
      "\n WARNING:\n"
      "Execution of the operator above will fallback to the generic implementation "
#if MXNET_USE_ONEDNN == 1
      "(not utilizing kernels from oneDNN library) "
#endif
      "with default dense storage type. You are seeing this warning message because "
#if MXNET_USE_ONEDNN == 1
      "MXNET_ONEDNN_ENABLED flag is set to 0, in which case you can re-enable the default "
      "execution path by setting MXNET_ONEDNN_ENABLED back to 1, or "
#endif
      "the operator above is unable to process the given ndarrays with specified storage types, "
      "context and/or parameter, in which case temporary dense ndarrays are generated in order to "
      "execute the operator. The fallback does not affect the correctness of the programme. Using "
      "default storage type performance degradation might be observed. \nYou can set environment "
      "variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to 0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  LogOnce(os.str());
#if MXNET_USE_ONEDNN == 1
  // Extra one-time note when a bounded oneDNN primitive cache is configured.
  if (GetDNNLCacheSize() != -1)
    common::LogOnce(
        "MXNET_ONEDNN_CACHE_NUM is set."
        "Should only be set if "
        "your model has variable input shapes, "
        "as cache size may grow unbounded");
#endif
}
// heuristic to determine number of threads per GPU
inline int GetNumThreadsPerGPU() {
  // This is resource efficient option.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // This is resource efficient option.
  int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  // Never exceed the per-GPU worker thread count.
  return std::min(num_match_color, GetNumThreadsPerGPU());
}
/*!
 * \brief Sum the first n elements of array a onto start using an OpenMP
 *        parallel-for reduction; returns the accumulated value.
 */
template <typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V sum = start;
#pragma omp parallel for reduction(+ : sum)
  for (int i = 0; i < n; ++i) {
    sum += a[i];
  }
  return sum;
}
/*!
* \brief
* Helper function for ParallelSort.
* DO NOT call this function directly.
* Use the interface ParallelSort instead.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template <typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    // Below the grain size: plain sequential sort.
    std::sort(first, first + len, comp);
  } else {
    // Sort the two halves concurrently (left half on a new thread,
    // right half on this one), then merge them in place.
    std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len / 2, grainsize, comp);
    ParallelSortHelper(first + len / 2, len - len / 2, grainsize, comp);
    thr.join();
    std::inplace_merge(first, first + len / 2, first + len, comp);
  }
}
/*!
* \brief
* Sort the elements in the range [first, last) into the ascending order defined by
* the comparator comp.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template <typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto num = std::distance(first, last);
  // Grain size bounds the number of spawned threads near num_threads while
  // keeping at least 16K elements per task so threading stays worthwhile.
  size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024 * 16));
  ParallelSortHelper(first, num, grainsize, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into ascending order.
* The elements are compared using the default < operator.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template <typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  // Delegate to the comparator overload using operator< of the value type.
  ParallelSort(
      first, last, num_threads, std::less<typename std::iterator_traits<RandomIt>::value_type>());
}
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
 * \brief Helper for non-array type `T`.
 *
 * Only this primary template defines `SingleObject`, so the variadic
 * MakeUnique(Args&&...) overload participates in overload resolution
 * solely for non-array types.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief Type of `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};
/*!
 * \brief Helper for an array of unknown bound `T`.
 *
 * Only this partial specialization defines `UnknownBound`, selecting the
 * MakeUnique(size_t) overload for `T[]`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief Type of `T`.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};
/*!
 * \brief Helper for an array of known bound `T`.
 *
 * `KnownBound` is void; the corresponding MakeUnique overload is deleted,
 * so constructing arrays of known bound is rejected at compile time.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Type of `T`.
   */
  using KnownBound = void;
};
}  // namespace helper
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs a non-array type `T`. The arguments `args` are passed to the
* constructor of `T`. The function does not participate in the overload
* resolution if `T` is an array type.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  // Perfect-forward the arguments to T's constructor.
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param n The size of the array to construct.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs an array of unknown bound `T`. The function does not participate
* in the overload resolution unless `T` is an array of unknown bound.
*/
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  // The trailing {} value-initializes (zeroes) every element.
  return std::unique_ptr<T>(new U[n]{});
}
/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 *
 * Constructing an array of known bound is disallowed.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
/*!
 * \brief Look up the registered FCompute-style attribute `name` for `op`
 *        on the device family of `ctx`; returns nullptr when unregistered.
 */
template <typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name, const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  const int dev_mask = ctx.dev_mask();
  if (dev_mask == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  }
  if (dev_mask == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  }
  LOG(FATAL) << "Unknown device mask " << dev_mask;
  return nullptr;
}
/*!
* \brief Return the max integer value representable in the type `T` without loss of precision.
*/
template <typename T>
constexpr size_t MaxIntegerValue() {
  // Integral types: their own maximum. Floating types: 2^digits, the largest
  // integer magnitude representable without loss of precision.
  return std::is_integral<T>::value ? std::numeric_limits<T>::max() :
                                      size_t(2) << (std::numeric_limits<T>::digits - 1);
}
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;  // 2^11: half has an 11-bit significand
}
template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
  // NOTE(review): 2^15, but bfloat16 has an 8-bit significand — verify intent.
  return size_t(2) << 14;
}
/*!
 * \brief Number of bits needed to represent `a`: floor(log2(a)) + 1 for a > 0.
 *        Despite the name this is the bit width, not log2 itself; returns 1 for a == 0.
 */
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1)
    ++k;
  return k;
}
/*!
 * \brief Same as ilog2ul, for unsigned int arguments.
 */
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int k = 1;
  while (a >>= 1)
    ++k;
  return k;
}
/*!
* \brief Return an NDArray of all zeros.
*/
inline NDArray InitZeros(const NDArrayStorageType stype,
                         const mxnet::TShape& shape,
                         const Context& ctx,
                         const int dtype) {
  if (stype != kDefaultStorage) {
    // Non-default (sparse) storage: allocation is always delayed.
    return NDArray(stype, shape, ctx, true, dtype);
  }
  // Default (dense) storage: allocate eagerly and fill with zeros.
  NDArray zeros(shape, ctx, false, dtype);
  zeros = 0;
  return zeros;
}
/*!
* \brief Helper to add a NDArray of zeros to a std::vector.
*/
inline void EmplaceBackZeros(const NDArrayStorageType stype,
                             const mxnet::TShape& shape,
                             const Context& ctx,
                             const int dtype,
                             std::vector<NDArray>* vec) {
  if (stype != kDefaultStorage) {
    // Non-default (sparse) storage: allocation is always delayed.
    vec->emplace_back(stype, shape, ctx, true, dtype);
    return;
  }
  // Default (dense) storage: allocate eagerly and fill with zeros.
  vec->emplace_back(shape, ctx, false, dtype);
  vec->back() = 0;
}
/*!
* \brief parallelize copy by OpenMP.
*/
template <typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  // Threshold below which a single memcpy beats spawning OpenMP threads.
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] = src[i];
    }
  } else {
    // GCC 8+ warns when memcpy'ing class types; suppressed here —
    // NOTE(review): assumes DType is trivially copyable.
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
    std::memcpy(dst, src, sizeof(DType) * size);
#pragma GCC diagnostic pop
  }
}
/*!
 * \brief parallelize add by OpenMP
 */
template <typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
  // Same size threshold (and env var) as ParallelCopy.
  static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
  if (size >= add_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  } else {
    // Small input: a sequential loop avoids the thread startup overhead.
    for (index_t i = 0; i < size; ++i) {
      dst[i] += src[i];
    }
  }
}
/*!
* \brief If numpy compatibility is turned off (default), the shapes passed in
* by users follow the legacy shape definition:
* 1. 0 ndim means the shape is completely unknown.
* 2. 0 dim size means the dim size is unknown.
* We need to convert those shapes to use the numpy shape definition:
* 1. 0 ndim means it's a scalar tensor.
* 2. -1 ndim means the shape is unknown.
* 3. 0 dim size means no elements in that dimension.
* 4. -1 dim size means the dimension's size is unknown.
* so that operator's infer shape function can work in backend.
* \param shape to be converted.
* Note: It is possible that the shape to be converted is already
* numpy compatible. For example, when a subgraph operator's infer
* shape function is called from the infer shape pass of the whole
* graph, its input/output shapes have been converted to numpy
* compatible shapes.
*/
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
  if (shape->ndim() == 0) {
    // Legacy encoding: ndim == 0 meant "shape unknown"; numpy uses ndim == -1.
    *shape = mxnet::TShape();
    return;
  }
  for (int i = 0; i < shape->ndim(); ++i) {
    // Legacy encoding: dim size 0 meant "unknown"; numpy uses -1.
    if ((*shape)[i] == 0) {
      (*shape)[i] = -1;
    }
  }
}
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  // Convert every shape in the vector in place.
  for (mxnet::TShape& shape : *shapes) {
    ConvertToNumpyShape(&shape);
  }
}
/*!
 * \brief This function is used to convert shapes returned by
 *        the infer shape functions/pass to the legacy shape definition.
 */
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
  if (!mxnet::ndim_is_known(*shape)) {
    // Numpy "unknown shape" (ndim == -1) maps back to legacy ndim == 0.
    *shape = mxnet::TShape(0, -1);
    return;
  }
  for (int i = 0; i < shape->ndim(); ++i) {
    // Numpy "unknown dim" (-1) maps back to legacy dim size 0.
    if (!mxnet::dim_size_is_known(*shape, i)) {
      (*shape)[i] = 0;
    }
  }
}
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
  // Convert every shape in the vector in place.
  for (mxnet::TShape& shape : *shapes) {
    ConvertToLegacyShape(&shape);
  }
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph& idx,
const std::vector<NDArray*>& state_arrays,
size_t nid,
const std::function<void(const char*, const char*, void*)>& monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph& idx,
const std::vector<NDArray*>& state_arrays,
size_t nid,
const std::function<void(const char*, const char*, void*)>& monitor_callback);
/*! \brief Map negative axis values into [0, ndim); aborts via CHECK when an
 *         axis remains out of range after the shift. */
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
  // convert negative axes to positive values
  const int ndim = src.ndim();
  mxnet::TShape axes = src;
  for (int i = 0; i < ndim; ++i) {
    if (axes[i] < 0) {
      axes[i] += ndim;
    }
    CHECK(axes[i] >= 0 && axes[i] < ndim)
        << "axes[" << i << "]=" << axes[i] << " exceeds the range [" << 0 << ", " << ndim << ")";
  }
  return axes;
}
/*! \brief true for the floating point dtypes (fp16/fp32/fp64). */
inline bool is_float(const int dtype) {
  return dtype == mshadow::kFloat32 || dtype == mshadow::kFloat64 || dtype == mshadow::kFloat16;
}
/*! \brief true for any integer dtype, signed or unsigned. */
inline bool is_int(const int dtype) {
  return dtype == mshadow::kUint8 || dtype == mshadow::kInt8 || dtype == mshadow::kUint16 ||
         dtype == mshadow::kInt16 || dtype == mshadow::kUint32 || dtype == mshadow::kInt32 ||
         dtype == mshadow::kUint64 || dtype == mshadow::kInt64;
}
/*! \brief true for the signed integer dtypes. */
inline bool is_signed_int(const int dtype) {
  return dtype == mshadow::kInt8 || dtype == mshadow::kInt16 || dtype == mshadow::kInt32 ||
         dtype == mshadow::kInt64;
}
/*! \brief true for the unsigned integer dtypes. */
inline bool is_unsigned_int(const int dtype) {
  return dtype == mshadow::kUint8 || dtype == mshadow::kUint16 || dtype == mshadow::kUint32 ||
         dtype == mshadow::kUint64;
}
/*! \brief Width in bits of the storage for a given mshadow dtype flag.
 *         Aborts (LOG(FATAL)) for dtypes without a case, e.g. float16/bfloat16. */
static int bits_of(const int type_flag) {
  switch (type_flag) {
    case mshadow::kFloat32:
      return sizeof(float) * CHAR_BIT;
    case mshadow::kFloat64:
      return sizeof(double) * CHAR_BIT;
    case mshadow::kUint8:
      return sizeof(uint8_t) * CHAR_BIT;
    case mshadow::kInt32:
      return sizeof(int32_t) * CHAR_BIT;
    case mshadow::kInt8:
      return sizeof(int8_t) * CHAR_BIT;
    case mshadow::kInt64:
      return sizeof(int64_t) * CHAR_BIT;
    case mshadow::kBool:
      return sizeof(bool) * CHAR_BIT;
    case mshadow::kInt16:
      return sizeof(int16_t) * CHAR_BIT;
    case mshadow::kUint16:
      return sizeof(uint16_t) * CHAR_BIT;
    case mshadow::kUint32:
      return sizeof(uint32_t) * CHAR_BIT;
    case mshadow::kUint64:
      return sizeof(uint64_t) * CHAR_BIT;
    default: {
      LOG(FATAL) << "Unknown type_flag=" << type_flag;
      return -1;
    }
  }
}
/*! \brief Return the dtype produced by combining type1 and type2,
 *         following numpy-style promotion rules. Aborts for unsupported
 *         combinations (e.g. signed int with uint64). */
inline int type_promotion(const int type1, const int type2) {
  if (type1 == type2)
    return type1;
  if (is_float(type1) && is_float(type2)) {
    // Both floating point: promote to the wider float type.
    if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) {
      return mshadow::kFloat64;
    }
    if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) {
      return mshadow::kFloat32;
    }
    return mshadow::kFloat16;
  } else if (is_float(type1) || is_float(type2)) {
    // Float mixed with non-float: the float type wins.
    return is_float(type1) ? type1 : type2;
  }
  if (is_signed_int(type1) && is_signed_int(type2)) {
    // Both signed integers: promote to the wider signed type.
    if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) {
      return mshadow::kInt64;
    }
    if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) {
      return mshadow::kInt32;
    }
    if (type1 == mshadow::kInt16 || type2 == mshadow::kInt16) {
      return mshadow::kInt16;
    }
    return mshadow::kInt8;
  } else if (is_unsigned_int(type1) && is_unsigned_int(type2)) {
    // Both unsigned integers: promote to the wider unsigned type.
    if (type1 == mshadow::kUint64 || type2 == mshadow::kUint64) {
      return mshadow::kUint64;
    }
    if (type1 == mshadow::kUint32 || type2 == mshadow::kUint32) {
      return mshadow::kUint32;
    }
    if (type1 == mshadow::kUint16 || type2 == mshadow::kUint16) {
      return mshadow::kUint16;
    }
    return mshadow::kUint8;
  } else if (type1 == mshadow::kBool) {
    // Bool promotes to the other operand's type.
    return type2;
  } else if (type2 == mshadow::kBool) {
    return type1;
  } else if (is_unsigned_int(type1) || is_unsigned_int(type2)) {
    // Mixed signed/unsigned: choose a signed type wide enough for both.
    if (bits_of(type1) < bits_of(type2)) {
      if (type1 == mshadow::kInt8 && type2 == mshadow::kUint16) {
        return mshadow::kInt32;
      } else if (type1 == mshadow::kInt8 && type2 == mshadow::kUint32) {
        return mshadow::kInt64;
      } else if (type1 == mshadow::kInt16 && type2 == mshadow::kUint32) {
        return mshadow::kInt64;
      } else if (type2 == mshadow::kUint64) {
        // No signed type can hold the full uint64 range.
        LOG(FATAL) << "Unsupported type promotions between " << mshadow::dtype_string(type1)
                   << " and " << mshadow::dtype_string(type2);
      } else {
        return type2;
      }
    } else if (bits_of(type2) < bits_of(type1)) {
      // Mirror of the branch above with the operands swapped.
      if (type2 == mshadow::kInt8 && type1 == mshadow::kUint16) {
        return mshadow::kInt32;
      } else if (type2 == mshadow::kInt8 && type1 == mshadow::kUint32) {
        return mshadow::kInt64;
      } else if (type2 == mshadow::kInt16 && type1 == mshadow::kUint32) {
        return mshadow::kInt64;
      } else if (type1 == mshadow::kUint64) {
        LOG(FATAL) << "Unsupported type promotions between " << mshadow::dtype_string(type1)
                   << " and " << mshadow::dtype_string(type2);
      } else {
        return type1;
      }
    } else {
      // Same width, different signedness: promote to the next wider signed type.
      if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) {
        return mshadow::kInt16;
      }
      if (type1 == mshadow::kUint16 || type2 == mshadow::kUint16) {
        return mshadow::kInt32;
      }
      if (type1 == mshadow::kUint32 || type2 == mshadow::kUint32) {
        return mshadow::kInt64;
      }
    }
  }
  LOG(FATAL) << "Unsupported type promotions between " << mshadow::dtype_string(type1) << " and "
             << mshadow::dtype_string(type2);
  return -1;
}
inline const std::string NodeAttrsGetProfilerScope(const nnvm::NodeAttrs& attrs) {
  // Return the profiler scope previously assigned to this node, falling back
  // to the storage default when the attribute is absent.
  const auto& node_attrs_dict = attrs.dict;
  const auto it = node_attrs_dict.find("__profiler_scope__");
  if (it != node_attrs_dict.end()) {
    return it->second;
  }
  return MXNET_STORAGE_DEFAULT_PROFILER_SCOPE_CSTR;
}
/*! \brief Default dtype for array creation: float64 under numpy-default-dtype
 *         mode, float32 otherwise. */
inline int GetDefaultDtype() {
  return Imperative::Get()->is_np_default_dtype() ? mshadow::kFloat64 : mshadow::kFloat32;
}
/*! \brief Return \p dtype unchanged when explicitly set (!= -1); otherwise
 *         the mode-dependent default dtype. */
inline int GetDefaultDtype(int dtype) {
  if (dtype != -1)
    return dtype;
  return Imperative::Get()->is_np_default_dtype() ? mshadow::kFloat64 : mshadow::kFloat32;
}
/*! \brief Descriptor for an mshadow dtype: name, element size, and the
 *         element size of its accumulator type. */
struct MShadowTypeInfo {
  std::string name;  // human-readable type name
  int size;          // element size in bytes
  int acc_size;      // accumulator element size in bytes
  // Take `name` by non-const value so std::move can actually move it; the
  // original const-qualified by-value parameter silently degraded the move
  // into a copy. Top-level const on a by-value parameter is not part of the
  // signature, so callers are unaffected.
  MShadowTypeInfo(std::string name, const int size, const int acc_size)
      : name(std::move(name)), size(size), acc_size(acc_size) {}
  MShadowTypeInfo(std::string name, const int size)
      : MShadowTypeInfo(std::move(name), size, size) {}
};
MShadowTypeInfo mshadow_type_info(const int type_flag);
/*! \brief Allocate \p size bytes aligned to \p alignment; returns false on failure. */
inline bool AlignedMemAlloc(void** ptr, size_t size, size_t alignment) {
#if _MSC_VER
  *ptr = _aligned_malloc(size, alignment);
  if (*ptr == nullptr)
    return false;
#else
  // posix_memalign requires alignment to be a power of two multiple of sizeof(void*).
  int res = posix_memalign(ptr, alignment, size);
  if (res != 0)
    return false;
#endif
  return true;
}
/*! \brief Free memory obtained from AlignedMemAlloc using the matching
 *         platform deallocator. */
inline void AlignedMemFree(void* ptr) {
#if _MSC_VER
  _aligned_free(ptr);
#else
  free(ptr);
#endif
}
/*! \brief Integer ceiling division: ceil(a / b) for positive a and b.
 *         Note a + b - 1 must not overflow index_t. */
inline index_t div_round(const index_t a, const index_t b) {
  return (a + b - 1) / b;
}
inline bool IsPower2(size_t N) {
  // Exactly one bit set: N is non-zero and clearing its lowest set bit yields 0.
  return N != 0 && (N & (N - 1)) == 0;
}
inline size_t RoundToPower2(size_t N) {
  // Smallest power of two >= N (returns 1 for N == 0): compute
  // 2^floor(log2(N)) by repeated halving, then double once if it undershoots.
  size_t pow2 = 1;
  for (size_t v = N; v >= 2; v /= 2) {
    pow2 *= 2;
  }
  if (pow2 < N) {
    pow2 *= 2;
  }
  return pow2;
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
glutils.h | // -*- C++ -*-
#ifndef classutils_h_
#define classutils_h_
#include <unistd.h>
#include <sys/stat.h>
#include <cctype>
#include "glinerec.h"
#ifndef _OPENMP
#define OCRO_THREAD 0
#define OCRO_NTHREADS 1
#else
#include <omp.h>
#define OCRO_THREAD omp_get_thread_num()
#define OCRO_NTHREADS omp_get_num_threads()
#endif
namespace {
using namespace iulib;
using namespace narray_ops;
using namespace narray_io;
using namespace ocropus;
// remove trailing newline
// Truncate the string at its first newline (in place); no-op when absent.
void chomp(char *p) {
    for(;*p;p++) {
        if(*p=='\n') {
            *p = 0;
            break;
        }
    }
}
// Delete all whitespace characters from the string in place.
// The cast to unsigned char avoids undefined behavior when isspace()
// receives a negative char value (bytes >= 0x80 on signed-char platforms) —
// CERT STR37-C.
void remove_spaces(char *p) {
    char *q = p;
    while(*p) {
        if(!std::isspace((unsigned char)*p)) { *q++ = *p; }
        p++;
    }
    *q = 0;
}
// Draw a standard-normal sample via the Marsaglia polar method.
inline float rnormal() {
    float x,y,s;
    do {
        x = 2*drand48()-1;
        y = 2*drand48()-1;
        s = x*x+y*y;
        // Reject points outside the open unit disk: s==0 would make
        // log(s) below -inf (NaN result), and s>=1 is outside the
        // method's domain of validity.
    } while(s>=1.0 || s==0.0);
    return x*sqrt(-log(s)/s);
}
// Draw a normal sample with mean mu and standard deviation sigma.
inline float rnormal(float mu,float sigma) {
    return rnormal()*sigma+mu;
}
// Draw a log-normal sample with median x; r (> 1) controls the spread —
// the underlying normal has mean log(x) and sigma log(r).
inline float rlognormal(float x,float r) {
    CHECK(r>1.0);
    float n = rnormal(log(x),log(r));
    float result = exp(n);
    CHECK(!isnan(result));
    return result;
}
// Fill every element of v with an independent uniform deviate in [lo,hi).
void fill_random(floatarray &v,float lo,float hi) {
    int n = v.length1d();
    for(int i=0;i<n;i++)
        v.at1d(i) = lo + (hi-lo)*drand48();
}
// Accumulate statistics (count, mean, stddev, min, max) for a named
// variable and print a one-line summary to stderr -- at the latest when
// the Report is destroyed.
struct Report {
    const char *name;   // label for the printout; pointer must outlive us
    double count,sum,sum2,lo,hi,last_count;
    Report(const char *s)
        : name(s), count(0), sum(0), sum2(0),
          lo(1e300), hi(-1e300), last_count(0) {}
    ~Report() {
        report();
    }
    // Print stats, but only if new samples arrived since the last report.
    void report() {
        if(count>last_count) {
            double mean = sum/count;
            fprintf(stderr,"REPORT %s %g mean %g stddev %g lo %g hi %g\n",name,
                    count,mean,sqrt(sum2/count-mean*mean),lo,hi);
            last_count = count;
        }
    }
    // Fold one sample into the statistics; returns the sample unchanged.
    template <class T>
    T operator+=(T value) {
        count++;
        sum += value;
        sum2 += value*value;
        if(value<lo) lo = value;
        if(value>hi) hi = value;
        return value;
    }
};
// True iff v contains no NaN entries.
inline bool valid(floatarray &v) {
    int n = v.length1d();
    for(int i=0;i<n;i++) {
        if(isnan(v.at1d(i))) return false;
    }
    return true;
}
// Print n levels of four-space indentation.
void indent(int n) {
    for(int k=0;k<n;k++) printf("    ");
}
// Copy the region of `in` covered by rectangle b into `out`, resizing
// `out` to b's dimensions; out-of-image pixels come from bat()'s default.
template <class T,class S>
void get_rectangle(narray<T> &out,narray<S> &in,rectangle b) {
    out.resize(b.width(),b.height());
    int w = out.dim(0), h = out.dim(1);
    for(int x=0;x<w;x++)
        for(int y=0;y<h;y++)
            out(x,y) = bat(in,b.x0+x,b.y0+y,0);
}
// Estimate the leave-one-out nearest-neighbor classification error for a
// particular classification problem: the fraction of rows of `data`
// (one sample per row) whose nearest other row, by squared Euclidean
// distance, carries a different label in `classes`.
double nn_error(floatarray &data,intarray &classes) {
int total = 0;
int n = data.dim(0);
floatarray dists(n);
for(int i=0;i<n;i++) {
floatarray u;
rowget(u,data,i);
// distances from sample i to all samples, computed in parallel
#pragma omp parallel for
for(int j=0;j<n;j++) {
if(i==j) { dists(j) = 1e30; continue; } // exclude the sample itself
floatarray v;
rowget(v,data,j);
dists(j) = dist2squared(u,v);
}
int index = argmin(dists);
if(classes(index)!=classes(i)) total++;
}
return total/double(data.dim(0));
}
// Given class labels and matching costs, estimate the posterior
// probability of each class over the k lowest-cost neighbors (by counting).
void knn_posterior(floatarray &posterior,intarray &classes,floatarray &costs,int k) {
// keep the k lowest-cost indices (NBest keeps maxima, so negate costs)
NBest nbest(k);
for(int i=0;i<costs.length();i++)
nbest.add(i,-costs(i));
// one histogram bin per class label
posterior.resize(max(classes)+1);
posterior.fill(0);
for(int i=0;i<nbest.length();i++) {
int index = nbest[i];
posterior(classes(index))++;
}
// normalize counts into probabilities
posterior /= sum(posterior);
}
// k-nearest-neighbor classification: the class with the highest
// posterior estimated from the k lowest-cost neighbors.
int knn_classify(intarray &classes,floatarray &costs,int k) {
    floatarray posterior;
    knn_posterior(posterior,classes,costs,k);
    return argmax(posterior);
}
// Debugging printout of (at most) the first n entries of an int array
// (rank 1 or 2); longer dimensions are elided with "...".
// FIX: the rank-2 inner loop tested and incremented `i` instead of `j`
// (and indexed a.dim(j)), corrupting the outer loop counter and never
// advancing j; it now iterates j over the second dimension.
void dprint(intarray &a,int n) {
    if(a.rank()==1) {
        printf("[");
        int i;
        for(i=0;i<min(n,a.dim(0));i++) {
            if(i>0) printf(" ");
            printf("%d",a(i));
        }
        if(i<a.dim(0)) printf("...");
        printf("]");
    } else if(a.rank()==2) {
        for(int i=0;i<min(n,a.dim(0));i++) {
            printf("%4d [",i);
            int j;
            for(j=0;j<min(n,a.dim(1));j++) {
                if(j>0) printf(" ");
                printf("%d",a(i,j));
            }
            if(j<a.dim(1)) printf("...");
            printf("]");
        }
    } else throw "unimplemented";
}
// Debugging printout of (at most) the first n entries of a float array
// (rank 1 or 2); longer dimensions are elided with "...".
// FIX: same i/j loop bug as the intarray overload -- the rank-2 inner
// loop tested/incremented `i` and indexed a.dim(j); now iterates j.
void dprint(floatarray &a,int n) {
    if(a.rank()==1) {
        printf("[");
        int i;
        for(i=0;i<min(n,a.dim(0));i++) {
            if(i>0) printf(" ");
            printf("%g",a(i));
        }
        if(i<a.dim(0)) printf("...");
        printf("]");
    } else if(a.rank()==2) {
        for(int i=0;i<min(n,a.dim(0));i++) {
            printf("%4d [",i);
            int j;
            for(j=0;j<min(n,a.dim(1));j++) {
                if(j>0) printf(" ");
                printf("%g",a(i,j));
            }
            if(j<a.dim(1)) printf("...");
            printf("]");
        }
    } else throw "unimplemented";
}
// Labeled debugging printout: "[where] label <array contents>\n".
void dprint(const char *where,const char *label,intarray &a,int n) {
// FIXME
printf("[%s] %s ",where,label);
dprint(a,n);
printf("\n");
}
// Labeled debugging printout (float overload): "[where] label <array>\n".
void dprint(const char *where,const char *label,floatarray &a,int n) {
// FIXME
printf("[%s] %s ",where,label);
dprint(a,n);
printf("\n");
}
// Build a histogram of the (assumed non-negative) values in a, then
// print it with dprint.
void print_hist(intarray &a) {
    intarray hist(max(a)+1);
    hist.fill(0);
    int n = a.length();
    for(int i=0;i<n;i++)
        hist(a(i))++;
    dprint(hist,hist.length());
}
// Histogram the values in a and print them, rendering values above 32
// as characters and control codes numerically.
void print_charhist(intarray &a) {
    intarray hist(max(a)+1);
    hist.fill(0);
    int n = a.length();
    for(int i=0;i<n;i++)
        hist(a(i))++;
    printf("[charhist");
    for(int c=0;c<hist.length();c++) {
        int freq = hist(c);
        if(freq==0) continue;
        if(c<=32)
            printf(" %d:%d",c,freq);
        else
            printf(" '%c':%d",c,freq);
    }
    printf("]\n");
}
// Randomly split the rows of (vs,cs) into (vs1,cs1) -- a fraction
// `frac` of the data -- and (vs2,cs2), the remainder.
// FIX: the random permutation `selection` was computed but never used,
// so the "random" split was actually a deterministic prefix/suffix
// split; rows are now picked through the permutation.
void split_data(floatarray &vs1,intarray &cs1,
                floatarray &vs2,intarray &cs2,
                floatarray &vs,intarray &cs,
                float frac) {
    int n = vs.dim(0), d = vs.dim(1);
    intarray selection;
    rpermutation(selection,n);
    int split = int(n*frac);
    vs1.resize(split,d);
    cs1.resize(split);
    for(int i=0;i<split;i++) {
        int r = selection(i);
        for(int j=0;j<d;j++) vs1(i,j) = vs(r,j);
        cs1(i) = cs(r);
    }
    vs2.resize(n-split,d);
    cs2.resize(n-split);
    for(int i=0;i<n-split;i++) {
        int r = selection(i+split);
        for(int j=0;j<d;j++) vs2(i,j) = vs(r,j);
        cs2(i) = cs(r);
    }
}
// Split the labels in cs into two views: cs1 keeps a random fraction
// `frac` of the labels, cs2 keeps the rest; suppressed entries are -1.
void split_classes(intarray &cs1,intarray &cs2,intarray &cs,float frac) {
    CHECK(frac>1e-6 && frac<1.0);
    int n = cs.length();
    cs1.copy(cs);
    cs2.copy(cs);
    intarray selection;
    rpermutation(selection,n);
    int split = int(n*frac);
    for(int i=0;i<n;i++) {
        if(i<split) cs2(selection(i)) = -1;
        else cs1(selection(i)) = -1;
    }
}
// Randomly split `samples` into a test set (fraction frac) and a
// training set (the remainder).
// FIX: the shuffled copy `a` was built but never read back -- elements
// were pushed from the unshuffled `samples`, making the split
// deterministic; push from `a` instead.
void split_samples(intarray &training,intarray &testing,intarray &samples,float frac=0.1) {
    intarray a;
    a = samples;
    shuffle(a);
    training.clear();
    testing.clear();
    int split = int(frac*a.length());
    for(int i=0;i<split;i++)
        testing.push(a(i));
    for(int i=split;i<a.length();i++)
        training.push(a(i));
    ASSERT(training.length()+testing.length()==samples.length());
}
// Given a rank-3 array (e.g. an MNIST image list), extract slice
// data(which,:,:) as a 2D image, reversing the second axis.
void get_img(floatarray &image,floatarray &data,int which) {
    int h = data.dim(2);
    image.resize(h,data.dim(1));
    for(int x=0;x<image.dim(0);x++)
        for(int y=0;y<image.dim(1);y++)
            image(x,y) = data(which,h-y-1,x);
}
// Byte-array overload: extract slice data(which,:,:) as a 2D image,
// reversing the second axis.
void get_img(bytearray &image,bytearray &data,int which) {
    int h = data.dim(2);
    image.resize(h,data.dim(1));
    for(int x=0;x<image.dim(0);x++)
        for(int y=0;y<image.dim(1);y++)
            image(x,y) = data(which,h-y-1,x);
}
// Flatten slice data(which,:,:) of a rank-3 array into a 1D vector,
// reversing the second axis (same layout as get_img, column-major-ish).
void get_vec(floatarray &v,floatarray &data,int which) {
    CHECK(data.rank()==3);
    int h = data.dim(2);
    v.resize(data.dim(1)*h);
    for(int x=0;x<data.dim(1);x++)
        for(int y=0;y<h;y++)
            v(x*h+y) = data(which,h-y-1,x);
}
// Euclidean (L2) norm of v, accumulated in double precision.
inline float norm(floatarray &v) {
    double acc = 0.0;
    int n = v.length1d();
    for(int i=0;i<n;i++) {
        float x = v.at1d(i);
        acc += x*x;
    }
    return sqrt(acc);
}
void rowmean(floatarray &mean,floatarray &data) {
mean.resize(data.dim(1));
mean.fill(0);
for(int i=0;i<data.dim(0);i++) {
for(int j=0;j<data.dim(1);j++)
mean(j) += data(i,j);
}
mean /= data.dim(0);
}
void rownormalize1(floatarray &data) {
for(int i=0;i<data.dim(0);i++) {
double total = 0.0;
for(int j=0;j<data.dim(1);j++)
total += data(i,j);
for(int j=0;j<data.dim(1);j++)
data(i,j) /= total;
}
}
void rownormalize2(floatarray &data) {
for(int i=0;i<data.dim(0);i++) {
double total = 0.0;
for(int j=0;j<data.dim(1);j++)
total += sqr(data(i,j));
total = sqrt(total);
for(int j=0;j<data.dim(1);j++)
data(i,j) /= total;
}
}
struct GaussModel {
double count,sx,sx2;
GaussModel() {
count = 0;
sx = 0;
sx2 = 0;
}
void operator+=(double x) {
count++;
sx += x;
sx2 += x*x;
}
double cost(double x) {
double mean = sx/count;
double var = sx2/count - sqr(mean);
return sqr(x-mean)/var;
}
};
struct GaussDiagModel {
float count;
floatarray sx,sx2;
GaussDiagModel() {
}
void clear(int n) {
count = 0;
sx.resize(n);
sx.fill(0);
sx2.resize(n);
sx2.fill(0);
}
void operator+=(floatarray &v) {
if(sx.length()==0) clear(v.length());
count++;
sx += v;
for(int i=0;i<v.length();i++)
sx2(i) += sqr(v(i));
}
float cost(floatarray &v) {
if(sx.length()==0) return 1e30;
float total = 0.0;
for(int i=0;i<v.length();i++) {
double mean = sx(i)/count;
double var = sx2(i)/count - sqr(mean);
if(var<1e-6) var = mean;
if(var<1e-6) var = 1.0;
total += sqr(v(i)-mean)/var;
}
CHECK(total>=0);
return total;
}
void save(FILE *stream) {
scalar_write(stream,count);
narray_write(stream,sx);
narray_write(stream,sx2);
}
void load(FILE *stream) {
scalar_read(stream,count);
narray_read(stream,sx);
narray_read(stream,sx2);
}
};
// Truncate float x to int and clamp into [lo, hi-1].
// NOTE(review): the low test compares against 0, not lo -- values in
// [0, lo) pass through unclamped; preserved as-is, confirm intent.
inline int fclamp(float x,int lo,int hi) {
    int v = int(x);
    if(v<0) return lo;
    if(v>=hi) return hi-1;
    return v;
}
// Bounding box of all pixels strictly brighter than the image minimum.
// Returns rectangle(x0,y0,x1+1,y1+1); degenerate if the image is constant.
template <class T>
rectangle bbox(narray<T> &in) {
    int w = in.dim(0), h = in.dim(1);
    int threshold = min(in);
    int x0=w, y0=h, x1=0, y1=0;
    for(int x=0;x<w;x++) {
        for(int y=0;y<h;y++) {
            if(in(x,y)<=threshold) continue;
            if(x<x0) x0 = x;
            if(x>x1) x1 = x;
            if(y<y0) y0 = y;
            if(y>y1) y1 = y;
        }
    }
    return rectangle(x0,y0,x1+1,y1+1);
}
void fit_into_box(bytearray &out,bytearray &in,rectangle b,int background=0) {
int w = in.dim(0), h = in.dim(1);
float xscale = float(b.width())/out.dim(0);
float yscale = float(b.height())/out.dim(1);
float scale = max(xscale,yscale);
float dx = (w-scale*out.dim(0))/2;
float dy = (h-scale*out.dim(1))/2;
out.fill(background);
for(int i=0;i<out.dim(0);i++) {
for(int j=0;j<out.dim(1);j++) {
float x = scale*i+dx;
float y = scale*j+dy;
if(x<0||x>w) continue;
if(y<0||y>h) continue;
out(i,j) = bilin(in,x,y);
}
}
}
// Copy the subimage of `in` covered by rectangle b into `out`, filling
// pixels that fall outside `in` with `background`.
// FIX: the inner loop ran j over the *width* instead of the height, and
// the bounds checks compared source coordinates against the output size
// (w,h) rather than the input image size (in.dim(0), in.dim(1)).
void extract_box(bytearray &out,bytearray &in,rectangle b,int background=0) {
    int w=b.width(),h=b.height();
    out.resize(w,h);
    out.fill(background);
    for(int i=0;i<w;i++) {
        for(int j=0;j<h;j++) {
            int x = i+b.x0;
            int y = j+b.y0;
            if(x<0||x>=in.dim(0)) continue;
            if(y<0||y>=in.dim(1)) continue;
            out(i,j) = in(x,y);
        }
    }
}
// Number of entries of v whose absolute value exceeds eps.
template <class T>
int count(narray<T> &v,float eps=1e-6) {
    int total = 0;
    int n = v.length1d();
    for(int i=0;i<n;i++) {
        if(fabs(v.at1d(i))>eps) total++;
    }
    return total;
}
// Intensity-weighted centroid of image a (undefined for an all-zero image).
point centroid(bytearray &a) {
    double sx=0, sy=0, total=0;
    for(int x=0;x<a.dim(0);x++) {
        for(int y=0;y<a.dim(1);y++) {
            double v = a(x,y);
            sx += v * x;
            sy += v * y;
            total += v;
        }
    }
    return point(sx/total,sy/total);
}
void center(bytearray &out,bytearray &in,int size=32,int background=0) {
rectangle b = bbox(in);
point c = centroid(in);
// in(c.x,c.y) = 96;
int rx = max(c.x-b.x0,b.x1-c.x+1);
int ry = max(c.y-b.y0,b.y1-c.y+1);
int r = max(rx,ry);
float scale = float(r)/(size/2);
scale = max(scale,1.0);
// should do some anti-aliasing on the input here
out.resize(size,size);
out.fill(background);
for(int i=0;i<out.dim(0);i++) {
for(int j=0;j<out.dim(1);j++) {
// vector from center of output
float ox = i-size/2;
float oy = j-size/2;
// vector from center of input
float ix = scale*ox;
float iy = scale*oy;
// converted back to input coordinates
int x = int(ix+c.x+0.5);
int y = int(iy+c.y+0.5);
if(x<0 || x>=in.dim(0)) continue;
if(y<0 || y>=in.dim(1)) continue;
out(i,j) = in(x,y);
}
}
}
void center(floatarray &out,bytearray &bin,int size=32,int background=0,int pad=5) {
rectangle b = bbox(bin);
point c = centroid(bin);
int rx = max(c.x-b.x0,b.x1-c.x+1);
int ry = max(c.y-b.y0,b.y1-c.y+1);
int r = max(rx,ry);
float scale = float(r)/(size/2);
scale = max(scale,1.0);
floatarray in;
in.copy(bin);
pad_by(in,pad,pad);
// gauss2d(in,scale,scale);
out.resize(size,size);
out.fill(background);
for(int i=0;i<out.dim(0);i++) {
for(int j=0;j<out.dim(1);j++) {
// vector from center of output
float ox = i-size/2;
float oy = j-size/2;
// vector from center of input
float ix = scale*ox;
float iy = scale*oy;
// converted back to input coordinates
float x = ix+c.x+0.5;
float y = iy+c.y+0.5;
out(i,j) = bilin(in,x+pad,y+pad);
}
}
}
void debug_baseline(bytearray &baseline,float slope,float intercept,float xheight,bytearray &image) {
baseline.copy(image);
int h = baseline.dim(1);
for(int x=0;x<baseline.dim(0);x++) {
int y = fclamp(x*slope+intercept,0,baseline.dim(1)-1);
baseline(x,y) = 128;
if(y+1<h) baseline(x,y+1) = 128;
if(y+xheight<h) baseline(x,int(y+xheight)) = 128;
if(y+xheight+1<h) baseline(x,int(y+xheight)+1) = 128;
}
}
void show_baseline(float slope,float intercept,float xheight,bytearray &image,const char *where="") {
bytearray baseline;
debug_baseline(baseline,slope,intercept,xheight,image);
dshow(baseline,where);
}
// Render a character segmentation as a bitmap: matching pixels are set
// to 255; other pixels keep whatever makelike() left in them.
void segmentation_as_bitmap(bytearray &image,intarray &cseg) {
image.makelike(cseg);
for(int i=0;i<image.length1d();i++) {
int value = cseg.at1d(i);
// NOTE(review): `value==0` is redundant (0 < 0xffffff already holds);
// possibly `value!=0 && value<0xffffff` was intended -- confirm with callers.
if(value==0||value<0xffffff) image.at1d(i) = 255;
}
}
void dump_fst(const char *file,IGenericFst &fst) {
stdio stream(file,"w");
intarray ids,targets,outputs;
floatarray costs;
intarray index(fst.nStates());
floatarray mincost(fst.nStates());
fprintf(stream,"digraph fst {\n");
fprintf(stream,"rankdir = LR;\n");
for(int i=0;i<fst.nStates();i++) {
fst.arcs(ids,targets,outputs,costs,i);
index.fill(-1);
mincost.fill(INFINITY);
for(int j=0;j<targets.length();j++) {
int target = targets[j];
if(mincost[target]>costs[j]) {
index[target] = j;
mincost[target] = costs[j];
}
}
for(int j=0;j<index.length();j++) {
char buf[100]; // costs[k]
int k = index[j];
if(k<0) continue;
sprintf(buf,"%d",int(costs[k]));
// for(int r=0;r<log(costs[k])/log(10);r++) strcat(buf,"*");
if(outputs[k]<32||outputs[k]>=127) {
fprintf(stream," %d -> %d [label=\"%d %s\"];\n",
i,targets[k],outputs[k],buf);
} else if(outputs[k]==32) {
fprintf(stream," %d -> %d [label=\"_ %s\"];\n",
i,targets[k],buf);
} else {
fprintf(stream," %d -> %d [label=\"%c %s\"];\n",
i,targets[k],outputs[k],buf);
}
}
}
fprintf(stream,"}\n");
}
void fst_to_image(intarray &image,IGenericFst &fst) {
dump_fst("/tmp/_temp_.fst",fst);
system("dot -Tpng /tmp/_temp_.fst > /tmp/_temp_.png");
system("convert -geometry '50%x50%' -depth 8 /tmp/_temp_.png /tmp/_temp2_.png");
read_image_packed(image,"/tmp/_temp2_.png");
}
void print_fst_simple(IGenericFst &fst) {
printf("void construct_fst(IGenericFst &fst) {\n");
printf(" int nstates = %d;\n",fst.nStates());
printf(" for(int i=0;i<nstates;i++) ASSERT(i==fst.newState();)\n");
printf(" fst.setStart(%d);\n",fst.getStart());
for(int i=0;i<fst.nStates();i++) {
intarray ids,targets,outputs;
floatarray costs;
if(fst.getAcceptCost(i)<1e30)
printf(" fst.setAccept(%d,%g);\n",
i,fst.getAcceptCost(i));
fst.arcs(ids,targets,outputs,costs,i);
for(int j=0;j<outputs.length();j++)
printf(" fst.addTransition(%d,%d,%d/*%c*/,%g,%d);\n",
i,targets(j),
outputs(j),(outputs(j)<32?'?':outputs(j)),
costs(j),
outputs(j)); // FIXME
}
printf("}\n");
}
struct DebugFst : IGenericFst {
int state;
DebugFst() { state = 0; }
const char *description() { return "DebugFst"; }
int special(const char *) { throw "unimplemented"; }
void load(const char *) { throw "unimplemented"; }
void save(const char *) { throw "unimplemented"; }
void clear() {}
int newState() { return state++; }
void addTransition(int from,int to,int output,float cost,int input) {
fprintf(stderr,"%4d -> %4d [%d]'%c' %g [%d]'%c'\n",
from,to,
output,(output<32?'?':output),
cost,
input,(input<32?'?':input));
}
void setStart(int node) {
fprintf(stderr,"START %d\n",node);
}
void setAccept(int node,float cost) {
fprintf(stderr,"ACCEPT %d %g\n",node,cost);
}
void bestpath(ustrg &result) {
result.clear();
result.utf8Decode("debug-fst-string",16);
}
};
}
extern void init_classutils();
#endif
|
ligra.h | // This code is part of the project "Ligra: A Lightweight Graph Processing
// Framework for Shared Memory", presented at Principles and Practice of
// Parallel Programming, 2013.
// Copyright (c) 2013 Julian Shun and Guy Blelloch
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights (to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef LIGRA_H
#define LIGRA_H
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <cstring>
#include <string>
#include <algorithm>
#include <cassert>
#include "parallel.h"
#include "gettime.h"
#include "timer.h" //timer from GAP
#include "utils.h"
#include "vertex.h"
#include "compressedVertex.h"
#include "vertexSubset.h"
#include "graph.h"
#include "IO.h"
#include "parseCommandLine.h"
#include "gettime.h"
#include "index_map.h"
#include "edgeMap_utils.h"
using namespace std;
//*****START FRAMEWORK*****
// Bit flags controlling edgeMap/edgeMapFilter behavior.
typedef uint32_t flags;
const flags no_output = 1; // do not materialize an output vertexSubset
const flags pack_edges = 2; // edgeMapFilter: physically pack pruned adjacency lists
const flags sparse_no_filter = 4; // use the no-filter sparse traversal path
const flags dense_forward = 8; // dense traversal over out-edges (push direction)
const flags dense_parallel = 16; // forwarded to decodeInNghBreakEarly in dense traversals
const flags remove_duplicates = 32; // deduplicate targets in sparse output
inline bool should_output(const flags& fl) { return !(fl & no_output); }
const int dynChunkSz = 64; //chunk size for openmp's dynamic scheduling
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapDense(graph<vertex> GA, VS& vertexSubset, F &f, const flags fl) {
using D = tuple<bool, data>;
long n = GA.n;
vertex *G = GA.V;
if (should_output(fl)) {
D* next = newA(D, n);
auto g = get_emdense_gen<data>(next);
#pragma omp parallel for schedule (dynamic, dynChunkSz)
for (long v=0; v<n; v++) {
std::get<0>(next[v]) = 0;
if (f.cond(v)) {
G[v].decodeInNghBreakEarly(v, vertexSubset, f, g, fl & dense_parallel);
}
}
return vertexSubsetData<data>(n, next);
} else {
auto g = get_emdense_nooutput_gen<data>();
#pragma omp parallel for schedule (dynamic, dynChunkSz)
for (long v=0; v<n; v++) {
if (f.cond(v)) {
G[v].decodeInNghBreakEarly(v, vertexSubset, f, g, fl & dense_parallel);
}
}
return vertexSubsetData<data>(n);
}
}
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapDenseForward(graph<vertex> GA, VS& vertexSubset, F &f, const flags fl) {
using D = tuple<bool, data>;
long n = GA.n;
vertex *G = GA.V;
if (should_output(fl)) {
D* next = newA(D, n);
auto g = get_emdense_forward_gen<data>(next);
parallel_for(long i=0;i<n;i++) { std::get<0>(next[i]) = 0; }
#pragma omp parallel for schedule (dynamic, dynChunkSz)
for (long i=0; i<n; i++) {
if (vertexSubset.isIn(i)) {
G[i].decodeOutNgh(i, f, g);
}
}
return vertexSubsetData<data>(n, next);
} else {
auto g = get_emdense_forward_nooutput_gen<data>();
#pragma omp parallel for schedule (dynamic, dynChunkSz)
for (long i=0; i<n; i++) {
if (vertexSubset.isIn(i)) {
G[i].decodeOutNgh(i, f, g);
}
}
return vertexSubsetData<data>(n);
}
}
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapSparse(graph<vertex>& GA, vertex* frontierVertices, VS& indices,
uintT* degrees, uintT m, F &f, const flags fl) {
using S = tuple<uintE, data>;
long n = indices.n;
S* outEdges;
long outEdgeCount = 0;
if (should_output(fl)) {
uintT* offsets = degrees;
outEdgeCount = sequence::plusScan(offsets, offsets, m);
outEdges = newA(S, outEdgeCount);
auto g = get_emsparse_gen<data>(outEdges);
#pragma omp parallel for schedule (dynamic, dynChunkSz)
for (size_t i = 0; i < m; i++) {
uintT v = indices.vtx(i), o = offsets[i];
vertex vert = frontierVertices[i];
vert.decodeOutNghSparse(v, o, f, g);
}
} else {
auto g = get_emsparse_nooutput_gen<data>();
#pragma omp parallel for schedule (dynamic, dynChunkSz)
for (size_t i = 0; i < m; i++) {
uintT v = indices.vtx(i);
vertex vert = frontierVertices[i];
vert.decodeOutNghSparse(v, 0, f, g);
}
}
if (should_output(fl)) {
S* nextIndices = newA(S, outEdgeCount);
if (fl & remove_duplicates) {
if (GA.flags == NULL) {
GA.flags = newA(uintE, n);
parallel_for(long i=0;i<n;i++) { GA.flags[i]=UINT_E_MAX; }
}
auto get_key = [&] (size_t i) -> uintE& { return std::get<0>(outEdges[i]); };
remDuplicates(get_key, GA.flags, outEdgeCount, n);
}
auto p = [] (tuple<uintE, data>& v) { return std::get<0>(v) != UINT_E_MAX; };
size_t nextM = pbbs::filterf(outEdges, nextIndices, outEdgeCount, p);
free(outEdges);
return vertexSubsetData<data>(n, nextM, nextIndices);
} else {
return vertexSubsetData<data>(n);
}
}
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapSparse_no_filter(graph<vertex>& GA,
vertex* frontierVertices, VS& indices, uintT* offsets, uintT m, F& f,
const flags fl) {
using S = tuple<uintE, data>;
long n = indices.n;
long outEdgeCount = sequence::plusScan(offsets, offsets, m);
S* outEdges = newA(S, outEdgeCount);
auto g = get_emsparse_no_filter_gen<data>(outEdges);
// binary-search into scan to map workers->chunks
size_t b_size = 10000;
size_t n_blocks = nblocks(outEdgeCount, b_size);
uintE* cts = newA(uintE, n_blocks+1);
size_t* block_offs = newA(size_t, n_blocks+1);
auto offsets_m = make_in_imap<uintT>(m, [&] (size_t i) { return offsets[i]; });
auto lt = [] (const uintT& l, const uintT& r) { return l < r; };
parallel_for(size_t i=0; i<n_blocks; i++) {
size_t s_val = i*b_size;
block_offs[i] = pbbs::binary_search(offsets_m, s_val, lt);
}
block_offs[n_blocks] = m;
#pragma omp parallel for schedule (dynamic, dynChunkSz / 8)
for (size_t i=0; i<n_blocks; i++) {
if ((i == n_blocks-1) || block_offs[i] != block_offs[i+1]) {
// start and end are offsets in [m]
size_t start = block_offs[i];
size_t end = block_offs[i+1];
uintT start_o = offsets[start];
uintT k = start_o;
for (size_t j=start; j<end; j++) {
uintE v = indices.vtx(j);
size_t num_in = frontierVertices[j].decodeOutNghSparseSeq(v, k, f, g);
k += num_in;
}
cts[i] = (k - start_o);
} else {
cts[i] = 0;
}
}
long outSize = sequence::plusScan(cts, cts, n_blocks);
cts[n_blocks] = outSize;
S* out = newA(S, outSize);
parallel_for (size_t i=0; i<n_blocks; i++) {
if ((i == n_blocks-1) || block_offs[i] != block_offs[i+1]) {
size_t start = block_offs[i];
size_t start_o = offsets[start];
size_t out_off = cts[i];
size_t block_size = cts[i+1] - out_off;
for (size_t j=0; j<block_size; j++) {
out[out_off + j] = outEdges[start_o + j];
}
}
}
free(outEdges); free(cts); free(block_offs);
if (fl & remove_duplicates) {
if (GA.flags == NULL) {
GA.flags = newA(uintE, n);
parallel_for(size_t i=0;i<n;i++) { GA.flags[i]=UINT_E_MAX; }
}
auto get_key = [&] (size_t i) -> uintE& { return std::get<0>(out[i]); };
remDuplicates(get_key, GA.flags, outSize, n);
S* nextIndices = newA(S, outSize);
auto p = [] (tuple<uintE, data>& v) { return std::get<0>(v) != UINT_E_MAX; };
size_t nextM = pbbs::filterf(out, nextIndices, outSize, p);
free(out);
return vertexSubsetData<data>(n, nextM, nextIndices);
}
return vertexSubsetData<data>(n, outSize, out);
}
// Decides on sparse or dense base on number of nonzeros in the active vertices.
template <class data, class vertex, class VS, class F>
vertexSubsetData<data> edgeMapData(graph<vertex>& GA, VS &vs, F f,
intT threshold = -1, const flags& fl=0) {
long numVertices = GA.n, numEdges = GA.m, m = vs.numNonzeros();
if(threshold == -1) threshold = numEdges/20; //default threshold
vertex *G = GA.V;
if (numVertices != vs.numRows()) {
cout << "edgeMap: Sizes Don't match" << endl;
abort();
}
if (vs.size() == 0) return vertexSubsetData<data>(numVertices);
vs.toSparse();
uintT* degrees = newA(uintT, m);
vertex* frontierVertices = newA(vertex,m);
{parallel_for (size_t i=0; i < m; i++) {
uintE v_id = vs.vtx(i);
vertex v = G[v_id];
degrees[i] = v.getOutDegree();
frontierVertices[i] = v;
}}
uintT outDegrees = sequence::plusReduce(degrees, m);
if (outDegrees == 0) return vertexSubsetData<data>(numVertices);
if (m + outDegrees > threshold) {
vs.toDense();
free(degrees); free(frontierVertices);
return (fl & dense_forward) ?
edgeMapDenseForward<data, vertex, VS, F>(GA, vs, f, fl) :
edgeMapDense<data, vertex, VS, F>(GA, vs, f, fl);
} else {
auto vs_out =
(should_output(fl) && fl & sparse_no_filter) ? // only call snof when we output
edgeMapSparse_no_filter<data, vertex, VS, F>(GA, frontierVertices, vs, degrees, vs.numNonzeros(), f, fl) :
edgeMapSparse<data, vertex, VS, F>(GA, frontierVertices, vs, degrees, vs.numNonzeros(), f, fl);
free(degrees); free(frontierVertices);
return vs_out;
}
}
// Regular edgeMap, where no extra data is stored per vertex.
// Thin wrapper over edgeMapData with pbbs::empty as the per-vertex payload.
template <class vertex, class VS, class F>
vertexSubset edgeMap(graph<vertex> GA, VS& vs, F f,
intT threshold = -1, const flags& fl=0) {
return edgeMapData<pbbs::empty>(GA, vs, f, threshold, fl);
}
/* General function to print stats about frontier size */
template <class VS>
void frontierStats(VS& vs, long numVertices) {
  double percent = (double(vs.size()) / double(numVertices)) * 100;
  const char* mode = vs.dense() ? "PULL" : "PUSH";
  std::cout << mode << " iteration. Frontier size = " << percent << std::endl;
}
// Packs out the adjacency lists of all vertex in vs. A neighbor, ngh, is kept
// in the new adjacency list if p(ngh) is true.
// Weighted graphs are not yet supported, but this should be easy to do.
template <class vertex, class P>
vertexSubsetData<uintE> packEdges(graph<vertex>& GA, vertexSubset& vs, P& p, const flags& fl=0) {
using S = tuple<uintE, uintE>;
vs.toSparse();
vertex* G = GA.V; long m = vs.numNonzeros(); long n = vs.numRows();
if (vs.size() == 0) {
return vertexSubsetData<uintE>(n);
}
auto degrees = array_imap<uintT>(m);
granular_for(i, 0, m, (m > 2000), {
uintE v = vs.vtx(i);
degrees[i] = G[v].getOutDegree();
});
long outEdgeCount = pbbs::scan_add(degrees, degrees);
S* outV;
if (should_output(fl)) {
outV = newA(S, vs.size());
}
bool* bits = newA(bool, outEdgeCount);
uintE* tmp1 = newA(uintE, outEdgeCount);
uintE* tmp2 = newA(uintE, outEdgeCount);
if (should_output(fl)) {
parallel_for (size_t i=0; i<m; i++) {
uintE v = vs.vtx(i);
size_t offset = degrees[i];
auto bitsOff = &(bits[offset]); auto tmp1Off = &(tmp1[offset]);
auto tmp2Off = &(tmp2[offset]);
size_t ct = G[v].packOutNgh(v, p, bitsOff, tmp1Off, tmp2Off);
outV[i] = make_tuple(v, ct);
}
} else {
parallel_for (size_t i=0; i<m; i++) {
uintE v = vs.vtx(i);
size_t offset = degrees[i];
auto bitsOff = &(bits[offset]); auto tmp1Off = &(tmp1[offset]);
auto tmp2Off = &(tmp2[offset]);
size_t ct = G[v].packOutNgh(v, p, bitsOff, tmp1Off, tmp2Off);
}
}
free(bits); free(tmp1); free(tmp2);
if (should_output(fl)) {
return vertexSubsetData<uintE>(n, m, outV);
} else {
return vertexSubsetData<uintE>(n);
}
}
template <class vertex, class P>
vertexSubsetData<uintE> edgeMapFilter(graph<vertex>& GA, vertexSubset& vs, P& p, const flags& fl=0) {
vs.toSparse();
if (fl & pack_edges) {
return packEdges<vertex, P>(GA, vs, p, fl);
}
vertex* G = GA.V; long m = vs.numNonzeros(); long n = vs.numRows();
using S = tuple<uintE, uintE>;
if (vs.size() == 0) {
return vertexSubsetData<uintE>(n);
}
S* outV;
if (should_output(fl)) {
outV = newA(S, vs.size());
}
if (should_output(fl)) {
parallel_for (size_t i=0; i<m; i++) {
uintE v = vs.vtx(i);
size_t ct = G[v].countOutNgh(v, p);
outV[i] = make_tuple(v, ct);
}
} else {
parallel_for (size_t i=0; i<m; i++) {
uintE v = vs.vtx(i);
size_t ct = G[v].countOutNgh(v, p);
}
}
if (should_output(fl)) {
return vertexSubsetData<uintE>(n, m, outV);
} else {
return vertexSubsetData<uintE>(n);
}
}
//*****VERTEX FUNCTIONS*****
template <class F, class VS, typename std::enable_if<
!std::is_same<VS, vertexSubset>::value, int>::type=0 >
void vertexMap(VS& V, F f) {
size_t n = V.numRows(), m = V.numNonzeros();
if(V.dense()) {
parallel_for(long i=0;i<n;i++) {
if(V.isIn(i)) {
f(i, V.ithData(i));
}
}
} else {
parallel_for(long i=0;i<m;i++) {
f(V.vtx(i), V.vtxData(i));
}
}
}
template <class VS, class F, typename std::enable_if<
std::is_same<VS, vertexSubset>::value, int>::type=0 >
void vertexMap(VS& V, F f) {
size_t n = V.numRows(), m = V.numNonzeros();
if(V.dense()) {
parallel_for(long i=0;i<n;i++) {
if(V.isIn(i)) {
f(i);
}
}
} else {
parallel_for(long i=0;i<m;i++) {
f(V.vtx(i));
}
}
}
//Note: this is the version of vertexMap in which only a subset of the
//input vertexSubset is returned
template <class F>
vertexSubset vertexFilter(vertexSubset V, F filter) {
long n = V.numRows(), m = V.numNonzeros();
V.toDense();
bool* d_out = newA(bool,n);
{parallel_for(long i=0;i<n;i++) d_out[i] = 0;}
{parallel_for(long i=0;i<n;i++)
if(V.d[i]) d_out[i] = filter(i);}
return vertexSubset(n,d_out);
}
template <class F>
vertexSubset vertexFilter2(vertexSubset V, F filter) {
long n = V.numRows(), m = V.numNonzeros();
if (m == 0) {
return vertexSubset(n);
}
bool* bits = newA(bool, m);
V.toSparse();
{parallel_for(size_t i=0; i<m; i++) {
uintE v = V.vtx(i);
bits[i] = filter(v);
}}
auto v_imap = make_in_imap<uintE>(m, [&] (size_t i) { return V.vtx(i); });
auto bits_m = make_in_imap<bool>(m, [&] (size_t i) { return bits[i]; });
auto out = pbbs::pack(v_imap, bits_m);
out.alloc = false;
free(bits);
return vertexSubset(n, out.size(), out.s);
}
template <class data, class F>
vertexSubset vertexFilter2(vertexSubsetData<data> V, F filter) {
long n = V.numRows(), m = V.numNonzeros();
if (m == 0) {
return vertexSubset(n);
}
bool* bits = newA(bool, m);
V.toSparse();
parallel_for(size_t i=0; i<m; i++) {
auto t = V.vtxAndData(i);
bits[i] = filter(std::get<0>(t), std::get<1>(t));
}
auto v_imap = make_in_imap<uintE>(m, [&] (size_t i) { return V.vtx(i); });
auto bits_m = make_in_imap<bool>(m, [&] (size_t i) { return bits[i]; });
auto out = pbbs::pack(v_imap, bits_m);
out.alloc = false;
free(bits);
return vertexSubset(n, out.size(), out.s);
}
// Condition functor that accepts every vertex (always true).
inline bool cond_true (intT d) { (void)d; return true; }
template<class vertex>
void Compute(graph<vertex>&, commandLine, pvector<uintE> &new_ids);
int parallel_main(int argc, char* argv[]) {
commandLine P(argc,argv," [-s] <inFile>");
char* iFile = P.getArgument(0);
bool symmetric = P.getOptionValue("-s");
bool compressed = P.getOptionValue("-c");
bool binary = P.getOptionValue("-b");
bool mmap = P.getOptionValue("-m");
//cout << "mmap = " << mmap << endl;
bool isPageRank = (P.getOptionIntValue("-pagerank", -1) == 1);
/* preprocessing options : 0 - outdegsort, 1 - indegsort, anything else - no-preprocessing */
int preprocess = P.getOptionIntValue("-preprocess", -1);
long rounds = P.getOptionLongValue("-rounds",3);
if (compressed) {
assert(false); //preprocessing currently only supported for uncompressed graphs
#if 0
if (symmetric) {
graph<compressedSymmetricVertex> G =
readCompressedGraph<compressedSymmetricVertex>(iFile,symmetric,mmap); //symmetric graph
Compute(G,P);
for(int r=0;r<rounds;r++) {
startTime();
Compute(G,P);
nextTime("Running time");
}
G.del();
} else {
graph<compressedAsymmetricVertex> G =
readCompressedGraph<compressedAsymmetricVertex>(iFile,symmetric,mmap); //asymmetric graph
Compute(G,P);
if(G.transposed) G.transpose();
for(int r=0;r<rounds;r++) {
startTime();
Compute(G,P);
nextTime("Running time");
if(G.transposed) G.transpose();
}
G.del();
}
#endif
} else {
if (symmetric) {
graph<symmetricVertex> G =
readGraph<symmetricVertex>(iFile,compressed,symmetric,binary,mmap); //symmetric graph
pvector<uintE> new_ids(G.n, 0);
if (preprocess == 0 || preprocess == 1) {
if (computePackingFactor<symmetricVertex>(G, symmetric, (preprocess == 0), sizeof(double)) == true) {
/* This is a high packing factor graph - worth reordering the graph*/
graph<symmetricVertex> newG = preprocessGraph<symmetricVertex>(G, symmetric, (preprocess == 0), new_ids);
G.del();
Compute(newG,P,new_ids);
for(int r=0;r<rounds;r++) {
//startTime();
Compute(newG,P,new_ids);
//nextTime("Running time");
}
newG.del();
}
else {
/* This is a low packing factor graph - the limited benefits are unlikely to justify reordering costs*/
Compute(G,P,new_ids);
for(int r=0;r<rounds;r++) {
//startTime();
Compute(G,P,new_ids);
//nextTime("Running time");
}
G.del();
}
}
else {
Compute(G,P,new_ids);
for(int r=0;r<rounds;r++) {
//startTime();
Compute(G,P,new_ids);
//nextTime("Running time");
}
G.del();
}
} else {
graph<asymmetricVertex> G =
readGraph<asymmetricVertex>(iFile,compressed,symmetric,binary,mmap); //asymmetric graph
pvector<uintE> new_ids(G.n, 0);
if (preprocess == 0 || preprocess == 1) {
if (computePackingFactor<asymmetricVertex>(G, symmetric, (preprocess == 0), sizeof(double)) == true) {
/* This is a high packing factor graph - worth reordering the graph*/
graph<asymmetricVertex> newG = preprocessGraph<asymmetricVertex>(G, symmetric, (preprocess == 0), new_ids, isPageRank);
G.del();
Compute(newG,P,new_ids);
if(newG.transposed) newG.transpose();
for(int r=0;r<rounds;r++) {
//startTime();
Compute(newG,P,new_ids);
if(newG.transposed) newG.transpose();
//nextTime("Running time");
}
newG.del();
}
else {
/* This is a low packing factor graph - the limited benefits are unlikely to justify reordering costs*/
Compute(G,P,new_ids);
if(G.transposed) G.transpose();
for(int r=0;r<rounds;r++) {
//startTime();
Compute(G,P,new_ids);
if(G.transposed) G.transpose();
//nextTime("Running time");
}
G.del();
}
}
else {
Compute(G,P,new_ids);
if(G.transposed) G.transpose();
for(int r=0;r<rounds;r++) {
//startTime();
Compute(G,P,new_ids);
if(G.transposed) G.transpose();
//nextTime("Running time");
}
G.del();
}
}
}
}
#endif
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <dmlc/serializer.h>
#include <xgboost/base.h>
#include <xgboost/span.h>
#include <xgboost/host_device_vector.h>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace xgboost {
// forward declare dmatrix.
class DMatrix;
/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
  // Values are explicitly numbered and presumably part of the external C API
  // contract -- do not renumber (NOTE(review): confirm against c_api.h).
  kFloat32 = 1,
  kDouble = 2,
  kUInt32 = 3,
  kUInt64 = 4,
  kStr = 5
};
/*! \brief Kind of a feature: plain numeric or categorical. */
enum class FeatureType : uint8_t {
  kNumerical,
  kCategorical
};
/*!
 * \brief Meta information about dataset, always sit in memory.
 */
class MetaInfo {
 public:
  /*! \brief number of data fields serialized in the binary MetaInfo format */
  static constexpr uint64_t kNumField = 11;
  /*! \brief number of rows in the data */
  uint64_t num_row_{0};  // NOLINT
  /*! \brief number of columns in the data */
  uint64_t num_col_{0};  // NOLINT
  /*! \brief number of nonzero entries in the data */
  uint64_t num_nonzero_{0};  // NOLINT
  /*! \brief label of each instance */
  HostDeviceVector<bst_float> labels_;  // NOLINT
  /*!
   * \brief the index of begin and end of a group
   *  needed when the learning task is ranking.
   */
  std::vector<bst_group_t> group_ptr_;  // NOLINT
  /*! \brief weights of each instance, optional */
  HostDeviceVector<bst_float> weights_;  // NOLINT
  /*!
   * \brief initialized margins,
   * if specified, xgboost will start from this init margin
   * can be used to specify initial prediction to boost from.
   */
  HostDeviceVector<bst_float> base_margin_;  // NOLINT
  /*!
   * \brief lower bound of the label, to be used for survival analysis (censored regression)
   */
  HostDeviceVector<bst_float> labels_lower_bound_;  // NOLINT
  /*!
   * \brief upper bound of the label, to be used for survival analysis (censored regression)
   */
  HostDeviceVector<bst_float> labels_upper_bound_;  // NOLINT
  /*!
   * \brief Name of type for each feature provided by users. Eg. "int"/"float"/"i"/"q"
   */
  std::vector<std::string> feature_type_names;
  /*!
   * \brief Name for each feature.
   */
  std::vector<std::string> feature_names;
  /*
   * \brief Type of each feature. Automatically set when feature_type_names is specified.
   */
  HostDeviceVector<FeatureType> feature_types;
  /*
   * \brief Weight of each feature, used to define the probability of each feature being
   * selected when using column sampling.
   * NOTE(review): identifier is spelled "weigths" (sic); renaming would break callers,
   * so the historical typo is kept.
   */
  HostDeviceVector<float> feature_weigths;
  /*! \brief default constructor */
  MetaInfo()  = default;
  MetaInfo(MetaInfo&& that) = default;
  MetaInfo& operator=(MetaInfo&& that) = default;
  // Copy-assignment is deleted: deep-copying the host/device vectors is expensive
  // and must be requested explicitly.
  MetaInfo& operator=(MetaInfo const& that) = delete;
  /*!
   * \brief Validate all metainfo.
   */
  void Validate(int32_t device) const;
  // Build a MetaInfo containing only the rows listed in ridxs.
  MetaInfo Slice(common::Span<int32_t const> ridxs) const;
  /*!
   * \brief Get weight of each instances.
   * \param i Instance index.
   * \return The weight.
   */
  inline bst_float GetWeight(size_t i) const {
    // An empty weight vector means every instance has unit weight.
    return weights_.Size() != 0 ?  weights_.HostVector()[i] : 1.0f;
  }
  /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
  inline const std::vector<size_t>& LabelAbsSort() const {
    // Cached result is reused as long as the label count is unchanged.
    if (label_order_cache_.size() == labels_.Size()) {
      return label_order_cache_;
    }
    label_order_cache_.resize(labels_.Size());
    std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
    const auto& l = labels_.HostVector();
    XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
              [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
    return label_order_cache_;
  }
  /*! \brief clear all the information */
  void Clear();
  /*!
   * \brief Load the Meta info from binary stream.
   * \param fi The input stream
   */
  void LoadBinary(dmlc::Stream* fi);
  /*!
   * \brief Save the Meta info to binary stream
   * \param fo The output stream.
   */
  void SaveBinary(dmlc::Stream* fo) const;
  /*!
   * \brief Set information in the meta info.
   * \param key The key of the information.
   * \param dptr The data pointer of the source array.
   * \param dtype The type of the source data.
   * \param num Number of elements in the source array.
   */
  void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
  /*!
   * \brief Set information in the meta info with array interface.
   * \param key The key of the information.
   * \param interface_str String representation of json format array interface.
   *
   *   [ column_0, column_1, ... column_n ]
   *
   * Right now only 1 column is permitted.
   */
  void SetInfo(const char* key, std::string const& interface_str);
  // Copy the requested field out through out_dptr; out_len receives the element count.
  void GetInfo(char const* key, bst_ulong* out_len, DataType dtype,
               const void** out_dptr) const;
  // Set/get string metadata such as feature names and feature types.
  void SetFeatureInfo(const char *key, const char **info, const bst_ulong size);
  void GetFeatureInfo(const char *field, std::vector<std::string>* out_str_vecs) const;
  /*
   * \brief Extend with other MetaInfo.
   *
   * \param that The other MetaInfo object.
   *
   * \param accumulate_rows Whether rows need to be accumulated in this function.  If
   *        client code knows number of rows in advance, set this parameter to false.
   */
  void Extend(MetaInfo const& that, bool accumulate_rows);
 private:
  /*! \brief argsort of labels, lazily filled by LabelAbsSort() */
  mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
  /*! \brief feature index */
  bst_feature_t index;
  /*! \brief feature value */
  bst_float fvalue;
  /*! \brief default constructor; fields are intentionally left uninitialized (POD) */
  Entry() = default;
  /*!
   * \brief constructor with index and value
   * \param index The feature or row index.
   * \param fvalue The feature value.
   */
  XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
  /*! \brief ordering predicate on feature value (ascending), used when sorting entries */
  inline static bool CmpValue(const Entry& a, const Entry& b) {
    return a.fvalue < b.fvalue;
  }
  // Exact equality on both fields (float value compared with ==).
  inline bool operator==(const Entry& other) const {
    return (this->index == other.index && this->fvalue == other.fvalue);
  }
};
/*!
 * \brief Parameters for constructing batches.
 */
struct BatchParam {
  /*! \brief The GPU device to use; -1 means no device selected. */
  int gpu_id {-1};
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin{0};
  /*! \brief Page size for external memory mode. */
  size_t gpu_page_size {0};
  /*!
   * \brief Default constructor.
   *
   * Fix: previously `gpu_id` and `gpu_page_size` had no in-class initializer,
   * so a default-constructed BatchParam held indeterminate values and
   * comparing it with operator!= was undefined behavior.  The in-class
   * initializers above make default construction well-defined while keeping
   * the parameterized constructor's behavior unchanged.
   */
  BatchParam() = default;
  BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0)
      : gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {}
  /*! \brief Inequality: parameters differ if any field differs. */
  inline bool operator!=(const BatchParam& other) const {
    return gpu_id != other.gpu_id || max_bin != other.max_bin ||
           gpu_page_size != other.gpu_page_size;
  }
};
// Non-owning host-side view over a CSR page: `offset` delimits row segments
// inside the flat `data` array.
struct HostSparsePageView {
  using Inst = common::Span<Entry const>;
  common::Span<bst_row_t const> offset;
  common::Span<Entry const> data;
  // View over the entries of row i.
  Inst operator[](size_t i) const {
    auto const* base = offset.data();
    auto row_begin = *(base + i);
    auto row_end = *(base + i + 1);
    return {data.data() + row_begin,
            static_cast<Inst::index_type>(row_end - row_begin)};
  }
  // Number of rows; an empty offset array means an empty page.
  size_t Size() const {
    if (offset.size() == 0) {
      return 0;
    }
    return offset.size() - 1;
  }
};
/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
 public:
  // Offset for each row (CSR row pointer array; size = rows + 1).
  HostDeviceVector<bst_row_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;
  // Row id of the first row in this page; non-zero for pages after the first
  // in external-memory mode.
  size_t base_rowid {0};
  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;
  /*! \brief get i-th row from the batch */
  inline Inst operator[](size_t i) const {
    const auto& data_vec = data.HostVector();
    const auto& offset_vec = offset.HostVector();
    size_t size = offset_vec[i + 1] - offset_vec[i];
    return {data_vec.data() + offset_vec[i],
            static_cast<Inst::index_type>(size)};
  }
  // Lightweight host-side CSR view over this page (no copy).
  HostSparsePageView GetView() const {
    return {offset.ConstHostSpan(), data.ConstHostSpan()};
  }
  /*! \brief constructor; starts as an empty page */
  SparsePage() {
    this->Clear();
  }
  /*! \return Number of instances in the page. */
  inline size_t Size() const {
    return offset.Size() == 0 ? 0 : offset.Size() - 1;
  }
  /*! \return estimation of memory cost of this page */
  inline size_t MemCostBytes() const {
    return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
  }
  /*! \brief clear the page */
  inline void Clear() {
    base_rowid = 0;
    auto& offset_vec = offset.HostVector();
    offset_vec.clear();
    // CSR offset array always carries a leading 0 sentinel.
    offset_vec.push_back(0);
    data.HostVector().clear();
  }
  /*! \brief Set the base row id for this page. */
  inline void SetBaseRowId(size_t row_id) {
    base_rowid = row_id;
  }
  // Return a transposed (CSC) copy of this page; defined out of line.
  SparsePage GetTranspose(int num_columns) const;
  // Sort the entries of every segment by feature value, in parallel.
  // NOTE(review): despite the name `ncol`, this iterates over this->Size()
  // segments -- for a CSC page those are columns, for a row page rows.
  void SortRows() {
    auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1)
    for (bst_omp_uint i = 0; i < ncol; ++i) {
      // Skip empty segments.
      if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
        std::sort(
            this->data.HostVector().begin() + this->offset.HostVector()[i],
            this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
            Entry::CmpValue);
      }
    }
  }
  /**
   * \brief Pushes external data batch onto this page
   *
   * \tparam  AdapterBatchT
   * \param batch
   * \param missing Value treated as missing and skipped.
   * \param nthread Number of threads used for the push.
   *
   * \return  The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns.
   */
  template <typename AdapterBatchT>
  uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);
  /*!
   * \brief Push a sparse page
   * \param batch the row page
   */
  void Push(const SparsePage &batch);
  /*!
   * \brief Push a SparsePage stored in CSC format
   * \param batch The row batch to be pushed
   */
  void PushCSC(const SparsePage& batch);
};
// SparsePage whose segments are columns (CSC layout).  Same storage as
// SparsePage; the distinct type lets BatchSet<CSCPage> be dispatched
// separately from row pages.
class CSCPage: public SparsePage {
 public:
  CSCPage() : SparsePage() {}
  // Takes ownership of an already-built page.
  explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
// CSC-layout page whose column segments are sorted by feature value
// (see SparsePage::SortRows).  Distinct type for batch dispatch.
class SortedCSCPage : public SparsePage {
 public:
  SortedCSCPage() : SparsePage() {}
  // Takes ownership of an already-built page.
  explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class EllpackPageImpl;
/*!
 * \brief A page stored in ELLPACK format.
 *
 * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
 * including CUDA-specific implementation details in the header.
 */
class EllpackPage {
 public:
  /*!
   * \brief Default constructor.
   *
   * This is used in the external memory case. An empty ELLPACK page is constructed with its content
   * set later by the reader.
   */
  EllpackPage();
  /*!
   * \brief Constructor from an existing DMatrix.
   *
   * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
   * in CSR format.
   */
  explicit EllpackPage(DMatrix* dmat, const BatchParam& param);
  /*! \brief Destructor. */
  ~EllpackPage();
  // Move constructor; the page owns its implementation exclusively.
  EllpackPage(EllpackPage&& that);
  /*! \return Number of instances in the page. */
  size_t Size() const;
  /*! \brief Set the base row id for this page. */
  void SetBaseRowId(size_t row_id);
  // Access the hidden implementation (defined in a CUDA translation unit).
  const EllpackPageImpl* Impl() const { return impl_.get(); }
  EllpackPageImpl* Impl() { return impl_.get(); }
 private:
  std::unique_ptr<EllpackPageImpl> impl_;
};
// Abstract iterator implementation: concrete DMatrix sources subclass this to
// yield pages of type T one at a time.  Wrapped by BatchIterator below.
template<typename T>
class BatchIteratorImpl {
 public:
  virtual ~BatchIteratorImpl() = default;
  // Access the current page.
  virtual T& operator*() = 0;
  virtual const T& operator*() const = 0;
  // Advance to the next page.
  virtual void operator++() = 0;
  // True once the source is exhausted.
  virtual bool AtEnd() const = 0;
};
// Forward-iterator facade over a BatchIteratorImpl, usable in range-for loops.
template<typename T>
class BatchIterator {
 public:
  using iterator_category = std::forward_iterator_tag;  // NOLINT
  // Takes ownership of `impl`; nullptr produces the end sentinel.
  explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
  void operator++() {
    CHECK(impl_ != nullptr);
    ++(*impl_);
  }
  T& operator*() {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }
  const T& operator*() const {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }
  // NOTE(review): the right-hand side is ignored -- inequality only reports
  // whether *this* iterator has reached the end.  Sufficient for the
  // `begin() != end()` pattern of range-for, but not a general comparison.
  bool operator!=(const BatchIterator&) const {
    CHECK(impl_ != nullptr);
    return !impl_->AtEnd();
  }
  bool AtEnd() const {
    CHECK(impl_ != nullptr);
    return impl_->AtEnd();
  }
 private:
  // shared_ptr so iterator copies share the same underlying page stream.
  std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
// Range object wrapping a begin iterator, enabling range-based for over batches.
template<typename T>
class BatchSet {
 public:
  explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {}
  BatchIterator<T> begin() { return begin_iter_; }  // NOLINT
  // End sentinel holds a null impl; only AtEnd() of begin() is ever consulted.
  BatchIterator<T> end() { return BatchIterator<T>(nullptr); }  // NOLINT
 private:
  BatchIterator<T> begin_iter_;
};
struct XGBAPIThreadLocalEntry;
/*!
 * \brief Internal data structured used by XGBoost during training.
 */
class DMatrix {
 public:
  /*! \brief default constructor */
  DMatrix()  = default;
  /*! \brief meta information of the dataset */
  virtual MetaInfo& Info() = 0;
  // Forwarders: set metadata on the underlying MetaInfo.
  virtual void SetInfo(const char *key, const void *dptr, DataType dtype,
                       size_t num) {
    this->Info().SetInfo(key, dptr, dtype, num);
  }
  virtual void SetInfo(const char* key, std::string const& interface_str) {
    this->Info().SetInfo(key, interface_str);
  }
  /*! \brief meta information of the dataset */
  virtual const MetaInfo& Info() const = 0;
  /*! \brief Get thread local memory for returning data from DMatrix. */
  XGBAPIThreadLocalEntry& GetThreadLocal() const;
  /**
   * \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
   */
  template<typename T>
  BatchSet<T> GetBatches(const BatchParam& param = {});
  // Whether a page of type T has already been materialized.
  template <typename T>
  bool PageExists() const;
  // the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns single column block. */
  virtual bool SingleColBlock() const = 0;
  /*! \brief virtual destructor */
  virtual ~DMatrix();
  /*! \brief Whether the matrix is dense (every cell is populated). */
  bool IsDense() const {
    return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
  }
  /*!
   * \brief Load DMatrix from URI.
   * \param uri The URI of input.
   * \param silent Whether print information during loading.
   * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   *   By default "auto" will be able to load in both local binary file.
   * \param page_size Page size for external memory.
   * \return The created DMatrix.
   */
  static DMatrix* Load(const std::string& uri,
                       bool silent,
                       bool load_row_split,
                       const std::string& file_format = "auto",
                       size_t page_size = kPageSize);
  /**
   * \brief Creates a new DMatrix from an external data adapter.
   *
   * \tparam  AdapterT  Type of the adapter.
   * \param [in,out]  adapter       View onto an external data.
   * \param           missing       Values to count as missing.
   * \param           nthread       Number of threads for construction.
   * \param           cache_prefix  (Optional) The cache prefix for external memory.
   * \param           page_size     (Optional) Size of the page.
   *
   * \return  a Created DMatrix.
   */
  template <typename AdapterT>
  static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
                         const std::string& cache_prefix = "",
                         size_t page_size = kPageSize);
  /**
   * \brief Create a new Quantile based DMatrix used for histogram based algorithm.
   *
   * \tparam DataIterHandle         External iterator type, defined in C API.
   * \tparam DMatrixHandle          DMatrix handle, defined in C API.
   * \tparam DataIterResetCallback  Callback for reset, prototype defined in C API.
   * \tparam XGDMatrixCallbackNext  Callback for next, prototype defined in C API.
   *
   * \param iter    External data iterator
   * \param proxy   A handle to ProxyDMatrix
   * \param reset   Callback for reset
   * \param next    Callback for next
   * \param missing Value that should be treated as missing.
   * \param nthread number of threads used for initialization.
   * \param max_bin Maximum number of bins.
   *
   * \return A created quantile based DMatrix.
   */
  template <typename DataIterHandle, typename DMatrixHandle,
            typename DataIterResetCallback, typename XGDMatrixCallbackNext>
  static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy,
                         DataIterResetCallback *reset,
                         XGDMatrixCallbackNext *next, float missing,
                         int nthread,
                         int max_bin);
  // Create a new DMatrix restricted to the given row indices.
  virtual DMatrix *Slice(common::Span<int32_t const> ridxs) = 0;
  /*! \brief Number of rows per page in external memory.  Approximately 100MB per page for
   *  dataset with 100 features. */
  static const size_t kPageSize = 32UL << 12UL;
 protected:
  // Lazily materialize/return the page collections backing GetBatches<T>().
  virtual BatchSet<SparsePage> GetRowBatches() = 0;
  virtual BatchSet<CSCPage> GetColumnBatches() = 0;
  virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
  virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
  virtual bool EllpackExists() const = 0;
  virtual bool SparsePageExists() const = 0;
};
// Explicit specializations routing the public GetBatches<T>/PageExists<T>
// templates to the corresponding protected virtual accessors.
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
  return GetRowBatches();
}
template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
  return this->EllpackExists();
}
template<>
inline bool DMatrix::PageExists<SparsePage>() const {
  return this->SparsePageExists();
}
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetColumnBatches();
}
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetSortedColumnBatches();
}
// Only the ELLPACK batches consult the BatchParam.
template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
  return GetEllpackBatches(param);
}
} // namespace xgboost
namespace dmlc {
// Entry is trivially copyable; lets dmlc containers treat it as POD.
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
namespace serializer {
// Field-by-field binary (de)serialization of Entry: index then fvalue.
// The field order is part of the on-disk format -- do not reorder.
template <>
struct Handler<xgboost::Entry> {
  inline static void Write(Stream* strm, const xgboost::Entry& data) {
    strm->Write(data.index);
    strm->Write(data.fvalue);
  }
  inline static bool Read(Stream* strm, xgboost::Entry* data) {
    return strm->Read(&data->index) && strm->Read(&data->fvalue);
  }
};
}  // namespace serializer
}  // namespace dmlc
#endif // XGBOOST_DATA_H_
|
ten_tusscher_2004_epi_S2_17.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_17.h"
// Fill in the requested fields of the cell-model descriptor.
// GET_CELL_MODEL_DATA presumably expands to the function signature (defined in
// the model header); the flags get_initial_v / get_neq select which fields
// are written.
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}
// Initialize the NEQ state variables of one cell to a pre-computed steady state.
//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default initial conditions (original ten Tusscher 2004 values, kept for reference)
    /*
    sv[0] = INITIAL_V;     // V;       millivolt
    sv[1] = 0.f;   //M
    sv[2] = 0.75; //H
    sv[3] = 0.75f; //J
    sv[4] = 0.f; //Xr1
    sv[5] = 1.f; //Xr2
    sv[6] = 0.f; //Xs
    sv[7] = 1.f; //S
    sv[8] = 0.f; //R
    sv[9] = 0.f; //D
    sv[10] = 1.f; //F
    sv[11] = 1.f; //FCa
    sv[12] = 1.f; //G
    sv[13] = 0.0002; //Cai
    sv[14] = 0.2f; //CaSR
    sv[15] = 11.6f; //Nai
    sv[16] = 138.3f; //Ki
    */
    // Elnaz's steady-state initial conditions (same variable order as above)
    real sv_sst[]={-86.5413847249581,0.00129749333274554,0.779049320216350,0.778898229736094,0.000175386577749891,0.484810494199284,0.00294593233512741,0.999998339142758,1.94217050104558e-08,1.89785399117775e-05,0.999772756079176,1.00727534190360,0.999997440785955,4.09273550037733e-05,0.410743063693995,10.9424848182514,138.731054625637};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}
// Advance num_steps time steps of size dt for every cell in the list.
// Cells are independent, so the outer loop is parallelized with OpenMP.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    int i;
    // NOTE(review): `i` is signed while num_cells_to_solve is presumably an
    // unsigned type from the macro signature -- confirm the comparison below
    // is safe for very large cell counts.
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        // A NULL cells_to_solve means "solve all cells in order".
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;
        // Each cell owns NEQ contiguous state variables inside sv.
        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
/* Take one explicit time step for a single cell: snapshot the current state,
 * evaluate the model update (RHS_cpu writes the advanced state into the
 * second array), then copy it back into sv. */
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    real state[NEQ];
    real next_state[NEQ];
    int k;

    assert(sv);

    for (k = 0; k < NEQ; k++) {
        state[k] = sv[k];
    }

    RHS_cpu(state, next_state, stim_current, dt);

    for (k = 0; k < NEQ; k++) {
        sv[k] = next_state[k];
    }
}
// Evaluate one time step of the ten Tusscher 2004 epicardial ventricular
// cell model.  Despite the name, rDY_ does not receive derivatives: gate
// variables are advanced with the Rush-Larsen scheme, concentrations with
// an analytic buffering update, and the membrane voltage with forward Euler,
// so rDY_[i] holds the UPDATED state after dt (except rDY_[0], which is the
// updated voltage assuming sItot already includes the stimulus).
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];   // membrane voltage (mV)
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    // #ifdef ENDO
    // real Gto=0.073;
    //#endif
    //#ifdef MCELL
    // real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted per-variant conductances/rates (S2_17 parameter set).  These
    // overwrite the textbook defaults assigned above.
    real parameters []={14.6526831901002,0.000336603613824894,0.000142032316714142,0.000147797037794095,0.244877435259635,0.136552852378623,0.180909422982719,4.68260453463487,0.0136308755837635,1.00097696778612,1088.15434244063,0.000484016332794955,0.441709817218134,0.0199531034368028,0.00354996431590630,4.97623621373625e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Working variables: membrane currents, concentration increments, gate
    // rates/steady states and assorted intermediates.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents (Nernst potentials and rectification factors)
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current (stimulus included)
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    // Analytic (quadratic) update of buffered SR calcium.
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    // Analytic (quadratic) update of buffered cytosolic calcium.
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // NOTE(review): if none of EPI/ENDO/MCELL is defined at compile time,
    // R_INF/S_INF/TAU_R/TAU_S remain uninitialized below -- confirm the build
    // always defines exactly one cell-type macro.
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may only decrease while the cell is depolarized
    // (voltage-dependent latching, per the 2004 model).
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler on the total ionic current)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;

}
|
mscash2_fmt_plug.c | /* MSCASH2 patch for John the Ripper written by S3nf in 2010, 2011
* a slow but working version
*
* Cracking Domain Cached Credentials for modern Windows operating systems, supporting:
* - Windows Vista
* - Windows 7
* - Windows Server 2008
*
* This software was written by S3nf in 2010, 2011. No copyright is claimed, and the software is hereby placed in
* the public domain. In case this attempt to disclaim copyright and place the software in the public domain
* is deemed null and void, then the software is Copyright (c) 2010, 2011 S3nf and it is hereby released to the
* general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* Modified for optional utf-8 support by magnum 2011, same terms as above
*
* Code redone/optimized by JimF June 2011. (2x to 10x improvement in speed)
* - Code converted to oSSL (for non-sse builds). The inline MD4/SHA1 replaced. This reduced
* about 900 lines down to 60 or so, which were much easier to follow. This was a preliminary
* step to getting SSE2 added. Once done, this ended up faster than the original, so the new
* simplified code was kept.
* - Setup of ipad/opad only done once per PW/Salt about 10-15% speedup
* - 1/2 of the encryption performed within inner loop was moved outside of inner loop (nearly doubles speed)
* - changed signature from M$salt#hash to $DCC2$iterations#salt#hash
* - variable iterations now 'possible'. Default is 10240
 * - increased salt (user name) up to 22 UC2 characters. Bug in original code only allowed up to 8 chars.
* - Added SSE2(/MMX) and SSE2i to the deep inner loop. 2x to 4x speedup.
 * - total about 2x to 10x improvement in speed (depending upon CPU and compiler). Some compilers
 *   were more efficient with the original code, and thus received less of a performance boost. Others
 *   got a significant improvement.
* - The utf8 code was greatly simplified. There was no reason to try to optimized the UTF code as
* the format is so slow that utf8 conversion is a non-issue. Thus we always call the enc_to_utf16()
* at the proper locations, and let that function deal with being in --encoding=utf8 switch mode or not.
* - Fixed code to properly work with BE systems, and alignment required systems.
* - Made some 'interface' changes to the SSE2i for SHA1, and to the sha-mmx.S code, to make it work
* properly, and to make it more efficient. We deal with 2 SHA1 states, and alternate back and forth
* between them. The changes to the SSE2i code, were to optimize this dual state, and the changes
* to the .S code were simply to make it work at all and the same optimizations were placed there.
* - the OMP code was removed during initial re-write, and was properly re-incorporated by magnum.
*
* In June 2013, salt length (Username) increased from 22 to 128, and max password length increased
* from 27 to 125 bytes (unicode bytes, so 250 ?)
*
* This module is based on:
* - the MSCASH patch for john written by Alain Espinosa <alainesp at gmail.com> in 2007
* - RFC 1320 - The MD4 Message-Digest Algorithm
* - RFC 2104 - HMAC: Keyed-Hashing for Message Authentication
* - RFC 3174 - US Secure Hash Algorithm 1 (SHA1)
* - the HMAC-SHA1 implementation of the PolarSSL open source cryptographic library (http://polarssl.org/)
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mscash2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mscash2);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "unicode.h"
#include "options.h"
#include "unicode.h"
#include "sha.h"
#include "md4.h"
#include "simd-intrinsics.h"
#include "loader.h"
#include "mscash_common.h"
#if defined (_OPENMP)
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 8 // Tuned on Corei7 Quad-HT
#endif
#endif
#include "memdbg.h"
#define ITERATIONS 10240
static unsigned iteration_cnt = (ITERATIONS); /* this will get changed at runtime, salt loading */
#define FORMAT_LABEL "mscash2"
#define FORMAT_NAME "MS Cache Hash 2 (DCC2)"
#define MAX_SALT_LEN 128
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE (MAX_SALT_LEN*2+4)
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#ifdef SIMD_COEF_32
#define MS_NUM_KEYS (SIMD_COEF_32*SIMD_PARA_SHA1)
#if ARCH_LITTLE_ENDIAN==1
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
#else
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
#endif
static unsigned char (*sse_hash1);
static unsigned char (*sse_crypt1);
static unsigned char (*sse_crypt2);
#else
#define MS_NUM_KEYS 1
#endif
#define MIN_KEYS_PER_CRYPT MS_NUM_KEYS
#define MAX_KEYS_PER_CRYPT MS_NUM_KEYS
#define HASH_LEN (16+48)
static unsigned char *salt_buffer;
static unsigned int salt_len;
static unsigned char(*key);
static unsigned int new_key = 1;
static unsigned char(*md4hash); // allows the md4 of user, and salt to be appended to it. the md4 is ntlm, with the salt is DCC1
static unsigned int (*crypt_out);
/*
 * One-time format setup: scale key counts for OpenMP, allocate the
 * per-candidate buffers and, for SIMD builds, pre-format the interleaved
 * SHA-1 input blocks (length word and padding byte) once up front.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	if (omp_t < 1)
		omp_t = 1;
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;	/* extra batching per thread for scheduling slack */
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* one (PLAINTEXT_LENGTH+1)-byte slot per candidate password */
	key = mem_calloc(self->params.max_keys_per_crypt,
	                 (PLAINTEXT_LENGTH + 1));
	/* NTLM (MD4) per candidate; HASH_LEN leaves room for appended salt */
	md4hash = mem_calloc(self->params.max_keys_per_crypt,
	                     HASH_LEN);
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       BINARY_SIZE);
#if defined (SIMD_COEF_32)
	/* SIMD-interleaved SHA-1 message blocks and precomputed ipad/opad states */
	sse_hash1 = mem_calloc_align(self->params.max_keys_per_crypt,
	                             sizeof(*sse_hash1)*SHA_BUF_SIZ*4,
	                             MEM_ALIGN_SIMD);
	sse_crypt1 = mem_calloc_align(self->params.max_keys_per_crypt,
	                              sizeof(*sse_crypt1) * 20, MEM_ALIGN_SIMD);
	sse_crypt2 = mem_calloc_align(self->params.max_keys_per_crypt,
	                              sizeof(*sse_crypt2) * 20, MEM_ALIGN_SIMD);
	{
		int index;
		for (index = 0; index < self->params.max_keys_per_crypt; ++index) {
			// set the length of all hash1 SSE buffer to 64+20 * 8 bits
			// The 64 is for the ipad/opad, the 20 is for the length of the SHA1 buffer that also gets into each crypt
			// this works for SSEi
			((unsigned int *)sse_hash1)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = (84<<3); // all encrypts are 64+20 bytes.
			sse_hash1[GETPOS(20,index)] = 0x80;	/* SHA-1 padding byte right after the 20-byte digest */
		}
	}
	// From this point on, we ONLY touch the first 20 bytes (* SIMD_COEF_32) of each buffer 'block'. If !SHA_PARA', then only the first
	// block is written to after this, if there are more that one SHA_PARA, then the start of each para block will be updated inside the inner loop.
#endif
	mscash2_adjust_tests(options.target_enc, PLAINTEXT_LENGTH, MAX_SALT_LEN);
}
/* Release everything allocated in init(), in reverse allocation order. */
static void done(void)
{
#ifdef SIMD_COEF_32
	MEM_FREE(sse_crypt2);
	MEM_FREE(sse_crypt1);
	MEM_FREE(sse_hash1);
#endif
	MEM_FREE(crypt_out);
	MEM_FREE(md4hash);
	MEM_FREE(key);
}
/* Ciphertext syntax check; delegates to the shared DCC2 validator with this format's salt limit. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	return mscash2_common_valid(ciphertext, MAX_SALT_LEN, self);
}
/*
 * Install a salt record produced by get_salt(): a UTF16 array whose first
 * element is the salt length in BYTES, second is the PBKDF2 iteration
 * count, followed by the UTF-16 salt (lowercased user name) itself.
 */
static void set_salt(void *salt) {
	UTF16 *p = (UTF16*)salt;
	salt_len = *p++;      /* byte length of the UTF-16 salt */
	iteration_cnt = *p++; /* per-hash iteration count */
	salt_buffer = (unsigned char*)p;
}
/*
 * Parse "tag iterations#username#hash" into the internal salt record that
 * set_salt() consumes: out[0] = salt length in bytes, out[1] = iteration
 * count, out[2..] = user name converted to UTF-16.
 */
static void *get_salt(char *ciphertext)
{
	static UTF16 out[130+1];
	unsigned char input[MAX_SALT_LEN*3+1];
	int i, iterations, utf16len;
	/* The salt (user name) may itself contain '#', so the hash starts at the LAST '#'. */
	char *lasth = strrchr(ciphertext, '#');
	memset(out, 0, sizeof(out));
	/* iteration count starts right after the format tag -- assumes a 6-byte tag, TODO confirm against mscash_common */
	sscanf(&ciphertext[6], "%d", &iterations);
	ciphertext = strchr(ciphertext, '#') + 1;
	/* copy the raw (possibly UTF-8) user name up to the final '#' */
	for (i = 0; &ciphertext[i] < lasth; i++)
		input[i] = (unsigned char)ciphertext[i];
	input[i] = 0;
	utf16len = enc_to_utf16(&out[2], MAX_SALT_LEN, input, i);
	if (utf16len < 0)	/* invalid sequence: keep the portion that converted */
		utf16len = strlen16(&out[2]);
	out[0] = utf16len << 1;	/* store the BYTE length (UTF-16 units * 2) */
	out[1] = iterations;
	return out;
}
/*
 * Decode the 32 hex digits after the last '#' into four 32-bit words.
 * The nibbles are picked up in an order that byte-swaps each word as it
 * is assembled (note the 4,0,12,8,... shift pattern), with the pattern
 * mirrored for big-endian builds so the in-memory words come out the same.
 */
static void *get_binary(char *ciphertext)
{
	static unsigned int out[BINARY_SIZE / sizeof(unsigned int)];
	unsigned int i;
	unsigned int temp;
	/* We need to allow salt containing '#' so we search backwards */
	ciphertext = strrchr(ciphertext, '#') + 1;
	for (i = 0; i < 4 ;i++)
	{
#if ARCH_LITTLE_ENDIAN
		temp  = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 0])])) << 4;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 1])]));
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 2])])) << 12;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 3])])) << 8;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 4])])) << 20;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 5])])) << 16;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 6])])) << 28;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 7])])) << 24;
#else
		temp  = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 6])])) << 4;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 7])]));
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 4])])) << 12;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 5])])) << 8;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 2])])) << 20;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 3])])) << 16;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 0])])) << 28;
		temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i * 8 + 1])])) << 24;
#endif
		out[i] = temp;
	}
	/* crypt_out is kept big-endian on SIMD builds; match it (see pbkdf2_sse2). */
#if defined(SIMD_COEF_32) && ARCH_LITTLE_ENDIAN==1
	alter_endianity(out, BINARY_SIZE);
#endif
	return out;
}
/*
 * Bucket-hash functions over the stored binary: all of them key on the
 * fourth 32-bit word (the only word cmp_all() compares) masked to the
 * table size for each hash level.
 */
static int binary_hash_0(void *binary)
{
	const unsigned int *b = (const unsigned int *)binary;
	return b[3] & PH_MASK_0;
}
static int binary_hash_1(void *binary)
{
	const unsigned int *b = (const unsigned int *)binary;
	return b[3] & PH_MASK_1;
}
static int binary_hash_2(void *binary)
{
	const unsigned int *b = (const unsigned int *)binary;
	return b[3] & PH_MASK_2;
}
static int binary_hash_3(void *binary)
{
	const unsigned int *b = (const unsigned int *)binary;
	return b[3] & PH_MASK_3;
}
static int binary_hash_4(void *binary)
{
	const unsigned int *b = (const unsigned int *)binary;
	return b[3] & PH_MASK_4;
}
static int binary_hash_5(void *binary)
{
	const unsigned int *b = (const unsigned int *)binary;
	return b[3] & PH_MASK_5;
}
static int binary_hash_6(void *binary)
{
	const unsigned int *b = (const unsigned int *)binary;
	return b[3] & PH_MASK_6;
}
/*
 * Bucket-hash functions over a computed hash: key on word 3 of the
 * 4-word entry for candidate 'index' in crypt_out, mirroring the
 * binary_hash_* family above.
 */
static int get_hash_0(int index)
{
	unsigned int w = crypt_out[4 * index + 3];
	return w & PH_MASK_0;
}
static int get_hash_1(int index)
{
	unsigned int w = crypt_out[4 * index + 3];
	return w & PH_MASK_1;
}
static int get_hash_2(int index)
{
	unsigned int w = crypt_out[4 * index + 3];
	return w & PH_MASK_2;
}
static int get_hash_3(int index)
{
	unsigned int w = crypt_out[4 * index + 3];
	return w & PH_MASK_3;
}
static int get_hash_4(int index)
{
	unsigned int w = crypt_out[4 * index + 3];
	return w & PH_MASK_4;
}
static int get_hash_5(int index)
{
	unsigned int w = crypt_out[4 * index + 3];
	return w & PH_MASK_5;
}
static int get_hash_6(int index)
{
	unsigned int w = crypt_out[4 * index + 3];
	return w & PH_MASK_6;
}
/* Quick scan: does ANY computed hash match the target on word 3? */
static int cmp_all(void *binary, int count)
{
	const unsigned int target = ((unsigned int *)binary)[3];
	int i;
	for (i = 0; i < count; i++) {
		if (crypt_out[i * 4 + 3] == target)
			return 1;
	}
	return 0;
}
/* Full 128-bit comparison for one candidate, checking word 3 first (the
 * word cmp_all already matched) down to word 0. */
static int cmp_one(void *binary, int index)
{
	const unsigned int *want = (const unsigned int *)binary;
	const unsigned int *have = &crypt_out[4 * index];
	int j;
	for (j = 3; j >= 0; j--) {
		if (have[j] != want[j])
			return 0;
	}
	return 1;
}
/* Nothing left to verify: cmp_one() already compared the full binary. */
static int cmp_exact(char *source, int index)
{
	(void)source;
	(void)index;
	return 1;
}
/* Store one candidate password (bounded, NUL-terminated copy) and flag
 * that the cached NTLM hashes must be recomputed in crypt_all(). */
static void set_key(char *_key, int index)
{
	strnzcpy ((char*)&key[index*(PLAINTEXT_LENGTH + 1)], _key, (PLAINTEXT_LENGTH + 1));
	new_key = 1;
}
/* Return the stored plaintext candidate for 'index' (slot-addressed). */
static char *get_key(int index)
{
	return (char *)(key + index * (PLAINTEXT_LENGTH + 1));
}
// Public domain hash function by DJ Bernstein (salt is a username)
/*
 * djb2-xor over the serialized salt record. Note the loop covers *n + 2
 * BYTES starting at the length field itself, so it mixes in the length
 * word (and the word after it) rather than exactly the salt text.
 * NOTE(review): presumably intentional -- any deterministic function of
 * the record works as a bucket hash -- but verify if the salt layout
 * produced by get_salt() ever changes.
 */
static int salt_hash(void *salt)
{
	UTF16 *n = salt, i;
	unsigned char *s = (unsigned char*)n;
	unsigned int hash = 5381;
	for (i = 0; i < (*n+2); ++i)
		hash = ((hash<<5)+hash) ^ s[i];
	return hash & (SALT_HASH_SIZE - 1);
}
#ifdef SIMD_COEF_32
// NOTE, in the end, this block will move above the pbkdf2() function, and the #else and #endif wrapping that function will be
// uncommented. Thus, if built for SSE2 (mmx, or intrisic), we get this function. Otherwise we get the pbkdf2() function which
// uses OpenSSL. However to get the 'layout' right, The code here will walk through the array buffer, calling the pbkdf2
// function.
/*
 * SIMD PBKDF2-HMAC-SHA1 for one batch of MS_NUM_KEYS candidates.
 *
 * Phase 1 (flat, OpenSSL): for each key k, derive the HMAC ipad/opad
 * states from the 16-byte DCC1 value already in crypt_out, stash those
 * precomputed states into the SIMD-interleaved sse_crypt1/sse_crypt2
 * buffers, and compute U1 = HMAC(key, salt || INT(1)) the scalar way.
 * Phase 2 (SIMD): iterate U_i = HMAC(key, U_{i-1}) with two
 * SIMDSHA1body() calls per round (inner then outer SHA-1), xoring only
 * the first 16 bytes into crypt_out -- all this format uses.
 *
 * NOTE: the accumulation is done on big-endian words; get_binary()
 * compensates with alter_endianity() on LE SIMD builds.
 */
static void pbkdf2_sse2(int t)
{
	// Thread safe, t is our thread number.
	// All indexes into buffers are offset by (t * MS_NUM_KEYS * (size))
	SHA_CTX ctx1, ctx2;
	unsigned int ipad[SHA_LBLOCK], opad[SHA_LBLOCK];
	unsigned int tmp_hash[SHA_DIGEST_LENGTH/4];
	unsigned int i, j, k, *i1, *i2, *o1, *t_crypt;
	unsigned char *t_sse_crypt1, *t_sse_crypt2, *t_sse_hash1;

	/* bytes 16..63 of ipad/opad are pure pad; only the first 16 depend on the key */
	memset(&ipad[4], 0x36, SHA_CBLOCK-16);
	memset(&opad[4], 0x5C, SHA_CBLOCK-16);

	// All pointers get their offset for this thread here. No further offsetting below.
	t_crypt = &crypt_out[t * MS_NUM_KEYS * 4];
	t_sse_crypt1 = &sse_crypt1[t * MS_NUM_KEYS * 20];
	t_sse_crypt2 = &sse_crypt2[t * MS_NUM_KEYS * 20];
	t_sse_hash1 = &sse_hash1[t * MS_NUM_KEYS * SHA_BUF_SIZ * 4];
	i1 = (unsigned int*)t_sse_crypt1;
	i2 = (unsigned int*)t_sse_crypt2;
	o1 = (unsigned int*)t_sse_hash1;

	for (k = 0; k < MS_NUM_KEYS; ++k)
	{
		/* key XOR pad for the first 16 bytes (the DCC1 hash is the HMAC key) */
		for (i = 0;i < 4;i++) {
			ipad[i] = t_crypt[k*4+i]^0x36363636;
			opad[i] = t_crypt[k*4+i]^0x5C5C5C5C;
		}
		SHA1_Init(&ctx1);
		SHA1_Init(&ctx2);
		SHA1_Update(&ctx1,ipad,SHA_CBLOCK);
		SHA1_Update(&ctx2,opad,SHA_CBLOCK);

		// we memcopy from flat into SIMD_COEF_32 output buffer's (our 'temp' ctx buffer).
		// This data will NOT need to be BE swapped (it already IS BE swapped).
		i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))] = ctx1.h0;
		i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+SIMD_COEF_32] = ctx1.h1;
		i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<1)] = ctx1.h2;
		i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+SIMD_COEF_32*3] = ctx1.h3;
		i1[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<2)] = ctx1.h4;

		i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))] = ctx2.h0;
		i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+SIMD_COEF_32] = ctx2.h1;
		i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<1)] = ctx2.h2;
		i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+SIMD_COEF_32*3] = ctx2.h3;
		i2[(k/SIMD_COEF_32)*SIMD_COEF_32*5+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<2)] = ctx2.h4;

		/* U1 = HMAC(key, salt || 0x00000001), computed flat */
		SHA1_Update(&ctx1,salt_buffer,salt_len);
		SHA1_Update(&ctx1,"\x0\x0\x0\x1",4);
		SHA1_Final((unsigned char*)tmp_hash,&ctx1);

		SHA1_Update(&ctx2,(unsigned char*)tmp_hash,SHA_DIGEST_LENGTH);
		SHA1_Final((unsigned char*)tmp_hash,&ctx2);

		// now convert this from flat into SIMD_COEF_32 buffers.
		// Also, perform the 'first' ^= into the crypt buffer.  NOTE, we are doing that in BE format
		// so we will need to 'undo' that in the end.
		o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))] = t_crypt[k*4+0] = ctx2.h0;
		o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))+SIMD_COEF_32] = t_crypt[k*4+1] = ctx2.h1;
		o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<1)] = t_crypt[k*4+2] = ctx2.h2;
		o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))+SIMD_COEF_32*3] = t_crypt[k*4+3] = ctx2.h3;
		o1[(k/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(k&(SIMD_COEF_32-1))+(SIMD_COEF_32<<2)] = ctx2.h4;
	}

	/* remaining iterations entirely in SIMD: U_i = SHA1(opad_state, SHA1(ipad_state, U_{i-1})) */
	for (i = 1; i < iteration_cnt; i++)
	{
		SIMDSHA1body((unsigned int*)t_sse_hash1, (unsigned int*)t_sse_hash1, (unsigned int*)t_sse_crypt1, SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
		SIMDSHA1body((unsigned int*)t_sse_hash1, (unsigned int*)t_sse_hash1, (unsigned int*)t_sse_crypt2, SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
		// only xor first 16 bytes, since that is ALL this format uses
		for (k = 0; k < MS_NUM_KEYS; k++) {
			unsigned *p = &((unsigned int*)t_sse_hash1)[k/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32 + (k&(SIMD_COEF_32-1))];
			for (j = 0; j < 4; j++)
				t_crypt[k*4+j] ^= p[(j*SIMD_COEF_32)];
		}
	}
}
#else
/*
 * This function is derived from IEEE Std 802.11-2004, Clause H.4.
 * The main construction is from PKCS#5 v2.0.  It is tweaked a little
 * to remove some code not needed for our SHA1-128 output.
 *
 * Scalar PBKDF2-HMAC-SHA1, 16-byte output, computed in place over _key
 * (which on entry holds the DCC1 hash, i.e. the HMAC key). The ipad/opad
 * SHA-1 states are computed once and snapshotted (tmp_ctx1/tmp_ctx2),
 * then each iteration restores them instead of rehashing the 64-byte pad
 * block.
 */
static void pbkdf2(unsigned int _key[])  // key is also 'final' digest.
{
	SHA_CTX ctx1, ctx2, tmp_ctx1, tmp_ctx2;
	unsigned char ipad[SHA_CBLOCK], opad[SHA_CBLOCK];
	unsigned int tmp_hash[SHA_DIGEST_LENGTH/4];
	unsigned i, j;
	unsigned char *key = (unsigned char*)_key;

	/* HMAC key is the 16-byte DCC1 value; rest of the block is pure pad */
	for (i = 0; i < 16; i++) {
		ipad[i] = key[i]^0x36;
		opad[i] = key[i]^0x5C;
	}
	memset(&ipad[16], 0x36, sizeof(ipad)-16);
	memset(&opad[16], 0x5C, sizeof(opad)-16);

	SHA1_Init(&ctx1);
	SHA1_Init(&ctx2);
	SHA1_Update(&ctx1, ipad, SHA_CBLOCK);
	SHA1_Update(&ctx2, opad, SHA_CBLOCK);
	/* snapshot the pad-only states for cheap per-iteration restore */
	memcpy(&tmp_ctx1, &ctx1, sizeof(SHA_CTX));
	memcpy(&tmp_ctx2, &ctx2, sizeof(SHA_CTX));

	/* U1 = HMAC(key, salt || 0x00000001) */
	SHA1_Update(&ctx1, salt_buffer, salt_len);
	SHA1_Update(&ctx1, "\x0\x0\x0\x1", 4);
	SHA1_Final((unsigned char*)tmp_hash,&ctx1);

	SHA1_Update(&ctx2, (unsigned char*)tmp_hash, SHA_DIGEST_LENGTH);
	// we have to sha1 final to a 'temp' buffer, since we can only overwrite first 16 bytes
	// of the _key buffer.  If we overwrote 20 bytes, then we would lose the first 4 bytes
	// of the next element (and overwrite end of buffer on last element).
	SHA1_Final((unsigned char*)tmp_hash, &ctx2);

	// only copy first 16 bytes, since that is ALL this format uses
	memcpy(_key, tmp_hash, 16);

	for (i = 1; i < iteration_cnt; i++)
	{
		// we only need to copy the accumulator data from the CTX, since
		// the original encryption was a full block of 64 bytes.
		// NOTE(review): this relies on OpenSSL's SHA_CTX layout keeping the
		// 64-byte data buffer and 'num' at the end of the struct -- confirm
		// when upgrading the SSL library.
		memcpy(&ctx1, &tmp_ctx1, sizeof(SHA_CTX)-(64+sizeof(unsigned int)));
		SHA1_Update(&ctx1, (unsigned char*)tmp_hash, SHA_DIGEST_LENGTH);
		SHA1_Final((unsigned char*)tmp_hash, &ctx1);

		memcpy(&ctx2, &tmp_ctx2, sizeof(SHA_CTX)-(64+sizeof(unsigned int)));
		SHA1_Update(&ctx2, (unsigned char*)tmp_hash, SHA_DIGEST_LENGTH);
		SHA1_Final((unsigned char*)tmp_hash, &ctx2);

		// only xor first 16 bytes, since that is ALL this format uses
		for (j = 0; j < 4; j++)
			_key[j] ^= tmp_hash[j];
	}
}
#endif
/*
 * Main work function: for each candidate, compute NTLM (MD4 of UTF-16
 * password, cached across salts), then DCC1 = MD4(NTLM || salt), then
 * DCC2 via PBKDF2-HMAC-SHA1 -- scalar per key, or SIMD in batches of
 * MS_NUM_KEYS. Returns the number of candidates processed.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int i, t, t1;

	// Note, for a format like DCC2, there is little reason to optimize anything other
	// than the pbkdf2 inner loop.  The one exception is that the NTLM hashes only
	// depend on the keys, so they are computed once (here) and reused for every salt
	// until set_key() raises new_key again.

	// now get NTLM of the password (MD4 of unicode)
	if (new_key) {
#if MS_NUM_KEYS > 1 && defined(_OPENMP)
#pragma omp parallel for default(none) private(i) shared(count, key, md4hash)
#endif
		for (i = 0; i < count; ++i) {
			int utf16len;
			UTF16 pass_unicode[PLAINTEXT_LENGTH+1];
			MD4_CTX ctx;
			utf16len = enc_to_utf16(pass_unicode, PLAINTEXT_LENGTH, &key[(PLAINTEXT_LENGTH + 1)*i], strlen((char*)&key[(PLAINTEXT_LENGTH + 1)*i]));
			if (utf16len <= 0) {
				/* conversion failed: truncate the stored key at the offending
				 * byte. NOTE(review): -utf16len appears to be the byte offset
				 * of the failure -- verify against enc_to_utf16()'s contract. */
				key[(PLAINTEXT_LENGTH + 1)*i-utf16len] = 0;
				if (utf16len != 0)
					utf16len = strlen16(pass_unicode);
			}
			MD4_Init(&ctx);
			MD4_Update(&ctx, pass_unicode, utf16len<<1);
			MD4_Final(&md4hash[HASH_LEN*i], &ctx);
		}
		new_key = 0;
	}

#ifdef _OPENMP
#if defined(WITH_UBSAN)
#pragma omp parallel for
#else
#pragma omp parallel for default(none) private(t) shared(count, salt_buffer, salt_len, crypt_out, md4hash)
#endif
#endif
	/* one iteration per SIMD batch (MS_NUM_KEYS == 1 without SIMD) */
	for (t1 = 0; t1 < count; t1 += MS_NUM_KEYS) {
		MD4_CTX ctx;
		int i;

		t = t1 / MS_NUM_KEYS;
		for (i = 0; i < MS_NUM_KEYS; ++i) {
			// Get DCC1.  That is MD4( NTLM . unicode(lc username) )
			MD4_Init(&ctx);
			MD4_Update(&ctx, &md4hash[(t * MS_NUM_KEYS + i) * HASH_LEN], 16);
			MD4_Update(&ctx, salt_buffer, salt_len);
			MD4_Final((unsigned char*)&crypt_out[(t * MS_NUM_KEYS + i) * 4], &ctx);
			// now we have DCC1 (mscash) which is MD4 (MD4(unicode(pass)) . unicode(lc username))
#ifndef SIMD_COEF_32
			// Non-SSE: Compute DCC2 one at a time
			pbkdf2(&crypt_out[(t * MS_NUM_KEYS + i) * 4]);
#endif
		}
#ifdef SIMD_COEF_32
		// SSE: Compute DCC2 in parallel, once per thread
		pbkdf2_sse2(t);
#endif
	}
	return count;
}
/* John the Ripper format descriptor for MS Cache Hash 2 (DCC2). */
struct fmt_main fmt_mscash2 = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,                 /* plaintext min length */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8,
		{ NULL },          /* tunable cost names */
		{ FORMAT_TAG2 },
		mscash2_common_tests
	}, { /* methods */
		init,
		done,
		fmt_default_reset,
		mscash2_common_prepare,
		valid,
		mscash2_common_split,
		get_binary,
		get_salt,
		{ NULL },          /* tunable cost value functions */
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		salt_hash,
		NULL,              /* salt_compare */
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
vector.h | // BLAS like functions
#ifndef __cg_VECTOR_H__
#define __cg_VECTOR_H__
// ---- full (DOUBLE) precision BLAS-like helpers ----

// x[i] = a for all i
static inline void vector_set(int n, DOUBLE a, DOUBLE *x) {
  for (int k = 0; k < n; ++k) {
    x[k] = a;
  }
}
// fill x with uniform pseudo-random values in [0, 1]
static inline void vector_rand(int n, DOUBLE *x) {
  for (int k = 0; k < n; ++k) {
    x[k] = rand() / (double)RAND_MAX;
  }
}
// y = x
static inline void vector_copy(int n, DOUBLE *x, DOUBLE *y) {
  for (int k = 0; k < n; ++k) {
    y[k] = x[k];
  }
}
// y += a * x
static inline void vector_axpy(int n, DOUBLE a, DOUBLE *x, DOUBLE *y) {
  for (int k = 0; k < n; ++k) {
    y[k] = y[k] + x[k] * a;
  }
}
// y = x + b * y
static inline void vector_xpby(int n, DOUBLE *x, DOUBLE b, DOUBLE *y) {
  for (int k = 0; k < n; ++k) {
    y[k] = x[k] + b * y[k];
  }
}
// inner product <x, y>, accumulated left to right
static inline DOUBLE vector_dot(int n, DOUBLE *x, DOUBLE *y) {
  DOUBLE acc = 0.0;
  for (int k = 0; k < n; ++k) {
    acc += y[k] * x[k];
  }
  return acc;
}
// Euclidean norm: sqrt(<x, x>)
static inline DOUBLE vector_norm2(int n, DOUBLE *x) {
  return sqrt(vector_dot(n, x, x));
}
// limited precision version
static inline void floatm_set(int n, FLOAT a, FLOAT *x) {
FLOAT b = a;
for (int i = 0; i < n; i++) x[i] = b;
}
static inline void floatm_rand(int n, FLOAT *x) {
for (int i = 0; i < n; i++) x[i] = rand() / (double)RAND_MAX;
}
// y = x
static inline void floatm_copy(int n, FLOAT *x, FLOAT *y) {
for (int i = 0; i < n; i++) y[i] = x[i];
}
static inline void floatm_parallel_copy(int n, FLOAT *x, FLOAT *y) {
#pragma omp parallel for
for (int i = 0; i < n; i++) y[i] = x[i];
}
// y = y + x*a
static inline void floatm_axpy(int n, FLOAT a, FLOAT *x, FLOAT *y) {
for (int i = 0; i < n; i++) y[i] = y[i] + x[i] * a;
}
// y = x + b*y
static inline void floatm_xpby(int n, FLOAT *x, FLOAT b, FLOAT *y) {
for (int i = 0; i < n; i++) y[i] = x[i] + b * y[i];
}
// the dot product can be computed with more precision than FLOAT
// inner product
static inline FLOAT2 floatm_dot(int n, FLOAT *x, FLOAT *y) {
FLOAT2 r = 0.0;
for (int i = 0; i < n; i++) r += y[i] * x[i];
return r;
}
// sqrt(inner product(x))
static inline FLOAT2 floatm_norm2(int n, FLOAT *x) {
return sqrt(floatm_dot(n, x, x));
}
// Fused update-and-reduce: y += a*x, returning sum((new y)^2) in FLOAT2.
static inline FLOAT2 floatm_axpy_dot(int n, FLOAT a, FLOAT *x, FLOAT *y) {
  FLOAT2 acc = 0.0;
  for (int k = 0; k < n; ++k) {
    FLOAT2 upd = y[k] + x[k] * a;
    y[k] = upd;
    acc += upd * upd;
  }
  return acc;
}
// sqrt( sum((x-y)^2) ), accumulated in FLOAT2
static inline FLOAT2 floatm_diff_norm2(int n, FLOAT *x, FLOAT *y) {
  FLOAT2 acc = 0.0;
  for (int k = 0; k < n; ++k) {
    FLOAT2 d = x[k] - y[k];
    acc += d * d;
  }
  return sqrt(acc);
}
// z = max(abs(x - y), tol)
static inline void floatm_max_abs_diff(int n, FLOAT *x, FLOAT *y, FLOAT2* z, FLOAT2 tol) {
for(int i = 0; i < n; i++) {
z[i] = std::max(fabs(((double)x[i] - (double)y[i])), tol);
}
}
// Copy x into y, returning the largest elementwise |x - y| observed before
// the copy.
// Fix: the reduction accumulator was seeded with -__FLT_MIN__, a value just
// below zero rather than the most-negative float. Because fabsf() >= 0 every
// element still won the comparison, but an empty input (n == 0) returned
// that odd sentinel (~ -1.2e-38). Seeding with 0.0f is the correct identity
// for a max over absolute values and makes n == 0 return 0.
static inline FLOAT floatm_max_diff_and_copy(int n, FLOAT *x, FLOAT *y) {
  float maxv = 0.0f;
  #pragma omp parallel for reduction(max: maxv)
  for(int i = 0; i < n; i++) {
    float val = fabsf(x[i] - y[i]);
    y[i] = x[i];
    if(val > maxv)
      maxv = val;
  }
  return maxv;
}
// Smallest elementwise |x - y|. This is more aggressive than the max-diff
// criterion for precision switching: it only asks whether a single pair of
// entries is close, not whether the largest gap is below the threshold.
static inline FLOAT floatm_min_diff(int n, FLOAT *x, FLOAT *y) {
  float best = __FLT_MAX__;
  // #pragma omp parallel for reduction(min: best)
  for (int k = 0; k < n; ++k) {
    float d = fabsf(x[k] - y[k]);
    if (d < best) {
      best = d;
    }
  }
  return best;
}
// z = y ./ x (elementwise quotient, widened to FLOAT2)
static inline void floatm_ratio(int n, FLOAT *x, FLOAT *y, FLOAT2 *z) {
  for (int k = 0; k < n; ++k) {
    z[k] = (y[k] / x[k]);
  }
}
// z = |x - y| (elementwise)
static inline void floatm_abs_diff(int n, FLOAT *x, FLOAT *y, FLOAT *z) {
  for (int k = 0; k < n; ++k) {
    z[k] = fabsf(x[k] - y[k]);
  }
}
// Sum of |y[i] - x[i]| with compensated (Kahan-style) accumulation: 'err'
// carries the low-order bits lost by each addition and is folded into the
// next term, improving accuracy over a naive running sum.
// NOTE(review): despite the name, this is the L1 (sum of absolute
// differences), not an L2 norm -- no square root is taken.
static inline DOUBLE double_norm_diff(int n, DOUBLE *x, DOUBLE *y) {
  DOUBLE d = 0.0;
  DOUBLE err = 0.0;  // running compensation (lost low-order bits)
  for(int i = 0; i < n; i++) {
    DOUBLE temp = d;
    DOUBLE r = std::fabs(y[i] - x[i]) + err;  // fold previous error into this term
    d = temp + r;
    err = temp - d;  // -(increment actually applied)
    err += r;        // err = r - (d_new - d_old): the part that was rounded away
  }
  return d;
}
// Same compensated L1 difference for FLOAT inputs, accumulated in FLOAT2.
static inline FLOAT2 float_norm_diff(int n, FLOAT *x, FLOAT *y) {
  FLOAT2 d = 0.0;
  FLOAT2 err = 0.0;  // running compensation (lost low-order bits)
  for(int i = 0; i < n; i++) {
    FLOAT2 temp = d;
    FLOAT2 r = std::fabs(y[i] - x[i]) + err;
    d = temp + r;
    err = temp - d;
    err += r;
  }
  return d;
}
// ---- mixed precision routines ----

// y (FLOAT) = x (DOUBLE): narrowing copy
static inline void mixed_copy(int n, DOUBLE *x, FLOAT *y) {
  for (int k = 0; k < n; ++k) {
    y[k] = x[k];
  }
}
// y (DOUBLE) += a * x (FLOAT)
static inline void mixed_axpy(int n, DOUBLE a, FLOAT *x, DOUBLE *y) {
  for (int k = 0; k < n; ++k) {
    y[k] += x[k] * a;
  }
}
// y (FLOAT) = x (DOUBLE) + b * y
static inline void mixed_xpby(int n, DOUBLE *x, DOUBLE b, FLOAT *y) {
  for (int k = 0; k < n; ++k) {
    y[k] = x[k] + b * y[k];
  }
}
#endif
|
9.race2.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 100
int main() {
  int A[N];
  // Deliberate data race: the loop carries a dependence of distance 3
  // (A[i] reads A[i-3] written by an earlier iteration), so 'omp for simd'
  // may execute conflicting iterations concurrently. Do NOT "fix" this --
  // the FileCheck directive below expects the analyzer to report it.
  // Note also that A is read uninitialized for i = 3..5, which is
  // irrelevant to what this test checks.
#pragma omp for simd
  for (int i = 3; i < N; i++) {
    A[i] = A[i - 3] + 2;
  }
  return 0;
}
// CHECK: Data Race detected
// END
|
level.c | // RUN: %compile-run-and-check
#include <omp.h>
#include <stdio.h>
const int MaxThreads = 1024;
const int NumThreads = 64;
/*
 * Exercises omp_get_level/omp_get_active_level and
 * omp_get_ancestor_thread_num/omp_get_team_size at every nesting depth
 * inside a target region: directly in the target, inside an active
 * parallel region, and inside a nested (serialized) parallel region.
 * The printf strings below are FileCheck-matched -- do not alter them.
 */
int main(int argc, char *argv[]) {
  int level = -1, activeLevel = -1;
  // The expected value is -1, initialize to different value.
  int ancestorTNumNeg = 1, teamSizeNeg = 1;
  int ancestorTNum0 = -1, teamSize0 = -1;
  // The expected value is -1, initialize to different value.
  int ancestorTNum1 = 1, teamSize1 = 1;
  int check1[MaxThreads];
  int check2[MaxThreads];
  int check3[MaxThreads];
  int check4[MaxThreads];
  for (int i = 0; i < MaxThreads; i++) {
    check1[i] = check2[i] = check3[i] = check4[i] = 0;
  }

#pragma omp target map(level, activeLevel, ancestorTNumNeg, teamSizeNeg)       \
    map(ancestorTNum0, teamSize0, ancestorTNum1, teamSize1)                    \
    map(check1[:], check2[:], check3[:], check4[:])
  {
    level = omp_get_level();
    activeLevel = omp_get_active_level();

    // Expected to return -1.
    ancestorTNumNeg = omp_get_ancestor_thread_num(-1);
    teamSizeNeg = omp_get_team_size(-1);

    // Expected to return 0 and 1.
    ancestorTNum0 = omp_get_ancestor_thread_num(0);
    teamSize0 = omp_get_team_size(0);

    // Expected to return -1 because the requested level is larger than
    // the nest level.
    ancestorTNum1 = omp_get_ancestor_thread_num(1);
    teamSize1 = omp_get_team_size(1);

    // Expecting active parallel region.
#pragma omp parallel num_threads(NumThreads)
    {
      int id = omp_get_thread_num();

      // Multiply return value of omp_get_level by 5 to avoid that this test
      // passes if both API calls return wrong values.
      check1[id] += omp_get_level() * 5 + omp_get_active_level();

      // Expected to return 0 and 1.
      check2[id] += omp_get_ancestor_thread_num(0) + 5 * omp_get_team_size(0);
      // Expected to return the current thread num.
      check2[id] += (omp_get_ancestor_thread_num(1) - id);
      // Expected to return the current number of threads.
      check2[id] += 3 * omp_get_team_size(1);
      // Expected to return -1, see above.
      check2[id] += omp_get_ancestor_thread_num(2) + omp_get_team_size(2);

      // Expecting serialized parallel region.
#pragma omp parallel
      {
#pragma omp atomic
        check3[id] += omp_get_level() * 5 + omp_get_active_level();

        // Expected to return 0 and 1.
        int check4Inc = omp_get_ancestor_thread_num(0) + 5 * omp_get_team_size(0);
        // Expected to return the parent thread num.
        check4Inc += (omp_get_ancestor_thread_num(1) - id);
        // Expected to return the number of threads in the active parallel region.
        check4Inc += 3 * omp_get_team_size(1);
        // Expected to return 0 and 1.
        check4Inc += omp_get_ancestor_thread_num(2) + 3 * omp_get_team_size(2);
        // Expected to return -1, see above.
        check4Inc += omp_get_ancestor_thread_num(3) + omp_get_team_size(3);

#pragma omp atomic
        check4[id] += check4Inc;
      }
    }
  }

  // CHECK: target: level = 0, activeLevel = 0
  printf("target: level = %d, activeLevel = %d\n", level, activeLevel);

  // CHECK: level = -1: ancestorTNum = -1, teamSize = -1
  printf("level = -1: ancestorTNum = %d, teamSize = %d\n", ancestorTNumNeg, teamSizeNeg);

  // CHECK: level = 0: ancestorTNum = 0, teamSize = 1
  printf("level = 0: ancestorTNum = %d, teamSize = %d\n", ancestorTNum0, teamSize0);

  // CHECK: level = 1: ancestorTNum = -1, teamSize = -1
  printf("level = 1: ancestorTNum = %d, teamSize = %d\n", ancestorTNum1, teamSize1);

  // CHECK-NOT: invalid
  for (int i = 0; i < MaxThreads; i++) {
    // Check active parallel region:
    // omp_get_level() = 1, omp_get_active_level() = 1
    const int Expected1 = 6;
    if (i < NumThreads) {
      if (check1[i] != Expected1) {
        printf("invalid: check1[%d] should be %d, is %d\n", i, Expected1, check1[i]);
      }
    } else if (check1[i] != 0) {
      printf("invalid: check1[%d] should be 0, is %d\n", i, check1[i]);
    }

    // 5 * 1 + 3 * 64 - 1 - 1 (see above)
    const int Expected2 = 195;
    if (i < NumThreads) {
      if (check2[i] != Expected2) {
        printf("invalid: check2[%d] should be %d, is %d\n", i, Expected2, check2[i]);
      }
    } else if (check2[i] != 0) {
      printf("invalid: check2[%d] should be 0, is %d\n", i, check2[i]);
    }

    // Check serialized parallel region:
    // omp_get_level() = 2, omp_get_active_level() = 1
    const int Expected3 = 11;
    if (i < NumThreads) {
      if (check3[i] != Expected3) {
        printf("invalid: check3[%d] should be %d, is %d\n", i, Expected3, check3[i]);
      }
    } else if (check3[i] != 0) {
      printf("invalid: check3[%d] should be 0, is %d\n", i, check3[i]);
    }

    // 5 * 1 + 3 * 64 + 3 * 1 - 1 - 1 (see above)
    const int Expected4 = 198;
    if (i < NumThreads) {
      if (check4[i] != Expected4) {
        printf("invalid: check4[%d] should be %d, is %d\n", i, Expected4, check4[i]);
      }
    } else if (check4[i] != 0) {
      printf("invalid: check4[%d] should be 0, is %d\n", i, check4[i]);
    }
  }

  // Check for parallel level in non-SPMD kernels.
  level = 0;
#pragma omp target teams distribute num_teams(1) thread_limit(32) reduction(+:level)
  for (int i=0; i<5032; i+=32) {
    int ub = (i+32 > 5032) ? 5032 : i+32;
#pragma omp parallel for schedule(dynamic)
    for (int j=i ; j < ub; j++) ;
    level += omp_get_level();
  }
  // CHECK: Integral level = 0.
  printf("Integral level = %d.\n", level);

  return 0;
}
|
activ_functions.c |
/*
Copyright (C) 2020 David Cornu
for the Convolutional Interactive Artificial
Neural Networks by/for Astrophysicists (CIANNA) Code
(https://github.com/Deyht/CIANNA)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "prototypes.h"
//public are in "prototypes.h"
//private prototypes
void linear_activation(layer *current);
void linear_deriv(layer *previous);
void linear_deriv_output_error(layer* current);
void linear_output_error(layer* current);
void ReLU_activation(layer *current);
void ReLU_deriv(layer *previous);
void ReLU_deriv_output_error(layer* current);
void ReLU_output_error(layer* current);
void logistic_activation(layer *current);
void logistic_deriv(layer *previous);
void logistic_deriv_output_error(layer* current);
void logistic_output_error(layer* current);
void softmax_activation(layer *current);
void softmax_deriv(layer *previous);
void softmax_deriv_output_error(layer *current);
void softmax_output_error(layer *current);
void YOLO_activation(layer *current);
void YOLO_deriv(layer *previous);
void YOLO_deriv_output_error(layer *current);
void YOLO_output_error(layer *current);
void ReLU_activation_fct(void *tab, int len, int dim, float saturation, float leaking_factor);
void ReLU_deriv_fct(void *deriv, void *value, int len, int dim, float saturation, float leaking_factor, int size);
void quadratic_deriv_output_error(void *delta_o, void *output, void *target,
int dim, int len, int size);
void quadratic_output_error(void *output_error, void *output, void *target,
int dim, int len, int size);
void logistic_activation_fct(void *tab, float beta, float saturation, int dim, int len, int size);
void logistic_deriv_fct(void *deriv, void* value, float beta, int len, int dim, int size);
void softmax_activation_fct(void *tab, int len, int dim, int size);
void cross_entropy_deriv_output_error(void *delta_o, void *output, void *target, int len, int dim, int size);
void cross_entropy_output_error(void *output_error, void *output, void *target, int len, int dim, int size);
//#####################################################
/*
 * Bind a layer's forward/backward activation function pointers according
 * to its activation_type. Any unrecognized type falls back to linear.
 */
void define_activation(layer *current)
{
	int type = current->activation_type;

	if(type == RELU || type == RELU_6)
	{
		current->activation = ReLU_activation;
		current->deriv_activation = ReLU_deriv;
	}
	else if(type == LOGISTIC)
	{
		current->activation = logistic_activation;
		current->deriv_activation = logistic_deriv;
	}
	else if(type == SOFTMAX)
	{
		current->activation = softmax_activation;
		current->deriv_activation = softmax_deriv;
	}
	else if(type == YOLO)
	{
		current->activation = YOLO_activation;
		current->deriv_activation = YOLO_deriv; /* should not be needed */
		//YOLO_activ_init(current); //needed ?
	}
	else /* LINEAR and anything unknown */
	{
		current->activation = linear_activation;
		current->deriv_activation = linear_deriv;
	}
}
/*
 * Dispatch the output-layer gradient computation (delta of the loss) to
 * the handler matching the layer's activation type; defaults to linear.
 */
void deriv_output_error(layer *current)
{
	int type = current->activation_type;

	if(type == RELU || type == RELU_6)
		ReLU_deriv_output_error(current);
	else if(type == LOGISTIC)
		logistic_deriv_output_error(current);
	else if(type == SOFTMAX)
		softmax_deriv_output_error(current);
	else if(type == YOLO)
		YOLO_deriv_output_error(current);
	else /* LINEAR and anything unknown */
		linear_deriv_output_error(current);
}
/*
 * Dispatch the output-layer loss evaluation to the handler matching the
 * layer's activation type; defaults to linear.
 */
void output_error_fct(layer* current)
{
	int type = current->activation_type;

	if(type == RELU || type == RELU_6)
		ReLU_output_error(current);
	else if(type == LOGISTIC)
		logistic_output_error(current);
	else if(type == SOFTMAX)
		softmax_output_error(current);
	else if(type == YOLO)
		YOLO_output_error(current);
	else /* LINEAR and anything unknown */
		linear_output_error(current);
}
/*
 * Compute-backend dispatch for the output gradient: CUDA builds route
 * C_CUDA to the device kernel; every other method (C_NAIV, C_BLAS,
 * unknown) uses the host implementation. Note that with C_CUDA selected
 * in a non-CUDA build, nothing runs -- same as the original.
 */
void output_deriv_error(layer* current)
{
	if(current->c_network->compute_method == C_CUDA)
	{
		#ifdef CUDA
		cuda_deriv_output_error(current);
		#endif
		return;
	}
	deriv_output_error(current);
}
/*
 * Compute-backend dispatch for the output loss: CUDA builds route C_CUDA
 * to the device kernel; all other methods fall through to the host
 * implementation (mirrors output_deriv_error above).
 */
void output_error(layer* current)
{
	if(current->c_network->compute_method == C_CUDA)
	{
		#ifdef CUDA
		cuda_output_error_fct(current);
		#endif
		return;
	}
	output_error_fct(current);
}
/* Write the parenthesized tag of an activation type to stream f
 * (inverse of load_activ_param). Unknown types print as "(RELU)". */
void print_activ_param(FILE *f, int type)
{
	const char *tag;

	switch(type)
	{
		case LOGISTIC: tag = "(LOGI)"; break;
		case SOFTMAX:  tag = "(SMAX)"; break;
		case LINEAR:   tag = "(LIN)"; break;
		case YOLO:     tag = "(YOLO)"; break;
		case RELU_6:   tag = "(RELU_6)"; break;
		case RELU:
		default:       tag = "(RELU)"; break;
	}
	fprintf(f, "%s", tag);
}
/* Write the parenthesized tag of an activation type into the caller's
 * buffer 'activ' (must hold at least 9 bytes for "(RELU_6)").
 * Unknown types produce "(RELU)". */
void get_string_activ_param(char* activ, int type)
{
	const char *tag;

	switch(type)
	{
		case LOGISTIC: tag = "(LOGI)"; break;
		case SOFTMAX:  tag = "(SMAX)"; break;
		case LINEAR:   tag = "(LIN)"; break;
		case YOLO:     tag = "(YOLO)"; break;
		case RELU_6:   tag = "(RELU_6)"; break;
		case RELU:
		default:       tag = "(RELU)"; break;
	}
	sprintf(activ, "%s", tag);
}
/* Map a parenthesized tag back to its activation-type constant
 * (inverse of get_string_activ_param). Unrecognized input yields RELU. */
int load_activ_param(char *type)
{
	static const struct { const char *tag; int value; } table[] = {
		{"(SMAX)",   SOFTMAX},
		{"(LIN)",    LINEAR},
		{"(LOGI)",   LOGISTIC},
		{"(YOLO)",   YOLO},
		{"(RELU_6)", RELU_6},
		{"(RELU)",   RELU},
	};
	size_t i;

	for(i = 0; i < sizeof(table)/sizeof(table[0]); i++)
	{
		if(strcmp(type, table[i].tag) == 0)
			return table[i].value;
	}
	return RELU; /* default */
}
//#####################################################
// Linear activation related functions
//#####################################################
/* Linear (identity) activation: the layer output is used as-is,
   so the forward pass requires no transformation. */
void linear_activation(layer *current)
{
	//empty on purpose
}
/* Derivative of the identity is 1: the incoming delta passes through
   unchanged, so no computation is needed. */
void linear_deriv(layer *previous)
{
	//empty on purpose
}
/* Output-layer gradient for a linear activation: plain quadratic-cost
   derivative (output - target) written into delta_o. */
void linear_deriv_output_error(layer *current)
{
	linear_param *l_param = (linear_param*)current->activ_param;
	int len = (l_param->biased_dim)*current->c_network->length;

	quadratic_deriv_output_error(current->delta_o, current->output,
		current->c_network->target, len, l_param->dim, l_param->size);
}
/* Output-layer cost for a linear activation: quadratic (MSE-style) error
   written into the network's output_error buffer. */
void linear_output_error(layer *current)
{
	linear_param *l_param = (linear_param*)current->activ_param;
	int len = (l_param->biased_dim)*current->c_network->length;

	quadratic_output_error(current->c_network->output_error,
		current->output, current->c_network->target, len,
		l_param->dim, l_param->size);
}
//#####################################################
//#####################################################
// ReLU activation related functions
//#####################################################
/* Apply the (leaky, saturated) ReLU forward pass to the layer output. */
void ReLU_activation(layer *current)
{
	ReLU_param *r_param = (ReLU_param*)current->activ_param;

	ReLU_activation_fct(current->output, r_param->size, r_param->dim,
		r_param->saturation, r_param->leaking_factor);
}
//Is in fact a leaky ReLU, to obtain true ReLU define leaking_factor to 0
/* Leaky / saturated ReLU forward pass.
   len values are processed; every dim-th value a bias slot is skipped
   (hence the i + i/dim offset). Negative inputs are scaled by
   leaking_factor; values above `saturation` are compressed toward it
   with the same factor. A leaking_factor of 0 gives a true ReLU. */
void ReLU_activation_fct(void *tab, int len, int dim, float saturation, float leaking_factor)
{
	float *out = (float*) tab;
	int i;

	#pragma omp parallel for schedule(guided,4)
	for(i = 0; i < len; i++)
	{
		int off = i + i/dim; /* skip one bias slot every dim values */
		float v = out[off];

		if(v <= 0.0f)
			out[off] = v*leaking_factor;
		else if(v > saturation)
			out[off] = saturation + (v - saturation)*leaking_factor;
	}
}
/* Back-propagate through a (leaky, saturated) ReLU: scales the deltas
   already stored in previous->delta_o in place. */
void ReLU_deriv(layer *previous)
{
	ReLU_param *r_param = (ReLU_param*)previous->activ_param;

	ReLU_deriv_fct(previous->delta_o, previous->output, r_param->size,
		r_param->dim, r_param->saturation, r_param->leaking_factor,
		r_param->size);
}
//should be adapted for both conv and dense layer if dim is properly defined
/* Leaky / saturated ReLU backward pass: scales `deriv` in place by the
   local slope of the activation evaluated at `value`.
   Slope is leaking_factor where the forward input was <= 0 or above the
   saturation threshold, and 1 in between. Bias slots (every dim+1-th
   position) and padding past `len` get a zero gradient.
   Fix: the saturation branch compared f_deriv[i] (the gradient) against
   `saturation` instead of the activation value f_value[i], so saturated
   units kept a full gradient; now consistent with the forward pass. */
void ReLU_deriv_fct(void *deriv, void *value, int len, int dim, float saturation, float leaking_factor, int size)
{
	int i;
	float *f_deriv = (float*) deriv;
	float *f_value = (float*) value;

	#pragma omp parallel for schedule(guided,4)
	for(i = 0; i < size; i++)
	{
		if(i < len && (i+1)%(dim+1) != 0)
		{
			if(f_value[i] <= 0.0f)
				f_deriv[i] *= leaking_factor;
			else if(f_value[i] > saturation) /* fixed: was f_deriv[i] */
				f_deriv[i] *= leaking_factor;
		}
		else
			f_deriv[i] = 0.0f;
	}
}
// Should re write a output function to take into account ReLU for Conv output format
/* Output-layer gradient for a ReLU activation: quadratic-cost derivative
   followed by the ReLU slope correction, both written into delta_o. */
void ReLU_deriv_output_error(layer* current)
{
	ReLU_param *r_param = (ReLU_param*)current->activ_param;
	int len = (r_param->biased_dim) * current->c_network->length;

	quadratic_deriv_output_error(current->delta_o, current->output,
		current->c_network->target, len, r_param->dim, r_param->size);
	ReLU_deriv_fct(current->delta_o, current->output, r_param->size,
		r_param->dim, r_param->saturation, r_param->leaking_factor,
		r_param->size);
}
/* Output-layer cost for a ReLU activation: quadratic (MSE-style) error
   written into the network's output_error buffer. */
void ReLU_output_error(layer* current)
{
	ReLU_param *r_param = (ReLU_param*)current->activ_param;
	int len = (r_param->biased_dim)*current->c_network->length;

	quadratic_output_error(current->c_network->output_error,
		current->output, current->c_network->target, len,
		r_param->dim, r_param->size);
}
/* Derivative of the quadratic cost: delta_o[i] = output[i] - target[i'].
   `output` carries one bias slot every dim+1 values, `target` does not,
   so the target index is remapped with i - i/(dim+1). Bias slots and
   positions past `len` receive a zero gradient. */
void quadratic_deriv_output_error(void *delta_o, void *output, void *target, int len, int dim, int size)
{
	float *f_delta_o = (float*) delta_o;
	float *f_output = (float*) output;
	float *f_target = (float*) target;
	int i;

	#pragma omp parallel for schedule(guided,4)
	for(i = 0; i < size; i++)
	{
		if(i >= len || (i+1)%(dim+1) == 0)
		{
			f_delta_o[i] = 0.0f;
			continue;
		}
		int t_pos = i - i/(dim+1); /* target has no bias slots */
		f_delta_o[i] = f_output[i] - f_target[t_pos];
	}
}
/* Quadratic cost per output value: 0.5*(output - target)^2.
   Same layout convention as quadratic_deriv_output_error: bias slots
   (every dim+1-th position) and padding past `len` are set to zero. */
void quadratic_output_error(void *output_error, void *output, void *target, int len, int dim, int size)
{
	float *f_output_error = (float*) output_error;
	float *f_output = (float*) output;
	float *f_target = (float*) target;
	int i;

	#pragma omp parallel for schedule(guided,4)
	for(i = 0; i < size; i++)
	{
		if(i >= len || (i+1)%(dim+1) == 0)
		{
			f_output_error[i] = 0.0f;
			continue;
		}
		int t_pos = i - i/(dim+1); /* target has no bias slots */
		float diff = f_output[i] - f_target[t_pos];
		f_output_error[i] = 0.5*diff*diff;
	}
}
//#####################################################
//#####################################################
// Logistic activation related functions
//#####################################################
/* Apply the (scaled, saturated) logistic forward pass to the layer output. */
void logistic_activation(layer *current)
{
	logistic_param *l_param = (logistic_param*)current->activ_param;
	int len = (l_param->biased_dim)*current->c_network->length;

	logistic_activation_fct(current->output, l_param->beta,
		l_param->saturation, len, l_param->dim, l_param->size);
}
void logistic_activation_fct(void *tab, float beta, float saturation, int len, int dim, int size)
{
int i = 0;
int pos;
float *f_tab = (float*) tab;
#pragma omp parallel for private(pos) schedule(guided,4)
for(i = 0; i < size; i++)
{
if(i < len)
{
pos = i + i / dim;
f_tab[pos] = -beta*f_tab[pos];
if(f_tab[pos] > saturation)
f_tab[pos] = saturation;
f_tab[pos] = 1.0/(1.0 + expf(f_tab[pos]));
}
else
{
f_tab[i] = 0.0;
}
}
}
/* Back-propagate through a logistic activation: scales the deltas
   already stored in previous->delta_o in place. */
void logistic_deriv(layer *previous)
{
	logistic_param *l_param = (logistic_param*)previous->activ_param;
	int len = (l_param->biased_dim)*previous->c_network->length;

	logistic_deriv_fct(previous->delta_o, previous->output,
		l_param->beta, len, l_param->dim, l_param->size);
}
/* Logistic backward pass: deriv *= beta * f * (1 - f), where f = value[i]
   is the already-activated output. Bias slots (every dim+1-th position)
   and padding past `len` get a zero gradient. */
void logistic_deriv_fct(void *deriv, void* value, float beta, int len, int dim, int size)
{
	float *f_deriv = (float*) deriv;
	float *f_value = (float*) value;
	int i;

	#pragma omp parallel for schedule(guided,4)
	for(i = 0; i < size; i++)
	{
		if(i >= len || (i+1)%(dim+1) == 0)
		{
			f_deriv[i] = 0.0;
			continue;
		}
		f_deriv[i] *= beta*f_value[i]*(1.0-f_value[i]);
	}
}
/* Output-layer gradient for a logistic activation: quadratic-cost
   derivative followed by the sigmoid slope correction, into delta_o. */
void logistic_deriv_output_error(layer* current)
{
	logistic_param *l_param = (logistic_param*)current->activ_param;
	int len = (l_param->biased_dim)*current->c_network->length;

	quadratic_deriv_output_error(current->delta_o, current->output,
		current->c_network->target, len, l_param->dim, l_param->size);
	logistic_deriv_fct(current->delta_o, current->output, l_param->beta,
		len, l_param->dim, l_param->size);
}
/* Output-layer cost for a logistic activation: quadratic (MSE-style)
   error written into the network's output_error buffer. */
void logistic_output_error(layer* current)
{
	logistic_param *l_param = (logistic_param*)current->activ_param;
	int len = (l_param->biased_dim)*current->c_network->length;

	quadratic_output_error(current->c_network->output_error,
		current->output, current->c_network->target, len,
		l_param->dim, l_param->size);
}
//#####################################################
//#####################################################
// Soft-Max activation related functions
//#####################################################
/* Apply the softmax forward pass: one normalization per batch item,
   over the full batch_size (padding rows included). */
void softmax_activation(layer *current)
{
	softmax_param *s_param = (softmax_param*)current->activ_param;

	softmax_activation_fct(current->output, current->c_network->length,
		s_param->dim, current->c_network->batch_size);
}
void softmax_activation_fct(void *tab, int len, int dim, int size)
{
//difficult to optimize but can be invastigated
//provides a probabilistic output
int i;
int j;
float *pos;
float vmax;
float normal = 0.0000001f;
#pragma omp parallel for private(j, pos, vmax, normal) schedule(guided,4)
for(i = 0; i < size; i++)
{
normal = 0.0000001f;
if(i < len)
{
pos = (float*)tab + i*(dim+1);
vmax = pos[0];
for(j = 1; j < dim; j++)
if(pos[j] > vmax)
vmax = pos[j];
for(j = 0; j < dim; j++)
{
pos[j] = expf(pos[j]-vmax);
normal += pos[j];
}
pos[dim] = 0.0f;
for(j = 0; j < dim; j++)
pos[j] /= normal;
pos[dim] = 0.0f;
}
else
{
pos = (float*)tab + i*(dim+1);
for(j = 0; j < dim; j++)
pos[j] = 0.0f;
pos[dim] = 0.0f;
}
}
}
/* Softmax is only supported as the final (output) activation: its
   derivative is folded into the cross-entropy output error, so a
   mid-network softmax layer is a configuration error and aborts. */
void softmax_deriv(layer *previous)
{
	printf("Error : Softmax can not be used in the middle of the network !\n");
	exit(EXIT_FAILURE);
}
/* Output-layer gradient for softmax, using the cross-entropy cost by
   default (the combined gradient simplifies to output - target). */
void softmax_deriv_output_error(layer *current)
{
	softmax_param *s_param = (softmax_param*)current->activ_param;
	int len = (s_param->biased_dim)*current->c_network->length;
	int full = (s_param->biased_dim)*current->c_network->batch_size;

	cross_entropy_deriv_output_error(current->delta_o, current->output,
		current->c_network->target, len, s_param->dim, full);
}
/* Output-layer cost for softmax, using the cross-entropy error by default. */
void softmax_output_error(layer *current)
{
	softmax_param *s_param = (softmax_param*)current->activ_param;
	int len = (s_param->biased_dim)*current->c_network->length;
	int full = (s_param->biased_dim)*current->c_network->batch_size;

	cross_entropy_output_error(current->c_network->output_error,
		current->output, current->c_network->target, len,
		s_param->dim, full);
}
/* Combined softmax + cross-entropy gradient: delta_o[i] = output[i] -
   target[i']. The output carries one bias slot every dim+1 values, the
   target does not (index remap i - i/(dim+1)). Bias slots and positions
   past `len` receive a zero gradient. */
void cross_entropy_deriv_output_error(void *delta_o, void *output, void *target, int len, int dim, int size)
{
	float *f_delta_o = (float*) delta_o;
	float *f_output = (float*) output;
	float *f_target = (float*) target;
	int i;

	#pragma omp parallel for schedule(guided,4)
	for(i = 0; i < size; i++)
	{
		if(i >= len || (i+1)%(dim+1) == 0)
		{
			f_delta_o[i] = 0.0f;
			continue;
		}
		int t_pos = i - i/(dim+1); /* target has no bias slots */
		f_delta_o[i] = f_output[i] - f_target[t_pos];
	}
}
void cross_entropy_output_error(void *output_error, void *output, void *target, int len, int dim, int size)
{
int i;
int pos;
float *f_output_error = (float*) output_error;
float *f_output = (float*) output;
float *f_target = (float*) target;
#pragma omp parallel for private(pos) schedule(guided,4)
for(i = 0; i < size; i++)
{
if(i < len && (i+1)%(dim+1) != 0)
{
pos = i - i/(dim+1);
if(f_output[i] > 0.00001f)
f_output_error[i] = -f_target[pos]*logf(f_output[i]);
else
f_output_error[i] = -f_target[pos]*logf(0.00001f);
}
else
f_output_error[i] = 0.0f;
}
}
//#####################################################
//#####################################################
// YOLO activation related functions
//#####################################################
float IoU_fct(float* output, float* target)
{
float inter_w, inter_h, inter_d, inter_3d, uni_3d;
inter_w = fmaxf(0.0f, fminf(output[3], target[3]) - fmaxf(output[0], target[0]));
inter_h = fmaxf(0.0f, fminf(output[4], target[4]) - fmaxf(output[1], target[1]));
inter_d = fmaxf(0.0f, fminf(output[5], target[5]) - fmaxf(output[2], target[2]));
inter_3d = inter_w * inter_h * inter_d;
uni_3d = abs(output[3]-output[0])*abs(output[4]-output[1])*abs(output[5]-output[2])
+ abs(target[3]-target[0])*abs(target[4]-target[1])*abs(target[5]-target[2])
- inter_3d;
return ((float)inter_3d)/(float)uni_3d;
}
float GIoU_fct(float* output, float* target)
{
float inter_w, inter_h, inter_d, inter_3d, uni_3d, enclose_3d, enclose_w, enclose_h, enclose_d;
inter_w = fmaxf(0.0f, fminf(output[3], target[3]) - fmaxf(output[0], target[0]));
inter_h = fmaxf(0.0f, fminf(output[4], target[4]) - fmaxf(output[1], target[1]));
inter_d = fmaxf(0.0f, fminf(output[5], target[5]) - fmaxf(output[2], target[2]));
inter_3d = inter_w * inter_h * inter_d;
uni_3d = abs(output[3]-output[0])*abs(output[4]-output[1])*abs(output[5]-output[2])
+ abs(target[3]-target[0])*abs(target[4]-target[1])*abs(target[5]-target[2])
- inter_3d;
enclose_w = (fmaxf(output[3], target[3]) - fminf(output[0], target[0]));
enclose_h = (fmaxf(output[4], target[4]) - fminf(output[1], target[1]));
enclose_d = (fmaxf(output[5], target[5]) - fminf(output[2], target[2]));
enclose_3d = enclose_w * enclose_h * enclose_d;
return (((float)inter_3d)/(float)uni_3d - (float)(enclose_3d - uni_3d)/(float)enclose_3d);
}
//order: xmin, ymin, zmin, xmax, ymax, zmax
float DIoU_fct(float* output, float* target)
{
float inter_w, inter_h, inter_d, inter_3d, uni_3d, enclose_w, enclose_h, enclose_d;
float cx_a, cx_b, cy_a, cy_b, cz_a, cz_b, dist_cent, diag_enclose;
inter_w = fmaxf(0.0f, fminf(output[3], target[3]) - fmaxf(output[0], target[0]));
inter_h = fmaxf(0.0f, fminf(output[4], target[4]) - fmaxf(output[1], target[1]));
inter_d = fmaxf(0.0f, fminf(output[5], target[5]) - fmaxf(output[2], target[2]));
inter_3d = inter_w * inter_h * inter_d;
uni_3d = abs(output[3]-output[0])*abs(output[4]-output[1])*abs(output[5]-output[2])
+ abs(target[3]-target[0])*abs(target[4]-target[1])*abs(target[5]-target[2])
- inter_3d;
enclose_w = (fmaxf(output[3], target[3]) - fminf(output[0], target[0]));
enclose_h = (fmaxf(output[4], target[4]) - fminf(output[1], target[1]));
enclose_d = (fmaxf(output[5], target[5]) - fminf(output[2], target[2]));
cx_a = (output[3] + output[0])*0.5; cx_b = (target[3] + target[0])*0.5;
cy_a = (output[4] + output[1])*0.5; cy_b = (target[4] + target[1])*0.5;
cz_a = (output[5] + output[2])*0.5; cz_b = (target[5] + target[2])*0.5;
dist_cent = sqrt((cx_a - cx_b)*(cx_a - cx_b) + (cy_a - cy_b)*(cy_a - cy_b) + (cz_a - cz_b)*(cz_a - cz_b));
diag_enclose = sqrt(enclose_w*enclose_w + enclose_h*enclose_h + enclose_d*enclose_d);
return ((float)inter_3d)/(float)uni_3d - (float)(dist_cent/diag_enclose);
}
/* Configures the YOLO output layer of `net` and prints a summary.
   Any of the table arguments left NULL is replaced by an allocated
   default; these allocations are stored in net->y_param and live for the
   lifetime of the network. Returns the flat output size per grid cell:
   nb_box*(8+nb_class+nb_param).
   Fix: the summary printf contained "\nn Nboxes", printing a stray 'n';
   corrected to "\n Nboxes". */
int set_yolo_params(network *net, int nb_box, int IoU_type, float *prior_w, float *prior_h, float *prior_d, float *yolo_noobj_prob_prior, int nb_class, int nb_param, int strict_box_size, float *scale_tab, float **slopes_and_maxes_tab, float *param_ind_scale, float *IoU_limits, int *fit_parts)
{
	int i;
	float *temp;
	float **sm;
	char IoU_type_char[40];

	net->y_param->IoU_type = IoU_type;
	net->y_param->strict_box_size_association = strict_box_size;

	/* Default error scales: all parts weighted equally */
	if(scale_tab == NULL)
	{
		scale_tab = (float*) calloc(6, sizeof(float));
		for(i = 0; i < 6; i++)
			scale_tab[i] = 1.0f;
	}
	/* Default activation slopes and saturations per output part
	   (see YOLO_activation_fct for how each row is used) */
	if(slopes_and_maxes_tab == NULL)
	{
		temp = (float*) calloc(6*3, sizeof(float));
		slopes_and_maxes_tab = (float**) malloc(6*sizeof(float*));
		for(i = 0; i < 6; i++)
			slopes_and_maxes_tab[i] = &temp[i*3];
		sm = slopes_and_maxes_tab;
		sm[0][0] = 1.0f; sm[0][1] = 8.0f; sm[0][2] = 0.0f;
		sm[1][0] = 1.0f; sm[1][1] = 1.8f; sm[1][2] = -1.4f;
		sm[2][0] = 1.0f; sm[2][1] = 8.0f; sm[2][2] = 0.0f;
		sm[3][0] = 1.0f; sm[3][1] = 8.0f; sm[3][2] = 0.0f;
		sm[4][0] = 1.0f; sm[4][1] = 8.0f; sm[4][2] = 0.0f;
		sm[5][0] = 1.0f; sm[5][1] = 2.0f; sm[5][2] = -0.2f;
	}
	/* Default per-parameter scale: 1 for every extra regression param */
	if(param_ind_scale == NULL)
	{
		param_ind_scale = (float*) calloc(nb_param, sizeof(float));
		for(i = 0; i < nb_param; i++)
			param_ind_scale[i] = 1.0f;
	}
	/* Default IoU limits: good-match lock at 0.3, prob fit always,
	   obj/class/param fits above 0.3 */
	if(IoU_limits == NULL)
	{
		IoU_limits = (float*) calloc(5,sizeof(float));
		IoU_limits[0] = 0.3f;
		IoU_limits[1] = 0.0f; IoU_limits[2] = 0.3f;
		IoU_limits[3] = 0.3f; IoU_limits[4] = 0.3f;
	}
	/* Default: fit every part (size, prob, obj, class, param) */
	if(fit_parts == NULL)
	{
		fit_parts = (int*) calloc(5,sizeof(int));
		for(i = 0; i < 5; i++)
			fit_parts[i] = 1;
	}

	net->y_param->nb_box = nb_box;
	net->y_param->prior_w = prior_w;
	net->y_param->prior_h = prior_h;
	net->y_param->prior_d = prior_d;
	net->y_param->noobj_prob_prior = yolo_noobj_prob_prior;
	net->y_param->nb_class = nb_class;
	net->y_param->nb_param = nb_param;
	//Priors table must be sent to GPU memory if C_CUDA
	net->y_param->scale_tab = scale_tab;
	net->y_param->slopes_and_maxes_tab = slopes_and_maxes_tab;
	net->y_param->param_ind_scale = param_ind_scale;
	net->y_param->IoU_limits = IoU_limits;
	net->y_param->fit_parts = fit_parts;

	switch(net->y_param->IoU_type)
	{
		default:
		case IOU:
			sprintf(IoU_type_char, "Classical IoU");
			net->y_param->c_IoU_fct = IoU_fct;
			break;
		case GIOU:
			sprintf(IoU_type_char, "Generalized GIoU");
			net->y_param->c_IoU_fct = GIoU_fct;
			break;
		case DIOU:
			sprintf(IoU_type_char, "Distance DIoU");
			net->y_param->c_IoU_fct = DIoU_fct;
			break;
	}

	printf("\nYOLO layer set with:\n Nboxes = %d\n Ndimensions = %d\n Nclasses = %d\n Nparams = %d\n IoU type = %s\n",
		net->y_param->nb_box, 3, net->y_param->nb_class, net->y_param->nb_param, IoU_type_char);
	printf(" W priors = [");
	for(i = 0; i < net->y_param->nb_box; i++)
		printf("%7.3f ", net->y_param->prior_w[i]);
	printf("]\n H priors = [");
	for(i = 0; i < net->y_param->nb_box; i++)
		printf("%7.3f ", net->y_param->prior_h[i]);
	printf("]\n D priors = [");
	for(i = 0; i < net->y_param->nb_box; i++)
		printf("%7.3f ", net->y_param->prior_d[i]);
	printf("]\n");
	if(net->y_param->strict_box_size_association)
		printf(" Strict box size association is ENABLED\n");
	printf(" No obj. prob. priors\n = [");
	for(i = 0; i < net->y_param->nb_box; i++)
		printf("%7.6f ", net->y_param->noobj_prob_prior[i]);
	printf("]\n");
	printf(" Error scales: Posit. Size Proba. Objct. Class. Param.\n = [");
	for(i = 0; i < 6; i++)
		printf(" %5.3f ",net->y_param->scale_tab[i]);
	printf("]\n IoU lim. = [");
	for(i = 0; i < 5; i++)
		printf("%7.3f ", net->y_param->IoU_limits[i]);
	printf("]\n");

	/* Flat number of output values per grid cell */
	return (nb_box*(8+nb_class+nb_param));
}
/* YOLO forward activation: each box encodes
   [x, y, z, w, h, d, prob, objectness, classes..., params...].
   Position, probability, objectness and classes get a bounded logistic;
   sizes and extra parameters get a clipped linear response.
   Slopes and saturations come from y_param.slopes_and_maxes_tab
   (default values are set in set_yolo_params). */
void YOLO_activation_fct(void *i_tab, int flat_offset, int len, yolo_param y_param, int size)
{
	float* tab = (float*) i_tab;
	int nb_class = y_param.nb_class, nb_param = y_param.nb_param;
	float **sm_tab = y_param.slopes_and_maxes_tab;
	int i;

	#pragma omp parallel for schedule(guided,4)
	for(i = 0; i < size; i++)
	{
		/* column of this value inside one box encoding */
		int in_col = (i / flat_offset)%(8+nb_class+nb_param);

		if(in_col < 3) /* box center position: bounded logistic */
		{
			tab[i] = -sm_tab[0][0]*tab[i];
			if(tab[i] > sm_tab[0][1])
				tab[i] = sm_tab[0][1];
			tab[i] = 1.0f/(1.0f + expf(tab[i]));
		}
		else if(in_col < 6) /* box size (log-space): clipped linear */
		{
			tab[i] = sm_tab[1][0]*tab[i];
			if(tab[i] > sm_tab[1][1])
				tab[i] = sm_tab[1][1];
			else if(tab[i] < sm_tab[1][2])
				tab[i] = sm_tab[1][2];
		}
		else if(in_col == 6) /* object presence probability: logistic */
		{
			tab[i] = -sm_tab[2][0]*tab[i];
			if(tab[i] > sm_tab[2][1])
				tab[i] = sm_tab[2][1];
			tab[i] = 1.0f/(1.0f + expf(tab[i]));
		}
		else if(in_col == 7) /* objectness (IoU-based quality): logistic */
		{
			tab[i] = -sm_tab[3][0]*tab[i];
			if(tab[i] > sm_tab[3][1])
				tab[i] = sm_tab[3][1];
			tab[i] = 1.0f/(1.0f + expf(tab[i]));
		}
		else if(in_col < 8+nb_class) /* class scores: logistic */
		{
			tab[i] = -sm_tab[4][0]*tab[i];
			if(tab[i] > sm_tab[4][1])
				tab[i] = sm_tab[4][1];
			tab[i] = 1.0f/(1.0f + expf(tab[i]));
		}
		else /* additional regression parameters: clipped linear */
		{
			tab[i] = sm_tab[5][0]*tab[i];
			if(tab[i] > sm_tab[5][1])
				tab[i] = sm_tab[5][1];
			else if(tab[i] < sm_tab[5][2])
				tab[i] = sm_tab[5][2];
		}
	}
}
// Only minimal optimisation has been performed for now => might be responsible for a significant portion of the total network time
// Only minimal optimisation has been performed for now => might be responsible for a significant portion of the total network time
/* Computes the YOLO output gradient (delta_o) for every grid cell of every
   batch item. For each cell: decode the predicted boxes to pixel space,
   associate each ground-truth object falling in the cell to the best
   (highest-IoU) prior box, then write position/size/prob/objectness/
   class/param gradients for matched boxes and the no-object probability
   gradient for unmatched ones.
   Fix: in the strict box-size association "smaller box" test, the prior
   volume used prior_d[l+1] instead of prior_d[l-1] — a typo that also
   read one element past the priors array when l == nb_box-1. */
void YOLO_deriv_error_fct
	(void *i_delta_o, void *i_output, void *i_target, int flat_target_size, int flat_output_size,
	int nb_area_w, int nb_area_h, int nb_area_d, yolo_param y_param, int size)
{
	float* t_delta_o = (float*) i_delta_o;
	float* t_output = (float*) i_output;
	float* t_target = (float*) i_target;
	int nb_box = y_param.nb_box, nb_class = y_param.nb_class, nb_param = y_param.nb_param;
	float *prior_w = y_param.prior_w, *prior_h = y_param.prior_h, *prior_d = y_param.prior_d;
	int cell_w = y_param.cell_w, cell_h = y_param.cell_h, cell_d = y_param.cell_d;
	int strict_box_size_association = y_param.strict_box_size_association;
	float coord_scale = y_param.scale_tab[0], size_scale = y_param.scale_tab[1];
	float prob_scale = y_param.scale_tab[2], obj_scale = y_param.scale_tab[3];
	float class_scale = y_param.scale_tab[4], param_scale = y_param.scale_tab[5];
	float *param_ind_scale = y_param.param_ind_scale;
	float *lambda_noobj_prior = y_param.noobj_prob_prior;
	float **sm_tab = y_param.slopes_and_maxes_tab;
	float size_max_sat = expf(sm_tab[1][1]), size_min_sat = expf(sm_tab[1][2]);
	float good_IoU_lim = y_param.IoU_limits[0];
	float min_prob_IoU_lim = y_param.IoU_limits[1], min_obj_IoU_lim = y_param.IoU_limits[2];
	float min_class_IoU_lim = y_param.IoU_limits[3], min_param_IoU_lim = y_param.IoU_limits[4];
	int fit_size = y_param.fit_parts[0], fit_prob = y_param.fit_parts[1], fit_obj = y_param.fit_parts[2];
	int fit_class = y_param.fit_parts[3], fit_param = y_param.fit_parts[4];
	int c_pix;

	#pragma omp parallel for schedule(guided,4)
	for(c_pix = 0; c_pix < size; c_pix++)
	{
		//All private variables inside the loop for convenience
		//Should be marginal since one iteration cost is already high
		float *delta_o, *output, *target;
		int i, j, k, l;
		int c_batch, f_offset;
		int nb_obj_target;
		int resp_box = -1;
		float max_IoU, current_IoU;
		int cell_x, cell_y, cell_z;
		int obj_cx, obj_cy, obj_cz;
		float *box_in_pix, *c_box_in_pix;
		float obj_in_offset[6];
		int *box_locked;
		float out_int[6], targ_int[6];
		float targ_w, targ_h, targ_d;
		int larger_box, smaller_box;

		box_locked = (int*) malloc(nb_box*sizeof(int));
		box_in_pix = (float*) malloc(nb_box*6*sizeof(float));

		/* locate this pixel: batch item and cell coordinates */
		c_batch = c_pix / flat_output_size;
		target = t_target + flat_target_size * c_batch;
		f_offset = size;
		i = c_pix % flat_output_size;
		cell_z = i / (nb_area_w*nb_area_h);
		cell_y = (int)(i % (nb_area_w*nb_area_h)) / nb_area_w;
		cell_x = (int)(i % (nb_area_w*nb_area_h)) % nb_area_w;
		delta_o = t_delta_o + (nb_area_w*nb_area_h*nb_area_d) * c_batch + cell_z*nb_area_w*nb_area_h + cell_y*nb_area_w + cell_x;
		output = t_output + (nb_area_w*nb_area_h*nb_area_d) * c_batch + cell_z*nb_area_w*nb_area_h + cell_y*nb_area_w + cell_x;

		nb_obj_target = target[0];
		target += 1;

		/* decode each prior box prediction into pixel coordinates */
		for(k = 0; k < nb_box; k++)
		{
			box_locked[k] = 0;
			c_box_in_pix = box_in_pix+k*6;
			c_box_in_pix[0] = ((float)output[(k*(8+nb_class+nb_param)+0)*f_offset] + cell_x) * cell_w;
			c_box_in_pix[1] = ((float)output[(k*(8+nb_class+nb_param)+1)*f_offset] + cell_y) * cell_h;
			c_box_in_pix[2] = ((float)output[(k*(8+nb_class+nb_param)+2)*f_offset] + cell_z) * cell_d;
			c_box_in_pix[3] = prior_w[k]*expf((float)output[(k*(8+nb_class+nb_param)+3)*f_offset]);
			c_box_in_pix[4] = prior_h[k]*expf((float)output[(k*(8+nb_class+nb_param)+4)*f_offset]);
			c_box_in_pix[5] = prior_d[k]*expf((float)output[(k*(8+nb_class+nb_param)+5)*f_offset]);
		}

		for(j = 0; j < nb_obj_target; j++)
		{
			if((int) target[j*(7+nb_param)] == 0)
				break;
			/* object center cell; only handled if it falls in this cell */
			obj_cx = (int)( ((float)target[j*(7+nb_param)+4] + (float)target[j*(7+nb_param)+1])*0.5f / cell_w);
			obj_cy = (int)( ((float)target[j*(7+nb_param)+5] + (float)target[j*(7+nb_param)+2])*0.5f / cell_h);
			obj_cz = (int)( ((float)target[j*(7+nb_param)+6] + (float)target[j*(7+nb_param)+3])*0.5f / cell_d);
			if(obj_cx == cell_x && obj_cy == cell_y && obj_cz == cell_z)
			{
				for(k = 0; k < 6; k++)
					targ_int[k] = target[j*(7+nb_param)+1+k];
				targ_w = targ_int[3] - targ_int[0];
				targ_h = targ_int[4] - targ_int[1];
				targ_d = targ_int[5] - targ_int[2];

				/* find the best-matching (highest IoU) available prior box */
				resp_box = -1;
				max_IoU = -1.0f;
				for(k = 0; k < nb_box; k++)
				{
					larger_box = 0;
					smaller_box = 0;
					if(strict_box_size_association)
					{
						/* exclude prior k if the target volume fits a strictly
						   larger or strictly smaller prior better */
						for(l = k; l < nb_box - 1; l++)
						{
							if(prior_w[l+1]*prior_h[l+1]*prior_d[l+1] > prior_w[k]*prior_h[k]*prior_d[k])
								if(targ_w*targ_h*targ_d >= prior_w[l+1]*prior_h[l+1]*prior_d[l+1])
									larger_box = 1;
						}
						for(l = k; l > 0; l--)
						{
							/* fixed: was prior_d[l+1] (typo + out-of-bounds read) */
							if(prior_w[l-1]*prior_h[l-1]*prior_d[l-1] < prior_w[k]*prior_h[k]*prior_d[k])
								if(targ_w*targ_h*targ_d < prior_w[l-1]*prior_h[l-1]*prior_d[l-1])
									smaller_box = 1;
						}
					}
					if(box_locked[k] == 2 || larger_box || smaller_box)
						continue;

					c_box_in_pix = box_in_pix+k*6;
					out_int[0] = c_box_in_pix[0] - 0.5f*c_box_in_pix[3];
					out_int[1] = c_box_in_pix[1] - 0.5f*c_box_in_pix[4];
					out_int[2] = c_box_in_pix[2] - 0.5f*c_box_in_pix[5];
					out_int[3] = c_box_in_pix[0] + 0.5f*c_box_in_pix[3];
					out_int[4] = c_box_in_pix[1] + 0.5f*c_box_in_pix[4];
					out_int[5] = c_box_in_pix[2] + 0.5f*c_box_in_pix[5];

					current_IoU = y_param.c_IoU_fct(out_int, targ_int);
					if(current_IoU > max_IoU)
					{
						max_IoU = current_IoU;
						resp_box = k;
					}
					if(current_IoU > good_IoU_lim) /*Avoid update of non best but still good match boxes*/
						box_locked[k] = 1;
				}
				if(resp_box == -1 || box_locked[resp_box] == 2)
					continue;
				box_locked[resp_box] = 2;

				/* target encoded in the activation space: offsets in the cell
				   and log size ratios, clipped to the activation saturation */
				obj_in_offset[0] = ((targ_int[3] + targ_int[0])*0.5f - cell_x*cell_w)/(float)cell_w;
				obj_in_offset[1] = ((targ_int[4] + targ_int[1])*0.5f - cell_y*cell_h)/(float)cell_h;
				obj_in_offset[2] = ((targ_int[5] + targ_int[2])*0.5f - cell_z*cell_d)/(float)cell_d;
				obj_in_offset[3] = (targ_w)/(float)prior_w[resp_box];
				if(obj_in_offset[3] < size_min_sat)
					obj_in_offset[3] = logf(size_min_sat);
				else if(obj_in_offset[3] > size_max_sat)
					obj_in_offset[3] = logf(size_max_sat);
				else
					obj_in_offset[3] = logf(obj_in_offset[3]);
				obj_in_offset[4] = (targ_h)/(float)prior_h[resp_box];
				if(obj_in_offset[4] < size_min_sat)
					obj_in_offset[4] = logf(size_min_sat);
				else if(obj_in_offset[4] > size_max_sat)
					obj_in_offset[4] = logf(size_max_sat);
				else
					obj_in_offset[4] = logf(obj_in_offset[4]);
				obj_in_offset[5] = (targ_d)/(float)prior_d[resp_box];
				if(obj_in_offset[5] < size_min_sat)
					obj_in_offset[5] = logf(size_min_sat);
				else if(obj_in_offset[5] > size_max_sat)
					obj_in_offset[5] = logf(size_max_sat);
				else
					obj_in_offset[5] = logf(obj_in_offset[5]);

				/* position gradients (through the logistic activation) */
				for(k = 0; k < 3; k++)
				{
					delta_o[(resp_box*(8+nb_class+nb_param)+k)*f_offset] =
						(sm_tab[0][0]*coord_scale*(float)output[(resp_box*(8+nb_class+nb_param)+k)*f_offset]
						*(1.0f-(float)output[(resp_box*(8+nb_class+nb_param)+k)*f_offset])
						*((float)output[(resp_box*(8+nb_class+nb_param)+k)*f_offset] - obj_in_offset[k]));
				}
				/* size gradients (linear activation) */
				if(fit_size)
				{
					for(k = 0; k < 3; k++)
						delta_o[(resp_box*(8+nb_class+nb_param)+k+3)*f_offset] = (sm_tab[1][0]*size_scale*
							((float)output[(resp_box*(8+nb_class+nb_param)+k+3)*f_offset] - obj_in_offset[k+3]));
				}
				else
				{
					for(k = 0; k < 3; k++)
						delta_o[(resp_box*(8+nb_class+nb_param)+k+3)*f_offset] = (0.0f);
				}
				/* object probability pushed toward 1 (0.999) */
				if(fit_prob && max_IoU > min_prob_IoU_lim)
					delta_o[(resp_box*(8+nb_class+nb_param)+6)*f_offset] =
						(sm_tab[2][0]*prob_scale*(float)output[(resp_box*(8+nb_class+nb_param)+6)*f_offset]
						*(1.0f-(float)output[(resp_box*(8+nb_class+nb_param)+6)*f_offset])
						*((float)output[(resp_box*(8+nb_class+nb_param)+6)*f_offset]-0.999f));
				else
					delta_o[(resp_box*(8+nb_class+nb_param)+6)*f_offset] = (0.0f);
				/* objectness pushed toward (1+IoU)/2 */
				if(fit_obj && max_IoU > min_obj_IoU_lim)
				{
					if(max_IoU > 0.999f)
						max_IoU = 0.999f;
					delta_o[(resp_box*(8+nb_class+nb_param)+7)*f_offset] =
						(sm_tab[3][0]*obj_scale*(float)output[(resp_box*(8+nb_class+nb_param)+7)*f_offset]
						*(1.0f-(float)output[(resp_box*(8+nb_class+nb_param)+7)*f_offset])
						*((float)output[(resp_box*(8+nb_class+nb_param)+7)*f_offset]-(1.0+max_IoU)*0.5));
				}
				else
					delta_o[(resp_box*(8+nb_class+nb_param)+7)*f_offset] = (0.0f);
				/*mean square error on classes => could be changed to soft max (change in activation needed as well)*/
				if(fit_class && max_IoU > min_class_IoU_lim)
				{
					for(k = 0; k < nb_class; k++)
					{
						if(k == (int) target[j*(7+nb_param)]-1)
							delta_o[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset] =
								(sm_tab[4][0]*class_scale
								*(float)output[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset]
								*(1.0f-(float)output[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset])
								*((float)output[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset]-0.999f));
						else
							delta_o[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset] =
								(sm_tab[4][0]*class_scale
								*(float)output[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset]
								*(1.0f-(float)output[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset])
								*((float)output[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset]-0.001f));
					}
				}
				else
				{
					for(k = 0; k < nb_class; k++)
						delta_o[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset] = 0.0f;
				}
				/*linear activation of additional parameters*/
				if(fit_param && max_IoU > min_param_IoU_lim)
				{
					for(k = 0; k < nb_param; k++)
						delta_o[(resp_box*(8+nb_class+nb_param)+8+nb_class+k)*f_offset] =
							(param_ind_scale[k]*sm_tab[5][0]*param_scale
							*((float)output[(resp_box*(8+nb_class+nb_param)+8+nb_class+k)*f_offset]
							- (float)target[j*(7+nb_param)+7+k]));
				}
				else
				{
					for(k = 0; k < nb_param; k++)
						delta_o[(resp_box*(8+nb_class+nb_param)+8+nb_class+k)*f_offset] = 0.0f;
				}
			}
		}

		for(j = 0; j < nb_box; j++)
		{
			/*If no match (means no IoU > 0.5) only update Objectness toward 0 */
			/*(here it means error compute)! (no coordinate nor class update)*/
			if(box_locked[j] != 2)
			{
				for(k = 0; k < 6; k++)
					delta_o[(j*(8+nb_class+nb_param)+k)*f_offset] = 0.0f;
				if(box_locked[j] == 1)
				{
					delta_o[(j*(8+nb_class+nb_param)+6)*f_offset] = 0.0f;
					delta_o[(j*(8+nb_class+nb_param)+7)*f_offset] = 0.0f;
				}
				else
				{
					if(fit_prob)
						delta_o[(j*(8+nb_class+nb_param)+6)*f_offset] =
							(sm_tab[3][0]*(lambda_noobj_prior[j])*prob_scale
							*(float)output[(j*(8+nb_class+nb_param)+6)*f_offset]
							*(1.0f-(float)output[(j*(8+nb_class+nb_param)+6)*f_offset])
							*((float)output[(j*(8+nb_class+nb_param)+6)*f_offset]-0.001f));
					else
						delta_o[(j*(8+nb_class+nb_param)+6)*f_offset] = (0.0f);
					delta_o[(j*(8+nb_class+nb_param)+7)*f_offset] = 0.0f;
				}
				for(k = 0; k < nb_class; k++)
					delta_o[(j*(8+nb_class+nb_param)+8+k)*f_offset] = 0.0f;
				for(k = 0; k < nb_param; k++)
					delta_o[(j*(8+nb_class+nb_param)+8+nb_class+k)*f_offset] = 0.0f;
			}
		}
		free(box_in_pix);
		free(box_locked);
	}
}
/*
 * CPU YOLO loss used for error monitoring: one squared-error contribution per
 * output channel of each grid cell.  Mirrors the deriv (gradient) routine but
 * accumulates 0.5*scale*diff^2 terms instead of gradients.
 * Only minimal optimisation has been performed for now => might be
 * responsible for a significant portion of the total network time.
 *
 * i_output_error       : per-element loss buffer (same layout as the output)
 * i_output / i_target  : raw network output and target buffers
 * flat_target_size     : per-batch-element stride in the target buffer
 * flat_output_size     : number of grid cells per batch element (w*h*d)
 * nb_area_w/h/d        : output grid dimensions
 * y_param              : YOLO layer configuration (priors, scales, limits)
 * size                 : total number of grid cells across the whole batch
 */
void YOLO_error_fct
(float *i_output_error, void *i_output, void *i_target, int flat_target_size, int flat_output_size,
    int nb_area_w, int nb_area_h, int nb_area_d, yolo_param y_param, int size)
{
    float* t_output = (float*) i_output;
    float* t_target = (float*) i_target;
    int nb_box = y_param.nb_box, nb_class = y_param.nb_class, nb_param = y_param.nb_param;
    float *prior_w = y_param.prior_w, *prior_h = y_param.prior_h, *prior_d = y_param.prior_d;
    int cell_w = y_param.cell_w, cell_h = y_param.cell_h, cell_d = y_param.cell_d;
    int strict_box_size_association = y_param.strict_box_size_association;
    float coord_scale = y_param.scale_tab[0], size_scale = y_param.scale_tab[1];
    float prob_scale = y_param.scale_tab[2], obj_scale = y_param.scale_tab[3];
    float class_scale = y_param.scale_tab[4], param_scale = y_param.scale_tab[5];
    float *lambda_noobj_prior = y_param.noobj_prob_prior;
    float **sm_tab = y_param.slopes_and_maxes_tab;
    /* Saturation bounds for the log-size regression targets. */
    float size_max_sat = expf(sm_tab[1][1]), size_min_sat = expf(sm_tab[1][2]);
    float good_IoU_lim = y_param.IoU_limits[0];
    float min_prob_IoU_lim = y_param.IoU_limits[1], min_obj_IoU_lim = y_param.IoU_limits[2];
    float min_class_IoU_lim = y_param.IoU_limits[3], min_param_IoU_lim = y_param.IoU_limits[4];
    float *param_ind_scale = y_param.param_ind_scale;
    float *t_IoU_monitor = y_param.IoU_monitor;
    int c_pix;

    #pragma omp parallel for schedule(guided,4)
    for(c_pix = 0; c_pix < size; c_pix++)
    {
        float *output, *target, *output_error, *IoU_monitor;
        int i, j, k, l;
        int c_batch, f_offset;
        int nb_obj_target;
        int resp_box = -1;
        float max_IoU, current_IoU;
        int cell_x, cell_y, cell_z;
        int obj_cx, obj_cy, obj_cz;
        float *box_in_pix, *c_box_in_pix;
        float obj_in_offset[6];
        int *box_locked;
        float out_int[6], targ_int[6];
        float targ_w, targ_h, targ_d;
        int larger_box, smaller_box;

        box_locked = (int*) malloc(nb_box*sizeof(int));
        box_in_pix = (float*) malloc(nb_box*6*sizeof(float));

        /* Locate this thread's grid cell inside the batch. */
        c_batch = c_pix / flat_output_size;
        target = t_target + flat_target_size * c_batch;
        f_offset = size;    /* channel stride of the interleaved output layout */

        i = c_pix % flat_output_size;
        cell_z = i / (nb_area_w*nb_area_h);
        cell_y = (int)(i % (nb_area_w*nb_area_h)) % nb_area_w;
        cell_x = (int)(i % (nb_area_w*nb_area_h)) / nb_area_w;

        output_error = i_output_error + (nb_area_w*nb_area_h*nb_area_d) * c_batch + cell_z*nb_area_w*nb_area_h + cell_y*nb_area_w + cell_x;
        output = t_output + (nb_area_w*nb_area_h*nb_area_d) * c_batch + cell_z*nb_area_w*nb_area_h + cell_y*nb_area_w + cell_x;
        IoU_monitor = t_IoU_monitor + 2 * nb_box * ((nb_area_w*nb_area_h*nb_area_d) * c_batch + cell_z*nb_area_w*nb_area_h + cell_y*nb_area_w + cell_x);

        /* First target element is the object count for this batch element. */
        nb_obj_target = target[0];
        target += 1;

        /* Decode all predicted boxes of this cell into pixel space. */
        for(k = 0; k < nb_box; k++)
        {
            box_locked[k] = 0;
            c_box_in_pix = box_in_pix+k*6;
            c_box_in_pix[0] = ((float)output[(k*(8+nb_class+nb_param)+0)*f_offset] + cell_x) * cell_w;
            c_box_in_pix[1] = ((float)output[(k*(8+nb_class+nb_param)+1)*f_offset] + cell_y) * cell_h;
            c_box_in_pix[2] = ((float)output[(k*(8+nb_class+nb_param)+2)*f_offset] + cell_z) * cell_d;
            c_box_in_pix[3] = prior_w[k]*expf((float)output[(k*(8+nb_class+nb_param)+3)*f_offset]);
            c_box_in_pix[4] = prior_h[k]*expf((float)output[(k*(8+nb_class+nb_param)+4)*f_offset]);
            c_box_in_pix[5] = prior_d[k]*expf((float)output[(k*(8+nb_class+nb_param)+5)*f_offset]);

            IoU_monitor[k*2] = 0.0f;
            IoU_monitor[k*2+1] = -1.0f;
        }

        /* Associate each target object falling in this cell to a box. */
        for(j = 0; j < nb_obj_target; j++)
        {
            if((int) target[j*(7+nb_param)] == 0)
                break;

            obj_cx = (int)( ((float)target[j*(7+nb_param)+4] + (float)target[j*(7+nb_param)+1])*0.5f / cell_w);
            obj_cy = (int)( ((float)target[j*(7+nb_param)+5] + (float)target[j*(7+nb_param)+2])*0.5f / cell_h);
            obj_cz = (int)( ((float)target[j*(7+nb_param)+6] + (float)target[j*(7+nb_param)+3])*0.5f / cell_d);

            if(obj_cx == cell_x && obj_cy == cell_y && obj_cz == cell_z)
            {
                for(k = 0; k < 6; k++)
                    targ_int[k] = target[j*(7+nb_param)+1+k];

                targ_w = targ_int[3] - targ_int[0];
                targ_h = targ_int[4] - targ_int[1];
                targ_d = targ_int[5] - targ_int[2];

                /* Find the best (highest IoU) available box for this object. */
                resp_box = -1;
                max_IoU = -1.0f;
                for(k = 0; k < nb_box; k++)
                {
                    larger_box = 0;
                    smaller_box = 0;
                    if(strict_box_size_association)
                    {
                        /* Exclude box k if a larger prior would fit better... */
                        for(l = k; l < nb_box - 1; l++)
                        {
                            if(prior_w[l+1]*prior_h[l+1]*prior_d[l+1] > prior_w[k]*prior_h[k]*prior_d[k])
                                if(targ_w*targ_h*targ_d >= prior_w[l+1]*prior_h[l+1]*prior_d[l+1])
                                    larger_box = 1;
                        }
                        /* ...or if a smaller prior would. */
                        for(l = k; l > 0; l--)
                        {
                            /* BUGFIX: was prior_d[l+1] in the first test; the
                             * smaller-box check must use the l-1 prior volume,
                             * mirroring the larger-box loop above. */
                            if(prior_w[l-1]*prior_h[l-1]*prior_d[l-1] < prior_w[k]*prior_h[k]*prior_d[k])
                                if(targ_w*targ_h*targ_d < prior_w[l-1]*prior_h[l-1]*prior_d[l-1])
                                    smaller_box = 1;
                        }
                    }

                    if(box_locked[k] == 2 || larger_box || smaller_box)
                        continue;

                    c_box_in_pix = box_in_pix+k*6;
                    out_int[0] = c_box_in_pix[0] - 0.5f*c_box_in_pix[3];
                    out_int[1] = c_box_in_pix[1] - 0.5f*c_box_in_pix[4];
                    out_int[2] = c_box_in_pix[2] - 0.5f*c_box_in_pix[5];
                    out_int[3] = c_box_in_pix[0] + 0.5f*c_box_in_pix[3];
                    out_int[4] = c_box_in_pix[1] + 0.5f*c_box_in_pix[4];
                    out_int[5] = c_box_in_pix[2] + 0.5f*c_box_in_pix[5];

                    current_IoU = y_param.c_IoU_fct(out_int, targ_int);

                    if(current_IoU > max_IoU)
                    {
                        max_IoU = current_IoU;
                        resp_box = k;
                    }
                    if(current_IoU > good_IoU_lim) /*Avoid update of non best but still good match boxes*/
                        box_locked[k] = 1;
                }

                /* No box available, or best box already claimed by another object. */
                if(resp_box == -1 || box_locked[resp_box] == 2)
                    continue;

                box_locked[resp_box] = 2;
                IoU_monitor[resp_box*2] = 1.0f;
                IoU_monitor[resp_box*2+1] = max_IoU*(float)output[(resp_box*(8+nb_class+nb_param)+6)*f_offset];

                /* Regression targets: center offsets inside the cell, and
                 * log size ratios saturated to [sm_tab[1][2], sm_tab[1][1]]. */
                obj_in_offset[0] = ((targ_int[3] + targ_int[0])*0.5f - cell_x*cell_w)/(float)cell_w;
                obj_in_offset[1] = ((targ_int[4] + targ_int[1])*0.5f - cell_y*cell_h)/(float)cell_h;
                obj_in_offset[2] = ((targ_int[5] + targ_int[2])*0.5f - cell_z*cell_d)/(float)cell_d;
                obj_in_offset[3] = (targ_w)/(float)prior_w[resp_box];
                if(obj_in_offset[3] < size_min_sat)
                    obj_in_offset[3] = logf(size_min_sat);
                else if(obj_in_offset[3] > size_max_sat)
                    obj_in_offset[3] = logf(size_max_sat);
                else
                    obj_in_offset[3] = logf(obj_in_offset[3]);
                obj_in_offset[4] = (targ_h)/(float)prior_h[resp_box];
                if(obj_in_offset[4] < size_min_sat)
                    obj_in_offset[4] = logf(size_min_sat);
                else if(obj_in_offset[4] > size_max_sat)
                    obj_in_offset[4] = logf(size_max_sat);
                else
                    obj_in_offset[4] = logf(obj_in_offset[4]);
                obj_in_offset[5] = (targ_d)/(float)prior_d[resp_box];
                if(obj_in_offset[5] < size_min_sat)
                    obj_in_offset[5] = logf(size_min_sat);
                else if(obj_in_offset[5] > size_max_sat)
                    obj_in_offset[5] = logf(size_max_sat);
                else
                    obj_in_offset[5] = logf(obj_in_offset[5]);

                /*Already compute error for the responsible box*/
                for(k = 0; k < 3; k++)
                    output_error[(resp_box*(8+nb_class+nb_param)+k)*f_offset] =
                        0.5f*coord_scale*((float)output[(resp_box*(8+nb_class+nb_param)+k)*f_offset] - obj_in_offset[k])
                        *((float)output[(resp_box*(8+nb_class+nb_param)+k)*f_offset] - obj_in_offset[k]);
                for(k = 0; k < 3; k++)
                    output_error[(resp_box*(8+nb_class+nb_param)+k+3)*f_offset] =
                        0.5f*size_scale*((float)output[(resp_box*(8+nb_class+nb_param)+k+3)*f_offset] - obj_in_offset[k+3])
                        *((float)output[(resp_box*(8+nb_class+nb_param)+k+3)*f_offset] - obj_in_offset[k+3]);

                /* Probability / objectness terms are only trained past their
                 * respective IoU thresholds. */
                if(max_IoU > min_prob_IoU_lim)
                    output_error[(resp_box*(8+nb_class+nb_param)+6)*f_offset] =
                        0.5f*prob_scale*((float)output[(resp_box*(8+nb_class+nb_param)+6)*f_offset]-0.999f)
                        *((float)output[(resp_box*(8+nb_class+nb_param)+6)*f_offset]-0.999f);
                else
                    output_error[(resp_box*(8+nb_class+nb_param)+6)*f_offset] = 0.0f;

                if(max_IoU > min_obj_IoU_lim)
                {
                    if(max_IoU > 0.999f)
                        max_IoU = 0.999f;
                    output_error[(resp_box*(8+nb_class+nb_param)+7)*f_offset] =
                        0.5f*obj_scale*((float)output[(resp_box*(8+nb_class+nb_param)+7)*f_offset]-(1.0+max_IoU)*0.5)
                        *((float)output[(resp_box*(8+nb_class+nb_param)+7)*f_offset]-(1.0+max_IoU)*0.5);
                }
                else
                    output_error[(resp_box*(8+nb_class+nb_param)+7)*f_offset] = 0.0f;

                /*mean square error on classes => could be changed to soft max (change in activation needed as well)*/
                if(max_IoU > min_class_IoU_lim)
                {
                    for(k = 0; k < nb_class; k++)
                    {
                        if(k == (int)target[j*(7+nb_param)]-1)
                            output_error[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset] = 0.5f*class_scale
                                *((float)output[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset]-0.999f)
                                *((float)output[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset]-0.999f);
                        else
                            output_error[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset] = 0.5f*class_scale
                                *((float)output[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset]-0.001f)
                                *((float)output[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset]-0.001f);
                    }
                }
                else
                {
                    for(k = 0; k < nb_class; k++)
                        output_error[(resp_box*(8+nb_class+nb_param)+8+k)*f_offset] = 0.0f;
                }

                /*linear error of additional parameters*/
                if(max_IoU > min_param_IoU_lim)
                {
                    for(k = 0; k < nb_param; k++)
                        output_error[(resp_box*(8+nb_class+nb_param)+8+nb_class+k)*f_offset] =
                            (param_ind_scale[k]*0.5f*param_scale*((float)output[(resp_box*(8+nb_class+nb_param)
                            +8+nb_class+k)*f_offset] - (float) target[j*(7+nb_param)+7+k])
                            *((float)output[(resp_box*(8+nb_class+nb_param)
                            +8+nb_class+k)*f_offset] - (float) target[j*(7+nb_param)+7+k]));
                }
                else
                {
                    for(k = 0; k < nb_param; k++)
                        output_error[(resp_box*(8+nb_class+nb_param)+8+nb_class+k)*f_offset] = 0.0f;
                }
            }
        }

        for(j = 0; j < nb_box; j++)
        {
            /*If no match (means no IoU > 0.5) only update Objectness toward 0 */
            /*(here it means error compute)! (no coordinate nor class update)*/
            if(box_locked[j] != 2)
            {
                for(k = 0; k < 6; k++)
                    output_error[(j*(8+nb_class+nb_param)+k)*f_offset] = 0.0f;

                if(box_locked[j] == 1)
                {
                    /* Good-but-not-best boxes: no penalty at all. */
                    output_error[(j*(8+nb_class+nb_param)+6)*f_offset] = 0.0f;
                    output_error[(j*(8+nb_class+nb_param)+7)*f_offset] = 0.0f;
                }
                else
                {
                    /* Unmatched boxes: push probability toward 0, weighted by
                     * the per-prior no-object factor. */
                    output_error[(j*(8+nb_class+nb_param)+6)*f_offset] =
                        0.5f*(lambda_noobj_prior[j])*prob_scale*((float)output[(j*(8+nb_class+nb_param)+6)*f_offset]-0.001f)
                        *((float)output[(j*(8+nb_class+nb_param)+6)*f_offset]-0.001f);
                    output_error[(j*(8+nb_class+nb_param)+7)*f_offset] = 0.0f;
                }

                for(k = 0; k < nb_class; k++)
                    output_error[(j*(8+nb_class+nb_param)+8+k)*f_offset] = 0.0f;

                for(k = 0; k < nb_param; k++)
                    output_error[(j*(8+nb_class+nb_param)+8+nb_class+k)*f_offset] = 0.0f;
            }
        }

        free(box_in_pix);
        free(box_locked);
    }
}
/* Apply the YOLO output activation over the whole batch of this layer. */
void YOLO_activation(layer* current)
{
    yolo_param *y_par = (yolo_param*) current->activ_param;
    conv_param *conv = (conv_param*) current->param;
    /* Number of spatial grid cells in one batch element. */
    int spatial = conv->nb_area[0] * conv->nb_area[1] * conv->nb_area[2];

    YOLO_activation_fct(current->output,
        spatial * current->c_network->batch_size,
        y_par->biased_dim * current->c_network->length,
        *y_par, y_par->size);
}
/*
 * YOLO must be the terminal layer of the network; propagating a derivative
 * through it is unsupported, so this aborts with a diagnostic.
 * The error now goes to stderr (it previously went to stdout).
 */
void YOLO_deriv(layer *previous)
{
    (void)previous; /* unused: present only to match the deriv interface */
    fprintf(stderr, "Error : YOLO activation can not be used in the middle of the network !\n");
    exit(EXIT_FAILURE);
}
/* Compute the YOLO loss gradient (delta_o) for the output layer. */
void YOLO_deriv_output_error(layer* current)
{
    yolo_param *y_par = (yolo_param*) current->activ_param;
    conv_param *conv = (conv_param*) current->param;
    /* Grid cells per batch element. */
    int spatial = conv->nb_area[0] * conv->nb_area[1] * conv->nb_area[2];

    YOLO_deriv_error_fct(current->delta_o, current->output, current->c_network->target,
        current->c_network->output_dim, spatial,
        conv->nb_area[0], conv->nb_area[1], conv->nb_area[2],
        *y_par, spatial * current->c_network->batch_size);
}
/* Compute the monitored YOLO loss value for the output layer. */
void YOLO_output_error(layer* current)
{
    yolo_param *y_par = (yolo_param*) current->activ_param;
    conv_param *conv = (conv_param*) current->param;
    /* Grid cells per batch element. */
    int spatial = conv->nb_area[0] * conv->nb_area[1] * conv->nb_area[2];

    YOLO_error_fct((float*) current->c_network->output_error,
        current->output, current->c_network->target,
        current->c_network->output_dim, spatial,
        conv->nb_area[0], conv->nb_area[1], conv->nb_area[2],
        *y_par, spatial * current->c_network->batch_size);
}
//#####################################################
|
edist.c | // Licensed under a 3-clause BSD style license - see LICENSE
#include "common.h"
#include "math.h"
#include "gsl/gsl_math.h"
#include "gsl/gsl_deriv.h"
#include "stdio.h"
double P_THRESH=1e-4;
/*****************************************************************************/
// Plain electron distributions no normalization
// params : p, gamma_min, gamma_max
double powerlaw(double gamma, void *params){
double *p = (double*) params;
return pow(gamma, -p[0]);
}
/* Power law with exponential cutoff: gamma^-p * exp(-gamma/gamma_max).
 * params layout: [p, gamma_min, gamma_max]. */
double powerlawexpcutoff(double gamma, void *params){
    const double *par = (const double*) params;
    double cutoff = exp(-gamma / par[2]);
    return cutoff * powerlaw(gamma, params);
}
// params : theta
double thermal(double gamma, void *params){
double *p = (double*) params;
return gamma*sqrt(gamma*gamma-1)*exp(-gamma/p[0]);
}
// params : kappa, kappa_width
double kappa(double gamma, void *params){
double *p = (double*) params;
return gamma*sqrt(gamma*gamma-1)*pow(1+(gamma-1)/(p[0]*p[1]),-p[0]-1);
}
//params : p1, p2, gamma_b, gamma_min, gamma_max
double bknpowerlaw(double gamma, void *params){
double *p = (double*) params;
double factor = pow(p[2], p[1]-p[0]);
return (gamma < p[2]) ? pow(gamma, -p[0]) : factor*pow(gamma, -p[1]);
}
/* Broken power law with exponential cutoff at gamma_max.
 * params layout: [p1, p2, gamma_b, gamma_min, gamma_max]. */
double bknpowerlawexpcutoff(double gamma, void *params){
    const double *par = (const double*) params;
    double cutoff = exp(-gamma / par[4]);
    return cutoff * bknpowerlaw(gamma, params);
}
/*****************************************************************************/
// Electron distribution normalizations
/* Analytic integral of powerlaw() over [gamma_min, gamma_max];
 * switches to the logarithmic form when p is within P_THRESH of 1. */
double powerlaw_norm(void* params){
    const double *par = (const double*) params;
    double p = par[0], gmin = par[1], gmax = par[2];
    if (fabs(p - 1.0) < P_THRESH)
        return log(gmax) - log(gmin);
    return (pow(gmin, 1.0 - p) - pow(gmax, 1.0 - p)) / (p - 1.0);
}
/* Analytic integral of bknpowerlaw() over [gamma_min, gamma_max]:
 * sum of the two segments split at gamma_b, each with a logarithmic
 * fallback when its index is within P_THRESH of 1. */
double bknpowerlaw_norm(void* params){
    const double *par = (const double*) params;
    double p1 = par[0], p2 = par[1], gb = par[2], gmin = par[3], gmax = par[4];
    double factor = pow(gb, p2 - p1);   /* continuity factor at the break */
    double norm1, norm2;

    if (fabs(p1 - 1.0) < P_THRESH)
        norm1 = log(gb) - log(gmin);
    else
        norm1 = (pow(gmin, 1.0 - p1) - pow(gb, 1.0 - p1)) / (p1 - 1.0);

    if (fabs(p2 - 1.0) < P_THRESH)
        norm2 = factor * (log(gmax) - log(gb));
    else
        norm2 = factor * (pow(gb, 1.0 - p2) - pow(gmax, 1.0 - p2)) / (p2 - 1.0);

    return norm1 + norm2;
}
/* Normalization of thermal(): theta * K_2(1/theta) (modified Bessel K). */
double thermal_norm(void* params){
    const double *par = (const double*) params;
    double theta = par[0];
    return theta * BESSELK(2, 1/theta);
}
/* Normalization of kappa(), smoothly bridging the low- and high-width
 * asymptotic forms.  Pandya+ 2016, ApJ 822:34 / Sec. 3.3. */
double kappa_norm(void* params){
    const double *par = (const double*) params;
    double kap = par[0], width = par[1];
    double w3 = width * width * width;

    double norm_high = (kap - 2) * (kap - 1) / (2 * (kap*kap) * w3);
    double norm_low = pow(2 / (M_PI * (kap*kap*kap) * w3), 0.5)
        * GAMMAF(kap + 1) / GAMMAF(kap - 0.5);

    /* Smooth interpolation between the two regimes (exponent -0.7). */
    return pow(pow(norm_low, -0.7) + pow(norm_high, -0.7), 1/0.7);
}
/*****************************************************************************/
// Commmon interface for electron distributions - no normalization
/* Evaluate the source's (unnormalized) electron distribution at gamma. */
double eDist_s(double gamma, Source* source_t){
    void *pars = (void*) source_t->params;
    return source_t->d_func(gamma, pars);
}
/* Numerical d/dgamma of the source's distribution via a GSL central
 * difference with fixed step 1e-8.  The error estimate is discarded. */
double deDistdgam_s(double gamma, Source* source_t){
    const double step = 1e-8;
    double res = 0.0, err = 0.0;
    gsl_function F;

    F.function = source_t->d_func;
    F.params = (void*) source_t->params;
    gsl_deriv_central(&F, gamma, step, &res, &err);
    return res;
}
/*****************************************************************************/
// Electron distributions from gamma array. Full math.
/* Fill res[0..sz) with the normalized distribution derivative,
 * ne * d(eDist)/dgamma / norm, evaluated at each gamma[i].  Returns 0. */
int deDistdgam(double *res, int sz, double *gamma, Source* source_t){
    const double norm = source_t->n_func((void*) source_t->params);
    const double ne = source_t->ne;   /* hoisted loop invariants */
    int i;
    #pragma omp parallel for
    for (i = 0; i < sz; i++)
        res[i] = ne * deDistdgam_s(gamma[i], source_t) / norm;
    return 0;
}
/* Fill res[0..sz) with the normalized distribution ne * eDist / norm
 * evaluated at each gamma[i].  Returns 0. */
int eDist(double *res, int sz, double *gamma, Source* source_t){
    const double norm = source_t->n_func((void*) source_t->params);
    const double ne = source_t->ne;   /* hoisted loop invariants */
    int i;
    #pragma omp parallel for
    for (i = 0; i < sz; i++)
        res[i] = ne * eDist_s(gamma[i], source_t) / norm;
    return 0;
}
/*****************************************************************************/
|
test.c | #include <stdio.h>
#include <omp.h>
int a, b, i, tid;
float x;
// #pragma omp threadprivate(a, x)
int main()
{
    /* Explicitly turn off dynamic thread-count adjustment so each parallel
     * region runs with a fixed number of threads. */
    omp_set_dynamic(0);

    printf("1st Parallel Region:\n");
    /* b and tid are private (one copy per thread); a and x remain shared, so
     * the unsynchronized writes below are data races.  This appears to be the
     * point of the demo (cf. the commented-out
     * `#pragma omp threadprivate(a, x)` above) — TODO confirm intent. */
    #pragma omp parallel private(b,tid)
    {
        tid = omp_get_thread_num();
        a = tid;              /* race: every thread writes shared a */
        b = tid;              /* safe: b is private */
        x = 1.1 * tid + 1.0;  /* race: every thread writes shared x */
        printf("Thread %d: a,b,x= %d %d %f\n", tid, a, b, x);
    } /* end of parallel region */

    printf("************************************\n");
    printf("Master thread doing serial work here\n");
    printf("************************************\n");
    printf("2nd Parallel Region:\n");

    /* Only tid is private here; a, b, x print whatever values the shared
     * globals hold after the first region. */
    #pragma omp parallel private(tid)
    {
        tid = omp_get_thread_num();
        printf("Thread %d: a,b,x= %d %d %f\n", tid, a, b, x);
    } /* end of parallel region */

    return 0;
}
tree.h | #ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <string>
#include <vector>
#include <memory>
#include <cmath>
#include <map>
#include <unordered_map>
namespace LightGBM {
#define kCategoricalMask (1)
#define kDefaultLeftMask (2)
/*!
* \brief Tree model
*
* Node indexing convention: internal nodes use non-negative indices,
* leaves are encoded as the bitwise complement of the leaf index (~leaf),
* so a negative child id means "this child is leaf ~id".
*/
class Tree {
 public:
  /*!
  * \brief Constructor
  * \param max_leaves The number of max leaves
  */
  explicit Tree(int max_leaves);

  /*!
  * \brief Construtor, from a string
  * \param str Model string
  * \param used_len used count of str
  */
  Tree(const char* str, size_t* used_len);

  ~Tree();

  /*! \brief Get the output of one leaf */
  inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }

  /*! \brief Set the output of one leaf */
  inline void SetLeafOutput(int leaf, double output) {
    leaf_value_[leaf] = output;
  }

  /*!
  * \brief Prediction on one record
  * \param feature_values Feature value of this record
  * \return Prediction result
  */
  inline double Predict(const double* feature_values) const;
  /*! \brief Same as Predict but features come as a sparse index->value map */
  inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const;

  /*! \brief Index of the leaf this record falls into (0 for a single-leaf tree) */
  inline int PredictLeafIndex(const double* feature_values) const;
  inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const;

  /*! \brief Accumulate per-feature SHAP contributions into output[0..num_features] */
  inline void PredictContrib(const double* feature_values, int num_features, double* output);

  /*! \brief Get Number of leaves*/
  inline int num_leaves() const { return num_leaves_; }

  /*! \brief Get depth of specific leaf*/
  inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }

  /*! \brief Get feature of specific split*/
  inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }

  /*! \brief Get gain of specific split*/
  inline double split_gain(int split_idx) const { return split_gain_[split_idx]; }

  /*! \brief Get the number of data points that fall at or below this node*/
  inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; }

  /*!
  * \brief Shrinkage for the tree's output
  *        shrinkage rate (a.k.a learning rate) is used to tune the traning process
  * \param rate The factor of shrinkage
  */
  inline void Shrinkage(double rate) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
    for (int i = 0; i < num_leaves_; ++i) {
      leaf_value_[i] *= rate;
    }
    shrinkage_ *= rate;
  }

  /*! \brief Cumulative shrinkage applied so far */
  inline double shrinkage() const {
    return shrinkage_;
  }

  /*! \brief Add a constant to every leaf output; resets shrinkage to 1 */
  inline void AddBias(double val) {
#pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048)
    for (int i = 0; i < num_leaves_; ++i) {
      leaf_value_[i] = val + leaf_value_[i];
    }
    // force to 1.0
    shrinkage_ = 1.0f;
  }

  /*! \brief Collapse this tree into a single constant-output leaf */
  inline void AsConstantTree(double val) {
    num_leaves_ = 1;
    shrinkage_ = 1.0f;
    leaf_value_[0] = val;
  }

  /*! \brief Serialize this object to string*/
  std::string ToString() const;

  /*! \brief Serialize this object to json*/
  std::string ToJSON() const;

  /*! \brief Serialize this object to if-else statement*/
  std::string ToIfElse(int index, bool predict_leaf_index) const;

  inline static bool IsZero(double fval) {
    if (fval > -kZeroThreshold && fval <= kZeroThreshold) {
      return true;
    } else {
      return false;
    }
  }

  /*! \brief Test a flag bit (kCategoricalMask / kDefaultLeftMask) */
  inline static bool GetDecisionType(int8_t decision_type, int8_t mask) {
    return (decision_type & mask) > 0;
  }

  /*! \brief Set or clear a flag bit in the packed decision_type byte */
  inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) {
    if (input) {
      (*decision_type) |= mask;
    } else {
      (*decision_type) &= (127 - mask);
    }
  }

  /*! \brief Missing-value handling mode is stored in bits 2-3 of decision_type */
  inline static int8_t GetMissingType(int8_t decision_type) {
    return (decision_type >> 2) & 3;
  }

  inline static void SetMissingType(int8_t* decision_type, int8_t input) {
    (*decision_type) &= 3;
    (*decision_type) |= (input << 2);
  }

  void RecomputeMaxDepth();

 private:
  std::string NumericalDecisionIfElse(int node) const;

  std::string CategoricalDecisionIfElse(int node) const;

  /*!
  * \brief Route a record through a numerical split.
  * Missing-type bits observed here: 1 => zero is treated as missing,
  * 2 => NaN is treated as missing; otherwise NaN is coerced to 0.
  * \return child node id (negative means leaf ~id)
  */
  inline int NumericalDecision(double fval, int node) const {
    uint8_t missing_type = GetMissingType(decision_type_[node]);
    if (std::isnan(fval)) {
      if (missing_type != 2) {
        fval = 0.0f;
      }
    }
    if ((missing_type == 1 && IsZero(fval))
        || (missing_type == 2 && std::isnan(fval))) {
      // Missing values follow the recorded default direction.
      if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
        return left_child_[node];
      } else {
        return right_child_[node];
      }
    }
    if (fval <= threshold_[node]) {
      return left_child_[node];
    } else {
      return right_child_[node];
    }
  }

  /*! \brief Same as NumericalDecision but operating on binned feature values */
  inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
    uint8_t missing_type = GetMissingType(decision_type_[node]);
    if ((missing_type == 1 && fval == default_bin)
        || (missing_type == 2 && fval == max_bin)) {
      if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) {
        return left_child_[node];
      } else {
        return right_child_[node];
      }
    }
    if (fval <= threshold_in_bin_[node]) {
      return left_child_[node];
    } else {
      return right_child_[node];
    }
  }

  /*!
  * \brief Route a record through a categorical split: go left iff the
  * category is present in this node's bitset.
  * NOTE(review): fval is cast to int before the NaN check — casting a NaN
  * to int is undefined behavior in C++; confirm upstream intent.
  */
  inline int CategoricalDecision(double fval, int node) const {
    uint8_t missing_type = GetMissingType(decision_type_[node]);
    int int_fval = static_cast<int>(fval);
    if (int_fval < 0) {
      return right_child_[node];;
    } else if (std::isnan(fval)) {
      // NaN is always in the right
      if (missing_type == 2) {
        return right_child_[node];
      }
      int_fval = 0;
    }
    // threshold_ stores an index into the per-node category bitsets.
    int cat_idx = int(threshold_[node]);
    if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx],
                             cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) {
      return left_child_[node];
    }
    return right_child_[node];
  }

  /*! \brief Binned-value version of CategoricalDecision */
  inline int CategoricalDecisionInner(uint32_t fval, int node) const {
    int cat_idx = int(threshold_in_bin_[node]);
    if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx],
                             cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) {
      return left_child_[node];
    }
    return right_child_[node];
  }

  /*! \brief Dispatch to the categorical or numerical split of this node */
  inline int Decision(double fval, int node) const {
    if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
      return CategoricalDecision(fval, node);
    } else {
      return NumericalDecision(fval, node);
    }
  }

  inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const {
    if (GetDecisionType(decision_type_[node], kCategoricalMask)) {
      return CategoricalDecisionInner(fval, node);
    } else {
      return NumericalDecisionInner(fval, node, default_bin, max_bin);
    }
  }

  /*! \brief Turn leaf `leaf` into an internal node with two new leaves */
  inline void Split(int leaf, int feature, int real_feature,
                    double left_value, double right_value, int left_cnt, int right_cnt, float gain);

  /*!
  * \brief Find leaf index of which record belongs by features
  * \param feature_values Feature value of this record
  * \return Leaf index
  */
  inline int GetLeaf(const double* feature_values) const;
  inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const;

  /*! \brief Serialize one node to json*/
  std::string NodeToJSON(int index) const;

  /*! \brief Serialize one node to if-else statement*/
  std::string NodeToIfElse(int index, bool predict_leaf_index) const;

  std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const;

  double ExpectedValue() const;

  /*! \brief This is used fill in leaf_depth_ after reloading a model*/
  inline void RecomputeLeafDepths(int node = 0, int depth = 0);

  /*!
  * \brief Used by TreeSHAP for data we keep about our decision path
  */
  struct PathElement {
    int feature_index;
    double zero_fraction;
    double one_fraction;

    // note that pweight is included for convenience and is not tied with the other attributes,
    // the pweight of the i'th path element is the permuation weight of paths with i-1 ones in them
    double pweight;

    PathElement() {}
    PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {}
  };

  /*! \brief Polynomial time algorithm for SHAP values (https://arxiv.org/abs/1706.06060) */
  void TreeSHAP(const double *feature_values, double *phi,
                int node, int unique_depth,
                PathElement *parent_unique_path, double parent_zero_fraction,
                double parent_one_fraction, int parent_feature_index) const;

  /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/
  static void ExtendPath(PathElement *unique_path, int unique_depth,
                         double zero_fraction, double one_fraction, int feature_index);

  /*! \brief Undo a previous extension of the decision path for TreeSHAP*/
  static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index);

  /*! determine what the total permuation weight would be if we unwound a previous extension in the decision path*/
  static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index);

  /*! \brief Number of max leaves*/
  int max_leaves_;
  /*! \brief Number of current levas*/
  int num_leaves_;

  // following values used for non-leaf node
  /*! \brief A non-leaf node's left child */
  std::vector<int> left_child_;
  /*! \brief A non-leaf node's right child */
  std::vector<int> right_child_;
  /*! \brief A non-leaf node's split feature */
  std::vector<int> split_feature_inner_;
  /*! \brief A non-leaf node's split feature, the original index */
  std::vector<int> split_feature_;
  /*! \brief A non-leaf node's split threshold in bin */
  std::vector<uint32_t> threshold_in_bin_;
  /*! \brief A non-leaf node's split threshold in feature value */
  std::vector<double> threshold_;
  /*! \brief Number of categorical splits in this tree */
  int num_cat_;
  std::vector<int> cat_boundaries_inner_;
  std::vector<uint32_t> cat_threshold_inner_;
  std::vector<int> cat_boundaries_;
  std::vector<uint32_t> cat_threshold_;
  /*! \brief Store the information for categorical feature handle and mising value handle. */
  std::vector<int8_t> decision_type_;
  /*! \brief A non-leaf node's split gain */
  std::vector<float> split_gain_;
  // used for leaf node
  /*! \brief The parent of leaf */
  std::vector<int> leaf_parent_;
  /*! \brief Output of leaves */
  std::vector<double> leaf_value_;
  /*! \brief DataCount of leaves */
  std::vector<int> leaf_count_;
  /*! \brief Output of non-leaf nodes */
  std::vector<double> internal_value_;
  /*! \brief DataCount of non-leaf nodes */
  std::vector<int> internal_count_;
  /*! \brief Depth for leaves */
  std::vector<int> leaf_depth_;
  /*! \brief Cumulative shrinkage (learning rate) applied to leaf outputs */
  double shrinkage_;
  int max_depth_;
};
// Turn leaf `leaf` into internal node `num_leaves_ - 1` with children
// ~leaf (left) and ~num_leaves_ (right, the new leaf).
// NOTE(review): this writes leaf slots at index num_leaves_, so it assumes
// the caller increments num_leaves_ AFTER this returns — confirm at call site.
inline void Tree::Split(int leaf, int feature, int real_feature,
                        double left_value, double right_value, int left_cnt, int right_cnt, float gain) {
  int new_node_idx = num_leaves_ - 1;
  // update parent info
  int parent = leaf_parent_[leaf];
  if (parent >= 0) {
    // if cur node is left child
    if (left_child_[parent] == ~leaf) {
      left_child_[parent] = new_node_idx;
    } else {
      right_child_[parent] = new_node_idx;
    }
  }
  // add new node
  split_feature_inner_[new_node_idx] = feature;
  split_feature_[new_node_idx] = real_feature;

  split_gain_[new_node_idx] = Common::AvoidInf(gain);
  // add two new leaves
  left_child_[new_node_idx] = ~leaf;
  right_child_[new_node_idx] = ~num_leaves_;
  // update new leaves
  leaf_parent_[leaf] = new_node_idx;
  leaf_parent_[num_leaves_] = new_node_idx;
  // save current leaf value to internal node before change
  internal_value_[new_node_idx] = leaf_value_[leaf];
  internal_count_[new_node_idx] = left_cnt + right_cnt;
  // NaN leaf outputs are sanitized to 0 to keep predictions finite.
  leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value;
  leaf_count_[leaf] = left_cnt;
  leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value;
  leaf_count_[num_leaves_] = right_cnt;
  // update leaf depth
  leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1;
  leaf_depth_[leaf]++;
}
// Predict one record: a single-leaf tree is constant, otherwise route the
// record down to its leaf and return that leaf's output.
inline double Tree::Predict(const double* feature_values) const {
  if (num_leaves_ <= 1) {
    return leaf_value_[0];
  }
  return LeafOutput(GetLeaf(feature_values));
}
// Map-based variant of Predict(): features arrive as a sparse
// index -> value map; absent features are handled by GetLeafByMap.
inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const {
  if (num_leaves_ <= 1) {
    return leaf_value_[0];
  }
  return LeafOutput(GetLeafByMap(feature_values));
}
// Return the index of the leaf this record falls into
// (0 when the tree is a single leaf).
inline int Tree::PredictLeafIndex(const double* feature_values) const {
  return num_leaves_ > 1 ? GetLeaf(feature_values) : 0;
}
// Map-based variant of PredictLeafIndex()
// (0 when the tree is a single leaf).
inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const {
  return num_leaves_ > 1 ? GetLeafByMap(feature_values) : 0;
}
// Accumulate SHAP feature contributions for one record into output[0..num_features-1];
// slot output[num_features] accumulates the base (expected) value.
inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) {
  output[num_features] += ExpectedValue();
  // Run the recursion with preallocated space for the unique path data
  if (num_leaves_ > 1) {
    CHECK(max_depth_ >= 0);  // TreeSHAP needs a valid depth; see RecomputeMaxDepth()
    const int max_path_len = max_depth_ + 1;
    // Triangular buffer: one path prefix per depth level.
    std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2);
    TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1);
  }
}
// Rebuild leaf_depth_ by walking the tree (used after reloading a model).
inline void Tree::RecomputeLeafDepths(int node, int depth) {
  // Root call: make sure the depth array matches the current leaf count.
  if (node == 0) {
    leaf_depth_.resize(num_leaves());
  }
  if (node < 0) {
    // Negative ids encode leaves as ~leaf_index.
    leaf_depth_[~node] = depth;
    return;
  }
  RecomputeLeafDepths(left_child_[node], depth + 1);
  RecomputeLeafDepths(right_child_[node], depth + 1);
}
// Walk from the root until a leaf (negative id) is reached and return ~id.
inline int Tree::GetLeaf(const double* feature_values) const {
  int node = 0;
  // Without categorical splits every node is numerical, so skip the dispatch.
  if (num_cat_ <= 0) {
    do {
      node = NumericalDecision(feature_values[split_feature_[node]], node);
    } while (node >= 0);
  } else {
    do {
      node = Decision(feature_values[split_feature_[node]], node);
    } while (node >= 0);
  }
  return ~node;
}
// Map-based leaf lookup; features absent from the map are treated as 0.0.
inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const {
  int node = 0;
  if (num_cat_ <= 0) {
    do {
      const auto it = feature_values.find(split_feature_[node]);
      node = NumericalDecision(it != feature_values.end() ? it->second : 0.0f, node);
    } while (node >= 0);
  } else {
    do {
      const auto it = feature_values.find(split_feature_[node]);
      node = Decision(it != feature_values.end() ? it->second : 0.0f, node);
    } while (node >= 0);
  }
  return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
vms_fmt_plug.c | /*
* This file is part of John the Ripper password cracker.
*
* It comes from OpenVMS support 2.4(jtr_vms_2-4.zip) patch
* posted by David Jones.
*
* Converted to OpenVMS format module by David Jones
*
* Copyright (c) 2011 by David L. Jones <jonesd/at/columbus.rr.com>,
* Copyright (c) 2012 by Dhiru Kholia <dhiru/at/openwall.com> and
* is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modifications, are permitted. */
#if !AC_BUILT
#if __GNUC__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define ARCH_LITTLE_ENDIAN 1
#endif
#endif
#if FMT_EXTERNS_H
#if ARCH_LITTLE_ENDIAN
extern struct fmt_main fmt_VMS;
#endif
#elif FMT_REGISTERS_H
#if ARCH_LITTLE_ENDIAN
john_register_one(&fmt_VMS);
#endif
#else
#include <stdio.h>
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "vms_std.h"
#include "common.h"
#include "formats.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1024 // Tuned on K8-Dual HT
#endif
#endif
#ifdef VMS
#include <ssdef.h>
#define UAIsM_PWDMIX UAI$M_PWDMIX
#else
/*
* Emulate symbols defined for VMS services.
*/
#define UAIsM_PWDMIX 0x2000000
#endif
#include "memdbg.h"
#define FORMAT_LABEL "OpenVMS"
#define FORMAT_NAME "Purdy"
#define FORMAT_TAG "$V$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_NAME_NOPWDMIX "Purdy (nopwdmix)"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH UAF_ENCODE_SIZE
#define BINARY_SIZE 8
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct uaf_hash_info)
#define SALT_ALIGN 4
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Built-in self-test vectors: {"$V$" + UAF-encoded hash, plaintext}. */
static struct fmt_tests tests[] = {
	/*
	 * The following two test vectors: "USER" and "service" are case-insensitive
	 */
	{"$V$9AYXUd5LfDy-aj48Vj54P-----", "USER"},
	{"$V$p1UQjRZKulr-Z25g5lJ-------", "service"},
	/*
	 * The following one test vector: "President#44" is case-sensitive
	 */
	{"$V$S44zI913bBx-UJrcFSC------D", "President#44"},
	{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uaf_qword (*crypt_out)[BINARY_SIZE / sizeof(uaf_qword)];
static int initialized;
/*
 * See if signature of ciphertext (from passwd file) matches the hash
 * produced by the uaf_encode routine (starts with $V$).
 * Returns 1 when the candidate is well-formed and decodes to a supported
 * Purdy algorithm variant, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self )
{
	struct uaf_hash_info pwd;

	/* Lazy one-time init of the UAF encode/decode machinery. */
	if (!initialized) {
		uaf_init();
		initialized = 1;
	}
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;	/* no match */

	if ( strlen ( ciphertext ) < (UAF_ENCODE_SIZE-1) )
		return 0;

	/* Must decode cleanly into salt/algorithm/username fields. */
	if (!uaf_hash_decode(ciphertext, &pwd))
		return 0;

#ifdef VMS_DEBUG
	fprintf(stderr, "/VMS_STD/ get_salt decoded '%s' to %x/%x-%x-%x-%x-%x"
	    " %ld\n", ciphertext, pwd.salt, pwd.alg, pwd.username.r40[0],
	    pwd.username.r40[1], pwd.username.r40[2], pwd.username.r40[3],
	    pwd.flags);
#endif
	/* Only Purdy algorithm variants 1..3 are supported. */
	if (pwd.alg < 1 || pwd.alg > 3)
		return 0;

	return 1;
}
/* One-time format setup: allocate key/output buffers (scaled for OpenMP). */
static void fmt_vms_init ( struct fmt_main *self )
{
#ifdef _OPENMP
	/* Scale keys-per-crypt by the thread count (times the OMP_SCALE
	 * tuning factor) so each thread gets work. */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* NOTE(review): the original comment claimed a bin-2-hex table is
	 * built here, but none is visible — only buffers and uaf_init(). */
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
	if (!initialized) {
		uaf_init();
		initialized = 1;
	}
}
/* Release the per-session key and hash buffers allocated by fmt_vms_init. */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
/*
* Save a password (key) for testing. VMS_std_set_key returns position value
* we can use if needed to recall the key by a fmt->get_key request. On get_key
* return a private copy.
*/
static void set_key(char *key, int index)
{
strcpy(saved_key[index], key);
}
/* Return the stored plaintext candidate for slot `index`. */
static char *get_key(int index)
{
	char *stored = saved_key[index];
	return stored;
}
/*
 * Fast scan: does ANY computed hash match the candidate binary?
 * Only ARCH_SIZE bytes (one native word) are compared here as a quick
 * filter; cmp_one() re-checks the full BINARY_SIZE afterwards.
 * Without OpenMP the loop header is compiled out and only index 0 is
 * checked — safe because MAX_KEYS_PER_CRYPT is 1 in that build.
 */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full-width comparison of one computed hash against the candidate binary. */
static int cmp_one(void *binary, int index)
{
	return memcmp(crypt_out[index], binary, BINARY_SIZE) == 0;
}
/* The full binary was already compared in cmp_one(); nothing more to check. */
static int cmp_exact(char *source, int index)
{
	(void) source;
	(void) index;
	return 1;
}
/*
 * Remember the decoded salt so the next crypt call can combine it with the
 * saved keys to produce ciphertext.
 */
static struct uaf_hash_info *cur_salt;

void VMS_std_set_salt ( void *salt )
{
	cur_salt = (struct uaf_hash_info *) salt;
}
/* Partial-hash accessors: mask the first result word to each table size. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/*
 * Hash the password and salt saved with VMS_std_set_key and VMS_std_set_salt,
 * saving the result in global storage for retrieval by vms_fmt.c module.
 */
int VMS_std_crypt(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		/* uaf_test_password fills crypt_out[index] from the candidate
		 * and the retained cur_salt. Without OpenMP only index 0 runs
		 * (single key per crypt in that configuration, presumably). */
		uaf_test_password (cur_salt, saved_key[index], 0, crypt_out[index]);
	}
	return count;
}
/*
 * Extract salt from ciphertext string to static storage and return
 * pointer to it. Salt is effectively 70-80 bits (username, salt,
 * algorithm, pwdmix flag).
 *
 * Returns a pointer to a static buffer: overwritten on every call,
 * not thread-safe.
 */
char *VMS_std_get_salt(char *ciphertext)
{
	static struct uaf_hash_info pwd;

	memset(&pwd, 0, sizeof(pwd));
	uaf_hash_decode ( ciphertext, &pwd );
#ifdef VMS_DEBUG
	printf("/VMS_STD/ get_salt decoded '%s' to %x/%x-%x-%x-%x-%x %ld\n",
	       ciphertext, pwd.salt, pwd.alg, pwd.username.r40[0], pwd.username.r40[1],
	       pwd.username.r40[2], pwd.username.r40[3], pwd.flags );
#endif
	return (char *) &pwd;
}
/*
 * Extract binary hash from ciphertext into static storage and return
 * pointer to it. The union guarantees VMS_word alignment of the
 * decoded bytes; the buffer is overwritten on every call.
 */
VMS_word *VMS_std_get_binary(char *ciphertext)
{
	static union {
		struct uaf_hash_info pwd;
		VMS_word b[16];
	} out;

	uaf_hash_decode ( ciphertext, &out.pwd );
	return out.b;
}
/*
 * Class record: static registration table wiring this format's
 * parameters and methods into the cracker core.
 */
struct fmt_main fmt_VMS = {
	{
		FORMAT_LABEL,           /* .label */
		FORMAT_NAME,            /* .format_name */
		VMS_ALGORITHM_NAME,     /* .algorithm_name */
		BENCHMARK_COMMENT,      /* .benchmark_comment */
		BENCHMARK_LENGTH,       /* .benchmark_length (pwd break len) */
		0,
		PLAINTEXT_LENGTH,       /* .plaintext_length (max) */
		BINARY_SIZE,            /* .binary_size (quadword) */
		BINARY_ALIGN,
		SALT_SIZE,              /* .salt_size (word) */
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/*
		 * This format supports both case-sensitive and case-insensitive passwords,
		 * so this format should set FMT_CASE
		 */
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },               /* no tunable cost descriptions */
		{ FORMAT_TAG },
		tests
	}, {
		fmt_vms_init,           /* changed for jumbo */
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		(void *(*)(char *))VMS_std_get_binary,
		(void *(*)(char *))VMS_std_get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		(void (*)(void *))VMS_std_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		VMS_std_crypt,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
siemens-s7_fmt_plug.c | /* Siemens S7 authentication protocol cracker. Written by Narendra Kangralkar
* <narendrakangralkar at gmail.com> and Dhiru Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru.kholia at gmail.com>
* and Narendra Kangralkar <narendrakangralkar at gmail.com> and it is hereby
* released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_s7;
#elif FMT_REGISTERS_H
john_register_one(&fmt_s7);
#else
#include "sha.h"
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Siemens-S7"
#define FORMAT_NAME ""
#define FORMAT_TAG "$siemens-s7$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "HMAC-SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH (1 + 10 + 1 + 1 + 1 + 40 + 1 + 40)
#define BINARY_SIZE 20
#define SALT_SIZE 20
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 8
static struct fmt_tests s7_tests[] = {
{"$siemens-s7$1$599fe00cdb61f76cc6e949162f22c95943468acb$002e45951f62602b2f5d15df217f49da2f5379cb", "123"},
{"$siemens-s7$0$387c1fe4ce97e0e71f5a93b4a9557a947cd40d6c$d7789feee651559a09e2f2d92b57306d2835e209", "321"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static int new_keys;
static SHA_CTX *ipad_ctx;
static SHA_CTX *opad_ctx;
unsigned char *challenge;
/*
 * Per-session init: scale keys-per-crypt for OpenMP and allocate the
 * candidate, result, and cached HMAC-context arrays.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* min scaled by thread count; max additionally by OMP_SCALE. */
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
	/* Per-candidate cached inner/outer HMAC states (see crypt_all). */
	ipad_ctx = mem_calloc(self->params.max_keys_per_crypt,
	                      sizeof(*ipad_ctx));
	opad_ctx = mem_calloc(self->params.max_keys_per_crypt,
	                      sizeof(*opad_ctx));
}
/* Release buffers in reverse order of allocation in init. */
static void done(void)
{
	MEM_FREE(opad_ctx);
	MEM_FREE(ipad_ctx);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/*
 * Validate "$siemens-s7$<outcome>$<challenge>$<response>":
 * outcome is '0' or '1', challenge and response are 40 lowercase hex
 * digits each. Works on a strdup'd copy since strtokm mutates it.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	char *ctcopy;
	char *keeptr;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	/* Exact total length check (strnlen caps the scan). */
	if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH)
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$siemens-s7$" */
	if ((p = strtokm(ctcopy, "$")) == NULL)	/* outcome, currently unused */
		goto bail;
	if (strlen(p) != 1 || (*p != '1' && *p != '0')) /* outcome must be '1' or '0' */
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL)	/* challenge */
		goto bail;
	if (strlen(p) != 40 || !ishexlc(p))	/* must be hex string and lower cases*/
		goto bail;
	if ((p = strtokm(NULL, "$")) == NULL)	/* response; Fix bug: #1090 */
		goto bail;
	if (strlen(p) != 40 || !ishexlc(p))
		goto bail;

	MEM_FREE(keeptr);
	return 1;

bail:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Hash versions '0' and '1' were exactly the same.
 * Version '0' is still supported for backwards compatibility,
 * but version '1' is used as the canonical hash representation
 * (so equivalent hashes dedupe to one form).
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH+1];

	strnzcpy(out, ciphertext, CIPHERTEXT_LENGTH+1);
	/* Rewrite the version digit in place: '0' -> '1'. */
	if ( out[FORMAT_TAG_LEN] == '0')
		out[FORMAT_TAG_LEN] = '1';
	return out;
}
/*
 * Decode the 40-hex-digit challenge field into a static 20-byte
 * buffer (the salt). Assumes the ciphertext already passed valid().
 * Returns a pointer to static storage, overwritten each call.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static unsigned char lchallenge[20];

	ctcopy += FORMAT_TAG_LEN;	/* skip over "$siemens-s7$" */
	p = strtokm(ctcopy, "$");	/* outcome (ignored) */
	p = strtokm(NULL, "$");		/* challenge */
	for (i = 0; i < 20; i++)
		lchallenge[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)lchallenge;
}
/*
 * Decode the final hex field (the HMAC response) into a static,
 * word-aligned 20-byte buffer. The union forces the alignment.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	/* Last '$'-delimited field is the response. */
	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/* Retain a pointer to the 20-byte challenge; no copy is made. */
static void set_salt(void *salt)
{
	challenge = (unsigned char*)salt;
}
/*
 * HMAC-SHA1(key = SHA1(password), msg = challenge) for every candidate.
 * The inner/outer pad states depend only on the password, so they are
 * computed once per key change (new_keys) and cached in ipad_ctx /
 * opad_ctx; per-salt work is then just two SHA1 finishes.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index++)
#endif
	{
		unsigned char buf[20];
		SHA_CTX ctx;

		if (new_keys) {
			unsigned char pad[20];
			int i;

			/* Key is the SHA1 digest of the password. */
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
			SHA1_Final(buf, &ctx);
			/* ipad: 20 key bytes XOR 0x36, then 44 raw 0x36 bytes
			 * complete the 64-byte block. */
			for (i = 0; i < 20; ++i) {
				pad[i] = buf[i] ^ 0x36;
			}
			SHA1_Init(&ipad_ctx[index]);
			SHA1_Update(&ipad_ctx[index], pad, 20);
			SHA1_Update(&ipad_ctx[index], "\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36", 44);
			/* opad: same construction with 0x5C. */
			for (i = 0; i < 20; ++i) {
				pad[i] = buf[i] ^ 0x5C;
			}
			SHA1_Init(&opad_ctx[index]);
			SHA1_Update(&opad_ctx[index], pad, 20);
			SHA1_Update(&opad_ctx[index], "\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C", 44);
		}
		/* inner = SHA1(ipad || challenge); outer = SHA1(opad || inner) */
		memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
		SHA1_Update(&ctx, challenge, 20);
		SHA1_Final(buf, &ctx);
		memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
		SHA1_Update(&ctx, buf, 20);
		SHA1_Final((unsigned char*)(crypt_out[index]), &ctx);
	}
	new_keys = 0;
	return count;
}
/* Quick reject: compare only the first 32-bit word of each result. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (*(uint32_t*)binary == crypt_out[index][0])
			return 1;
	return 0;
}
/* Full 20-byte comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* cmp_one already compares the full binary; nothing left to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate (bounded copy) and flag the HMAC pad caches stale. */
static void s7_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
	new_keys = 1;
}
/* Return the stored candidate; points into saved_key storage. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * djb2-xor hash over the raw 20-byte challenge, folded into the
 * salt hash table size.
 */
static int salt_hash(void *salt)
{
	const unsigned char *bytes = (const unsigned char *)salt;
	unsigned int h = 5381;
	unsigned int i;

	for (i = 0; i < SALT_SIZE; i++)
		h = ((h << 5) + h) ^ bytes[i];
	return h & (SALT_HASH_SIZE - 1);
}
/* Static registration table wiring this format into the cracker core. */
struct fmt_main fmt_s7 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },               /* no tunable cost descriptions */
		{ FORMAT_TAG },
		s7_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		s7_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
/* get_hash_0..6 come from the shared template header. */
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
parallel-3.c | // { dg-do compile }
extern int printf (const char *, ...);
/* Compile-only GCC OpenMP test (dg-do compile); do not change semantics. */
int main (void)
{
	double d = 6;
	int i = 1;
	/* num_threads(4 + i) is evaluated on entry to the region using the
	 * outer i (== 1); inside the region i is private and set to 4. */
#pragma omp parallel shared(d) private(i) num_threads (4 + i)
	{
		i = 4;
		printf ("%s %d %g\n", "Hello, World!", i, d);
	}
	return 0;
}
|
mat_mul_p4a.c | /*
* file for mat_mul_dyn.c
*/
#include <stdio.h>
#include <stdlib.h>
#include "./mat_mul.h"
void mat_mul(int n, int **a, int **b, int **c);
int compute_cijk(int i, int j, int k, int **a, int **b);
void mat_mul_function_calls(int n, int **a, int **b, int **c);
void compute_cijk2(int i, int j, int k, int **a, int **b, int **c);
void mat_mul_function_calls2(int n, int **a, int **b, int **c);
void compute_cij(int i, int j, int n, int **a, int **b, int **c);
void mat_mul_function_calls3(int n, int **a, int **b, int **c);
void compute_ci(int i, int n, int **a, int **b, int **c);
void mat_mul_function_calls4(int n, int **a, int **b, int **c);
void mat_mul_loop_unroll(int n, int **a, int **b, int **c);
void mat_mul_loop_unroll2(int n, int **a, int **b, int **c);
void mat_mul_loop_unroll3(int n, int **a, int **b, int **c);
void mat_mul_arr_priv(int n, int **a, int **b, int **c);
void mat_mul_arr_priv2(int n, int **a, int **b, int **c);
void mat_mul_pointer_alias(int n, int **a, int **b, int **c);
void mat_mul_pointer_alias2(int n, int **a, int **b, int **c);
/* Naive O(n^3) matrix multiply: c = a * b for n x n matrices. */
void mat_mul(int n, int **a, int **b, int **c)
{
	int row, col, kk;

#pragma omp parallel for private(col, kk)
	for (row = 0; row < n; row++) {
		for (col = 0; col < n; col++) {
			c[row][col] = 0;
			for (kk = 0; kk < n; kk++)
				c[row][col] += a[row][kk] * b[kk][col];
		}
	}
}
/* One product term a[i][k] * b[k][j] of the (i,j) dot product. */
int compute_cijk(int i, int j, int k, int **a, int **b)
{
	const int left = a[i][k];
	const int right = b[k][j];

	return left * right;
}
/* Same triple loop as mat_mul, but each term goes through compute_cijk. */
void mat_mul_function_calls(int n, int **a, int **b, int **c)
{
	int row, col, kk;

#pragma omp parallel for private(col, kk)
	for (row = 0; row < n; row++) {
		for (col = 0; col < n; col++) {
			c[row][col] = 0;
			for (kk = 0; kk < n; kk++)
				c[row][col] += compute_cijk(row, col, kk, a, b);
		}
	}
}
/* Accumulate one product term of the (i,j) dot product into c[i][j]. */
void compute_cijk2(int i, int j, int k, int **a, int **b, int **c)
{
	const int term = a[i][k] * b[k][j];

	c[i][j] += term;
}
/* Triple loop where the accumulation itself is done by compute_cijk2. */
void mat_mul_function_calls2(int n, int **a, int **b, int **c)
{
	int row, col, kk;

#pragma omp parallel for private(col, kk)
	for (row = 0; row < n; row++) {
		for (col = 0; col < n; col++) {
			c[row][col] = 0;
			for (kk = 0; kk < n; kk++)
				compute_cijk2(row, col, kk, a, b, c);
		}
	}
}
/* Full dot product of row i of a and column j of b, stored in c[i][j]. */
void compute_cij(int i, int j, int n, int **a, int **b, int **c)
{
	int kk;

	c[i][j] = 0;
	for (kk = 0; kk < n; kk++)
		c[i][j] += a[i][kk] * b[kk][j];
}
/* Delegate each c[i][j] dot product to compute_cij. */
void mat_mul_function_calls3(int n, int **a, int **b, int **c)
{
	int row, col;

#pragma omp parallel for private(col)
	for (row = 0; row < n; row++)
		for (col = 0; col < n; col++)
			compute_cij(row, col, n, a, b, c);
}
/* Compute the whole row i of c = a * b, with columns in parallel. */
void compute_ci(int i, int n, int **a, int **b, int **c)
{
	int col, kk;

#pragma omp parallel for private(kk)
	for (col = 0; col < n; col++) {
		c[i][col] = 0;
		for (kk = 0; kk < n; kk++)
			c[i][col] += a[i][kk] * b[kk][col];
	}
}
/* Delegate each output row to compute_ci. */
void mat_mul_function_calls4(int n, int **a, int **b, int **c)
{
	int row;

#pragma omp parallel for
	for (row = 0; row < n; row++)
		compute_ci(row, n, a, b, c);
}
/*
 * Matrix multiply with the k loop unrolled by two.
 * Assumes n is even: the last k term would be dropped for odd n.
 */
void mat_mul_loop_unroll(int n, int **a, int **b, int **c)
{
	int i, j, k;

#pragma omp parallel for private(j, k)
	for(i = 0; i <= n-1; i += 1)
		for(j = 0; j <= n-1; j += 1) {
			c[i][j] = 0;
			for(k = 0; k <= n-1-1; k += 2)
				// we asume that n is even
				c[i][j] += a[i][k]*b[k][j]+a[i][k+1]*b[k+1][j];
		}
	return;
}
/*
 * Matrix multiply with both the j and k loops unrolled by two
 * (two output columns per iteration). Assumes n is even.
 */
void mat_mul_loop_unroll2(int n, int **a, int **b, int **c)
{
	int i, j, k;

#pragma omp parallel for private(j, k)
	for(i = 0; i <= n-1; i += 1)
		for(j = 0; j <= n-1-1; j += 2) {
			// we asume that n is even
			c[i][j] = 0;
			c[i][j+1] = 0;
			for(k = 0; k <= n-1-1; k += 2) {
				c[i][j] += a[i][k]*b[k][j]+a[i][k+1]*b[k+1][j];
				c[i][j+1] += a[i][k]*b[k][j+1]+a[i][k+1]*b[k+1][j+1];
			}
		}
	return;
}
/*
 * Matrix multiply with i, j, and k all unrolled by two
 * (a 2x2 output tile per iteration). Assumes n is even.
 */
void mat_mul_loop_unroll3(int n, int **a, int **b, int **c)
{
	int i, j, k;

#pragma omp parallel for private(j, k)
	for(i = 0; i <= n-1-1; i += 2)
		// we asume that n is even
		for(j = 0; j <= n-1-1; j += 2) {
			c[i][j] = 0;
			c[i][j+1] = 0;
			c[i+1][j] = 0;
			c[i+1][j+1] = 0;
			for(k = 0; k <= n-1-1; k += 2) {
				c[i][j] += a[i][k]*b[k][j]+a[i][k+1]*b[k+1][j];
				c[i][j+1] += a[i][k]*b[k][j+1]+a[i][k+1]*b[k+1][j+1];
				c[i+1][j] += a[i+1][k]*b[k][j]+a[i+1][k+1]*b[k+1][j];
				c[i+1][j+1] += a[i+1][k]*b[k][j+1]+a[i+1][k+1]*b[k+1][j+1];
			}
		}
	return;
}
/*
 * Matrix multiply routed through an index array d (array privatization
 * test case: d[i] always ends up equal to i, so c[d[i]][j] is c[i][j]).
 * Fix: d was malloc'd but never freed (leak on every call), and the
 * allocation result was not checked.
 */
void mat_mul_arr_priv(int n, int **a, int **b, int **c)
{
	int i, j, k;
	int *d = malloc(sizeof(int)*n);

	if (d == NULL)
		return;	/* allocation failed: leave c untouched */
	for(i = 0; i <= n-1; i += 1) {
		d[i] = 0;
		for(j = 0; j <= n-1; j += 1) {
			d[i] = i;
			c[d[i]][j] = 0;
			for(k = 0; k <= n-1; k += 1)
				c[i][j] += a[i][k]*b[k][j];
		}
	}
	free(d);	/* fix: previously leaked */
	return;
}
/*
 * Matrix multiply accumulating each dot product in d[i] before storing
 * it to c[i][j] (array privatization test case; each parallel i owns
 * its own d[i] slot, so there is no race).
 * Fix: d was malloc'd but never freed (leak on every call), and the
 * allocation result was not checked.
 */
void mat_mul_arr_priv2(int n, int **a, int **b, int **c)
{
	int i, j, k;
	int *d = malloc(sizeof(int)*n);

	if (d == NULL)
		return;	/* allocation failed: leave c untouched */
#pragma omp parallel for private(k, j)
	for(i = 0; i <= n-1; i += 1)
		for(j = 0; j <= n-1; j += 1) {
			d[i] = 0;
			for(k = 0; k <= n-1; k += 1)
				d[i] += a[i][k]*b[k][j];
			c[i][j] = d[i];
		}
	free(d);	/* fix: previously leaked */
	return;
}
/*
 * Same computation as mat_mul; the original expressed the row access
 * as *(c+i) / *(a+i), which is identical to plain indexing.
 */
void mat_mul_pointer_alias(int n, int **a, int **b, int **c)
{
	int i, j, k;

#pragma omp parallel for private(j, k)
	for (i = 0; i < n; i++) {
		for (j = 0; j < n; j++) {
			c[i][j] = 0;
			for (k = 0; k < n; k++)
				c[i][j] += a[i][k] * b[k][j];
		}
	}
}
/*
 * Pointer-aliasing test case: d aliases c. The first loop sets
 * c[i][0] = 0 and then c[i][j] = d[i][j-1]; because d == c, the zero
 * propagates across each row, so the whole of c is zeroed before the
 * second loop accumulates the product. The alias is deliberate.
 */
void mat_mul_pointer_alias2(int n, int **a, int **b, int **c)
{
	int i, j, k;
	int **d = c;	/* intentional alias of c */

#pragma omp parallel for private(j)
	for(i = 0; i <= n-1; i += 1) {
		c[i][0] = 0;
		for(j = 1; j <= n-1; j += 1)
			c[i][j] = d[i][j-1];	/* reads the just-zeroed neighbor */
	}
#pragma omp parallel for private(j, k)
	for(i = 0; i <= n-1; i += 1)
		for(j = 0; j <= n-1; j += 1)
			for(k = 0; k <= n-1; k += 1)
				c[i][j] += a[i][k]*b[k][j];
	return;
}
|
GB_unaryop__abs_uint32_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint32_uint16
// op(A') function: GB_tran__abs_uint32_uint16
// C type: uint32_t
// A type: uint16_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/*
 * Cx [p] = op (cast (Ax [p])) for all anz entries, parallelized with a
 * static schedule over nthreads. Returns GrB_NO_VALUE when this
 * operator/type combination is compiled out via GB_DISABLE.
 */
GrB_Info GB_unop__abs_uint32_uint16
(
    uint32_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/*
 * C = op (cast (A')): the transpose kernel body is provided by the
 * shared template GB_unaryop_transpose.c, specialized here through the
 * GB_* macros defined above. Returns GrB_NO_VALUE when compiled out.
 */
GrB_Info GB_tran__abs_uint32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
deconvolution_pack16to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Deconvolution (transposed convolution) with 16-packed input channels and
// 4-packed output channels, implemented as a gather over the input: for each
// output pixel, find the input pixels/kernel taps that would have written to
// it and accumulate 16-in x 4-out products with FMA.
static void deconvolution_pack16to4_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // 4 output channels accumulated per pixel
                __m128 _sum = _mm_setzero_ps();

                if (bias_data_ptr)
                {
                    _sum = _mm_loadu_ps(bias_data_ptr + p * 4);
                }

                const float* kptr = weight_data_packed.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Map output row i back to the contributing input
                        // row; skip taps that fall between strides or
                        // outside the input.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            // 16 packed input channels at this pixel
                            const float* sptr = m.row(sy) + sx * 16;

                            // 16 in-channels x 4 out-channels per tap
                            int k = (y * kernel_w + x) * 64;

                            __m128 _val0 = _mm_broadcast_ss(sptr);
                            __m128 _val1 = _mm_broadcast_ss(sptr + 1);
                            __m128 _val2 = _mm_broadcast_ss(sptr + 2);
                            __m128 _val3 = _mm_broadcast_ss(sptr + 3);
                            __m128 _val4 = _mm_broadcast_ss(sptr + 4);
                            __m128 _val5 = _mm_broadcast_ss(sptr + 5);
                            __m128 _val6 = _mm_broadcast_ss(sptr + 6);
                            __m128 _val7 = _mm_broadcast_ss(sptr + 7);
                            __m128 _val8 = _mm_broadcast_ss(sptr + 8);
                            __m128 _val9 = _mm_broadcast_ss(sptr + 9);
                            __m128 _vala = _mm_broadcast_ss(sptr + 10);
                            __m128 _valb = _mm_broadcast_ss(sptr + 11);
                            __m128 _valc = _mm_broadcast_ss(sptr + 12);
                            __m128 _vald = _mm_broadcast_ss(sptr + 13);
                            __m128 _vale = _mm_broadcast_ss(sptr + 14);
                            __m128 _valf = _mm_broadcast_ss(sptr + 15);

                            __m128 _w0 = _mm_load_ps(kptr + k);
                            __m128 _w1 = _mm_load_ps(kptr + k + 4);
                            __m128 _w2 = _mm_load_ps(kptr + k + 8);
                            __m128 _w3 = _mm_load_ps(kptr + k + 12);
                            __m128 _w4 = _mm_load_ps(kptr + k + 16);
                            __m128 _w5 = _mm_load_ps(kptr + k + 20);
                            __m128 _w6 = _mm_load_ps(kptr + k + 24);
                            __m128 _w7 = _mm_load_ps(kptr + k + 28);
                            __m128 _w8 = _mm_load_ps(kptr + k + 32);
                            __m128 _w9 = _mm_load_ps(kptr + k + 36);
                            __m128 _wa = _mm_load_ps(kptr + k + 40);
                            __m128 _wb = _mm_load_ps(kptr + k + 44);
                            __m128 _wc = _mm_load_ps(kptr + k + 48);
                            __m128 _wd = _mm_load_ps(kptr + k + 52);
                            __m128 _we = _mm_load_ps(kptr + k + 56);
                            __m128 _wf = _mm_load_ps(kptr + k + 60);

                            _sum = _mm_fmadd_ps(_val0, _w0, _sum);
                            _sum = _mm_fmadd_ps(_val1, _w1, _sum);
                            _sum = _mm_fmadd_ps(_val2, _w2, _sum);
                            _sum = _mm_fmadd_ps(_val3, _w3, _sum);
                            _sum = _mm_fmadd_ps(_val4, _w4, _sum);
                            _sum = _mm_fmadd_ps(_val5, _w5, _sum);
                            _sum = _mm_fmadd_ps(_val6, _w6, _sum);
                            _sum = _mm_fmadd_ps(_val7, _w7, _sum);
                            _sum = _mm_fmadd_ps(_val8, _w8, _sum);
                            _sum = _mm_fmadd_ps(_val9, _w9, _sum);
                            _sum = _mm_fmadd_ps(_vala, _wa, _sum);
                            _sum = _mm_fmadd_ps(_valb, _wb, _sum);
                            _sum = _mm_fmadd_ps(_valc, _wc, _sum);
                            _sum = _mm_fmadd_ps(_vald, _wd, _sum);
                            _sum = _mm_fmadd_ps(_vale, _we, _sum);
                            _sum = _mm_fmadd_ps(_valf, _wf, _sum);
                        }
                    }

                    kptr += maxk * 64;
                }

                _sum = activation_sse(_sum, activation_type, activation_params);

                _mm_storeu_ps(outptr, _sum);
                outptr += 4;
            }
        }
    }
}
|
micro-app-aos-openmp.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <getopt.h>
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>
#define NPOINTS 10000
#define NEDGES 10000
struct edge {
int v0;
int v1;
float data;
float v0_pt_data[3];
float v1_pt_data[3];
};
struct edge edges[NEDGES];
float pt_data[NPOINTS][3];
float edge_data[NEDGES];
/* Print command-line usage to stdout (exact text preserved). */
void print_help() {
	fputs("Usage: \n"
	      "\t --help print this message and exit \n"
	      "\t --type Type of graph, must be one of:\n"
	      "\t\t\t pure_random \n"
	      "\t\t\t regular_random \n"
	      "\t\t\t contiguous \n"
	      "\t --nloops Number of repetitions, must be \n"
	      "\t at least one. \n"
	      "\t --file File from which to read graph \n", stdout);
}
double timer() {
struct timeval tp;
struct timezone tzp;
long i;
i = gettimeofday(&tp, &tzp);
return ((double)tp.tv_sec) + ((double) tp.tv_usec) * 1e-6;
}
/*
 * Build the edges[] array by calling the Lua function create_graph
 * (from graph.lua) and walking the returned adjacency table.
 * Returns 0 on success, -1 if no edges were produced.
 */
int graph_init(char* graph_type, char* fname) {
	lua_State *L;
	int i, j, k, v;

	L = luaL_newstate();
	luaL_openlibs(L);
	luaL_loadfile(L, "graph.lua");
	lua_pcall(L, 0, 0, 0);
	lua_getglobal(L, "create_graph");
	lua_pushstring(L, graph_type);
	lua_pushinteger(L, NPOINTS);
	lua_pushinteger(L, NEDGES);
	/* The filename argument is optional. */
	if (fname != NULL) {
		lua_pushstring(L, fname);
		lua_call(L, 4, 1);
	} else {
		lua_call(L, 3, 1);
	}
	/* Table is now sitting at the top of the stack */
	i = 0;
	lua_pushnil(L); /* Make sure lua_next starts at beginning */
	while (lua_next(L, -2) != 0) {
		// fetch first key: presumably the source vertex id (1-based)
		k = lua_tointeger(L, -2);
		lua_pushnil(L);
		while (lua_next(L, -2) != 0) { // loop over neighbors
			lua_pop(L,1);
			/* the remaining key is the neighbor vertex id */
			v = lua_tointeger(L, -1);
			// build edges array here (Lua ids are 1-based, ours 0-based)
			// NOTE(review): no bound check against NEDGES here -- assumes
			// graph.lua never returns more than NEDGES edges; verify.
			edges[i].v0 = k - 1;
			edges[i].v1 = v - 1;
			for (j = 0; j < 3; j++) {
				edges[i].v0_pt_data[j] = 0;
				edges[i].v1_pt_data[j] = 0;
			}
			i++;
		}
		lua_pop(L,1);
	}
	lua_close(L);
	if (i == 0) {
		return -1;
	} else {
		return 0;
	}
}
/* Seed every point's three data components with 1. Always returns 0. */
int data_init() {
	int i, j;

	for (i = 0; i < NPOINTS; i++) {
		for (j = 0; j < 3; j++) {
			pt_data[i][j] = 1;
		}
	}
	return 0;
}
/* Seed every edge weight with 1. Always returns 0. */
int edge_data_init() {
	int i = 0;

	while (i < NEDGES) {
		edge_data[i] = 1;
		i++;
	}
	return 0;
}
/*
 * Gather phase: copy each edge's endpoint point-data and its edge
 * weight into the edge record itself (AoS layout). Always returns 0.
 * Fix: removed the unused variable j (it was also listed in the
 * private clause despite never being referenced).
 */
int edge_gather() {
	int i;
	int v0;
	int v1;

#pragma omp parallel for \
	private(i, v0, v1)
	for (i = 0; i < NEDGES; i++) {
		v0 = edges[i].v0;
		v1 = edges[i].v1;
		edges[i].v0_pt_data[0] = pt_data[v0][0];
		edges[i].v0_pt_data[1] = pt_data[v0][1];
		edges[i].v0_pt_data[2] = pt_data[v0][2];
		edges[i].v1_pt_data[0] = pt_data[v1][0];
		edges[i].v1_pt_data[1] = pt_data[v1][1];
		edges[i].v1_pt_data[2] = pt_data[v1][2];
		edges[i].data = edge_data[i];
	}
	return 0;
}
/*
 * Compute phase: combine the two endpoint vectors with the edge weight
 * and write the result back to both endpoint slots of the edge.
 * Always returns 0.
 * Fix: removed the unused variable j (it was also listed in the
 * private clause despite never being referenced).
 */
int edge_compute() {
	int i;
	float v0_p0, v0_p1, v0_p2;
	float v1_p0, v1_p1, v1_p2;
	float x0, x1, x2;
	float e_data;

#pragma omp parallel for \
	private(i, v0_p0, v0_p1, v0_p2) \
	private(v1_p0, v1_p1, v1_p2) \
	private(x0, x1, x2, e_data)
	for (i = 0; i < NEDGES; i++) {
		v0_p0 = edges[i].v0_pt_data[0];
		v0_p1 = edges[i].v0_pt_data[1];
		v0_p2 = edges[i].v0_pt_data[2];
		v1_p0 = edges[i].v1_pt_data[0];
		v1_p1 = edges[i].v1_pt_data[1];
		v1_p2 = edges[i].v1_pt_data[2];
		e_data = edges[i].data;
		x0 = (v0_p0 + v1_p0) * e_data;
		x1 = (v0_p1 + v1_p1) * e_data;
		x2 = (v0_p2 + v1_p2) * e_data;
		edges[i].v0_pt_data[0] = x0;
		edges[i].v0_pt_data[1] = x1;
		edges[i].v0_pt_data[2] = x2;
		edges[i].v1_pt_data[0] = x0;
		edges[i].v1_pt_data[1] = x1;
		edges[i].v1_pt_data[2] = x2;
	}
	return 0;
}
/*
 * Scatter phase: accumulate each edge's endpoint contributions back
 * into the shared pt_data array. Multiple edges can share a vertex,
 * so every accumulation is protected by an omp atomic. Returns 0.
 */
int edge_scatter() {
	int i;
	int v0;
	int v1;

#pragma omp parallel for \
	private(i, v0, v1)
	for (i = 0; i < NEDGES; i++) {
		v0 = edges[i].v0;
		v1 = edges[i].v1;
#pragma omp atomic
		pt_data[v0][0] += edges[i].v0_pt_data[0];
#pragma omp atomic
		pt_data[v0][1] += edges[i].v0_pt_data[1];
#pragma omp atomic
		pt_data[v0][2] += edges[i].v0_pt_data[2];
#pragma omp atomic
		pt_data[v1][0] += edges[i].v1_pt_data[0];
#pragma omp atomic
		pt_data[v1][1] += edges[i].v1_pt_data[1];
#pragma omp atomic
		pt_data[v1][2] += edges[i].v1_pt_data[2];
	}
	return 0;
}
/*
 * Parse options, build the graph via Lua, then time nloops iterations
 * of the gather/compute/scatter pipeline.
 * Fixes: gt and fname were initialized to "" so the NULL checks below
 * (and graph_init's fname != NULL branch) never distinguished "not
 * given" from "given"; and long_opts lacked the zero-filled terminator
 * that getopt_long requires (out-of-bounds read on unknown options).
 */
int main(int argc, char** argv) {
	int i;
	int rv;
	double time0, time1;
	int c, opt_i;
	int nloops = 0;
	char* gt = NULL;	/* fix: was "", defeating the NULL check */
	char* fname = NULL;	/* fix: was "", defeating graph_init's check */
	static struct option long_opts[] = {
		{"help", no_argument, 0, 0},
		{"type", required_argument, 0, 0},
		{"nloops", required_argument, 0, 0},
		{"file", required_argument, 0, 0},
		{0, 0, 0, 0}	/* fix: required terminator for getopt_long */
	};

	/* Parse command-line arguments */
	while (1) {
		c = getopt_long(argc, argv, "",
				long_opts, &opt_i);
		if (c == -1) {
			break;
		}
		if (c == 0) {
			switch (opt_i) {
			case 0:
				print_help();
				exit(0);
			case 1:
				gt = optarg;
				break;
			case 2:
				nloops = atoi(optarg);
				break;
			case 3:
				fname = optarg;
				break;
			}
		} else {
			print_help();
			exit(0);
		}
	}
	/* check for errors: --type and a positive --nloops are mandatory */
	if (gt == NULL || nloops < 1) {
		print_help();
		exit(0);
	}
	// initialize data structures (fname may be NULL; graph_init handles it)
	rv = graph_init(gt, fname);
	if (rv < 0) {
		printf("Error creating graph. \n");
		exit(0);
	}
	data_init();
	edge_data_init();
	// loop
	time0 = timer();
	for (i = 0; i < nloops; i++) {
		edge_gather();
		edge_compute();
		edge_scatter();
	}
	time1 = timer();
	// print results
	for (i = 0; i < 10; i++) {
		printf("%i : %f %f %f \n", i, pt_data[i][0], pt_data[i][1], pt_data[i][2]);
	}
	printf("Time: %f s \n", (time1 - time0) / ((float) nloops));
	return 0;
}
|
PGraph.h | // SPDX-License-Identifier: BSD-2-Clause
#ifndef _PGRAPH_H_
#define _PGRAPH_H_
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <HostLink.h>
#include <config.h>
#include <POLite.h>
#include <POLite/Seq.h>
#include <POLite/Graph.h>
#include <POLite/Placer.h>
#include <POLite/Bitmap.h>
#include <POLite/ProgRouters.h>
#include <type_traits>
#include <tinsel-interface.h>
// Nodes of a POETS graph are devices
typedef NodeId PDeviceId;
// This structure holds a group of receiving edges on a thread.
// All of the edges originate from the same output pin.
template <typename E> struct PReceiverGroup {
  // Thread id where all the receivers reside
  uint32_t threadId;
  // A sequence of receiving devices on that thread
  // (SmallSeq: inline small-capacity sequence of in-edge records)
  SmallSeq<PInEdge<E>> receivers;
};
// This structure holds info about an edge destination
// (used when grouping a pin's out-edges by destination thread).
struct PEdgeDest {
  // Index of edge in outgoing edge list
  uint32_t index;
  // Destination device
  PDeviceId dest;
  // Address where destination is located
  PDeviceAddr addr;
};
// Comparison function for PEdgeDest
// (Useful to sort destinations by thread id of destination)
inline int cmpEdgeDest(const void* e0, const void* e1) {
PEdgeDest* d0 = (PEdgeDest*) e0;
PEdgeDest* d1 = (PEdgeDest*) e1;
return getThreadId(d0->addr) < getThreadId(d1->addr);
}
// POETS graph
template <typename DeviceType,
typename S, typename E, typename M> class PGraph {
private:
// Align address to 2^n byte boundary
inline uint32_t align(uint32_t n, uint32_t addr) {
if ((addr & (1<<n)-1) == 0) return addr;
return ((addr >> n) + 1) << n;
}
// Align address to 32-bit word boundary
uint32_t wordAlign(uint32_t addr) { return align(2, addr); }
// Align address to cache-line boundary
uint32_t cacheAlign(uint32_t addr) {
return align(TinselLogBytesPerLine, addr);
}
// Helper function
inline uint32_t min(uint32_t x, uint32_t y) { return x < y ? x : y; }
// Number of FPGA boards available
uint32_t meshLenX;
uint32_t meshLenY;
// Number of FPGA boards to use
uint32_t numBoardsX;
uint32_t numBoardsY;
// Multicast routing tables:
// Sequence of outgoing edges for every (device, pin) pair
Seq<POutEdge>*** outTable;
// Sequence of in-edge headers, for each thread
Seq<PInHeader<E>>** inTableHeaders;
// Remaining in-edges that don't fit in the header table, for each thread
Seq<PInEdge<E>>** inTableRest;
// Bitmap denoting used space in header table, for each thread
Bitmap** inTableBitmaps;
// Programmable routing tables
ProgRouterMesh* progRouterTables;
// Receiver groups (used internally by some methods, but declared once
// to avoid repeated allocation)
PReceiverGroup<E> groups[TinselThreadsPerMailbox];
// Generic constructor
void constructor(uint32_t lenX, uint32_t lenY) {
meshLenX = lenX;
meshLenY = lenY;
char* str = getenv("POLITE_BOARDS_X");
int nx = str ? atoi(str) : meshLenX;
str = getenv("POLITE_BOARDS_Y");
int ny = str ? atoi(str) : meshLenY;
setNumBoards(nx, ny);
numDevices = 0;
devices = NULL;
toDeviceAddr = NULL;
numDevicesOnThread = NULL;
fromDeviceAddr = NULL;
vertexMem = NULL;
vertexMemSize = NULL;
vertexMemBase = NULL;
inEdgeHeaderMem = NULL;
inEdgeHeaderMemSize = NULL;
inEdgeHeaderMemBase = NULL;
inEdgeRestMem = NULL;
inEdgeRestMemSize = NULL;
inEdgeRestMemBase = NULL;
outEdgeMem = NULL;
outEdgeMemSize = NULL;
outEdgeMemBase = NULL;
mapVerticesToDRAM = false;
mapInEdgeHeadersToDRAM = true;
mapInEdgeRestToDRAM = true;
mapOutEdgesToDRAM = true;
outTable = NULL;
inTableHeaders = NULL;
inTableRest = NULL;
inTableBitmaps = NULL;
progRouterTables = NULL;
chatty = 0;
str = getenv("POLITE_CHATTY");
if (str != NULL) {
chatty = !strcmp(str, "0") ? 0 : 1;
}
}
public:
  // Number of devices
  uint32_t numDevices;
  // Graph containing device ids and connections
  Graph graph;
  // Edge labels: has same structure as graph.outgoing
  Seq<Seq<E>*> edgeLabels;
  // Mapping from device id to device state
  // (Not valid until the mapper is called)
  PState<S>** devices;
  // Mapping from thread id to number of devices on that thread
  // (Not valid until the mapper is called)
  uint32_t* numDevicesOnThread;
  // Mapping from device id to device address and back
  // (Not valid until the mapper is called)
  PDeviceAddr* toDeviceAddr;  // Device id -> device address
  PDeviceId** fromDeviceAddr; // Device address -> device id
  // Each thread's vertex mem and thread mem regions: host-side shadow
  // buffer, byte size, and tinsel base address
  // (Not valid until the mapper is called)
  uint8_t** vertexMem; uint8_t** threadMem;
  uint32_t* vertexMemSize; uint32_t* threadMemSize;
  uint32_t* vertexMemBase; uint32_t* threadMemBase;
  // Each thread's in-edge and out-edge regions
  // (Not valid until the mapper is called)
  uint8_t** inEdgeHeaderMem; uint8_t** inEdgeRestMem;
  uint32_t* inEdgeHeaderMemSize; uint32_t* inEdgeRestMemSize;
  uint32_t* inEdgeHeaderMemBase; uint32_t* inEdgeRestMemBase;
  uint8_t** outEdgeMem;
  uint32_t* outEdgeMemSize;
  uint32_t* outEdgeMemBase;
  // Where to map the various regions
  // (If false, map to SRAM instead)
  bool mapVerticesToDRAM;
  bool mapInEdgeHeadersToDRAM;
  bool mapInEdgeRestToDRAM;
  bool mapOutEdgesToDRAM;
  // Allow mapper to print useful information to stdout
  uint32_t chatty;
// Setter for number of boards to use
// (Fails hard if the request exceeds the available mesh)
void setNumBoards(uint32_t x, uint32_t y) {
  if (x > meshLenX || y > meshLenY) {
    // Report the *requested* dimensions; the previous code printed
    // numBoardsX/numBoardsY, which are not yet assigned at this point
    // (this method is called from the constructor before they are set)
    printf("Mapper: %u x %u boards requested, %u x %u available\n",
      x, y, meshLenX, meshLenY);
    exit(EXIT_FAILURE);
  }
  numBoardsX = x;
  numBoardsY = y;
}
// Create a new device, returning its id
// (Also allocates an empty label sequence for the device's out-edges)
inline PDeviceId newDevice() {
  PDeviceId id = graph.newNode();
  numDevices = numDevices + 1;
  edgeLabels.append(new SmallSeq<E>);
  return id;
}
// Add a connection between devices, using the given output pin
// (The edge label is default-constructed; use addLabelledEdge to
// attach a specific label)
inline void addEdge(PDeviceId from, PinId pin, PDeviceId to) {
  if (pin >= POLITE_NUM_PINS) {
    printf("addEdge: pin exceeds POLITE_NUM_PINS\n");
    exit(EXIT_FAILURE);
  }
  graph.addEdge(from, pin, to);
  E edge;
  edgeLabels.elems[from]->append(edge);
}
// Add labelled edge using given output pin
// (Now validates the pin bound, consistent with addEdge; previously an
// out-of-range pin was silently accepted and would corrupt the
// per-pin routing tables built later)
void addLabelledEdge(E edge, PDeviceId x, PinId pin, PDeviceId y) {
  if (pin >= POLITE_NUM_PINS) {
    printf("addLabelledEdge: pin exceeds POLITE_NUM_PINS\n");
    exit(EXIT_FAILURE);
  }
  graph.addEdge(x, pin, y);
  edgeLabels.elems[x]->append(edge);
}
// Allocate SRAM and DRAM partitions
// (For every thread: compute the size of each memory region -- thread
// state, vertices, in-edge headers, in-edge overflow, out-edges --
// check totals against the partition limits, allocate host-side shadow
// buffers, and assign tinsel base addresses to each region)
void allocatePartitions() {
  // Decide a maximum partition size that is reasonable
  // SRAM: Partition size minus 2048 bytes for the stack
  uint32_t maxSRAMSize = (1<<TinselLogBytesPerSRAMPartition) - 2048;
  // DRAM: Partition size minus 65536 bytes for the stack
  uint32_t maxDRAMSize = (1<<TinselLogBytesPerDRAMPartition) - 65536;
  // Allocate partition sizes and bases
  vertexMem = (uint8_t**) calloc(TinselMaxThreads, sizeof(uint8_t*));
  vertexMemSize = (uint32_t*) calloc(TinselMaxThreads, sizeof(uint32_t));
  vertexMemBase = (uint32_t*) calloc(TinselMaxThreads, sizeof(uint32_t));
  threadMem = (uint8_t**) calloc(TinselMaxThreads, sizeof(uint8_t*));
  threadMemSize = (uint32_t*) calloc(TinselMaxThreads, sizeof(uint32_t));
  threadMemBase = (uint32_t*) calloc(TinselMaxThreads, sizeof(uint32_t));
  inEdgeHeaderMem = (uint8_t**) calloc(TinselMaxThreads, sizeof(uint8_t*));
  inEdgeHeaderMemSize =
    (uint32_t*) calloc(TinselMaxThreads, sizeof(uint32_t));
  inEdgeHeaderMemBase =
    (uint32_t*) calloc(TinselMaxThreads, sizeof(uint32_t));
  inEdgeRestMem = (uint8_t**) calloc(TinselMaxThreads, sizeof(uint8_t*));
  inEdgeRestMemSize = (uint32_t*) calloc(TinselMaxThreads, sizeof(uint32_t));
  inEdgeRestMemBase = (uint32_t*) calloc(TinselMaxThreads, sizeof(uint32_t));
  outEdgeMem = (uint8_t**) calloc(TinselMaxThreads, sizeof(uint8_t*));
  outEdgeMemSize = (uint32_t*) calloc(TinselMaxThreads, sizeof(uint32_t));
  outEdgeMemBase = (uint32_t*) calloc(TinselMaxThreads, sizeof(uint32_t));
  // Compute partition sizes for each thread
  for (uint32_t threadId = 0; threadId < TinselMaxThreads; threadId++) {
    // These variables count the size of the *initialised* portion of
    // each partition. The total partition size is larger as it
    // includes uninitialised portions.
    uint32_t sizeVMem = 0;
    uint32_t sizeEIHeaderMem = 0;
    uint32_t sizeEIRestMem = 0;
    uint32_t sizeEOMem = 0;
    uint32_t sizeTMem = 0;
    // Add space for thread structure (always stored in SRAM)
    sizeTMem = cacheAlign(sizeof(PThread<DeviceType, S, E, M>));
    // Add space for devices
    uint32_t numDevs = numDevicesOnThread[threadId];
    for (uint32_t devNum = 0; devNum < numDevs; devNum++) {
      // Add space for device
      sizeVMem = sizeVMem + sizeof(PState<S>);
    }
    // Add space for incoming edge tables
    if (inTableHeaders[threadId]) {
      sizeEIHeaderMem = inTableHeaders[threadId]->numElems *
        sizeof(PInHeader<E>);
      sizeEIHeaderMem = wordAlign(sizeEIHeaderMem);
    }
    if (inTableRest[threadId]) {
      sizeEIRestMem = inTableRest[threadId]->numElems * sizeof(PInEdge<E>);
      sizeEIRestMem = wordAlign(sizeEIRestMem);
    }
    // Add space for outgoing edge table
    for (uint32_t devNum = 0; devNum < numDevs; devNum++) {
      PDeviceId id = fromDeviceAddr[threadId][devNum];
      for (uint32_t p = 0; p < POLITE_NUM_PINS; p++) {
        Seq<POutEdge>* edges = outTable[id][p];
        sizeEOMem += sizeof(POutEdge) * edges->numElems;
      }
    }
    sizeEOMem = wordAlign(sizeEOMem);
    // The total partition size including uninitialised portions
    // (the senders array of local device ids lives after the states)
    uint32_t totalSizeVMem =
      sizeVMem + wordAlign(sizeof(PLocalDeviceId) * numDevs);
    // Check that total size is reasonable
    uint32_t totalSizeSRAM = sizeTMem;
    uint32_t totalSizeDRAM = 0;
    if (mapVerticesToDRAM) totalSizeDRAM += totalSizeVMem;
    else totalSizeSRAM += totalSizeVMem;
    if (mapInEdgeHeadersToDRAM) totalSizeDRAM += sizeEIHeaderMem;
    else totalSizeSRAM += sizeEIHeaderMem;
    if (mapInEdgeRestToDRAM) totalSizeDRAM += sizeEIRestMem;
    else totalSizeSRAM += sizeEIRestMem;
    if (mapOutEdgesToDRAM) totalSizeDRAM += sizeEOMem;
    else totalSizeSRAM += sizeEOMem;
    if (totalSizeDRAM > maxDRAMSize) {
      printf("Error: max DRAM partition size exceeded\n");
      exit(EXIT_FAILURE);
    }
    if (totalSizeSRAM > maxSRAMSize) {
      printf("Error: max SRAM partition size exceeded\n");
      exit(EXIT_FAILURE);
    }
    // Allocate space for the initialised portion of the partition
    // (all region sizes must be word-aligned by this point)
    assert((sizeVMem%4) == 0);
    assert((sizeTMem%4) == 0);
    assert((sizeEIHeaderMem%4) == 0);
    assert((sizeEIRestMem%4) == 0);
    assert((sizeEOMem%4) == 0);
    vertexMem[threadId] = (uint8_t*) calloc(sizeVMem, 1);
    vertexMemSize[threadId] = sizeVMem;
    threadMem[threadId] = (uint8_t*) calloc(sizeTMem, 1);
    threadMemSize[threadId] = sizeTMem;
    inEdgeHeaderMem[threadId] = (uint8_t*) calloc(sizeEIHeaderMem, 1);
    inEdgeHeaderMemSize[threadId] = sizeEIHeaderMem;
    inEdgeRestMem[threadId] = (uint8_t*) calloc(sizeEIRestMem, 1);
    inEdgeRestMemSize[threadId] = sizeEIRestMem;
    outEdgeMem[threadId] = (uint8_t*) calloc(sizeEOMem, 1);
    outEdgeMemSize[threadId] = sizeEOMem;
    // Tinsel address of base of partition
    uint32_t partId = threadId & (TinselThreadsPerDRAM-1);
    uint32_t sramBase = (1 << TinselLogBytesPerSRAM) +
        (partId << TinselLogBytesPerSRAMPartition);
    uint32_t dramBase = TinselBytesPerDRAM -
        ((partId+1) << TinselLogBytesPerDRAMPartition);
    // Use partition-interleaved region for DRAM
    dramBase |= 0x80000000;
    // Thread structure is always first in SRAM
    threadMemBase[threadId] = sramBase;
    sramBase += threadMemSize[threadId];
    // Determine base addresses of each region, bumping the SRAM or
    // DRAM allocation pointer according to the mapping flags
    if (mapVerticesToDRAM) {
      vertexMemBase[threadId] = dramBase;
      dramBase += totalSizeVMem;
    }
    else {
      vertexMemBase[threadId] = sramBase;
      sramBase += totalSizeVMem;
    }
    if (mapInEdgeHeadersToDRAM) {
      inEdgeHeaderMemBase[threadId] = dramBase;
      dramBase += sizeEIHeaderMem;
    }
    else {
      inEdgeHeaderMemBase[threadId] = sramBase;
      sramBase += sizeEIHeaderMem;
    }
    if (mapInEdgeRestToDRAM) {
      inEdgeRestMemBase[threadId] = dramBase;
      dramBase += sizeEIRestMem;
    }
    else {
      inEdgeRestMemBase[threadId] = sramBase;
      sramBase += sizeEIRestMem;
    }
    if (mapOutEdgesToDRAM) {
      outEdgeMemBase[threadId] = dramBase;
      dramBase += sizeEOMem;
    }
    else {
      outEdgeMemBase[threadId] = sramBase;
      sramBase += sizeEOMem;
    }
  }
}
// Initialise partitions
// (Fills the host-side shadow buffers allocated by allocatePartitions:
// thread structure fields, device state layout, and the flattened
// in/out edge tables. Sizes are cross-checked against the
// pre-computed partition sizes at the end)
void initialisePartitions() {
  for (uint32_t threadId = 0; threadId < TinselMaxThreads; threadId++) {
    // Next pointers for each partition
    uint32_t nextVMem = 0;
    uint32_t nextOutIndex = 0;
    // Pointer to thread structure
    PThread<DeviceType, S, E, M>* thread =
      (PThread<DeviceType, S, E, M>*) &threadMem[threadId][0];
    // Set number of devices on thread
    thread->numDevices = numDevicesOnThread[threadId];
    // Set number of devices in graph
    thread->numVertices = numDevices;
    // Set tinsel address of array of device states
    thread->devices = vertexMemBase[threadId];
    // Set tinsel address of base of edge tables
    thread->outTableBase = outEdgeMemBase[threadId];
    thread->inTableHeaderBase = inEdgeHeaderMemBase[threadId];
    thread->inTableRestBase = inEdgeRestMemBase[threadId];
    // Add space for each device on thread
    uint32_t numDevs = numDevicesOnThread[threadId];
    for (uint32_t devNum = 0; devNum < numDevs; devNum++) {
      PState<S>* dev = (PState<S>*) &vertexMem[threadId][nextVMem];
      PDeviceId id = fromDeviceAddr[threadId][devNum];
      // Record host-side pointer to this device's state
      devices[id] = dev;
      // Add space for device
      nextVMem = nextVMem + sizeof(PState<S>);
    }
    // Initialise each device and the thread's out edges
    // (each pin's edges are appended contiguously; pinBase records
    // where each pin's list starts)
    for (uint32_t devNum = 0; devNum < numDevs; devNum++) {
      PDeviceId id = fromDeviceAddr[threadId][devNum];
      PState<S>* dev = devices[id];
      // Initialise
      POutEdge* outEdgeArray = (POutEdge*) outEdgeMem[threadId];
      for (uint32_t p = 0; p < POLITE_NUM_PINS; p++) {
        dev->pinBase[p] = nextOutIndex;
        Seq<POutEdge>* edges = outTable[id][p];
        for (uint32_t i = 0; i < edges->numElems; i++) {
          outEdgeArray[nextOutIndex] = edges->elems[i];
          nextOutIndex++;
        }
      }
    }
    // Initialise thread's in edges
    PInHeader<E>* inEdgeHeaderArray =
      (PInHeader<E>*) inEdgeHeaderMem[threadId];
    Seq<PInHeader<E>>* headers = inTableHeaders[threadId];
    if (headers)
      for (uint32_t i = 0; i < headers->numElems; i++) {
        inEdgeHeaderArray[i] = headers->elems[i];
      }
    PInEdge<E>* inEdgeRestArray = (PInEdge<E>*) inEdgeRestMem[threadId];
    Seq<PInEdge<E>>* edges = inTableRest[threadId];
    if (edges)
      for (uint32_t i = 0; i < edges->numElems; i++) {
        inEdgeRestArray[i] = edges->elems[i];
      }
    // At this point, check that next pointers line up with heap sizes
    if (nextVMem != vertexMemSize[threadId]) {
      printf("Error: vertex mem size does not match pre-computed size\n");
      exit(EXIT_FAILURE);
    }
    if ((nextOutIndex * sizeof(POutEdge)) != outEdgeMemSize[threadId]) {
      printf("Error: out edge mem size does not match pre-computed size\n");
      exit(EXIT_FAILURE);
    }
    // Set tinsel address of senders array (placed just after the
    // device states in the vertex partition)
    thread->senders = vertexMemBase[threadId] + nextVMem;
  }
}
// Allocate the device <-> thread-address mapping structures
// (All buffers are zero-initialised)
void allocateMapping() {
  numDevicesOnThread = (uint32_t*) calloc(TinselMaxThreads, sizeof(uint32_t));
  fromDeviceAddr = (PDeviceId**) calloc(TinselMaxThreads, sizeof(PDeviceId*));
  toDeviceAddr = (PDeviceAddr*) calloc(numDevices, sizeof(PDeviceAddr));
  devices = (PState<S>**) calloc(numDevices, sizeof(PState<S>*));
}
// Allocate routing tables
// (Only valid after mapper is called)
void allocateRoutingTables() {
  // Receiver-side tables: headers, overflow, and key bitmaps,
  // allocated only for threads that host at least one device
  inTableHeaders = (Seq<PInHeader<E>>**)
    calloc(TinselMaxThreads,sizeof(Seq<PInHeader<E>>*));
  inTableRest = (Seq<PInEdge<E>>**)
    calloc(TinselMaxThreads,sizeof(Seq<PInEdge<E>>*));
  inTableBitmaps = (Bitmap**) calloc(TinselMaxThreads,sizeof(Bitmap*));
  for (uint32_t t = 0; t < TinselMaxThreads; t++) {
    if (numDevicesOnThread[t] == 0) continue;
    inTableHeaders[t] = new SmallSeq<PInHeader<E>>;
    inTableRest[t] = new SmallSeq<PInEdge<E>>;
    inTableBitmaps[t] = new Bitmap;
  }
  // Sender-side tables: one edge sequence per (device, pin) pair
  outTable = (Seq<POutEdge>***) calloc(numDevices, sizeof(Seq<POutEdge>**));
  for (uint32_t d = 0; d < numDevices; d++) {
    outTable[d] = (Seq<POutEdge>**)
      calloc(POLITE_NUM_PINS, sizeof(Seq<POutEdge>*));
    for (uint32_t p = 0; p < POLITE_NUM_PINS; p++)
      outTable[d][p] = new SmallSeq<POutEdge>;
  }
}
// Determine local-multicast routing key for given set of receivers
// (The key must be the same for all receivers: every receiving thread
// indexes its in-table with it, so a bit position free in *all* the
// receivers' bitmaps must be found)
uint32_t findKey(uint32_t numGroups) {
  // Fast path (single receiver): any free bit in that thread's bitmap
  if (numGroups == 1) {
    Bitmap* bm = inTableBitmaps[groups[0].threadId];
    return bm->grabNextBit();
  }
  // Determine starting index for key search
  // (no word below the largest firstFree can be free in every bitmap)
  uint32_t index = 0;
  for (uint32_t i = 0; i < numGroups; i++) {
    PReceiverGroup<E>* g = &groups[i];
    Bitmap* bm = inTableBitmaps[g->threadId];
    if (bm->firstFree > index) index = bm->firstFree;
  }
  // Find key that is available for all receivers:
  // OR together each thread's 64-bit word at this index; a zero bit in
  // the result is free everywhere. If the combined word saturates,
  // advance to the next word and retry.
  uint64_t mask;
  retry:
  mask = 0ul;
  for (uint32_t i = 0; i < numGroups; i++) {
    PReceiverGroup<E>* g = &groups[i];
    Bitmap* bm = inTableBitmaps[g->threadId];
    mask |= bm->getWord(index);
    if (~mask == 0ul) { index++; goto retry; }
  }
  // Mark key as taken in each bitmap
  uint32_t bit = __builtin_ctzll(~mask);
  for (uint32_t i = 0; i < numGroups; i++) {
    PReceiverGroup<E>* g = &groups[i];
    Bitmap* bm = inTableBitmaps[g->threadId];
    bm->setBit(index, bit);
  }
  return 64*index + bit;
}
// Add entries to the input tables for the given receivers
// (Only valid after mapper is called. Allocates a 16-bit routing key
// shared by all receiver groups; each receiving thread's header entry
// at that key holds up to POLITE_EDGES_PER_HEADER edges, with any
// overflow appended to the thread's "rest" table)
uint32_t addInTableEntries(uint32_t numGroups) {
  uint32_t key = findKey(numGroups);
  if (key >= 0xffff) {
    printf("Routing key exceeds 16 bits\n");
    exit(EXIT_FAILURE);
  }
  // Populate inTableHeaders and inTableRest using the key
  for (uint32_t i = 0; i < numGroups; i++) {
    PReceiverGroup<E>* g = &groups[i];
    uint32_t numEdges = g->receivers.numElems;
    PInEdge<E>* edgePtr = g->receivers.elems;
    if (numEdges > 0) {
      // Determine thread id of receiver
      uint32_t t = g->threadId;
      // Extend table so the key is a valid index
      Seq<PInHeader<E>>* headers = inTableHeaders[t];
      if (key >= headers->numElems)
        headers->extendBy(key + 1 - headers->numElems);
      // Fill in header
      PInHeader<E>* header = &inTableHeaders[t]->elems[key];
      header->numReceivers = numEdges;
      if (inTableRest[t]->numElems > 0xffff) {
        printf("In-table index exceeds 16 bits\n");
        exit(EXIT_FAILURE);
      }
      header->restIndex = inTableRest[t]->numElems;
      // As many edges as fit go in the header itself
      uint32_t numHeaderEdges = numEdges < POLITE_EDGES_PER_HEADER ?
        numEdges : POLITE_EDGES_PER_HEADER;
      for (uint32_t j = 0; j < numHeaderEdges; j++) {
        header->edges[j] = *edgePtr;
        edgePtr++;
      }
      numEdges -= numHeaderEdges;
      // Overflow into rest memory if header not big enough
      for (uint32_t j = 0; j < numEdges; j++) {
        inTableRest[t]->append(*edgePtr);
        edgePtr++;
      }
    }
  }
  return key;
}
// Split edge list into board-local and non-board-local destinations
// And sort each list by destination thread id
// (Only valid after mapper is called. The sort groups destinations on
// the same thread/mailbox together, which computeTables relies on)
void splitDests(PDeviceId devId, PinId pinId,
    Seq<PEdgeDest>* local, Seq<PEdgeDest>* nonLocal) {
  local->clear();
  nonLocal->clear();
  PDeviceAddr devAddr = toDeviceAddr[devId];
  // Board hosting the sending device
  uint32_t devBoard = getThreadId(devAddr) >> TinselLogThreadsPerBoard;
  // Split destinations into local/non-local
  // (only edges on the requested pin are considered)
  Seq<PDeviceId>* dests = graph.outgoing->elems[devId];
  Seq<PinId>* pinIds = graph.pins->elems[devId];
  for (uint32_t d = 0; d < dests->numElems; d++) {
    if (pinIds->elems[d] == pinId) {
      PEdgeDest e;
      e.index = d;
      e.dest = dests->elems[d];
      e.addr = toDeviceAddr[e.dest];
      uint32_t destBoard = getThreadId(e.addr) >> TinselLogThreadsPerBoard;
      if (devBoard == destBoard)
        local->append(e);
      else
        nonLocal->append(e);
    }
  }
  // Sort local list
  qsort(local->elems, local->numElems, sizeof(PEdgeDest), cmpEdgeDest);
  // Sort non-local list
  qsort(nonLocal->elems, nonLocal->numElems, sizeof(PEdgeDest), cmpEdgeDest);
}
// Compute table updates for destinations for given device
// (Only valid after mapper is called. dests must be sorted by thread
// id -- see splitDests -- so edges targetting the same mailbox and
// thread are consecutive. For each mailbox: accumulate per-thread
// receiver groups, allocate an in-table key for them, and emit one
// multicast routing destination to out)
void computeTables(Seq<PEdgeDest>* dests, uint32_t d,
    Seq<PRoutingDest>* out) {
  out->clear();
  uint32_t index = 0;
  while (index < dests->numElems) {
    // New set of receiver groups on same mailbox
    uint32_t threadMaskLow = 0;
    uint32_t threadMaskHigh = 0;
    uint32_t nextGroup = 0;
    // Current mailbox & thread being considered
    PDeviceAddr mbox = getThreadId(dests->elems[index].addr) >>
      TinselLogThreadsPerMailbox;
    uint32_t thread = getThreadId(dests->elems[index].addr) &
      ((1<<TinselLogThreadsPerMailbox)-1);
    // Determine edges targetting same mailbox
    while (index < dests->numElems) {
      PEdgeDest* edge = &dests->elems[index];
      // Determine destination mailbox address and mailbox-local thread
      uint32_t destMailbox = getThreadId(edge->addr) >>
        TinselLogThreadsPerMailbox;
      uint32_t destThread = getThreadId(edge->addr) &
        ((1<<TinselLogThreadsPerMailbox)-1);
      // Does destination match current destination?
      if (destMailbox == mbox) {
        if (destThread == thread) {
          // Add to current receiver group
          PInEdge<E> in;
          in.devId = getLocalDeviceId(edge->addr);
          // Copy the edge label, unless E is the empty type None
          Seq<E>* edges = edgeLabels.elems[d];
          if (! std::is_same<E, None>::value)
            in.edge = edges->elems[edge->index];
          // Update current receiver group
          groups[nextGroup].receivers.append(in);
          groups[nextGroup].threadId = getThreadId(edge->addr);
          // Record the mailbox-local thread in the 64-bit thread mask
          if (thread < 32) threadMaskLow |= 1 << thread;
          if (thread >= 32) threadMaskHigh |= 1 << (thread-32);
          index++;
        }
        else {
          // Start new receiver group
          thread = destThread;
          nextGroup++;
          assert(nextGroup < TinselThreadsPerMailbox);
        }
      }
      else break;
    }
    // Add input table entries
    uint32_t key = addInTableEntries(nextGroup+1);
    // Add output entry
    PRoutingDest dest;
    dest.kind = PRDestKindMRM;
    dest.mbox = mbox;
    dest.mrm.key = key;
    dest.mrm.threadMaskLow = threadMaskLow;
    dest.mrm.threadMaskHigh = threadMaskHigh;
    out->append(dest);
    // Clear receiver groups, for a new iteration
    for (uint32_t i = 0; i <= nextGroup; i++) groups[i].receivers.clear();
  }
}
// Compute routing tables
// (Only valid after mapper is called. For every (device, pin) pair:
// board-local destinations become MRM entries appended directly to the
// device's out-table, while non-local destinations are handled by the
// per-board programmable routers, reached via a single routing-key
// entry)
void computeRoutingTables() {
  // Edge destinations (local to sender board, or not)
  Seq<PEdgeDest> local;
  Seq<PEdgeDest> nonLocal;
  // Routing destinations
  Seq<PRoutingDest> dests;
  // Allocate per-board programmable routing tables
  progRouterTables = new ProgRouterMesh(numBoardsX, numBoardsY);
  // For each device
  for (uint32_t d = 0; d < numDevices; d++) {
    // For each pin
    for (uint32_t p = 0; p < POLITE_NUM_PINS; p++) {
      // Split edge lists into local/non-local and sort by target thread id
      splitDests(d, p, &local, &nonLocal);
      // Deal with board-local connections
      computeTables(&local, d, &dests);
      for (uint32_t i = 0; i < dests.numElems; i++) {
        PRoutingDest dest = dests.elems[i];
        POutEdge edge;
        edge.mbox = dest.mbox;
        edge.key = dest.mrm.key;
        edge.threadMaskLow = dest.mrm.threadMaskLow;
        edge.threadMaskHigh = dest.mrm.threadMaskHigh;
        outTable[d][p]->append(edge);
      }
      // Deal with non-board-local connections
      computeTables(&nonLocal, d, &dests);
      uint32_t src = getThreadId(toDeviceAddr[d]) >>
        TinselLogThreadsPerMailbox;
      uint32_t key = progRouterTables->addDestsFromBoard(src, &dests);
      POutEdge edge;
      edge.mbox = tinselUseRoutingKey();
      edge.key = 0;
      edge.threadMaskLow = key;
      edge.threadMaskHigh = 0;
      outTable[d][p]->append(edge);
      // Add output list terminator
      // (Only the key field is set here; NOTE(review): presumably the
      // consumer stops at InvalidKey before reading the remaining,
      // uninitialised fields -- confirm)
      POutEdge term;
      term.key = InvalidKey;
      outTable[d][p]->append(term);
    }
  }
}
// Release all structures
// (Now idempotent: every freed/deleted pointer is reset to NULL so a
// repeated call -- e.g. map() followed by the destructor, or map()
// aborting midway -- cannot double-free. Previously devices, the
// per-thread buffers and progRouterTables were left dangling)
void releaseAll() {
  if (devices != NULL) {
    free(devices); devices = NULL;
    free(toDeviceAddr); toDeviceAddr = NULL;
    free(numDevicesOnThread); numDevicesOnThread = NULL;
    for (uint32_t t = 0; t < TinselMaxThreads; t++)
      if (fromDeviceAddr[t] != NULL) free(fromDeviceAddr[t]);
    free(fromDeviceAddr); fromDeviceAddr = NULL;
    for (uint32_t t = 0; t < TinselMaxThreads; t++)
      if (vertexMem[t] != NULL) free(vertexMem[t]);
    free(vertexMem); vertexMem = NULL;
    free(vertexMemSize); vertexMemSize = NULL;
    free(vertexMemBase); vertexMemBase = NULL;
    for (uint32_t t = 0; t < TinselMaxThreads; t++)
      if (threadMem[t] != NULL) free(threadMem[t]);
    free(threadMem); threadMem = NULL;
    free(threadMemSize); threadMemSize = NULL;
    free(threadMemBase); threadMemBase = NULL;
    for (uint32_t t = 0; t < TinselMaxThreads; t++)
      if (inEdgeHeaderMem[t] != NULL) free(inEdgeHeaderMem[t]);
    free(inEdgeHeaderMem); inEdgeHeaderMem = NULL;
    free(inEdgeHeaderMemSize); inEdgeHeaderMemSize = NULL;
    free(inEdgeHeaderMemBase); inEdgeHeaderMemBase = NULL;
    for (uint32_t t = 0; t < TinselMaxThreads; t++)
      if (inEdgeRestMem[t] != NULL) free(inEdgeRestMem[t]);
    free(inEdgeRestMem); inEdgeRestMem = NULL;
    free(inEdgeRestMemSize); inEdgeRestMemSize = NULL;
    free(inEdgeRestMemBase); inEdgeRestMemBase = NULL;
    for (uint32_t t = 0; t < TinselMaxThreads; t++)
      if (outEdgeMem[t] != NULL) free(outEdgeMem[t]);
    free(outEdgeMem); outEdgeMem = NULL;
    free(outEdgeMemSize); outEdgeMemSize = NULL;
    free(outEdgeMemBase); outEdgeMemBase = NULL;
  }
  if (inTableHeaders != NULL) {
    for (uint32_t t = 0; t < TinselMaxThreads; t++)
      if (inTableHeaders[t] != NULL) delete inTableHeaders[t];
    free(inTableHeaders);
    inTableHeaders = NULL;
  }
  if (inTableRest != NULL) {
    for (uint32_t t = 0; t < TinselMaxThreads; t++)
      if (inTableRest[t] != NULL) delete inTableRest[t];
    free(inTableRest);
    inTableRest = NULL;
  }
  if (inTableBitmaps != NULL) {
    for (uint32_t t = 0; t < TinselMaxThreads; t++)
      if (inTableBitmaps[t] != NULL) delete inTableBitmaps[t];
    free(inTableBitmaps);
    inTableBitmaps = NULL;
  }
  if (outTable != NULL) {
    for (uint32_t d = 0; d < numDevices; d++) {
      if (outTable[d] == NULL) continue;
      for (uint32_t p = 0; p < POLITE_NUM_PINS; p++)
        delete outTable[d][p];
      free(outTable[d]);
    }
    free(outTable);
    outTable = NULL;
  }
  if (progRouterTables != NULL) {
    delete progRouterTables;
    progRouterTables = NULL;
  }
}
// Implement mapping to tinsel threads
// (Pipeline: release old state -> partition & place the graph onto
// boards, then mailboxes, then threads -> build routing tables ->
// allocate and initialise the per-thread heap images. Timing of each
// stage is reported when POLITE_CHATTY is set)
void map() {
  // Let's measure some times
  struct timeval placementStart, placementFinish;
  struct timeval routingStart, routingFinish;
  struct timeval initStart, initFinish;
  // Release all mapping and heap structures
  releaseAll();
  // Reallocate mapping structures
  allocateMapping();
  // Start placement timer
  gettimeofday(&placementStart, NULL);
  // Partition into subgraphs, one per board
  Placer boards(&graph, numBoardsX, numBoardsY);
  // Place subgraphs onto 2D mesh
  const uint32_t placerEffort = 8;
  boards.place(placerEffort);
  // For each board
  // (boards are placed independently, so the 2D nest is parallelised)
  #pragma omp parallel for collapse(2)
  for (uint32_t boardY = 0; boardY < numBoardsY; boardY++) {
    for (uint32_t boardX = 0; boardX < numBoardsX; boardX++) {
      // Partition into subgraphs, one per mailbox
      PartitionId b = boards.mapping[boardY][boardX];
      Placer boxes(&boards.subgraphs[b],
        TinselMailboxMeshXLen, TinselMailboxMeshYLen);
      boxes.place(placerEffort);
      // For each mailbox
      for (uint32_t boxX = 0; boxX < TinselMailboxMeshXLen; boxX++) {
        for (uint32_t boxY = 0; boxY < TinselMailboxMeshYLen; boxY++) {
          // Partition into subgraphs, one per thread
          uint32_t numThreads = 1<<TinselLogThreadsPerMailbox;
          PartitionId t = boxes.mapping[boxY][boxX];
          Placer threads(&boxes.subgraphs[t], numThreads, 1);
          // For each thread
          for (uint32_t threadNum = 0; threadNum < numThreads; threadNum++) {
            // Determine tinsel thread id by concatenating board,
            // mailbox, and thread coordinates
            uint32_t threadId = boardY;
            threadId = (threadId << TinselMeshXBits) | boardX;
            threadId = (threadId << TinselMailboxMeshYBits) | boxY;
            threadId = (threadId << TinselMailboxMeshXBits) | boxX;
            threadId = (threadId << (TinselLogCoresPerMailbox +
                TinselLogThreadsPerCore)) | threadNum;
            // Get subgraph
            Graph* g = &threads.subgraphs[threadNum];
            // Populate fromDeviceAddr mapping
            uint32_t numDevs = g->incoming->numElems;
            numDevicesOnThread[threadId] = numDevs;
            fromDeviceAddr[threadId] = (PDeviceId*)
              malloc(sizeof(PDeviceId) * numDevs);
            for (uint32_t devNum = 0; devNum < numDevs; devNum++)
              fromDeviceAddr[threadId][devNum] = g->labels->elems[devNum];
            // Populate toDeviceAddr mapping
            assert(numDevs < maxLocalDeviceId());
            for (uint32_t devNum = 0; devNum < numDevs; devNum++) {
              PDeviceAddr devAddr =
                makeDeviceAddr(threadId, devNum);
              toDeviceAddr[g->labels->elems[devNum]] = devAddr;
            }
          }
        }
      }
    }
  }
  // Stop placement timer and start routing timer
  gettimeofday(&placementFinish, NULL);
  gettimeofday(&routingStart, NULL);
  // Compute send and receive side routing tables
  allocateRoutingTables();
  computeRoutingTables();
  // Stop routing timer and start init timer
  gettimeofday(&routingFinish, NULL);
  gettimeofday(&initStart, NULL);
  // Reallocate and initialise heap structures
  allocatePartitions();
  initialisePartitions();
  // Display times, if chatty
  gettimeofday(&initFinish, NULL);
  if (chatty > 0) {
    struct timeval diff;
    timersub(&placementFinish, &placementStart, &diff);
    double duration = (double) diff.tv_sec +
      (double) diff.tv_usec / 1000000.0;
    printf("POLite mapper profile:\n");
    printf("  Partitioning and placement: %lfs\n", duration);
    timersub(&routingFinish, &routingStart, &diff);
    duration = (double) diff.tv_sec + (double) diff.tv_usec / 1000000.0;
    printf("  Routing table construction: %lfs\n", duration);
    timersub(&initFinish, &initStart, &diff);
    duration = (double) diff.tv_sec + (double) diff.tv_usec / 1000000.0;
    printf("  Thread state initialisation: %lfs\n", duration);
  }
}
// Default constructor
// (Mesh size derived from HOSTLINK_BOXES_X/Y environment variables,
// defaulting to a single box)
PGraph() {
  const char* sx = getenv("HOSTLINK_BOXES_X");
  const char* sy = getenv("HOSTLINK_BOXES_Y");
  int boxesX = sx ? atoi(sx) : 1;
  int boxesY = sy ? atoi(sy) : 1;
  constructor(boxesX * TinselMeshXLenWithinBox,
              boxesY * TinselMeshYLenWithinBox);
}
// Constructor with an explicit number of boxes
PGraph(uint32_t numBoxesX, uint32_t numBoxesY) {
  int meshX = numBoxesX * TinselMeshXLenWithinBox;
  int meshY = numBoxesY * TinselMeshYLenWithinBox;
  constructor(meshX, meshY);
}
// Destructor: release all mapping structures and edge-label sequences
~PGraph() {
  releaseAll();
  uint32_t numLabelSeqs = edgeLabels.numElems;
  for (uint32_t i = 0; i < numLabelSeqs; i++)
    delete edgeLabels.elems[i];
}
// Write partition to tinsel machine
// (heap/heapSize/heapBase are indexed by global thread id. Data is
// streamed round-robin over all cores, up to 15 words per store
// request, so no single core's FIFO saturates.
// NOTE(review): assumes each heapSize is a multiple of 4 bytes --
// the write count only advances in whole words; confirm)
void writeRAM(HostLink* hostLink,
    uint8_t** heap, uint32_t* heapSize, uint32_t* heapBase) {
  // Number of bytes written by each thread
  uint32_t* writeCount = (uint32_t*)
    calloc(TinselMaxThreads, sizeof(uint32_t));
  // Number of threads completed by each core
  uint32_t*** threadCount = (uint32_t***)
    calloc(meshLenX, sizeof(uint32_t**));
  for (uint32_t x = 0; x < meshLenX; x++) {
    threadCount[x] = (uint32_t**)
      calloc(meshLenY, sizeof(uint32_t*));
    for (uint32_t y = 0; y < meshLenY; y++)
      threadCount[x][y] = (uint32_t*)
        calloc(TinselCoresPerBoard, sizeof(uint32_t));
  }
  // Initialise write addresses (each core starts at thread 0's base)
  for (int x = 0; x < meshLenX; x++)
    for (int y = 0; y < meshLenY; y++)
      for (int c = 0; c < TinselCoresPerBoard; c++)
        hostLink->setAddr(x, y, c, heapBase[hostLink->toAddr(x, y, c, 0)]);
  // Write heaps
  // (loop until every core has finished all of its threads)
  uint32_t done = false;
  while (! done) {
    done = true;
    for (int x = 0; x < meshLenX; x++) {
      for (int y = 0; y < meshLenY; y++) {
        for (int c = 0; c < TinselCoresPerBoard; c++) {
          uint32_t t = threadCount[x][y][c];
          if (t < TinselThreadsPerCore) {
            done = false;
            uint32_t threadId = hostLink->toAddr(x, y, c, t);
            uint32_t written = writeCount[threadId];
            if (written == heapSize[threadId]) {
              // This thread is finished; move to the core's next
              // thread and point the write address at its heap
              threadCount[x][y][c] = t+1;
              if ((t+1) < TinselThreadsPerCore)
                hostLink->setAddr(x, y, c,
                  heapBase[hostLink->toAddr(x, y, c, t+1)]);
            } else {
              // Send at most 15 words in one store request
              uint32_t send = min((heapSize[threadId] - written)>>2, 15);
              hostLink->store(x, y, c, send,
                (uint32_t*) &heap[threadId][written]);
              writeCount[threadId] = written + send * sizeof(uint32_t);
            }
          }
        }
      }
    }
  }
  // Release memory
  free(writeCount);
  for (uint32_t x = 0; x < meshLenX; x++) {
    for (uint32_t y = 0; y < meshLenY; y++)
      free(threadCount[x][y]);
    free(threadCount[x]);
  }
  free(threadCount);
}
// Write graph to tinsel machine
// (Uploads every per-thread memory region plus the programmable
// router tables over the given host link, buffering sends for
// throughput and restoring the link's previous buffering mode)
void write(HostLink* hostLink) {
  // Start timer
  struct timeval start, finish;
  gettimeofday(&start, NULL);
  bool useSendBufferOld = hostLink->useSendBuffer;
  hostLink->useSendBuffer = true;
  writeRAM(hostLink, vertexMem, vertexMemSize, vertexMemBase);
  writeRAM(hostLink, threadMem, threadMemSize, threadMemBase);
  writeRAM(hostLink, inEdgeHeaderMem,
             inEdgeHeaderMemSize, inEdgeHeaderMemBase);
  writeRAM(hostLink, inEdgeRestMem, inEdgeRestMemSize, inEdgeRestMemBase);
  writeRAM(hostLink, outEdgeMem, outEdgeMemSize, outEdgeMemBase);
  progRouterTables->write(hostLink);
  hostLink->flush();
  hostLink->useSendBuffer = useSendBufferOld;
  // Display time if chatty
  gettimeofday(&finish, NULL);
  if (chatty > 0) {
    struct timeval diff;
    timersub(&finish, &start, &diff);
    double duration = (double) diff.tv_sec +
      (double) diff.tv_usec / 1000000.0;
    printf("POLite graph upload time: %lfs\n", duration);
  }
}
// Fan-in (number of incoming edges) of the given device
uint32_t fanIn(PDeviceId id) { return graph.fanIn(id); }
// Fan-out (number of outgoing edges) of the given device
uint32_t fanOut(PDeviceId id) { return graph.fanOut(id); }
};
// Read performance stats and store in file
// (Compiles to a no-op unless built with -DPOLITE_DUMP_STATS. The
// number of lines read from the machine covers one line per cache,
// one per core, and -- with -DPOLITE_COUNT_MSGS -- one per thread)
inline void politeSaveStats(HostLink* hostLink, const char* filename) {
#ifdef POLITE_DUMP_STATS
  // Open file for performance counters
  FILE* statsFile = fopen(filename, "wt");
  if (statsFile == NULL) {
    printf("Error creating stats file\n");
    exit(EXIT_FAILURE);
  }
  uint32_t meshLenX = hostLink->meshXLen;
  uint32_t meshLenY = hostLink->meshYLen;
  // Number of caches
  uint32_t numLines = meshLenX * meshLenY *
    TinselDCachesPerDRAM * TinselDRAMsPerBoard;
  // Add on number of cores
  numLines += meshLenX * meshLenY * TinselCoresPerBoard;
  // Add on number of threads
#ifdef POLITE_COUNT_MSGS
  numLines += meshLenX * meshLenY * TinselThreadsPerBoard;
#endif
  hostLink->dumpStdOut(statsFile, numLines);
  fclose(statsFile);
#endif
}
#endif
|
PoW.c | /* Copyright 2016-2018 The Ulord Core Foundation */
#include "PoW.h"
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
// #include <omp.h>
#include "my_time.h"
#include "common.h"
#include "my_rand48_r.h"
#include "oneWayFunction.h"
/*
 * Step 1: Initialize working memory.
 *
 * Fills the buffer at Maddr (WORK_MEMORY_SIZE bytes) in blocks of K
 * 32-byte entries. The first entry of each block is the current state
 * of a one-way-function chain seeded from the input; the remaining
 * K-1 entries come from rand48 streams seeded by that state, each
 * rotated and XOR-folded back into the chain state.
 *
 * Cleanup vs. previous revision (behavior unchanged): removed the
 * unused locals `j` and `result[OUTPUT_LEN]`, and the inner
 * `shift_num` declarations that shadowed the outer one.
 *
 * NOTE(review): K is assumed to divide WORK_MEMORY_SIZE/32 evenly --
 * not checked here; confirm at call sites.
 */
void initWorkMemory(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, const uint32_t K) {
    uint32_t i, k;
#ifdef _MSC_VER
    __declspec(align(16)) uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN], a_rrs[OUTPUT_LEN], b_rrs[OUTPUT_LEN];
#else
    __attribute__((aligned(16))) uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN], a_rrs[OUTPUT_LEN], b_rrs[OUTPUT_LEN];
#endif
    uint64_t *a_u64 = (uint64_t *)a, *b_u64 = (uint64_t *)b;
    /* Initial chain state: hash the input with one-way function 0 */
    funcInfor[0].func(input, inputLen, a);
    uint64_t randSeed[4] = {0, 0, 0, 0};
    /* Number of 32-byte entries in the work memory */
    const uint32_t iterNum = WORK_MEMORY_SIZE >> 5;
    for (i = 0; i < iterNum; i += K) {
        uint8_t t = 0, shift_num = 0;
        /* Pick the next one-way function (4-bit index) from the state */
        reduce_bit(a, 32, (uint8_t *)&t, 8);
        t = (t & 0x0f) ^ (t >> 4);
        shift_num = reduce32_8bits(i);
        /* Rotate the state right before hashing it */
        rrs_32Byte_256(a, a_rrs, shift_num);
        funcInfor[t].func(a_rrs, 32, a);
        /* Derive four 48-bit rand48 seeds from the new state */
        reduce_bit(a, 8, (uint8_t *)&randSeed[0], 48);
        reduce_bit(a + 8, 8, (uint8_t *)&randSeed[1], 48);
        reduce_bit(a + 16, 8, (uint8_t *)&randSeed[2], 48);
        reduce_bit(a + 24, 8, (uint8_t *)&randSeed[3], 48);
#ifndef SSE_VERSION
        struct my_rand48_data randBuffer[4];
        my_seed48_r(randSeed[0], &randBuffer[0]);
        my_seed48_r(randSeed[1], &randBuffer[1]);
        my_seed48_r(randSeed[2], &randBuffer[2]);
        my_seed48_r(randSeed[3], &randBuffer[3]);
#else
        struct vrand48_data randBuffer[2];
        vseed48(randSeed , &randBuffer[0]);
        vseed48(randSeed + 2, &randBuffer[1]);
#endif
        /* First entry of the block is the chain state itself */
        memcpy(Maddr + (i << 5), a, 32*sizeof(uint8_t));
        for (k = 1; k < K; ++k) {
#ifndef SSE_VERSION
            /* Fill b with 32 pseudo-random bytes */
            my_rand64_r(&randBuffer[0], &b_u64[0]);
            my_rand64_r(&randBuffer[1], &b_u64[1]);
            my_rand64_r(&randBuffer[2], &b_u64[2]);
            my_rand64_r(&randBuffer[3], &b_u64[3]);
            shift_num = reduce32_8bits(i + k);
            rrs_32Byte_256(b, b_rrs, shift_num);
            /* Fold the rotated entry back into the chain state */
            uint64_t *b_rrs_u64 = (uint64_t *)b_rrs;
            a_u64[0] ^= b_rrs_u64[0];
            a_u64[1] ^= b_rrs_u64[1];
            a_u64[2] ^= b_rrs_u64[2];
            a_u64[3] ^= b_rrs_u64[3];
#else
            vrand64(b, randBuffer);
            shift_num = reduce32_8bits(i + k);
            rrs_32Byte_256(b, b_rrs, shift_num);
            /* a ^= b_rrs, 16 aligned bytes at a time */
            __m128i val = _mm_load_si128((__m128i *)a);
            __m128i vah = _mm_load_si128((__m128i *)(a + 16));
            __m128i vresultl = _mm_load_si128((__m128i *)b_rrs);
            __m128i vresulth = _mm_load_si128((__m128i *)(b_rrs + 16));
            vresultl = _mm_xor_si128(val, vresultl);
            vresulth = _mm_xor_si128(vah, vresulth);
            _mm_store_si128((__m128i *)a, vresultl);
            _mm_store_si128((__m128i *)(a + 16), vresulth);
#endif
            memcpy(Maddr + ((i + k) << 5), b_rrs, OUTPUT_LEN*sizeof(uint8_t));
        }
    }
}
/*
 * Step 2: Modify the working memory contents.
 *
 * Performs C rounds of pseudo-random mixing over the work memory at
 * Maddr. Each round makes L*64 paired byte accesses at addresses
 * derived from a rand48 stream and a running accumulator r, then
 * refreshes the 32-byte state via a one-way function selected by r.
 * The state is XOR-accumulated into result (OUTPUT_LEN bytes) after
 * every round.
 */
void modifyWorkMemory(uint8_t *Maddr, const uint32_t L, const uint32_t C,
                      uint8_t *result) {
    uint32_t i, j;
#ifdef _MSC_VER
    __declspec(align(16)) uint8_t a[OUTPUT_LEN], b[64], a_rrs[OUTPUT_LEN];
#else
    __attribute__((aligned(16))) uint8_t a[OUTPUT_LEN], b[64], a_rrs[OUTPUT_LEN];
#endif
    uint64_t *a_u64 = (uint64_t *)a, *b_u64 = (uint64_t *)b;
    /* Seed the state from the last 32 bytes of the work memory */
    funcInfor[0].func(Maddr + WORK_MEMORY_SIZE - 32, 32, a);
    memcpy(result, a, OUTPUT_LEN*sizeof(uint8_t));
    uint64_t r = 0;
    reduce_bit(a, 32, (uint8_t *)&r, 64);
    const uint32_t iterNum = L << 6;
    for (i = 0; i < C; ++i) {
        /* Derive a fresh rand48 stream from the current state */
        uint64_t randSeed = 0;
        reduce_bit(a, 32, (uint8_t *)&randSeed, 48);
        struct my_rand48_data randBuffer;
        my_seed48_r(randSeed, &randBuffer);
        uint8_t t1, t2, s;
        uint64_t randNum = 0, base = 0;
        for (j = 0; j < iterNum; ++j) {
            my_rand48_r(&randBuffer, &randNum);
            base = randNum + r;
            /* Two data-dependent addresses straddling base */
            uint64_t offset = 0;
            offset = reduce64_8bits(r);
            offset = (offset << 8) + 1;
            uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) % WORK_MEMORY_SIZE;
            uint64_t addr2 = (base + offset) % WORK_MEMORY_SIZE;
            /* Swap the two bytes, masked with a state byte */
            t1 = Maddr[addr1];
            t2 = Maddr[addr2];
            s = a[j & 0x1f];
            Maddr[addr1] = t2 ^ s;
            Maddr[addr2] = t1 ^ s;
            b[j & 0x3f] = t1 ^ t2;
            /* Accumulator makes every access depend on all prior ones */
            r = r + s + t1 + t2;
        }
        /* Refresh state: pick one-way function from r, rotate, hash */
        uint8_t t = 0;
        t = reduce64_4bits(r);
        reduce_bit(b, 64, a, 256);
        uint8_t shift_num = 0;
        uint64_t ir = r + i;
        shift_num = reduce64_8bits(ir);
        rrs_32Byte_256(a, a_rrs, shift_num);
        funcInfor[t].func(a_rrs, 32, a);
        /* XOR the new state into the running digest */
        uint64_t *result_u64 = (uint64_t *)result;
        result_u64[0] ^= a_u64[0];
        result_u64[1] ^= a_u64[1];
        result_u64[2] ^= a_u64[2];
        result_u64[3] ^= a_u64[3];
    }
}
/*
 * Step 3: Calculate the final result.
 *
 * Sweeps the work memory in 32-byte chunks, XOR-folding chunks into the
 * running result. The number of chunks consumed per step (`d`, up to 2^D)
 * and the rotation amounts are data-dependent, so the access pattern cannot
 * be predicted without computing the digest. Terminates when the sweep
 * reaches the last chunk, hashing once more into `output`.
 *
 * Maddr  - work memory of WORK_MEMORY_SIZE bytes (read only here).
 * c      - OUTPUT_LEN-byte initial digest (input).
 * D      - bit-width used to derive the per-step chunk count d (d in [1, 2^D]).
 * output - OUTPUT_LEN-byte final digest (written).
 */
void calculateFinalResult(uint8_t *Maddr, uint8_t *c, const uint32_t D, uint8_t *output) {
    /* NOTE(review): k is declared but never used in this function. */
    uint32_t i = 0, j = 0, k = 0;
#ifdef _MSC_VER
    __declspec(align(16)) uint8_t result[OUTPUT_LEN], result_rrs[OUTPUT_LEN];
#else
    __attribute__((aligned(16))) uint8_t result[OUTPUT_LEN], result_rrs[OUTPUT_LEN];
#endif
    /* 64-bit views for 32-byte XOR folding (relies on 16-byte alignment). */
    uint64_t *Maddr_u64 = (uint64_t *)Maddr;
    uint64_t *result_u64 = (uint64_t *)result;
    const uint32_t num = (WORK_MEMORY_SIZE >> 5) - 1;  // last 32-byte chunk index
    uint32_t it = 0;
    memcpy(result, c, OUTPUT_LEN * sizeof(uint8_t));
    while(1) {
        uint8_t t = 0, shift_num = 0;
        uint32_t d = 0;
        /* t: 4-bit one-way-function selector folded from the digest. */
        reduce_bit(result, 32, (uint8_t *)&t, 8);
        t = (t & 0x0f) ^ (t >> 4);
        /* d: how many chunks to fold this step (1 .. 2^D). */
        reduce_bit(result, 32, (uint8_t *)&d, D);
        ++d;
        for (j = 0; j < d; ++j) {
            /* Chunk i occupies Maddr_u64[4*i .. 4*i+3] (32 bytes). */
            uint32_t index = i << 2;
            result_u64[0] ^= Maddr_u64[index + 0];
            result_u64[1] ^= Maddr_u64[index + 1];
            result_u64[2] ^= Maddr_u64[index + 2];
            result_u64[3] ^= Maddr_u64[index + 3];
            ++i;
            if (i == num) {
                /* Reached the final chunk: rotate, hash with function 0,
                 * emit the digest and finish. */
                it = i + t;
                // reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8);
                shift_num = reduce32_8bits(it);
                // rrs(result, OUTPUT_LEN, result_rrs, shift_num);
                rrs_32Byte_256(result, result_rrs, shift_num);
                funcInfor[0].func(result_rrs, 32, result);
                memcpy(output, result, OUTPUT_LEN * sizeof(uint8_t));
                return;
            }
        }
        /* Rotate the digest and re-hash with the selected function t. */
        it = t + i;
        // reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8);
        shift_num = reduce32_8bits(it);
        // rrs(result, OUTPUT_LEN, result_rrs, shift_num);
        rrs_32Byte_256(result, result_rrs, shift_num);
        funcInfor[t].func(result_rrs, 32, result);
    }
}
/*
* Correctness & Performance test for Proof of work
*/
/*
void testPowFunction(uint8_t *mess, uint32_t messLen, const int64_t iterNum) {
int64_t j;
uint32_t inputLen = messLen;
uint8_t input[INPUT_LEN], output[OUTPUT_LEN];
memset(input, 0, INPUT_LEN*sizeof(uint8_t));
memcpy(input, mess, messLen*sizeof(char));
// Init all one-way function
initOneWayFunction();
uint8_t *Maddr = (uint8_t *)malloc(64 * WORK_MEMORY_SIZE*sizeof(uint8_t));
assert(NULL != Maddr);
memset(Maddr, 0, 64 * WORK_MEMORY_SIZE*sizeof(uint8_t));
printf("****************************** Correctness test (PoW function) ******************************\n");
printf("Test message: %s\n", mess);
powFunction(input, inputLen, Maddr, output);
view_data_u8("PoW", output, OUTPUT_LEN);
printf("*********************************************************************************************\n");
printf("*************************************************** Performance test (PoW function) ***************************************************\n");
uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t));
assert(NULL != result);
memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t));
uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64};
uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t);
printf(" %-18s", "Algorithm");
for (uint32_t ix = 0; ix < threadNumTypes; ++ix)
printf("%12d", threadNumArr[ix]);
printf("\n");
printf("00 %-18s\t", "PoW");
for (uint32_t ix = 0; ix < threadNumTypes; ++ix) {
omp_set_num_threads(threadNumArr[ix]);
double startTime = get_wall_time();
if (threadNumArr[ix] == 1) {
for (j = 0; j < iterNum; ++j) {
powFunction(input, inputLen, Maddr, result + j * OUTPUT_LEN);
}
} else {
#pragma omp parallel for firstprivate(input), private(j) shared(result)
for (j = 0; j < iterNum; ++j) {
powFunction(input, inputLen, Maddr + omp_get_thread_num() * WORK_MEMORY_SIZE, result + j * OUTPUT_LEN);
}
}
double endTime = get_wall_time();
double costTime = endTime - startTime;
printf("%5.0f bps ", iterNum / costTime); fflush(stdout);
// Check result
for (j = 0; j < iterNum; j += 1) {
if (memcmp(output, result + j * OUTPUT_LEN, OUTPUT_LEN)) {
printf("Thread num: %d, j: %ld\n", threadNumArr[ix], j);
view_data_u8("output", output, OUTPUT_LEN);
view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN);
abort();
}
}
}
printf("\n");
printf("***************************************************************************************************************************************\n");
if (NULL != result) {
free(result);
result = NULL;
}
if (NULL != Maddr) {
free(Maddr);
Maddr = NULL;
}
}
*/
/* Size of the digest accumulation buffer: 32 MiB, which holds exactly
 * 1Mi digests of OUTPUT_LEN (32) bytes — matches iterNum in powNistTest. */
#define OUTPUT_BUFFER_SIZE (32 * 1024UL * 1024UL)
/* Maximum length of a single test input message (bytes). */
#define MAX_TEST_INPUT_LEN 140
/* Maximum length of a generated output file name, including NUL. */
#define MAX_OUT_FILE_NAME_LEN 25
/* Fixed seed messages for the NIST-style randomness test runs. */
const char testInputCase[][MAX_TEST_INPUT_LEN] = {
    "",
    "HelloWorld",
    "0123456789"
};
/*
 * NIST-style randomness test vector generator.
 *
 * For each entry in testInputCase, iterates powFunction iterNum times,
 * feeding each OUTPUT_LEN-byte digest back as the next input, and writes
 * the concatenated 32 MiB digest stream to "<outFileName>-<ix>.txt".
 *
 * outFileName - output file name prefix; "-<caseIx>.txt" is appended.
 *               Must fit in MAX_OUT_FILE_NAME_LEN with the suffix.
 */
void powNistTest(const char *outFileName) {
    const uint64_t iterNum = 1024UL * 1024UL;   // digests per test case
    // const uint64_t iterNum = 1024UL;         // reduced count for debugging
    /* iterNum * OUTPUT_LEN == OUTPUT_BUFFER_SIZE, so the chain fills the
     * buffer exactly; the final powFunction call writes its last digest
     * ending at OUTPUT_BUFFER_SIZE. */
    uint8_t *outputBuffer = (uint8_t *)malloc(OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
    assert(NULL != outputBuffer);
    memset(outputBuffer, 0, OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
    uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE * sizeof(uint8_t));
    assert(NULL != Maddr);
    memset(Maddr, 0, WORK_MEMORY_SIZE * sizeof(uint8_t));
    initOneWayFunction();
    uint32_t testInputCaseNum = sizeof(testInputCase) / sizeof(const char [MAX_TEST_INPUT_LEN]);
    for (uint32_t testCaseIx = 0; testCaseIx < testInputCaseNum; ++testCaseIx) {
        char curOutFileName[MAX_OUT_FILE_NAME_LEN] = "";
        /* BUGFIX: the original sprintf could overflow the 25-byte name
         * buffer for long prefixes; snprintf bounds the write and we
         * fail loudly on truncation instead of corrupting the stack. */
        int nameLen = snprintf(curOutFileName, sizeof curOutFileName,
                               "%s-%u.txt", outFileName, testCaseIx);
        if (nameLen < 0 || (size_t)nameLen >= sizeof curOutFileName) {
            fprintf(stderr, "Error: Output file name too long: %s\n", outFileName);
            abort();
        }
        FILE *fp = NULL;
        if (NULL != (fp = fopen(curOutFileName, "wb"))) {
            const uint32_t testInputCaseLen = strlen((char *)testInputCase[testCaseIx]);
            uint8_t input[MAX_TEST_INPUT_LEN];
            memset(input, 0, MAX_TEST_INPUT_LEN * sizeof(uint8_t));
            memcpy(input, testInputCase[testCaseIx], testInputCaseLen * sizeof(uint8_t));
            double startTime = get_wall_time();
            /* First digest from the raw test message ... */
            powFunction(input, testInputCaseLen, Maddr, outputBuffer);
            /* ... then chain: digest i-1 becomes the input of digest i. */
            for (uint64_t i = 1, j = 0; i < iterNum; ++i) {
                /* BUGFIX: was OUTPUT_LEN * sizeof(uint32_t), which copied
                 * 4*OUTPUT_LEN bytes; only the OUTPUT_LEN-byte digest is
                 * the next input. */
                memcpy(input, outputBuffer + j, OUTPUT_LEN * sizeof(uint8_t));
                j += OUTPUT_LEN;
                powFunction(input, OUTPUT_LEN, Maddr, outputBuffer + j);
            }
            double endTime = get_wall_time();
            double costTime = endTime - startTime;
            /* Casts keep the format specifiers portable: uint64_t is not
             * `unsigned long` on all ABIs (e.g. 64-bit Windows). */
            fprintf(stdout, "TestCaseIx: %u, Input: %s, IterNum: %llu, Time: %4.2f, Performance: %5.2f bps\n",
                    testCaseIx, testInputCase[testCaseIx],
                    (unsigned long long)iterNum, costTime,
                    ((double)(iterNum * OUTPUT_LEN)) / costTime);
            fflush(stdout);
            fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp);
            fclose(fp);
        } else {
            fprintf(stderr, "Error: Open %s failed!\n", curOutFileName);
            abort();
        }
    }
    /* free(NULL) is a no-op, so no guards are needed. */
    free(outputBuffer);
    outputBuffer = NULL;
    free(Maddr);
    Maddr = NULL;
}
|
kmp_glt_atomic.c | /*
* kmp_atomic.c -- ATOMIC implementation routines
*/
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
#include "kmp_glt_atomic.h"
#include "kmp_glt.h" // TRUE, asm routines prototypes
typedef unsigned char uchar;
typedef unsigned short ushort;
/*!
@defgroup ATOMIC_OPS Atomic Operations
These functions are used for implementing the many different varieties of atomic operations.
The compiler is at liberty to inline atomic operations that are naturally supported
by the target architecture. For instance on IA-32 architecture an atomic like this can be inlined
@code
static int s = 0;
#pragma omp atomic
s++;
@endcode
using the single instruction: `lock; incl s`
However the runtime does provide entrypoints for these operations to support compilers that choose
not to inline them. (For instance, `__kmpc_atomic_fixed4_add` could be used to perform the
increment above.)
The names of the functions are encoded by using the data type name and the operation name, as in these tables.
Data Type | Data type encoding
-----------|---------------
int8_t | `fixed1`
uint8_t | `fixed1u`
int16_t | `fixed2`
uint16_t | `fixed2u`
int32_t | `fixed4`
uint32_t | `fixed4u`
int64_t | `fixed8`
uint64_t | `fixed8u`
float | `float4`
double | `float8`
float 10 (8087 eighty bit float) | `float10`
complex<float> | `cmplx4`
complex<double> | `cmplx8`
complex<float10> | `cmplx10`
<br>
Operation | Operation encoding
----------|-------------------
+ | add
- | sub
\* | mul
/ | div
& | andb
<< | shl
\>\> | shr
\| | orb
^ | xor
&& | andl
\|\| | orl
maximum | max
minimum | min
.eqv. | eqv
.neqv. | neqv
<br>
For non-commutative operations, `_rev` can also be added for the reversed operation.
For the functions that capture the result, the suffix `_cpt` is added.
Update Functions
================
The general form of an atomic function that just performs an update (without a `capture`)
@code
void __kmpc_atomic_<datatype>_<operation>( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs );
@endcode
@param ident_t a pointer to source location
@param gtid the global thread id
@param lhs a pointer to the left operand
@param rhs the right operand
`capture` functions
===================
The capture functions perform an atomic update and return a result, which is either the value
before the capture, or that after. They take an additional argument to determine which result is returned.
Their general form is therefore
@code
TYPE __kmpc_atomic_<datatype>_<operation>_cpt( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag );
@endcode
@param ident_t a pointer to source location
@param gtid the global thread id
@param lhs a pointer to the left operand
@param rhs the right operand
@param flag one if the result is to be captured *after* the operation, zero if captured *before*.
The one set of exceptions to this is the `complex<float>` type where the value is not returned,
rather an extra argument pointer is passed.
They look like
@code
void __kmpc_atomic_cmplx4_<op>_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag );
@endcode
Read and Write Operations
=========================
The OpenMP<sup>*</sup> standard now supports atomic operations that simply ensure that the
value is read or written atomically, with no modification
performed. In many cases on IA-32 architecture these operations can be inlined since
the architecture guarantees that no tearing occurs on aligned objects
accessed with a single memory operation of up to 64 bits in size.
The general form of the read operations is
@code
TYPE __kmpc_atomic_<type>_rd ( ident_t *id_ref, int gtid, TYPE * loc );
@endcode
For the write operations the form is
@code
void __kmpc_atomic_<type>_wr ( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs );
@endcode
Full list of functions
======================
This leads to the generation of 376 atomic functions, as follows.
Functions for integers
---------------------
There are versions here for integers of size 1,2,4 and 8 bytes both signed and unsigned (where that matters).
@code
__kmpc_atomic_fixed1_add
__kmpc_atomic_fixed1_add_cpt
__kmpc_atomic_fixed1_add_fp
__kmpc_atomic_fixed1_andb
__kmpc_atomic_fixed1_andb_cpt
__kmpc_atomic_fixed1_andl
__kmpc_atomic_fixed1_andl_cpt
__kmpc_atomic_fixed1_div
__kmpc_atomic_fixed1_div_cpt
__kmpc_atomic_fixed1_div_cpt_rev
__kmpc_atomic_fixed1_div_float8
__kmpc_atomic_fixed1_div_fp
__kmpc_atomic_fixed1_div_rev
__kmpc_atomic_fixed1_eqv
__kmpc_atomic_fixed1_eqv_cpt
__kmpc_atomic_fixed1_max
__kmpc_atomic_fixed1_max_cpt
__kmpc_atomic_fixed1_min
__kmpc_atomic_fixed1_min_cpt
__kmpc_atomic_fixed1_mul
__kmpc_atomic_fixed1_mul_cpt
__kmpc_atomic_fixed1_mul_float8
__kmpc_atomic_fixed1_mul_fp
__kmpc_atomic_fixed1_neqv
__kmpc_atomic_fixed1_neqv_cpt
__kmpc_atomic_fixed1_orb
__kmpc_atomic_fixed1_orb_cpt
__kmpc_atomic_fixed1_orl
__kmpc_atomic_fixed1_orl_cpt
__kmpc_atomic_fixed1_rd
__kmpc_atomic_fixed1_shl
__kmpc_atomic_fixed1_shl_cpt
__kmpc_atomic_fixed1_shl_cpt_rev
__kmpc_atomic_fixed1_shl_rev
__kmpc_atomic_fixed1_shr
__kmpc_atomic_fixed1_shr_cpt
__kmpc_atomic_fixed1_shr_cpt_rev
__kmpc_atomic_fixed1_shr_rev
__kmpc_atomic_fixed1_sub
__kmpc_atomic_fixed1_sub_cpt
__kmpc_atomic_fixed1_sub_cpt_rev
__kmpc_atomic_fixed1_sub_fp
__kmpc_atomic_fixed1_sub_rev
__kmpc_atomic_fixed1_swp
__kmpc_atomic_fixed1_wr
__kmpc_atomic_fixed1_xor
__kmpc_atomic_fixed1_xor_cpt
__kmpc_atomic_fixed1u_div
__kmpc_atomic_fixed1u_div_cpt
__kmpc_atomic_fixed1u_div_cpt_rev
__kmpc_atomic_fixed1u_div_fp
__kmpc_atomic_fixed1u_div_rev
__kmpc_atomic_fixed1u_shr
__kmpc_atomic_fixed1u_shr_cpt
__kmpc_atomic_fixed1u_shr_cpt_rev
__kmpc_atomic_fixed1u_shr_rev
__kmpc_atomic_fixed2_add
__kmpc_atomic_fixed2_add_cpt
__kmpc_atomic_fixed2_add_fp
__kmpc_atomic_fixed2_andb
__kmpc_atomic_fixed2_andb_cpt
__kmpc_atomic_fixed2_andl
__kmpc_atomic_fixed2_andl_cpt
__kmpc_atomic_fixed2_div
__kmpc_atomic_fixed2_div_cpt
__kmpc_atomic_fixed2_div_cpt_rev
__kmpc_atomic_fixed2_div_float8
__kmpc_atomic_fixed2_div_fp
__kmpc_atomic_fixed2_div_rev
__kmpc_atomic_fixed2_eqv
__kmpc_atomic_fixed2_eqv_cpt
__kmpc_atomic_fixed2_max
__kmpc_atomic_fixed2_max_cpt
__kmpc_atomic_fixed2_min
__kmpc_atomic_fixed2_min_cpt
__kmpc_atomic_fixed2_mul
__kmpc_atomic_fixed2_mul_cpt
__kmpc_atomic_fixed2_mul_float8
__kmpc_atomic_fixed2_mul_fp
__kmpc_atomic_fixed2_neqv
__kmpc_atomic_fixed2_neqv_cpt
__kmpc_atomic_fixed2_orb
__kmpc_atomic_fixed2_orb_cpt
__kmpc_atomic_fixed2_orl
__kmpc_atomic_fixed2_orl_cpt
__kmpc_atomic_fixed2_rd
__kmpc_atomic_fixed2_shl
__kmpc_atomic_fixed2_shl_cpt
__kmpc_atomic_fixed2_shl_cpt_rev
__kmpc_atomic_fixed2_shl_rev
__kmpc_atomic_fixed2_shr
__kmpc_atomic_fixed2_shr_cpt
__kmpc_atomic_fixed2_shr_cpt_rev
__kmpc_atomic_fixed2_shr_rev
__kmpc_atomic_fixed2_sub
__kmpc_atomic_fixed2_sub_cpt
__kmpc_atomic_fixed2_sub_cpt_rev
__kmpc_atomic_fixed2_sub_fp
__kmpc_atomic_fixed2_sub_rev
__kmpc_atomic_fixed2_swp
__kmpc_atomic_fixed2_wr
__kmpc_atomic_fixed2_xor
__kmpc_atomic_fixed2_xor_cpt
__kmpc_atomic_fixed2u_div
__kmpc_atomic_fixed2u_div_cpt
__kmpc_atomic_fixed2u_div_cpt_rev
__kmpc_atomic_fixed2u_div_fp
__kmpc_atomic_fixed2u_div_rev
__kmpc_atomic_fixed2u_shr
__kmpc_atomic_fixed2u_shr_cpt
__kmpc_atomic_fixed2u_shr_cpt_rev
__kmpc_atomic_fixed2u_shr_rev
__kmpc_atomic_fixed4_add
__kmpc_atomic_fixed4_add_cpt
__kmpc_atomic_fixed4_add_fp
__kmpc_atomic_fixed4_andb
__kmpc_atomic_fixed4_andb_cpt
__kmpc_atomic_fixed4_andl
__kmpc_atomic_fixed4_andl_cpt
__kmpc_atomic_fixed4_div
__kmpc_atomic_fixed4_div_cpt
__kmpc_atomic_fixed4_div_cpt_rev
__kmpc_atomic_fixed4_div_float8
__kmpc_atomic_fixed4_div_fp
__kmpc_atomic_fixed4_div_rev
__kmpc_atomic_fixed4_eqv
__kmpc_atomic_fixed4_eqv_cpt
__kmpc_atomic_fixed4_max
__kmpc_atomic_fixed4_max_cpt
__kmpc_atomic_fixed4_min
__kmpc_atomic_fixed4_min_cpt
__kmpc_atomic_fixed4_mul
__kmpc_atomic_fixed4_mul_cpt
__kmpc_atomic_fixed4_mul_float8
__kmpc_atomic_fixed4_mul_fp
__kmpc_atomic_fixed4_neqv
__kmpc_atomic_fixed4_neqv_cpt
__kmpc_atomic_fixed4_orb
__kmpc_atomic_fixed4_orb_cpt
__kmpc_atomic_fixed4_orl
__kmpc_atomic_fixed4_orl_cpt
__kmpc_atomic_fixed4_rd
__kmpc_atomic_fixed4_shl
__kmpc_atomic_fixed4_shl_cpt
__kmpc_atomic_fixed4_shl_cpt_rev
__kmpc_atomic_fixed4_shl_rev
__kmpc_atomic_fixed4_shr
__kmpc_atomic_fixed4_shr_cpt
__kmpc_atomic_fixed4_shr_cpt_rev
__kmpc_atomic_fixed4_shr_rev
__kmpc_atomic_fixed4_sub
__kmpc_atomic_fixed4_sub_cpt
__kmpc_atomic_fixed4_sub_cpt_rev
__kmpc_atomic_fixed4_sub_fp
__kmpc_atomic_fixed4_sub_rev
__kmpc_atomic_fixed4_swp
__kmpc_atomic_fixed4_wr
__kmpc_atomic_fixed4_xor
__kmpc_atomic_fixed4_xor_cpt
__kmpc_atomic_fixed4u_div
__kmpc_atomic_fixed4u_div_cpt
__kmpc_atomic_fixed4u_div_cpt_rev
__kmpc_atomic_fixed4u_div_fp
__kmpc_atomic_fixed4u_div_rev
__kmpc_atomic_fixed4u_shr
__kmpc_atomic_fixed4u_shr_cpt
__kmpc_atomic_fixed4u_shr_cpt_rev
__kmpc_atomic_fixed4u_shr_rev
__kmpc_atomic_fixed8_add
__kmpc_atomic_fixed8_add_cpt
__kmpc_atomic_fixed8_add_fp
__kmpc_atomic_fixed8_andb
__kmpc_atomic_fixed8_andb_cpt
__kmpc_atomic_fixed8_andl
__kmpc_atomic_fixed8_andl_cpt
__kmpc_atomic_fixed8_div
__kmpc_atomic_fixed8_div_cpt
__kmpc_atomic_fixed8_div_cpt_rev
__kmpc_atomic_fixed8_div_float8
__kmpc_atomic_fixed8_div_fp
__kmpc_atomic_fixed8_div_rev
__kmpc_atomic_fixed8_eqv
__kmpc_atomic_fixed8_eqv_cpt
__kmpc_atomic_fixed8_max
__kmpc_atomic_fixed8_max_cpt
__kmpc_atomic_fixed8_min
__kmpc_atomic_fixed8_min_cpt
__kmpc_atomic_fixed8_mul
__kmpc_atomic_fixed8_mul_cpt
__kmpc_atomic_fixed8_mul_float8
__kmpc_atomic_fixed8_mul_fp
__kmpc_atomic_fixed8_neqv
__kmpc_atomic_fixed8_neqv_cpt
__kmpc_atomic_fixed8_orb
__kmpc_atomic_fixed8_orb_cpt
__kmpc_atomic_fixed8_orl
__kmpc_atomic_fixed8_orl_cpt
__kmpc_atomic_fixed8_rd
__kmpc_atomic_fixed8_shl
__kmpc_atomic_fixed8_shl_cpt
__kmpc_atomic_fixed8_shl_cpt_rev
__kmpc_atomic_fixed8_shl_rev
__kmpc_atomic_fixed8_shr
__kmpc_atomic_fixed8_shr_cpt
__kmpc_atomic_fixed8_shr_cpt_rev
__kmpc_atomic_fixed8_shr_rev
__kmpc_atomic_fixed8_sub
__kmpc_atomic_fixed8_sub_cpt
__kmpc_atomic_fixed8_sub_cpt_rev
__kmpc_atomic_fixed8_sub_fp
__kmpc_atomic_fixed8_sub_rev
__kmpc_atomic_fixed8_swp
__kmpc_atomic_fixed8_wr
__kmpc_atomic_fixed8_xor
__kmpc_atomic_fixed8_xor_cpt
__kmpc_atomic_fixed8u_div
__kmpc_atomic_fixed8u_div_cpt
__kmpc_atomic_fixed8u_div_cpt_rev
__kmpc_atomic_fixed8u_div_fp
__kmpc_atomic_fixed8u_div_rev
__kmpc_atomic_fixed8u_shr
__kmpc_atomic_fixed8u_shr_cpt
__kmpc_atomic_fixed8u_shr_cpt_rev
__kmpc_atomic_fixed8u_shr_rev
@endcode
Functions for floating point
----------------------------
There are versions here for floating point numbers of size 4, 8, 10 and 16 bytes.
(Ten byte floats are used by X87, but are now rare).
@code
__kmpc_atomic_float4_add
__kmpc_atomic_float4_add_cpt
__kmpc_atomic_float4_add_float8
__kmpc_atomic_float4_add_fp
__kmpc_atomic_float4_div
__kmpc_atomic_float4_div_cpt
__kmpc_atomic_float4_div_cpt_rev
__kmpc_atomic_float4_div_float8
__kmpc_atomic_float4_div_fp
__kmpc_atomic_float4_div_rev
__kmpc_atomic_float4_max
__kmpc_atomic_float4_max_cpt
__kmpc_atomic_float4_min
__kmpc_atomic_float4_min_cpt
__kmpc_atomic_float4_mul
__kmpc_atomic_float4_mul_cpt
__kmpc_atomic_float4_mul_float8
__kmpc_atomic_float4_mul_fp
__kmpc_atomic_float4_rd
__kmpc_atomic_float4_sub
__kmpc_atomic_float4_sub_cpt
__kmpc_atomic_float4_sub_cpt_rev
__kmpc_atomic_float4_sub_float8
__kmpc_atomic_float4_sub_fp
__kmpc_atomic_float4_sub_rev
__kmpc_atomic_float4_swp
__kmpc_atomic_float4_wr
__kmpc_atomic_float8_add
__kmpc_atomic_float8_add_cpt
__kmpc_atomic_float8_add_fp
__kmpc_atomic_float8_div
__kmpc_atomic_float8_div_cpt
__kmpc_atomic_float8_div_cpt_rev
__kmpc_atomic_float8_div_fp
__kmpc_atomic_float8_div_rev
__kmpc_atomic_float8_max
__kmpc_atomic_float8_max_cpt
__kmpc_atomic_float8_min
__kmpc_atomic_float8_min_cpt
__kmpc_atomic_float8_mul
__kmpc_atomic_float8_mul_cpt
__kmpc_atomic_float8_mul_fp
__kmpc_atomic_float8_rd
__kmpc_atomic_float8_sub
__kmpc_atomic_float8_sub_cpt
__kmpc_atomic_float8_sub_cpt_rev
__kmpc_atomic_float8_sub_fp
__kmpc_atomic_float8_sub_rev
__kmpc_atomic_float8_swp
__kmpc_atomic_float8_wr
__kmpc_atomic_float10_add
__kmpc_atomic_float10_add_cpt
__kmpc_atomic_float10_add_fp
__kmpc_atomic_float10_div
__kmpc_atomic_float10_div_cpt
__kmpc_atomic_float10_div_cpt_rev
__kmpc_atomic_float10_div_fp
__kmpc_atomic_float10_div_rev
__kmpc_atomic_float10_mul
__kmpc_atomic_float10_mul_cpt
__kmpc_atomic_float10_mul_fp
__kmpc_atomic_float10_rd
__kmpc_atomic_float10_sub
__kmpc_atomic_float10_sub_cpt
__kmpc_atomic_float10_sub_cpt_rev
__kmpc_atomic_float10_sub_fp
__kmpc_atomic_float10_sub_rev
__kmpc_atomic_float10_swp
__kmpc_atomic_float10_wr
__kmpc_atomic_float16_add
__kmpc_atomic_float16_add_cpt
__kmpc_atomic_float16_div
__kmpc_atomic_float16_div_cpt
__kmpc_atomic_float16_div_cpt_rev
__kmpc_atomic_float16_div_rev
__kmpc_atomic_float16_max
__kmpc_atomic_float16_max_cpt
__kmpc_atomic_float16_min
__kmpc_atomic_float16_min_cpt
__kmpc_atomic_float16_mul
__kmpc_atomic_float16_mul_cpt
__kmpc_atomic_float16_rd
__kmpc_atomic_float16_sub
__kmpc_atomic_float16_sub_cpt
__kmpc_atomic_float16_sub_cpt_rev
__kmpc_atomic_float16_sub_rev
__kmpc_atomic_float16_swp
__kmpc_atomic_float16_wr
@endcode
Functions for Complex types
---------------------------
Functions for complex types whose component floating point variables are of size 4,8,10 or 16 bytes.
The names here are based on the size of the component float, *not* the size of the complex type. So
`__kmpc_atomic_cmplx8_add` is an operation on a `complex<double>` or `complex(kind=8)`, *not* `complex<float>`.
@code
__kmpc_atomic_cmplx4_add
__kmpc_atomic_cmplx4_add_cmplx8
__kmpc_atomic_cmplx4_add_cpt
__kmpc_atomic_cmplx4_div
__kmpc_atomic_cmplx4_div_cmplx8
__kmpc_atomic_cmplx4_div_cpt
__kmpc_atomic_cmplx4_div_cpt_rev
__kmpc_atomic_cmplx4_div_rev
__kmpc_atomic_cmplx4_mul
__kmpc_atomic_cmplx4_mul_cmplx8
__kmpc_atomic_cmplx4_mul_cpt
__kmpc_atomic_cmplx4_rd
__kmpc_atomic_cmplx4_sub
__kmpc_atomic_cmplx4_sub_cmplx8
__kmpc_atomic_cmplx4_sub_cpt
__kmpc_atomic_cmplx4_sub_cpt_rev
__kmpc_atomic_cmplx4_sub_rev
__kmpc_atomic_cmplx4_swp
__kmpc_atomic_cmplx4_wr
__kmpc_atomic_cmplx8_add
__kmpc_atomic_cmplx8_add_cpt
__kmpc_atomic_cmplx8_div
__kmpc_atomic_cmplx8_div_cpt
__kmpc_atomic_cmplx8_div_cpt_rev
__kmpc_atomic_cmplx8_div_rev
__kmpc_atomic_cmplx8_mul
__kmpc_atomic_cmplx8_mul_cpt
__kmpc_atomic_cmplx8_rd
__kmpc_atomic_cmplx8_sub
__kmpc_atomic_cmplx8_sub_cpt
__kmpc_atomic_cmplx8_sub_cpt_rev
__kmpc_atomic_cmplx8_sub_rev
__kmpc_atomic_cmplx8_swp
__kmpc_atomic_cmplx8_wr
__kmpc_atomic_cmplx10_add
__kmpc_atomic_cmplx10_add_cpt
__kmpc_atomic_cmplx10_div
__kmpc_atomic_cmplx10_div_cpt
__kmpc_atomic_cmplx10_div_cpt_rev
__kmpc_atomic_cmplx10_div_rev
__kmpc_atomic_cmplx10_mul
__kmpc_atomic_cmplx10_mul_cpt
__kmpc_atomic_cmplx10_rd
__kmpc_atomic_cmplx10_sub
__kmpc_atomic_cmplx10_sub_cpt
__kmpc_atomic_cmplx10_sub_cpt_rev
__kmpc_atomic_cmplx10_sub_rev
__kmpc_atomic_cmplx10_swp
__kmpc_atomic_cmplx10_wr
__kmpc_atomic_cmplx16_add
__kmpc_atomic_cmplx16_add_cpt
__kmpc_atomic_cmplx16_div
__kmpc_atomic_cmplx16_div_cpt
__kmpc_atomic_cmplx16_div_cpt_rev
__kmpc_atomic_cmplx16_div_rev
__kmpc_atomic_cmplx16_mul
__kmpc_atomic_cmplx16_mul_cpt
__kmpc_atomic_cmplx16_rd
__kmpc_atomic_cmplx16_sub
__kmpc_atomic_cmplx16_sub_cpt
__kmpc_atomic_cmplx16_sub_cpt_rev
__kmpc_atomic_cmplx16_swp
__kmpc_atomic_cmplx16_wr
@endcode
*/
/*!
@ingroup ATOMIC_OPS
@{
*/
/*
 * Global vars
 */
/* Atomic dispatch mode: 1 = Intel-style (lock-free/specialized paths),
 * 2 = GOMP compatibility (user-coded atomics funnel through the single
 * generic lock so they interoperate with GOMP_atomic_start/end). */
#ifndef KMP_GOMP_COMPAT
int __kmp_atomic_mode = 1; // Intel perf
#else
int __kmp_atomic_mode = 2; // GOMP compatibility
#endif /* KMP_GOMP_COMPAT */
/* One lock per operand size/type class, cache-line aligned to avoid
 * false sharing between locks. Selected via the ATOMIC_LOCK* aliases. */
KMP_ALIGN(128)
kmp_atomic_lock_t __kmp_atomic_lock; /* Control access to all user coded atomics in Gnu compat mode */
kmp_atomic_lock_t __kmp_atomic_lock_1i; /* Control access to all user coded atomics for 1-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_2i; /* Control access to all user coded atomics for 2-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_4i; /* Control access to all user coded atomics for 4-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_4r; /* Control access to all user coded atomics for kmp_real32 data type */
kmp_atomic_lock_t __kmp_atomic_lock_8i; /* Control access to all user coded atomics for 8-byte fixed data types */
kmp_atomic_lock_t __kmp_atomic_lock_8r; /* Control access to all user coded atomics for kmp_real64 data type */
kmp_atomic_lock_t __kmp_atomic_lock_8c; /* Control access to all user coded atomics for complex byte data type */
kmp_atomic_lock_t __kmp_atomic_lock_10r; /* Control access to all user coded atomics for long double data type */
kmp_atomic_lock_t __kmp_atomic_lock_16r; /* Control access to all user coded atomics for _Quad data type */
kmp_atomic_lock_t __kmp_atomic_lock_16c; /* Control access to all user coded atomics for double complex data type*/
kmp_atomic_lock_t __kmp_atomic_lock_20c; /* Control access to all user coded atomics for long double complex type*/
kmp_atomic_lock_t __kmp_atomic_lock_32c; /* Control access to all user coded atomics for _Quad complex data type */
/*
  2007-03-02:
  Without "volatile" specifier in OP_CMPXCHG and MIN_MAX_CMPXCHG we have a
  bug on *_32 and *_32e. This is just a temporary workaround for the problem.
  It seems the right solution is writing OP_CMPXCHG and MIN_MAX_CMPXCHG
  routines in assembler language.
*/
/* Qualifier inserted into the cmpxchg loops; see the note above. */
#define KMP_ATOMIC_VOLATILE volatile
#if ( KMP_ARCH_X86 ) && KMP_HAVE_QUAD
/* C++ operator overloads for the aligned _Quad / quad-complex wrapper
 * structs (this translation unit is compiled as C++). They let the generic
 * atomic macros apply `OP=` and comparison operators uniformly to the
 * wrappers on 32-bit x86, where alignment-specific types are needed. */
static inline void operator +=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q /= rhs.q; };
static inline bool operator < ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q < rhs.q; }
static inline bool operator > ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q > rhs.q; }
static inline void operator +=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q /= rhs.q; };
static inline bool operator < ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q < rhs.q; }
static inline bool operator > ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q > rhs.q; }
static inline void operator +=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q /= rhs.q; };
static inline void operator +=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q += rhs.q; };
static inline void operator -=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q -= rhs.q; };
static inline void operator *=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q *= rhs.q; };
static inline void operator /=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q /= rhs.q; };
#endif
/* ------------------------------------------------------------------------ */
/* ATOMIC implementation routines */
/* one routine for each operation and operand type */
/* ------------------------------------------------------------------------ */
// All routines declarations looks like
// void __kmpc_atomic_RTYPE_OP( ident_t*, int, TYPE *lhs, TYPE rhs );
// ------------------------------------------------------------------------
/* Lazily resolve the global thread id when the caller passed
 * KMP_GTID_UNKNOWN (required before taking an atomic lock). */
#define KMP_CHECK_GTID                                       \
    if ( gtid == KMP_GTID_UNKNOWN ) {                        \
        gtid = __kmp_entry_gtid();                           \
    } // check and get gtid when needed
// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID   - operation identifier (add, sub, mul, ...)
// TYPE    - operands' type
#define ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE, RET_TYPE)                                        \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{                                                                                         \
    KMP_DEBUG_ASSERT( __kmp_global.init_serial );                                         \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
// Lock variables used for critical sections for various size operands
// (aliases onto the globals above; selected by the LCK_ID macro argument).
#define ATOMIC_LOCK0   __kmp_atomic_lock       // all types, for Gnu compat
#define ATOMIC_LOCK1i  __kmp_atomic_lock_1i    // char
#define ATOMIC_LOCK2i  __kmp_atomic_lock_2i    // short
#define ATOMIC_LOCK4i  __kmp_atomic_lock_4i    // long int
#define ATOMIC_LOCK4r  __kmp_atomic_lock_4r    // float
#define ATOMIC_LOCK8i  __kmp_atomic_lock_8i    // long long int
#define ATOMIC_LOCK8r  __kmp_atomic_lock_8r    // double
#define ATOMIC_LOCK8c  __kmp_atomic_lock_8c    // float complex
#define ATOMIC_LOCK10r __kmp_atomic_lock_10r   // long double
#define ATOMIC_LOCK16r __kmp_atomic_lock_16r   // _Quad
#define ATOMIC_LOCK16c __kmp_atomic_lock_16c   // double complex
#define ATOMIC_LOCK20c __kmp_atomic_lock_20c   // long double complex
#define ATOMIC_LOCK32c __kmp_atomic_lock_32c   // _Quad complex
// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL(OP,LCK_ID)                                   \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );    \
                                                                 \
    (*lhs) OP (rhs);                                             \
                                                                 \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// ------------------------------------------------------------------------
// For GNU compatibility, we may need to use a critical section,
// even though it is not required by the ISA.
//
// On IA-32 architecture, all atomic operations except for fixed 4 byte add,
// sub, and bitwise logical ops, and 1 & 2 byte logical ops use a common
// critical section. On Intel(R) 64, all atomic operations are done with fetch
// and add or compare and exchange. Therefore, the FLAG parameter to this
// macro is either KMP_ARCH_X86 or 0 (or 1, for Intel-specific extension which
// require a critical section, where we predict that they will be implemented
// in the Gnu codegen by calling GOMP_atomic_start() / GOMP_atomic_end()).
//
// When the OP_GOMP_CRITICAL macro is used in a *CRITICAL* macro construct,
// the FLAG parameter should always be 1. If we know that we will be using
// a critical section, then we want to make certain that we use the generic
// lock __kmp_atomic_lock to protect the atomic update, and not one of the
// locks that are specialized based upon the size or type of the data.
//
// If FLAG is 0, then we are relying on dead code elimination by the build
// compiler to get rid of the useless block of code, and save a needless
// branch at runtime.
//
#ifdef KMP_GOMP_COMPAT
# define OP_GOMP_CRITICAL(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL( OP, 0 ); \
return; \
}
# else
# define OP_GOMP_CRITICAL(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
#if KMP_MIC
# define KMP_DO_PAUSE _mm_delay_32( 1 )
#else
# define KMP_DO_PAUSE KMP_CPU_PAUSE()
#endif /* KMP_MIC */
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Classic CAS retry loop: read *lhs, compute old OP rhs, and attempt the
// BITS-wide compare-and-store; on contention, pause and re-read/recompute.
#define OP_CMPXCHG(TYPE,BITS,OP) \
{ \
TYPE old_value, new_value; \
old_value = *(TYPE volatile *)lhs; \
new_value = old_value OP rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_DO_PAUSE; \
\
old_value = *(TYPE volatile *)lhs; \
new_value = old_value OP rhs; \
} \
}
#if USE_CMPXCHG_FIX
// 2007-06-25:
// workaround for C78287 (complex(kind=4) data type)
// lin_32, lin_32e, win_32 and win_32e are affected (I verified the asm)
// Compiler ignores the volatile qualifier of the temp_val in the OP_CMPXCHG macro.
// This is a problem of the compiler.
// Related tracker is C76005, targeted to 11.0.
// I verified the asm of the workaround.
// The struct-with-self-pointer (vvv points at cmp) forces the re-read of
// *lhs through a pointer the compiler cannot optimize away, working around
// the dropped-volatile issue described above.
#define OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \
{ \
struct _sss { \
TYPE cmp; \
kmp_int##BITS *vvv; \
}; \
struct _sss old_value, new_value; \
old_value.vvv = ( kmp_int##BITS * )&old_value.cmp; \
new_value.vvv = ( kmp_int##BITS * )&new_value.cmp; \
*old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \
new_value.cmp = old_value.cmp OP rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \
*VOLATILE_CAST(kmp_int##BITS *) new_value.vvv ) ) \
{ \
KMP_DO_PAUSE; \
\
*old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \
new_value.cmp = old_value.cmp OP rhs; \
} \
}
// end of the first part of the workaround for C78287
#endif // USE_CMPXCHG_FIX
// Parameter key for the ATOMIC_* generator macros below:
//   MASK      - hex digits pasted after "0x"; on non-x86 builds it masks the
//               low address bits of lhs to detect misalignment and fall back
//               to a critical section
//   GOMP_FLAG - forwarded to OP_GOMP_CRITICAL; non-zero enables the GOMP
//               compatibility critical-section early-return path
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
/* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG(TYPE,BITS,OP) \
}
#if USE_CMPXCHG_FIX
// -------------------------------------------------------------------------
// workaround for C78287 (complex(kind=4) data type)
#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \
}
// end of the second part of the workaround for C78287
#endif
#else
// -------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
/* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
#if USE_CMPXCHG_FIX
// -------------------------------------------------------------------------
// workaround for C78287 (complex(kind=4) data type)
#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
// end of the second part of the workaround for C78287
#endif // USE_CMPXCHG_FIX
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// NOTE: each invocation line below expands to the complete definition of the
// exported __kmpc_atomic_<TYPE_ID>_<OP_ID> entry point named in its trailing
// comment; these lines are function definitions, not calls.
// Routines for ATOMIC 4-byte operands addition and subtraction
ATOMIC_FIXED_ADD( fixed4, add, kmp_int32, 32, +, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add
ATOMIC_FIXED_ADD( fixed4, sub, kmp_int32, 32, -, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub
ATOMIC_CMPXCHG( float4, add, kmp_real32, 32, +, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add
ATOMIC_CMPXCHG( float4, sub, kmp_real32, 32, -, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub
// Routines for ATOMIC 8-byte operands addition and subtraction
ATOMIC_FIXED_ADD( fixed8, add, kmp_int64, 64, +, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add
ATOMIC_FIXED_ADD( fixed8, sub, kmp_int64, 64, -, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub
ATOMIC_CMPXCHG( float8, add, kmp_real64, 64, +, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add
ATOMIC_CMPXCHG( float8, sub, kmp_real64, 64, -, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub
// ------------------------------------------------------------------------
// Entries definition for integer operands
// TYPE_ID - operands type and size (fixed4, float4)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operand type
// BITS - size in bits, used to distinguish low level calls
// OP - operator (used in critical section)
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// MASK - used for alignment check
// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,MASK,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// ------------------------------------------------------------------------
// TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG
ATOMIC_CMPXCHG( fixed1, add, kmp_int8, 8, +, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add
ATOMIC_CMPXCHG( fixed1, andb, kmp_int8, 8, &, 1i, 0, 0 ) // __kmpc_atomic_fixed1_andb
ATOMIC_CMPXCHG( fixed1, div, kmp_int8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div
ATOMIC_CMPXCHG( fixed1u, div, kmp_uint8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div
ATOMIC_CMPXCHG( fixed1, mul, kmp_int8, 8, *, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul
ATOMIC_CMPXCHG( fixed1, orb, kmp_int8, 8, |, 1i, 0, 0 ) // __kmpc_atomic_fixed1_orb
ATOMIC_CMPXCHG( fixed1, shl, kmp_int8, 8, <<, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl
ATOMIC_CMPXCHG( fixed1, shr, kmp_int8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr
ATOMIC_CMPXCHG( fixed1u, shr, kmp_uint8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr
ATOMIC_CMPXCHG( fixed1, sub, kmp_int8, 8, -, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub
ATOMIC_CMPXCHG( fixed1, xor, kmp_int8, 8, ^, 1i, 0, 0 ) // __kmpc_atomic_fixed1_xor
ATOMIC_CMPXCHG( fixed2, add, kmp_int16, 16, +, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add
ATOMIC_CMPXCHG( fixed2, andb, kmp_int16, 16, &, 2i, 1, 0 ) // __kmpc_atomic_fixed2_andb
ATOMIC_CMPXCHG( fixed2, div, kmp_int16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div
ATOMIC_CMPXCHG( fixed2u, div, kmp_uint16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div
ATOMIC_CMPXCHG( fixed2, mul, kmp_int16, 16, *, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul
ATOMIC_CMPXCHG( fixed2, orb, kmp_int16, 16, |, 2i, 1, 0 ) // __kmpc_atomic_fixed2_orb
ATOMIC_CMPXCHG( fixed2, shl, kmp_int16, 16, <<, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl
ATOMIC_CMPXCHG( fixed2, shr, kmp_int16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr
ATOMIC_CMPXCHG( fixed2u, shr, kmp_uint16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr
ATOMIC_CMPXCHG( fixed2, sub, kmp_int16, 16, -, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub
ATOMIC_CMPXCHG( fixed2, xor, kmp_int16, 16, ^, 2i, 1, 0 ) // __kmpc_atomic_fixed2_xor
ATOMIC_CMPXCHG( fixed4, andb, kmp_int32, 32, &, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andb
ATOMIC_CMPXCHG( fixed4, div, kmp_int32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div
ATOMIC_CMPXCHG( fixed4u, div, kmp_uint32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div
ATOMIC_CMPXCHG( fixed4, mul, kmp_int32, 32, *, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul
ATOMIC_CMPXCHG( fixed4, orb, kmp_int32, 32, |, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orb
ATOMIC_CMPXCHG( fixed4, shl, kmp_int32, 32, <<, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl
ATOMIC_CMPXCHG( fixed4, shr, kmp_int32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr
ATOMIC_CMPXCHG( fixed4u, shr, kmp_uint32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr
ATOMIC_CMPXCHG( fixed4, xor, kmp_int32, 32, ^, 4i, 3, 0 ) // __kmpc_atomic_fixed4_xor
ATOMIC_CMPXCHG( fixed8, andb, kmp_int64, 64, &, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb
ATOMIC_CMPXCHG( fixed8, div, kmp_int64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div
ATOMIC_CMPXCHG( fixed8u, div, kmp_uint64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div
ATOMIC_CMPXCHG( fixed8, mul, kmp_int64, 64, *, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul
ATOMIC_CMPXCHG( fixed8, orb, kmp_int64, 64, |, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb
ATOMIC_CMPXCHG( fixed8, shl, kmp_int64, 64, <<, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl
ATOMIC_CMPXCHG( fixed8, shr, kmp_int64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr
ATOMIC_CMPXCHG( fixed8u, shr, kmp_uint64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr
ATOMIC_CMPXCHG( fixed8, xor, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor
ATOMIC_CMPXCHG( float4, div, kmp_real32, 32, /, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div
ATOMIC_CMPXCHG( float4, mul, kmp_real32, 32, *, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul
ATOMIC_CMPXCHG( float8, div, kmp_real64, 64, /, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div
ATOMIC_CMPXCHG( float8, mul, kmp_real64, 64, *, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul
// TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG
/* ------------------------------------------------------------------------ */
/* Routines for C/C++ Reduction operators && and || */
/* ------------------------------------------------------------------------ */
// ------------------------------------------------------------------------
// Need separate macros for &&, || because there is no combined assignment
//   ("a &&= b" does not exist in C, so the critical-section form spells the
//   update as "*lhs = *lhs OP rhs" via the "= *lhs OP" operator argument)
// TODO: eliminate ATOMIC_CRIT_{L,EQV} macros as not used
#define ATOMIC_CRIT_L(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \
OP_CRITICAL( = *lhs OP, LCK_ID ) \
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ===================================
#define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \
OP_CMPXCHG(TYPE,BITS,OP) \
}
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(= *lhs OP,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(= *lhs OP,LCK_ID) /* unaligned - use critical */ \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// Generated logical-reduction entry points (see trailing comments for names):
ATOMIC_CMPX_L( fixed1, andl, char, 8, &&, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl
ATOMIC_CMPX_L( fixed1, orl, char, 8, ||, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl
ATOMIC_CMPX_L( fixed2, andl, short, 16, &&, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl
ATOMIC_CMPX_L( fixed2, orl, short, 16, ||, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl
ATOMIC_CMPX_L( fixed4, andl, kmp_int32, 32, &&, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andl
ATOMIC_CMPX_L( fixed4, orl, kmp_int32, 32, ||, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orl
ATOMIC_CMPX_L( fixed8, andl, kmp_int64, 64, &&, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl
ATOMIC_CMPX_L( fixed8, orl, kmp_int64, 64, ||, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl
/* ------------------------------------------------------------------------- */
/* Routines for Fortran operators that matched no one in C: */
/* MAX, MIN, .EQV., .NEQV. */
/* Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl} */
/* Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor} */
/* ------------------------------------------------------------------------- */
// -------------------------------------------------------------------------
// MIN and MAX need separate macros
// OP - operator to check if we need any actions?
//   For max OP is "<" and for min OP is ">": "*lhs OP rhs" is true exactly
//   when rhs should replace *lhs, so the update can be skipped otherwise.
// Re-checks the condition under the lock (double-checked update): the value
// may have changed between the unlocked test and lock acquisition.
#define MIN_MAX_CRITSECT(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if ( *lhs OP rhs ) { /* still need actions? */ \
*lhs = rhs; \
} \
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_MIN_MAX_CRITSECT(OP,FLAG) \
if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \
KMP_CHECK_GTID; \
MIN_MAX_CRITSECT( OP, 0 ); \
return; \
}
#else
#define GOMP_MIN_MAX_CRITSECT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// -------------------------------------------------------------------------
// CAS loop that stores rhs only while the condition still holds; exits early
// once another thread has installed a value that makes the update unneeded.
#define MIN_MAX_CMPXCHG(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value; \
temp_val = *lhs; \
old_value = temp_val; \
while ( old_value OP rhs && /* still need actions? */ \
! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \
{ \
KMP_CPU_PAUSE(); \
temp_val = *lhs; \
old_value = temp_val; \
} \
}
// -------------------------------------------------------------------------
// 1-byte, 2-byte operands - use critical section
#define MIN_MAX_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
if ( *lhs OP rhs ) { /* need actions? */ \
GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \
MIN_MAX_CRITSECT(OP,LCK_ID) \
} \
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// -------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
if ( *lhs OP rhs ) { \
GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \
MIN_MAX_CMPXCHG(TYPE,BITS,OP) \
} \
}
#else
// -------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
if ( *lhs OP rhs ) { \
GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
MIN_MAX_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
MIN_MAX_CRITSECT(OP,LCK_ID) /* unaligned address */ \
} \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
MIN_MAX_COMPXCHG( fixed1, max, char, 8, <, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max
MIN_MAX_COMPXCHG( fixed1, min, char, 8, >, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min
MIN_MAX_COMPXCHG( fixed2, max, short, 16, <, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max
MIN_MAX_COMPXCHG( fixed2, min, short, 16, >, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min
MIN_MAX_COMPXCHG( fixed4, max, kmp_int32, 32, <, 4i, 3, 0 ) // __kmpc_atomic_fixed4_max
MIN_MAX_COMPXCHG( fixed4, min, kmp_int32, 32, >, 4i, 3, 0 ) // __kmpc_atomic_fixed4_min
MIN_MAX_COMPXCHG( fixed8, max, kmp_int64, 64, <, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max
MIN_MAX_COMPXCHG( fixed8, min, kmp_int64, 64, >, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min
MIN_MAX_COMPXCHG( float4, max, kmp_real32, 32, <, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max
MIN_MAX_COMPXCHG( float4, min, kmp_real32, 32, >, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min
MIN_MAX_COMPXCHG( float8, max, kmp_real64, 64, <, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max
MIN_MAX_COMPXCHG( float8, min, kmp_real64, 64, >, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL( float16, max, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max
MIN_MAX_CRITICAL( float16, min, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min
#if ( KMP_ARCH_X86 )
MIN_MAX_CRITICAL( float16, max_a16, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16
MIN_MAX_CRITICAL( float16, min_a16, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16
#endif
#endif
// ------------------------------------------------------------------------
// Need separate macros for .EQV. because of the need of complement (~)
// OP ignored for critical sections, ^=~ used instead
//   (.NEQV. is plain xor and reuses ATOMIC_CMPXCHG; .EQV. is "*lhs ^= ~rhs",
//    hence the "^~" operator passed to the CAS path below)
#define ATOMIC_CRIT_EQV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL(^=~,LCK_ID) /* send assignment and complement */ \
}
// ------------------------------------------------------------------------
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ===================================
#define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \
OP_CMPXCHG(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
#define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(^=~,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(^=~,LCK_ID) /* unaligned address - use critical */ \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
ATOMIC_CMPXCHG( fixed1, neqv, kmp_int8, 8, ^, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv
ATOMIC_CMPXCHG( fixed2, neqv, kmp_int16, 16, ^, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv
ATOMIC_CMPXCHG( fixed4, neqv, kmp_int32, 32, ^, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv
ATOMIC_CMPXCHG( fixed8, neqv, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv
ATOMIC_CMPX_EQV( fixed1, eqv, kmp_int8, 8, ^~, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv
ATOMIC_CMPX_EQV( fixed2, eqv, kmp_int16, 16, ^~, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv
ATOMIC_CMPX_EQV( fixed4, eqv, kmp_int32, 32, ^~, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv
ATOMIC_CMPX_EQV( fixed8, eqv, kmp_int64, 64, ^~, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// These operands are too wide (or lack HW atomics) for a CAS path, so the
// update is always performed under the per-LCK_ID atomic lock.
#define ATOMIC_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \
}
/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL( float10, add, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add
ATOMIC_CRITICAL( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub
ATOMIC_CRITICAL( float10, mul, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul
ATOMIC_CRITICAL( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL( float16, add, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add
ATOMIC_CRITICAL( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub
ATOMIC_CRITICAL( float16, mul, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul
ATOMIC_CRITICAL( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL( float16, add_a16, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16
ATOMIC_CRITICAL( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16
ATOMIC_CRITICAL( float16, mul_a16, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16
ATOMIC_CRITICAL( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16
#endif
#endif
// routines for complex types
#if USE_CMPXCHG_FIX
// workaround for C78287 (complex(kind=4) data type)
// cmplx4 (2 x 4-byte floats) fits in 64 bits, so the CAS workaround is used
// instead of a lock when the fix is enabled.
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, add, kmp_cmplx32, 64, +, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_add
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, sub, kmp_cmplx32, 64, -, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_sub
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, mul, kmp_cmplx32, 64, *, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_mul
ATOMIC_CMPXCHG_WORKAROUND( cmplx4, div, kmp_cmplx32, 64, /, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_div
// end of the workaround for C78287
#else
ATOMIC_CRITICAL( cmplx4, add, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add
ATOMIC_CRITICAL( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub
ATOMIC_CRITICAL( cmplx4, mul, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul
ATOMIC_CRITICAL( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div
#endif // USE_CMPXCHG_FIX
ATOMIC_CRITICAL( cmplx8, add, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add
ATOMIC_CRITICAL( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub
ATOMIC_CRITICAL( cmplx8, mul, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul
ATOMIC_CRITICAL( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div
ATOMIC_CRITICAL( cmplx10, add, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add
ATOMIC_CRITICAL( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub
ATOMIC_CRITICAL( cmplx10, mul, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul
ATOMIC_CRITICAL( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL( cmplx16, add, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add
ATOMIC_CRITICAL( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub
ATOMIC_CRITICAL( cmplx16, mul, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul
ATOMIC_CRITICAL( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL( cmplx16, add_a16, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16
ATOMIC_CRITICAL( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16
ATOMIC_CRITICAL( cmplx16, mul_a16, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16
ATOMIC_CRITICAL( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16
#endif
#endif
#if OMP_40_ENABLED
// OpenMP 4.0: x = expr binop x for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// Reverse form: rhs is the LEFT operand, i.e. *lhs = rhs OP *lhs.
#define OP_CRITICAL_REV(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
(*lhs) = (rhs) OP (*lhs); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_REV(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_REV( OP, 0 ); \
return; \
}
#else
#define OP_GOMP_CRITICAL_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
#define ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_rev( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_rev: T#%d\n", gtid ));
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
#define OP_CMPXCHG_REV(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs OP old_value; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_DO_PAUSE; \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs OP old_value; \
} \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \
OP_CMPXCHG_REV(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// Entries definition for integer operands
// TYPE_ID - operands type and size (fixed4, float4)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operand type
// BITS - size in bits, used to distinguish low level calls
// OP - operator (used in critical section)
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// Only the non-commutative operators (sub, div, shifts) need _rev variants.
// ------------------------------------------------------------------------
// TYPE_ID,OP_ID, TYPE, BITS, OP, LCK_ID, GOMP_FLAG
ATOMIC_CMPXCHG_REV( fixed1, div, kmp_int8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev
ATOMIC_CMPXCHG_REV( fixed1u, div, kmp_uint8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev
ATOMIC_CMPXCHG_REV( fixed1, shl, kmp_int8, 8, <<, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_rev
ATOMIC_CMPXCHG_REV( fixed1, shr, kmp_int8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_rev
ATOMIC_CMPXCHG_REV( fixed1u, shr, kmp_uint8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_rev
ATOMIC_CMPXCHG_REV( fixed1, sub, kmp_int8, 8, -, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev
ATOMIC_CMPXCHG_REV( fixed2, div, kmp_int16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev
ATOMIC_CMPXCHG_REV( fixed2u, div, kmp_uint16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev
ATOMIC_CMPXCHG_REV( fixed2, shl, kmp_int16, 16, <<, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_rev
ATOMIC_CMPXCHG_REV( fixed2, shr, kmp_int16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_rev
ATOMIC_CMPXCHG_REV( fixed2u, shr, kmp_uint16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_rev
ATOMIC_CMPXCHG_REV( fixed2, sub, kmp_int16, 16, -, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev
ATOMIC_CMPXCHG_REV( fixed4, div, kmp_int32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_rev
ATOMIC_CMPXCHG_REV( fixed4u, div, kmp_uint32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_rev
ATOMIC_CMPXCHG_REV( fixed4, shl, kmp_int32, 32, <<, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_rev
ATOMIC_CMPXCHG_REV( fixed4, shr, kmp_int32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_rev
ATOMIC_CMPXCHG_REV( fixed4u, shr, kmp_uint32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_rev
ATOMIC_CMPXCHG_REV( fixed4, sub, kmp_int32, 32, -, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_rev
ATOMIC_CMPXCHG_REV( fixed8, div, kmp_int64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev
ATOMIC_CMPXCHG_REV( fixed8u, div, kmp_uint64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev
ATOMIC_CMPXCHG_REV( fixed8, shl, kmp_int64, 64, <<, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_rev
ATOMIC_CMPXCHG_REV( fixed8, shr, kmp_int64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_rev
ATOMIC_CMPXCHG_REV( fixed8u, shr, kmp_uint64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_rev
ATOMIC_CMPXCHG_REV( fixed8, sub, kmp_int64, 64, -, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev
ATOMIC_CMPXCHG_REV( float4, div, kmp_real32, 32, /, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev
ATOMIC_CMPXCHG_REV( float4, sub, kmp_real32, 32, -, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev
ATOMIC_CMPXCHG_REV( float8, div, kmp_real64, 64, /, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev
ATOMIC_CMPXCHG_REV( float8, sub, kmp_real64, 64, -, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev
// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID, GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \
OP_CRITICAL_REV(OP,LCK_ID) \
}
/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL_REV( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_rev
ATOMIC_CRITICAL_REV( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_rev
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_REV( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_rev
ATOMIC_CRITICAL_REV( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_rev
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_REV( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_rev
ATOMIC_CRITICAL_REV( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_rev
#endif
#endif
// routines for complex types
ATOMIC_CRITICAL_REV( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_rev
ATOMIC_CRITICAL_REV( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_rev
ATOMIC_CRITICAL_REV( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_rev
ATOMIC_CRITICAL_REV( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_rev
ATOMIC_CRITICAL_REV( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_rev
ATOMIC_CRITICAL_REV( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_rev
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_REV( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_rev
ATOMIC_CRITICAL_REV( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_rev
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_REV( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_rev
ATOMIC_CRITICAL_REV( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_rev
#endif
#endif
#endif //KMP_ARCH_X86 || KMP_ARCH_X86_64
// End of OpenMP 4.0: x = expr binop x for non-commutative operations.
#endif //OMP_40_ENABLED
/* ------------------------------------------------------------------------ */
/* Routines for mixed types of LHS and RHS, when RHS is "larger" */
/* Note: in order to reduce the total number of types combinations */
/* it is supposed that compiler converts RHS to longest floating type,*/
/* that is _Quad, before call to any of these routines */
/* Conversion to _Quad will be done by the compiler during calculation, */
/* conversion back to TYPE - before the assignment, like: */
/* *lhs = (TYPE)( (_Quad)(*lhs) OP rhs ) */
/* Performance penalty expected because of SW emulation use */
/* ------------------------------------------------------------------------ */
// Emits the signature, opening brace, debug assert and trace line of a
// mixed-type atomic routine __kmpc_atomic_<TYPE_ID>_<OP_ID>_<RTYPE_ID>.
// The opening brace is deliberately left unclosed; the macro that invokes
// this one (e.g. ATOMIC_CRITICAL_FP below) supplies the closing brace.
#define ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid ));
// -------------------------------------------------------------------------
// Mixed-type routine that always serializes through a critical section
// (OP_GOMP_CRITICAL / OP_CRITICAL are defined earlier in this file); used
// below for float10 (long double) LHS, where no native CAS path is emitted.
#define ATOMIC_CRITICAL_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \
}
// -------------------------------------------------------------------------
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// -------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
// Mixed-type routine via a compare-and-swap loop (OP_CMPXCHG, defined
// earlier in this file). LCK_ID and MASK are accepted for signature parity
// with the other-architecture variant below but are unused in this body.
#define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG(TYPE,BITS,OP) \
}
// -------------------------------------------------------------------------
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
// MASK is a hex literal fragment (0x##MASK) testing the low address bits of
// lhs: aligned addresses take the CAS path, unaligned ones fall back to the
// critical section guarded by lock LCK_ID.
#define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// RHS=float8
// Integer and float4 LHS combined with a kmp_real64 RHS. Column layout:
// TYPE_ID, TYPE, OP_ID, BITS, OP, RTYPE_ID, RTYPE, LCK_ID, MASK, GOMP_FLAG.
ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_float8
ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_float8
ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_float8
ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_float8
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_float8
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_float8
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_float8
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_float8
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_float8
// RHS=float16 (deprecated, to be removed when we are sure the compiler does not use them)
#if KMP_HAVE_QUAD
// All LHS types combined with a _Quad RHS ("fp" suffix).
ATOMIC_CMPXCHG_MIX( fixed1, char, add, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_fp
ATOMIC_CMPXCHG_MIX( fixed1, char, sub, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_fp
ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_fp
ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_fp
ATOMIC_CMPXCHG_MIX( fixed1u, uchar, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_fp
ATOMIC_CMPXCHG_MIX( fixed2, short, add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_fp
ATOMIC_CMPXCHG_MIX( fixed2, short, sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_fp
ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_fp
ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_fp
ATOMIC_CMPXCHG_MIX( fixed2u, ushort, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_fp
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, add, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_fp
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, sub, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_fp
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_fp
ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_fp
ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_fp
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, add, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_fp
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_fp
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_fp
ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_fp
ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_fp
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_fp
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_fp
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_fp
ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_fp
ATOMIC_CMPXCHG_MIX( float8, kmp_real64, add, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_fp
ATOMIC_CMPXCHG_MIX( float8, kmp_real64, sub, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_fp
ATOMIC_CMPXCHG_MIX( float8, kmp_real64, mul, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_fp
ATOMIC_CMPXCHG_MIX( float8, kmp_real64, div, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_fp
// long double LHS has no CAS path here; use the critical-section variant.
ATOMIC_CRITICAL_FP( float10, long double, add, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_fp
ATOMIC_CRITICAL_FP( float10, long double, sub, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_fp
ATOMIC_CRITICAL_FP( float10, long double, mul, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_fp
ATOMIC_CRITICAL_FP( float10, long double, div, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_fp
#endif
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// ------------------------------------------------------------------------
// X86 or X86_64: no alignment problems ====================================
#if USE_CMPXCHG_FIX
// workaround for C78287 (complex(kind=4) data type)
// Same shape as ATOMIC_CMPXCHG_MIX, but the update goes through
// OP_CMPXCHG_WORKAROUND (defined earlier in this file) instead of
// OP_CMPXCHG. LCK_ID and MASK are unused in this body.
#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \
}
// end of the second part of the workaround for C78287
#else
#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
OP_CMPXCHG(TYPE,BITS,OP) \
}
#endif // USE_CMPXCHG_FIX
#else
// ------------------------------------------------------------------------
// Code for other architectures that don't handle unaligned accesses.
// Aligned addresses take the CAS path; unaligned ones use the critical
// section guarded by lock LCK_ID (same scheme as ATOMIC_CMPXCHG_MIX above).
#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \
OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \
if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \
OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \
} else { \
KMP_CHECK_GTID; \
OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \
} \
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
// cmplx4 LHS (8 bytes) updated with a cmplx8 RHS via 64-bit CAS.
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, add, 64, +, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_add_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, sub, 64, -, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_sub_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, mul, 64, *, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_mul_cmplx8
ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, div, 64, /, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_div_cmplx8
// READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
//////////////////////////////////////////////////////////////////////////////////////////////////////
// ------------------------------------------------------------------------
// Atomic READ routines
// ------------------------------------------------------------------------
// ------------------------------------------------------------------------
// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
// Emits the signature and opening brace of a read routine returning
// RET_TYPE; the brace is closed by the macro that invokes this one.
#define ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * loc ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store_ret" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
// TODO: check if it is still necessary
// Return old value regardless of the result of "compare & swap" operation
// Passing the same value as both "compare" and "exchange" makes the CAS a
// pure atomic read of *loc. Assigns to `new_value`, which must be declared
// by the invoking macro (see ATOMIC_CMPXCHG_READ below). The union bridges
// the floating type and the same-width integer the CAS primitive expects.
#define OP_CMPXCHG_READ(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
union f_i_union { \
TYPE f_val; \
kmp_int##BITS i_val; \
}; \
union f_i_union old_value; \
temp_val = *loc; \
old_value.f_val = temp_val; \
old_value.i_val = KMP_COMPARE_AND_STORE_RET##BITS( (kmp_int##BITS *) loc, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val ); \
new_value = old_value.f_val; \
return new_value; \
}
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// NOTE(review): the OP parameter is not referenced in this body; the read
// is a plain assignment from *loc under the lock.
#define OP_CRITICAL_READ(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
new_value = (*loc); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// -------------------------------------------------------------------------
// GOMP compatibility path: when libgomp-style serialization is active
// (__kmp_atomic_mode == 2), read under lock 0 and return immediately.
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_READ(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_READ( OP, 0 ); \
return new_value; \
}
#else
#define OP_GOMP_CRITICAL_READ(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// -------------------------------------------------------------------------
// Integer read implemented as an atomic fetch-add of zero (OP 0 expands to
// + 0 at the invocation sites below), which returns the current value.
#define ATOMIC_FIXED_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \
new_value = KMP_TEST_THEN_ADD##BITS( loc, OP 0 ); \
return new_value; \
}
// -------------------------------------------------------------------------
// Floating-point read implemented via the same-value CAS trick above.
#define ATOMIC_CMPXCHG_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \
OP_CMPXCHG_READ(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_READ(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL_READ(OP,LCK_ID) /* send assignment */ \
return new_value; \
}
// ------------------------------------------------------------------------
// Fix for cmplx4 read (CQ220361) on Windows* OS. Regular routine with return value doesn't work.
// Let's return the read value through the additional parameter.
#if ( KMP_OS_WINDOWS )
// NOTE(review): the OP parameter is not referenced in this body; the read
// is delivered through the `out` pointer declared by ATOMIC_BEGIN_READ_WRK.
#define OP_CRITICAL_READ_WRK(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
(*out) = (*loc); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_READ_WRK( OP, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Same as ATOMIC_BEGIN_READ but the result goes to the extra leading `out`
// parameter instead of a return value (void return type).
#define ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID( TYPE * out, ident_t *id_ref, int gtid, TYPE * loc ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_READ_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \
OP_GOMP_CRITICAL_READ_WRK(OP##=,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL_READ_WRK(OP,LCK_ID) /* send assignment */ \
}
#endif // KMP_OS_WINDOWS
// ------------------------------------------------------------------------
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
// Atomic read entry points. The OP argument (+) is only used to synthesize
// a no-op fetch-add (ATOMIC_FIXED_READ) or the GOMP critical operator.
ATOMIC_FIXED_READ( fixed4, rd, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_rd
ATOMIC_FIXED_READ( fixed8, rd, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_rd
ATOMIC_CMPXCHG_READ( float4, rd, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_rd
ATOMIC_CMPXCHG_READ( float8, rd, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_rd
// !!! TODO: Remove lock operations for "char" since it can't be non-atomic
ATOMIC_CMPXCHG_READ( fixed1, rd, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_rd
ATOMIC_CMPXCHG_READ( fixed2, rd, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_rd
ATOMIC_CRITICAL_READ( float10, rd, long double, +, 10r, 1 ) // __kmpc_atomic_float10_rd
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_READ( float16, rd, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_rd
#endif // KMP_HAVE_QUAD
// Fix for CQ220361 on Windows* OS
#if ( KMP_OS_WINDOWS )
ATOMIC_CRITICAL_READ_WRK( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd
#else
ATOMIC_CRITICAL_READ( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd
#endif
ATOMIC_CRITICAL_READ( cmplx8, rd, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_rd
ATOMIC_CRITICAL_READ( cmplx10, rd, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_rd
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_READ( cmplx16, rd, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_rd
#if ( KMP_ARCH_X86 )
// 16-byte-aligned variants, IA-32 only.
ATOMIC_CRITICAL_READ( float16, a16_rd, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_a16_rd
ATOMIC_CRITICAL_READ( cmplx16, a16_rd, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_rd
#endif
#endif
// ------------------------------------------------------------------------
// Atomic WRITE routines
// ------------------------------------------------------------------------
// Integer write via an unconditional atomic exchange. ATOMIC_BEGIN and
// OP_GOMP_CRITICAL are defined earlier in this file; OP (=) only feeds the
// GOMP critical path.
#define ATOMIC_XCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
KMP_XCHG_FIXED##BITS( lhs, rhs ); \
}
// ------------------------------------------------------------------------
// Floating-point write via atomic exchange (KMP_XCHG_REAL##BITS).
#define ATOMIC_XCHG_FLOAT_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
KMP_XCHG_REAL##BITS( lhs, rhs ); \
}
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
// Write implemented as a CAS loop that keeps re-reading *lhs until the
// store of rhs succeeds. OP is not referenced in this body.
#define OP_CMPXCHG_WR(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_CPU_PAUSE(); \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs; \
} \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP,GOMP_FLAG) \
OP_CMPXCHG_WR(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
#define ATOMIC_CRITICAL_WR(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \
OP_GOMP_CRITICAL(OP,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL(OP,LCK_ID) /* send assignment */ \
}
// -------------------------------------------------------------------------
// Atomic write entry points. On IA-32 (KMP_ARCH_X86) the 8-byte fixed8 and
// float8 writes use the CAS-loop variant instead of a plain exchange.
ATOMIC_XCHG_WR( fixed1, wr, kmp_int8, 8, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_wr
ATOMIC_XCHG_WR( fixed2, wr, kmp_int16, 16, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_wr
ATOMIC_XCHG_WR( fixed4, wr, kmp_int32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CMPXCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr
#else
ATOMIC_XCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr
#endif
ATOMIC_XCHG_FLOAT_WR( float4, wr, kmp_real32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_float4_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CMPXCHG_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr
#else
ATOMIC_XCHG_FLOAT_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr
#endif
// Wide and complex types serialize through per-type locks.
ATOMIC_CRITICAL_WR( float10, wr, long double, =, 10r, 1 ) // __kmpc_atomic_float10_wr
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_WR( float16, wr, QUAD_LEGACY, =, 16r, 1 ) // __kmpc_atomic_float16_wr
#endif
ATOMIC_CRITICAL_WR( cmplx4, wr, kmp_cmplx32, =, 8c, 1 ) // __kmpc_atomic_cmplx4_wr
ATOMIC_CRITICAL_WR( cmplx8, wr, kmp_cmplx64, =, 16c, 1 ) // __kmpc_atomic_cmplx8_wr
ATOMIC_CRITICAL_WR( cmplx10, wr, kmp_cmplx80, =, 20c, 1 ) // __kmpc_atomic_cmplx10_wr
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_WR( cmplx16, wr, CPLX128_LEG, =, 32c, 1 ) // __kmpc_atomic_cmplx16_wr
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_WR( float16, a16_wr, Quad_a16_t, =, 16r, 1 ) // __kmpc_atomic_float16_a16_wr
ATOMIC_CRITICAL_WR( cmplx16, a16_wr, kmp_cmplx128_a16_t, =, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_wr
#endif
#endif
// ------------------------------------------------------------------------
// Atomic CAPTURE routines
// ------------------------------------------------------------------------
// Beginning of a definition (provides name, parameters, debug trace)
// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operands' type
// Capture routines take an extra `flag`: nonzero means return the value
// AFTER the update (v = x op= e), zero means return the value BEFORE it
// (v = x; x op= e).
#define ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// NOTE(review): this macro ends with a `return`, so the invoking function
// body ends here; `new_value` must be declared by the invoking macro.
#define OP_CRITICAL_CPT(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
(*lhs) OP rhs; \
new_value = (*lhs); \
} else { \
new_value = (*lhs); \
(*lhs) OP rhs; \
} \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return new_value;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_CPT(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT( OP##=, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
// CAS loop computing new_value = old_value OP rhs; on success returns the
// post-update value when flag is set, the pre-update value otherwise.
#define OP_CMPXCHG_CPT(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = old_value OP rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_CPU_PAUSE(); \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = old_value OP rhs; \
} \
if( flag ) { \
return new_value; \
} else \
return old_value; \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \
OP_CMPXCHG_CPT(TYPE,BITS,OP) \
}
// -------------------------------------------------------------------------
// Integer add/sub capture via atomic fetch-add; the post-update value is
// reconstructed locally as old_value OP rhs when flag is set.
#define ATOMIC_FIXED_ADD_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE old_value, new_value; \
OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \
/* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \
old_value = KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \
if( flag ) { \
return old_value OP rhs; \
} else \
return old_value; \
}
// -------------------------------------------------------------------------
// Capture entry points: fetch-add based add/sub for 4/8-byte integers,
// CAS-loop based for everything else.
ATOMIC_FIXED_ADD_CPT( fixed4, add_cpt, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_add_cpt
ATOMIC_FIXED_ADD_CPT( fixed4, sub_cpt, kmp_int32, 32, -, 0 ) // __kmpc_atomic_fixed4_sub_cpt
ATOMIC_FIXED_ADD_CPT( fixed8, add_cpt, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_cpt
ATOMIC_FIXED_ADD_CPT( fixed8, sub_cpt, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt
ATOMIC_CMPXCHG_CPT( float4, add_cpt, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_cpt
ATOMIC_CMPXCHG_CPT( float4, sub_cpt, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt
ATOMIC_CMPXCHG_CPT( float8, add_cpt, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_cpt
ATOMIC_CMPXCHG_CPT( float8, sub_cpt, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt
// ------------------------------------------------------------------------
// Entries definition for integer operands
// TYPE_ID - operands type and size (fixed4, float4)
// OP_ID - operation identifier (add, sub, mul, ...)
// TYPE - operand type
// BITS - size in bits, used to distinguish low level calls
// OP - operator (used in critical section)
// TYPE_ID,OP_ID, TYPE, BITS,OP,GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for ATOMIC integer operands, other operators
// ------------------------------------------------------------------------
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
ATOMIC_CMPXCHG_CPT( fixed1, add_cpt, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt
ATOMIC_CMPXCHG_CPT( fixed1, andb_cpt, kmp_int8, 8, &, 0 ) // __kmpc_atomic_fixed1_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed1, div_cpt, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt
ATOMIC_CMPXCHG_CPT( fixed1u, div_cpt, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed1, mul_cpt, kmp_int8, 8, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed1, orb_cpt, kmp_int8, 8, |, 0 ) // __kmpc_atomic_fixed1_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed1, shl_cpt, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed1, shr_cpt, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed1u, shr_cpt, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed1, sub_cpt, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt
ATOMIC_CMPXCHG_CPT( fixed1, xor_cpt, kmp_int8, 8, ^, 0 ) // __kmpc_atomic_fixed1_xor_cpt
ATOMIC_CMPXCHG_CPT( fixed2, add_cpt, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt
ATOMIC_CMPXCHG_CPT( fixed2, andb_cpt, kmp_int16, 16, &, 0 ) // __kmpc_atomic_fixed2_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed2, div_cpt, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt
ATOMIC_CMPXCHG_CPT( fixed2u, div_cpt, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed2, mul_cpt, kmp_int16, 16, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed2, orb_cpt, kmp_int16, 16, |, 0 ) // __kmpc_atomic_fixed2_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed2, shl_cpt, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed2, shr_cpt, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed2u, shr_cpt, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed2, sub_cpt, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt
ATOMIC_CMPXCHG_CPT( fixed2, xor_cpt, kmp_int16, 16, ^, 0 ) // __kmpc_atomic_fixed2_xor_cpt
ATOMIC_CMPXCHG_CPT( fixed4, andb_cpt, kmp_int32, 32, &, 0 ) // __kmpc_atomic_fixed4_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed4, div_cpt, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt
ATOMIC_CMPXCHG_CPT( fixed4u, div_cpt, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed4, mul_cpt, kmp_int32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed4, orb_cpt, kmp_int32, 32, |, 0 ) // __kmpc_atomic_fixed4_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed4, shl_cpt, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed4, shr_cpt, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed4u, shr_cpt, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed4, xor_cpt, kmp_int32, 32, ^, 0 ) // __kmpc_atomic_fixed4_xor_cpt
ATOMIC_CMPXCHG_CPT( fixed8, andb_cpt, kmp_int64, 64, &, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb_cpt
ATOMIC_CMPXCHG_CPT( fixed8, div_cpt, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt
ATOMIC_CMPXCHG_CPT( fixed8u, div_cpt, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt
ATOMIC_CMPXCHG_CPT( fixed8, mul_cpt, kmp_int64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt
ATOMIC_CMPXCHG_CPT( fixed8, orb_cpt, kmp_int64, 64, |, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb_cpt
ATOMIC_CMPXCHG_CPT( fixed8, shl_cpt, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt
ATOMIC_CMPXCHG_CPT( fixed8, shr_cpt, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed8u, shr_cpt, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt
ATOMIC_CMPXCHG_CPT( fixed8, xor_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor_cpt
ATOMIC_CMPXCHG_CPT( float4, div_cpt, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt
ATOMIC_CMPXCHG_CPT( float4, mul_cpt, kmp_real32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_cpt
ATOMIC_CMPXCHG_CPT( float8, div_cpt, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt
ATOMIC_CMPXCHG_CPT( float8, mul_cpt, kmp_real64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for C/C++ Reduction operators && and ||
// ------------------------------------------------------------------------
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// `new_value` must be declared by the invoking macro. At the call site
// below OP is passed as "= *lhs OP", so the flag branch expands to
// new_value = *lhs && rhs (or ||).
// NOTE(review): *lhs itself is not stored to inside this critical section
// -- TODO confirm this matches the intent of the GOMP compatibility path.
#define OP_CRITICAL_L_CPT(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
new_value OP rhs; \
} else \
new_value = (*lhs); \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_L_CPT( OP, 0 ); \
return new_value; \
}
#else
#define OP_GOMP_CRITICAL_L_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Need separate macros for &&, || because there is no combined assignment
#define ATOMIC_CMPX_L_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_L_CPT( = *lhs OP, GOMP_FLAG ) \
OP_CMPXCHG_CPT(TYPE,BITS,OP) \
}
ATOMIC_CMPX_L_CPT( fixed1, andl_cpt, char, 8, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl_cpt
ATOMIC_CMPX_L_CPT( fixed1, orl_cpt, char, 8, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl_cpt
ATOMIC_CMPX_L_CPT( fixed2, andl_cpt, short, 16, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl_cpt
ATOMIC_CMPX_L_CPT( fixed2, orl_cpt, short, 16, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl_cpt
ATOMIC_CMPX_L_CPT( fixed4, andl_cpt, kmp_int32, 32, &&, 0 ) // __kmpc_atomic_fixed4_andl_cpt
ATOMIC_CMPX_L_CPT( fixed4, orl_cpt, kmp_int32, 32, ||, 0 ) // __kmpc_atomic_fixed4_orl_cpt
ATOMIC_CMPX_L_CPT( fixed8, andl_cpt, kmp_int64, 64, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl_cpt
ATOMIC_CMPX_L_CPT( fixed8, orl_cpt, kmp_int64, 64, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl_cpt
// -------------------------------------------------------------------------
// Routines for Fortran operators that matched no one in C:
// MAX, MIN, .EQV., .NEQV.
// Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl}_cpt
// Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor}_cpt
// -------------------------------------------------------------------------
// -------------------------------------------------------------------------
// MIN and MAX need separate macros
// OP - operator to check if we need any actions?
// Locked min/max capture: re-checks *lhs OP rhs under the lock, stores rhs
// if still needed, and returns rhs or the old value depending on `flag`.
// `old_value`/`new_value` must be declared by the invoking macro.
// NOTE(review): the final line of this #define ends with a line-continuation
// backslash, which splices the following separator comment into the macro
// definition; harmless (comments are stripped after splicing) but fragile --
// do not insert non-comment lines directly after it.
#define MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if ( *lhs OP rhs ) { /* still need actions? */ \
old_value = *lhs; \
*lhs = rhs; \
if ( flag ) \
new_value = rhs; \
else \
new_value = old_value; \
} \
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return new_value; \
// -------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG) \
if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \
KMP_CHECK_GTID; \
MIN_MAX_CRITSECT_CPT( OP, 0 ); \
}
#else
#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// -------------------------------------------------------------------------
// CAS-based min/max capture: loops while the stored value still compares
// OP against rhs and the swap of rhs has not yet succeeded; returns rhs
// (flag set) or the displaced old value. `old_value` is declared by the
// invoking macro (its local declaration here is commented out).
#define MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
/*TYPE old_value; */ \
temp_val = *lhs; \
old_value = temp_val; \
while ( old_value OP rhs && /* still need actions? */ \
! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \
{ \
KMP_CPU_PAUSE(); \
temp_val = *lhs; \
old_value = temp_val; \
} \
if( flag ) \
return rhs; \
else \
return old_value; \
}
// -------------------------------------------------------------------------
// 1-byte, 2-byte operands - use critical section
// Critical-section flavor for types with no native CAS (e.g. _Quad):
// checks the condition once unlocked, then re-checks under the lock
// inside MIN_MAX_CRITSECT_CPT.  Falls through to 'return *lhs' when no
// update was needed.
#define MIN_MAX_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value, old_value; \
if ( *lhs OP rhs ) { /* need actions? */ \
GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \
MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \
} \
return *lhs; \
}
// Compare-and-swap flavor for types that fit a native CAS width.
#define MIN_MAX_COMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value, old_value; \
if ( *lhs OP rhs ) { \
GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \
MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \
} \
return *lhs; \
}
// MAX uses '<' (update when current value is smaller), MIN uses '>'.
MIN_MAX_COMPXCHG_CPT( fixed1, max_cpt, char, 8, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed1, min_cpt, char, 8, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed2, max_cpt, short, 16, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed2, min_cpt, short, 16, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed4, max_cpt, kmp_int32, 32, <, 0 ) // __kmpc_atomic_fixed4_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed4, min_cpt, kmp_int32, 32, >, 0 ) // __kmpc_atomic_fixed4_min_cpt
MIN_MAX_COMPXCHG_CPT( fixed8, max_cpt, kmp_int64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max_cpt
MIN_MAX_COMPXCHG_CPT( fixed8, min_cpt, kmp_int64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min_cpt
MIN_MAX_COMPXCHG_CPT( float4, max_cpt, kmp_real32, 32, <, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max_cpt
MIN_MAX_COMPXCHG_CPT( float4, min_cpt, kmp_real32, 32, >, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min_cpt
MIN_MAX_COMPXCHG_CPT( float8, max_cpt, kmp_real64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max_cpt
MIN_MAX_COMPXCHG_CPT( float8, min_cpt, kmp_real64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min_cpt
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL_CPT( float16, max_cpt, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max_cpt
MIN_MAX_CRITICAL_CPT( float16, min_cpt, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min_cpt
#if ( KMP_ARCH_X86 )
MIN_MAX_CRITICAL_CPT( float16, max_a16_cpt, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16_cpt
MIN_MAX_CRITICAL_CPT( float16, min_a16_cpt, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16_cpt
#endif
#endif
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compat fallback for .EQV./.NEQV. — route through the generic
// critical-section capture with the single global lock (id 0).
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT( OP, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Capture routine for Fortran .EQV. — implemented as x = x ^ ~rhs
// (XOR with the complement), hence the '^=~' passed to the GOMP path
// and the '^~' operator passed to the CAS loop below.
#define ATOMIC_CMPX_EQV_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_EQV_CPT(^=~,GOMP_FLAG) /* send assignment */ \
OP_CMPXCHG_CPT(TYPE,BITS,OP) \
}
// ------------------------------------------------------------------------
// .NEQV. is plain XOR, so the standard CPT cmpxchg macro is reused.
ATOMIC_CMPXCHG_CPT( fixed1, neqv_cpt, kmp_int8, 8, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed2, neqv_cpt, kmp_int16, 16, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed4, neqv_cpt, kmp_int32, 32, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv_cpt
ATOMIC_CMPXCHG_CPT( fixed8, neqv_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed1, eqv_cpt, kmp_int8, 8, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed2, eqv_cpt, kmp_int16, 16, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed4, eqv_cpt, kmp_int32, 32, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv_cpt
ATOMIC_CMPX_EQV_CPT( fixed8, eqv_cpt, kmp_int64, 64, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv_cpt
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// The raw operator is turned into a compound assignment (OP##=) before it
// is handed to the critical-section body.
#define ATOMIC_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \
OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \
}
// ------------------------------------------------------------------------
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
// Performs '(*lhs) OP rhs' under the lock; 'flag' decides whether *out
// receives the value after (flag != 0) or before the update.
#define OP_CRITICAL_CPT_WRK(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
(*lhs) OP rhs; \
(*out) = (*lhs); \
} else { \
(*out) = (*lhs); \
(*lhs) OP rhs; \
} \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compat path for the out-parameter workaround routines.
#define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT_WRK( OP##=, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Function header for the void-returning workaround routines: the captured
// value comes back through 'out', and 'flag' selects capture-after/before.
#define ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out, int flag ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_CPT_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
OP_GOMP_CRITICAL_CPT_WRK(OP,GOMP_FLAG) \
OP_CRITICAL_CPT_WRK(OP##=,LCK_ID) \
}
// The end of workaround for cmplx4
/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL_CPT( float10, add_cpt, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add_cpt
ATOMIC_CRITICAL_CPT( float10, sub_cpt, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt
ATOMIC_CRITICAL_CPT( float10, mul_cpt, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul_cpt
ATOMIC_CRITICAL_CPT( float10, div_cpt, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_CPT( float16, add_cpt, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add_cpt
ATOMIC_CRITICAL_CPT( float16, sub_cpt, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt
ATOMIC_CRITICAL_CPT( float16, mul_cpt, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul_cpt
ATOMIC_CRITICAL_CPT( float16, div_cpt, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt
#if ( KMP_ARCH_X86 )
// 16-byte-aligned _Quad variants (x86 only)
ATOMIC_CRITICAL_CPT( float16, add_a16_cpt, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16_cpt
ATOMIC_CRITICAL_CPT( float16, sub_a16_cpt, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt
ATOMIC_CRITICAL_CPT( float16, mul_a16_cpt, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16_cpt
ATOMIC_CRITICAL_CPT( float16, div_a16_cpt, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt
#endif
#endif
// routines for complex types
// cmplx4 routines to return void
ATOMIC_CRITICAL_CPT_WRK( cmplx4, add_cpt, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, sub_cpt, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, mul_cpt, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul_cpt
ATOMIC_CRITICAL_CPT_WRK( cmplx4, div_cpt, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt
ATOMIC_CRITICAL_CPT( cmplx8, add_cpt, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add_cpt
ATOMIC_CRITICAL_CPT( cmplx8, sub_cpt, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx8, mul_cpt, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx8, div_cpt, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt
ATOMIC_CRITICAL_CPT( cmplx10, add_cpt, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add_cpt
ATOMIC_CRITICAL_CPT( cmplx10, sub_cpt, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx10, mul_cpt, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx10, div_cpt, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_CPT( cmplx16, add_cpt, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_cpt
ATOMIC_CRITICAL_CPT( cmplx16, sub_cpt, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt
ATOMIC_CRITICAL_CPT( cmplx16, mul_cpt, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_cpt
ATOMIC_CRITICAL_CPT( cmplx16, div_cpt, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_CPT( cmplx16, add_a16_cpt, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16_cpt
ATOMIC_CRITICAL_CPT( cmplx16, sub_a16_cpt, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt
ATOMIC_CRITICAL_CPT( cmplx16, mul_a16_cpt, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16_cpt
ATOMIC_CRITICAL_CPT( cmplx16, div_a16_cpt, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt
#endif
#endif
#if OMP_40_ENABLED
// OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr binop x; v = x; } for non-commutative operations.
// Supported only on IA-32 architecture and Intel(R) 64
// -------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
// OP - operator (it's supposed to contain an assignment)
// LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
// "Reversed" form: the update is (*lhs) = rhs OP (*lhs), i.e. rhs is the
// left operand.  'flag' selects capture-after vs capture-before semantics.
#define OP_CRITICAL_CPT_REV(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
/*temp_val = (*lhs);*/\
(*lhs) = (rhs) OP (*lhs); \
new_value = (*lhs); \
} else { \
new_value = (*lhs);\
(*lhs) = (rhs) OP (*lhs); \
} \
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return new_value;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compat fallback for reversed capture: single global lock (id 0).
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT_REV( OP, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Operation on *lhs, rhs using "compare_and_store" routine
// TYPE - operands' type
// BITS - size in bits, used to distinguish low level calls
// OP - operator
// Note: temp_val introduced in order to force the compiler to read
// *lhs only once (w/o it the compiler reads *lhs twice)
// Reversed-operand CAS loop: retries 'new_value = rhs OP old_value' until
// the store succeeds, then returns the captured value chosen by 'flag'
// (post-update when non-zero, pre-update otherwise).
#define OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs OP old_value; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_CPU_PAUSE(); \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs OP old_value; \
} \
if( flag ) { \
return new_value; \
} else \
return old_value; \
}
// -------------------------------------------------------------------------
// Reversed-operand capture routine generated via compare-and-swap.
// TYPE_ID, OP_ID, TYPE, BITS, OP, GOMP_FLAG - detailed above.
// Note: the CAS body (OP_CMPXCHG_CPT_REV) declares its own temp_val in an
// inner scope, so no temporary is needed here; the previous outer
// 'TYPE KMP_ATOMIC_VOLATILE temp_val;' was dead and shadowed.
#define ATOMIC_CMPXCHG_CPT_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \
OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \
}
// Reversed-capture routines for the non-commutative operators
// (-, /, <<, >>) on fixed-width integer and float types.
ATOMIC_CMPXCHG_CPT_REV( fixed1, div_cpt_rev, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, div_cpt_rev, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, shl_cpt_rev, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, shr_cpt_rev, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1u, shr_cpt_rev, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed1, sub_cpt_rev, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, div_cpt_rev, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, div_cpt_rev, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, shl_cpt_rev, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, shr_cpt_rev, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2u, shr_cpt_rev, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed2, sub_cpt_rev, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, div_cpt_rev, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4u, div_cpt_rev, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, shl_cpt_rev, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, shr_cpt_rev, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4u, shr_cpt_rev, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed4, sub_cpt_rev, kmp_int32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, div_cpt_rev, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8u, div_cpt_rev, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, shl_cpt_rev, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, shr_cpt_rev, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8u, shr_cpt_rev, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( fixed8, sub_cpt_rev, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float4, div_cpt_rev, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float4, sub_cpt_rev, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float8, div_cpt_rev, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev
ATOMIC_CMPXCHG_CPT_REV( float8, sub_cpt_rev, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev
// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
// TYPE_ID, OP_ID, TYPE - detailed above
// OP - operator
// LCK_ID - lock identifier, used to possibly distinguish lock variable
// Reversed-operand capture for extended types — serialized with the
// per-type critical section (OP_CRITICAL_CPT_REV); GOMP-compat mode is
// routed through the single global lock first.  The previously declared
// 'temp_val' was unused (its only reference is commented out inside
// OP_CRITICAL_CPT_REV) and has been removed, along with a stale
// commented-out debug printf.
#define ATOMIC_CRITICAL_CPT_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \
TYPE new_value; \
OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \
OP_CRITICAL_CPT_REV(OP,LCK_ID) \
}
/* ------------------------------------------------------------------------- */
// routines for long double type
ATOMIC_CRITICAL_CPT_REV( float10, sub_cpt_rev, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float10, div_cpt_rev, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL_CPT_REV( float16, sub_cpt_rev, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float16, div_cpt_rev, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt_rev
#if ( KMP_ARCH_X86 )
// 16-byte-aligned _Quad variants (x86 only)
ATOMIC_CRITICAL_CPT_REV( float16, sub_a16_cpt_rev, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt_rev
ATOMIC_CRITICAL_CPT_REV( float16, div_a16_cpt_rev, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt_rev
#endif
#endif
// routines for complex types
// ------------------------------------------------------------------------
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
// Reversed update with the captured value returned through *out
// (void-returning cmplx4 workaround); 'flag' selects capture-after/before.
#define OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
if( flag ) { \
(*lhs) = (rhs) OP (*lhs); \
(*out) = (*lhs); \
} else { \
(*out) = (*lhs); \
(*lhs) = (rhs) OP (*lhs); \
} \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compat fallback: same body under the single global lock (id 0).
#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
OP_CRITICAL_CPT_REV_WRK( OP, 0 ); \
}
#else
#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
// Generator for the void-returning reversed-capture routines (cmplx4).
#define ATOMIC_CRITICAL_CPT_REV_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \
OP_GOMP_CRITICAL_CPT_REV_WRK(OP,GOMP_FLAG) \
OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \
}
// The end of workaround for cmplx4
// !!! TODO: check if we need to return void for cmplx4 routines
// cmplx4 routines to return void
ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, sub_cpt_rev, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, div_cpt_rev, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt_rev
// remaining complex flavours return the captured value directly
ATOMIC_CRITICAL_CPT_REV( cmplx8, sub_cpt_rev, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx8, div_cpt_rev, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx10, sub_cpt_rev, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx10, div_cpt_rev, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt_rev
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_cpt_rev, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx16, div_cpt_rev, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt_rev
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_a16_cpt_rev, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt_rev
ATOMIC_CRITICAL_CPT_REV( cmplx16, div_a16_cpt_rev, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt_rev
#endif
#endif
// OpenMP 4.0 Capture-write (swap): {v = x; x = expr;}
// Function header for swap routines: returns the old value of *lhs after
// storing rhs into it.
#define ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid ));
// Swap under the per-type atomic lock; uses the caller-declared old_value.
#define CRITICAL_SWP(LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
old_value = (*lhs); \
(*lhs) = rhs; \
\
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return old_value;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
// GOMP-compat fallback: swap under the single global lock (id 0).
#define GOMP_CRITICAL_SWP(FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
CRITICAL_SWP( 0 ); \
}
#else
#define GOMP_CRITICAL_SWP(FLAG)
#endif /* KMP_GOMP_COMPAT */
// Swap via native fixed-width atomic exchange.
#define ATOMIC_XCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE old_value; \
GOMP_CRITICAL_SWP(GOMP_FLAG) \
old_value = KMP_XCHG_FIXED##BITS( lhs, rhs ); \
return old_value; \
}
// ------------------------------------------------------------------------
// Swap via native floating-point atomic exchange.
#define ATOMIC_XCHG_FLOAT_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE old_value; \
GOMP_CRITICAL_SWP(GOMP_FLAG) \
old_value = KMP_XCHG_REAL##BITS( lhs, rhs ); \
return old_value; \
}
// ------------------------------------------------------------------------
// Swap via compare-and-swap loop (used where no native exchange of the
// required width is available, e.g. 8-byte operands on IA-32).
// temp_val forces a single read of *lhs per iteration.
#define CMPXCHG_SWP(TYPE,BITS) \
{ \
TYPE KMP_ATOMIC_VOLATILE temp_val; \
TYPE old_value, new_value; \
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs; \
while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \
*VOLATILE_CAST(kmp_int##BITS *) &old_value, \
*VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \
{ \
KMP_CPU_PAUSE(); \
\
temp_val = *lhs; \
old_value = temp_val; \
new_value = rhs; \
} \
return old_value; \
}
// -------------------------------------------------------------------------
#define ATOMIC_CMPXCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE old_value; \
GOMP_CRITICAL_SWP(GOMP_FLAG) \
CMPXCHG_SWP(TYPE,BITS) \
}
// Swap routines: native exchange where available; on IA-32 the 8-byte
// flavours fall back to the CAS loop.
ATOMIC_XCHG_SWP( fixed1, kmp_int8, 8, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_swp
ATOMIC_XCHG_SWP( fixed2, kmp_int16, 16, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_swp
ATOMIC_XCHG_SWP( fixed4, kmp_int32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_swp
ATOMIC_XCHG_FLOAT_SWP( float4, kmp_real32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_float4_swp
#if ( KMP_ARCH_X86 )
ATOMIC_CMPXCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp
ATOMIC_CMPXCHG_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp
#else
ATOMIC_XCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp
ATOMIC_XCHG_FLOAT_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp
#endif
// ------------------------------------------------------------------------
// Routines for Extended types: long double, _Quad, complex flavours (use critical section)
#define ATOMIC_CRITICAL_SWP(TYPE_ID,TYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \
TYPE old_value; \
GOMP_CRITICAL_SWP(GOMP_FLAG) \
CRITICAL_SWP(LCK_ID) \
}
// ------------------------------------------------------------------------
// !!! TODO: check if we need to return void for cmplx4 routines
// Workaround for cmplx4. Regular routines with return value don't work
// on Win_32e. Let's return captured values through the additional parameter.
// Void-returning swap header: the old value comes back through 'out'.
#define ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \
void __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out ) \
{ \
KMP_DEBUG_ASSERT( __kmp_global.init_serial ); \
KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid ));
// Swap under the per-type lock, old value delivered through *out.
#define CRITICAL_SWP_WRK(LCK_ID) \
__kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
\
tmp = (*lhs); \
(*lhs) = (rhs); \
(*out) = tmp; \
__kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
return;
// ------------------------------------------------------------------------
#ifdef KMP_GOMP_COMPAT
#define GOMP_CRITICAL_SWP_WRK(FLAG) \
if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \
KMP_CHECK_GTID; \
CRITICAL_SWP_WRK( 0 ); \
}
#else
#define GOMP_CRITICAL_SWP_WRK(FLAG)
#endif /* KMP_GOMP_COMPAT */
// ------------------------------------------------------------------------
#define ATOMIC_CRITICAL_SWP_WRK(TYPE_ID, TYPE,LCK_ID,GOMP_FLAG) \
ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \
TYPE tmp; \
GOMP_CRITICAL_SWP_WRK(GOMP_FLAG) \
CRITICAL_SWP_WRK(LCK_ID) \
}
// The end of workaround for cmplx4
// Critical-section swap routines for extended and complex types.
ATOMIC_CRITICAL_SWP( float10, long double, 10r, 1 ) // __kmpc_atomic_float10_swp
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_SWP( float16, QUAD_LEGACY, 16r, 1 ) // __kmpc_atomic_float16_swp
#endif
// cmplx4 routine to return void
ATOMIC_CRITICAL_SWP_WRK( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp
//ATOMIC_CRITICAL_SWP( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp
ATOMIC_CRITICAL_SWP( cmplx8, kmp_cmplx64, 16c, 1 ) // __kmpc_atomic_cmplx8_swp
ATOMIC_CRITICAL_SWP( cmplx10, kmp_cmplx80, 20c, 1 ) // __kmpc_atomic_cmplx10_swp
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL_SWP( cmplx16, CPLX128_LEG, 32c, 1 ) // __kmpc_atomic_cmplx16_swp
#if ( KMP_ARCH_X86 )
ATOMIC_CRITICAL_SWP( float16_a16, Quad_a16_t, 16r, 1 ) // __kmpc_atomic_float16_a16_swp
ATOMIC_CRITICAL_SWP( cmplx16_a16, kmp_cmplx128_a16_t, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_swp
#endif
#endif
// End of OpenMP 4.0 Capture
#endif //OMP_40_ENABLED
#endif //KMP_ARCH_X86 || KMP_ARCH_X86_64
#undef OP_CRITICAL
/* ------------------------------------------------------------------------ */
/* Generic atomic routines */
/* ------------------------------------------------------------------------ */
// Generic atomic update for 1-byte operands.  The user routine 'f' computes
// f(result, old, rhs).  Lock-free path: snapshot *lhs, compute, and CAS8 it
// in, retrying on contention.  On IA-32 under GOMP compatibility the
// lock-based path is forced; it takes either the single GOMP-compat lock
// (atomic mode 2) or the 1-byte-integer lock and applies f in place.
void
__kmpc_atomic_1( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
FALSE /* must use lock */
#else
TRUE
#endif
)
{
kmp_int8 old_value, new_value;
old_value = *(kmp_int8 *) lhs;
(*f)( &new_value, &old_value, rhs );
/* TODO: Should this be acquire or release? */
while ( ! KMP_COMPARE_AND_STORE_ACQ8 ( (kmp_int8 *) lhs,
*(kmp_int8 *) &old_value, *(kmp_int8 *) &new_value ) )
{
KMP_CPU_PAUSE();
old_value = *(kmp_int8 *) lhs;
(*f)( &new_value, &old_value, rhs );
}
return;
}
else {
//
// All 1-byte data is of integer data type.
//
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_1i, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_1i, gtid );
}
}
// Generic atomic update for 2-byte operands: applies the user routine
// f(result, old, rhs) to *lhs.  Lock-free CAS16 path when the platform and
// alignment allow it; otherwise serializes on the 2-byte-integer lock (or
// the single GOMP-compat lock when atomic mode 2 is active).
void
__kmpc_atomic_2( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    // Added for consistency with __kmpc_atomic_1/_4/_8, which all assert
    // that serial initialization has completed.
    KMP_DEBUG_ASSERT( __kmp_global.init_serial );
    if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
        FALSE /* must use lock */
#elif KMP_ARCH_X86 || KMP_ARCH_X86_64
        TRUE /* no alignment problems */
#else
        ! ( (kmp_uintptr_t) lhs & 0x1) /* make sure address is 2-byte aligned */
#endif
        )
    {
        kmp_int16 old_value, new_value;
        old_value = *(kmp_int16 *) lhs;
        (*f)( &new_value, &old_value, rhs );
        /* TODO: Should this be acquire or release? */
        while ( ! KMP_COMPARE_AND_STORE_ACQ16 ( (kmp_int16 *) lhs,
                *(kmp_int16 *) &old_value, *(kmp_int16 *) &new_value ) )
        {
            KMP_CPU_PAUSE();
            old_value = *(kmp_int16 *) lhs;
            (*f)( &new_value, &old_value, rhs );
        }
        return;
    }
    else {
        //
        // All 2-byte data is of integer data type.
        //
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_2i, gtid );
        (*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
        if ( __kmp_atomic_mode == 2 ) {
            __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
        }
        else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_2i, gtid );
    }
}
// Generic atomic update for 4-byte operands via f(result, old, rhs).
// Lock-free CAS32 path when the address is suitably aligned (always on
// x86/x86-64); otherwise serializes on the shared 4-byte lock.
void
__kmpc_atomic_4( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
if (
//
// FIXME: On IA-32 architecture, gcc uses cmpxchg only for 4-byte ints.
// Gomp compatibility is broken if this routine is called for floats.
//
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
TRUE /* no alignment problems */
#else
! ( (kmp_uintptr_t) lhs & 0x3) /* make sure address is 4-byte aligned */
#endif
)
{
kmp_int32 old_value, new_value;
old_value = *(kmp_int32 *) lhs;
(*f)( &new_value, &old_value, rhs );
/* TODO: Should this be acquire or release? */
while ( ! KMP_COMPARE_AND_STORE_ACQ32 ( (kmp_int32 *) lhs,
*(kmp_int32 *) &old_value, *(kmp_int32 *) &new_value ) )
{
KMP_CPU_PAUSE();
old_value = *(kmp_int32 *) lhs;
(*f)( &new_value, &old_value, rhs );
}
return;
}
else {
//
// Use __kmp_atomic_lock_4i for all 4-byte data,
// even if it isn't of integer data type.
//
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_4i, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_4i, gtid );
}
}
// Generic atomic update for 8-byte operands via f(result, old, rhs).
// Lock-free CAS64 path except on IA-32 under GOMP compatibility (lock
// forced) or when the address is not 8-byte aligned.
void
__kmpc_atomic_8( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
if (
#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT)
FALSE /* must use lock */
#elif KMP_ARCH_X86 || KMP_ARCH_X86_64
TRUE /* no alignment problems */
#else
! ( (kmp_uintptr_t) lhs & 0x7) /* make sure address is 8-byte aligned */
#endif
)
{
kmp_int64 old_value, new_value;
old_value = *(kmp_int64 *) lhs;
(*f)( &new_value, &old_value, rhs );
/* TODO: Should this be acquire or release? */
while ( ! KMP_COMPARE_AND_STORE_ACQ64 ( (kmp_int64 *) lhs,
*(kmp_int64 *) &old_value,
*(kmp_int64 *) &new_value ) )
{
KMP_CPU_PAUSE();
old_value = *(kmp_int64 *) lhs;
(*f)( &new_value, &old_value, rhs );
}
return;
} else {
//
// Use __kmp_atomic_lock_8i for all 8-byte data,
// even if it isn't of integer data type.
//
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_8i, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_8i, gtid );
}
}
void
__kmpc_atomic_10( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_10r, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_10r, gtid );
}
void
__kmpc_atomic_16( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
KMP_DEBUG_ASSERT( __kmp_global.init_serial );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_16c, gtid );
(*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
if ( __kmp_atomic_mode == 2 ) {
__kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
}
else
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock( & __kmp_atomic_lock_16c, gtid );
}
/*
 * Generic ATOMIC entry point for 20-byte operands (the _20c lock suffix
 * suggests a complex type — TODO confirm). Same acquire/compute/release
 * pattern as the other __kmpc_atomic_NN routines.
 */
void
__kmpc_atomic_20( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_global.init_serial );
#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        /* GOMP-compatibility mode: single global atomic lock. */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_20c, gtid );
    /* *lhs = f(*lhs, *rhs) while the lock is held. */
    (*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_20c, gtid );
}
/*
 * Generic ATOMIC entry point for 32-byte operands (the _32c lock suffix
 * suggests a complex type — TODO confirm). Same acquire/compute/release
 * pattern as the other __kmpc_atomic_NN routines.
 */
void
__kmpc_atomic_32( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) )
{
    KMP_DEBUG_ASSERT( __kmp_global.init_serial );
#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        /* GOMP-compatibility mode: single global atomic lock. */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
        __kmp_acquire_atomic_lock( & __kmp_atomic_lock_32c, gtid );
    /* *lhs = f(*lhs, *rhs) while the lock is held. */
    (*f)( lhs, lhs, rhs );
#ifdef KMP_GOMP_COMPAT
    if ( __kmp_atomic_mode == 2 ) {
        __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
    }
    else
#endif /* KMP_GOMP_COMPAT */
        __kmp_release_atomic_lock( & __kmp_atomic_lock_32c, gtid );
}
// AC: same two routines as GOMP_atomic_start/end, but will be called by our compiler
// duplicated in order to not use 3-party names in pure Intel code
// TODO: consider adding GTID parameter after consultation with Ernesto/Xinmin.
/*
 * Begin a compiler-generated critical region for an unsupported ATOMIC
 * shape: takes the single global atomic lock. Uses __kmp_entry_gtid(),
 * which can register the calling thread if it is not yet known to the
 * runtime. Must be paired with __kmpc_atomic_end().
 */
void
__kmpc_atomic_start(void)
{
    int gtid = __kmp_entry_gtid();
    KA_TRACE(20, ("__kmpc_atomic_start: T#%d\n", gtid));
    __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}
/*
 * End the critical region opened by __kmpc_atomic_start(): releases the
 * global atomic lock. Uses __kmp_get_gtid() rather than __kmp_entry_gtid()
 * — by this point the thread must already be registered, since it took
 * the lock in the matching start call.
 */
void
__kmpc_atomic_end(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("__kmpc_atomic_end: T#%d\n", gtid));
    __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */
/*!
@}
*/
// end of file
|
egcs.c | /**
* @file egcs.c
*
* An implemenation of the ElGamal cryptosystem.
*/
#include "../include/libhcs/egcs.h"
#include <gmp.h>
#include <stdio.h>
#include "../include/libhcs/hcs_random.h"
#include "com/omp.h"
#include "com/util.h"
/**
 * Allocate a fresh ElGamal public key with all mpz members initialized
 * to zero.
 *
 * @return New key on success, NULL if allocation fails. The caller owns
 *         the key and releases it with egcs_free_public_key().
 */
egcs_public_key *egcs_init_public_key(void) {
    egcs_public_key *key = malloc(sizeof *key);
    if (!key)
        return NULL;
    mpz_inits(key->g, key->q, key->h, NULL);
    return key;
}
/**
 * Allocate a fresh ElGamal private key with all mpz members initialized
 * to zero.
 *
 * @return New key on success, NULL if allocation fails. The caller owns
 *         the key and releases it with egcs_free_private_key().
 */
egcs_private_key *egcs_init_private_key(void) {
    egcs_private_key *key = malloc(sizeof *key);
    if (!key)
        return NULL;
    mpz_inits(key->x, key->q, NULL);
    return key;
}
/**
 * Generate an ElGamal key pair.
 *
 * Chooses a random prime modulus q of @p bits bits, then draws the
 * generator g and the secret exponent x uniformly from [1, q-1] and sets
 * h = g^x mod q. The private key keeps its own copy of q.
 *
 * Fixes vs. previous revision: the scratch mpz_t was initialized and
 * cleared but never used, and pk->q was decremented and restored in
 * place just to bound the random draws; the scratch variable now holds
 * q-1 instead, leaving pk->q untouched between operations.
 *
 * @param pk   Initialized public key to fill (g, q, h).
 * @param vk   Initialized private key to fill (x, q).
 * @param hr   Source of randomness (hr->rstate).
 * @param bits Bit size of the prime modulus.
 */
void egcs_generate_key_pair(egcs_public_key *pk, egcs_private_key *vk, hcs_random *hr,
                            const unsigned long bits) {
    mpz_t t;
    mpz_init(t);
    mpz_random_prime(pk->q, hr->rstate, bits);
    /* t = q - 1: mpz_urandomm samples [0, t), shifting by 1 gives [1, q-1]. */
    mpz_sub_ui(t, pk->q, 1);
    mpz_urandomm(pk->g, hr->rstate, t);
    mpz_urandomm(vk->x, hr->rstate, t);
    mpz_add_ui(pk->g, pk->g, 1);
    mpz_add_ui(vk->x, vk->x, 1);
    /* Public value h = g^x mod q. */
    mpz_powm(pk->h, pk->g, vk->x, pk->q);
    mpz_set(vk->q, pk->q);
    mpz_clear(t);
}
/**
 * Allocate a fresh ElGamal ciphertext with both components (c1, c2)
 * initialized to zero.
 *
 * @return New ciphertext on success, NULL if allocation fails. The
 *         caller owns it and releases it with egcs_free_cipher().
 */
egcs_cipher *egcs_init_cipher(void) {
    egcs_cipher *cipher = malloc(sizeof *cipher);
    if (!cipher)
        return NULL;
    mpz_inits(cipher->c1, cipher->c2, NULL);
    return cipher;
}
/**
 * Deep-copy a ciphertext: rop takes the values of op's two components.
 * Both arguments must already be initialized.
 */
void egcs_set(egcs_cipher *rop, egcs_cipher *op) {
    mpz_set(rop->c2, op->c2);
    mpz_set(rop->c1, op->c1);
}
/**
 * ElGamal encryption: rop = (c1, c2) = (g^r mod q, m * h^r mod q) for a
 * fresh ephemeral exponent r drawn uniformly from [1, q-1].
 *
 * Fixes vs. previous revision: pk->q was decremented in place to bound
 * the random draw and then restored, which temporarily corrupts the
 * shared public key and races any concurrent caller using the same pk.
 * The bound q-1 is now computed into a local, so pk is never written.
 *
 * @param pk     Public key (read-only here).
 * @param hr     Source of randomness.
 * @param rop    Initialized ciphertext receiving the result.
 * @param plain1 Plaintext message (expected in [0, q)).
 */
void egcs_encrypt(egcs_public_key *pk, hcs_random *hr, egcs_cipher *rop, mpz_t plain1) {
    mpz_t r, qm1;
    mpz_inits(r, qm1, NULL);
    /* r uniform in [1, q-1]: sample [0, q-1) then shift by one. */
    mpz_sub_ui(qm1, pk->q, 1);
    mpz_urandomm(r, hr->rstate, qm1);
    mpz_add_ui(r, r, 1);
    /* The two modular exponentiations are independent; compute them in
       parallel. r and pk are only read inside the sections. */
#pragma omp parallel sections
    {
#pragma omp section
        { mpz_powm(rop->c1, pk->g, r, pk->q); } /* c1 = g^r mod q */
#pragma omp section
        {
            mpz_powm(rop->c2, pk->h, r, pk->q); /* c2 = m * h^r mod q */
            mpz_mul(rop->c2, rop->c2, plain1);
            mpz_mod(rop->c2, rop->c2, pk->q);
        }
    }
    mpz_clears(r, qm1, NULL);
}
/**
 * Homomorphic multiply of two ciphertexts: componentwise product mod q,
 * i.e. rop = (ct1.c1 * ct2.c1 mod q, ct1.c2 * ct2.c2 mod q). Under
 * ElGamal this yields an encryption of the product of the plaintexts.
 * The two components are independent, so they are computed in parallel
 * OpenMP sections. rop may alias ct1 or ct2 (each section reads only
 * the component it writes).
 */
void egcs_ee_mul(egcs_public_key *pk, egcs_cipher *rop, egcs_cipher *ct1, egcs_cipher *ct2) {
#pragma omp parallel sections
    {
#pragma omp section
        {
            mpz_mul(rop->c1, ct1->c1, ct2->c1);
            mpz_mod(rop->c1, rop->c1, pk->q);
        }
#pragma omp section
        {
            mpz_mul(rop->c2, ct1->c2, ct2->c2);
            mpz_mod(rop->c2, rop->c2, pk->q);
        }
    }
}
/**
 * ElGamal decryption: rop = c2 * c1^(q-1-x) mod q.
 *
 * c1^(q-1-x) equals the inverse of c1^x modulo q (Fermat), so this
 * strips the mask h^r from c2 and recovers the plaintext.
 *
 * @param vk  Private key holding x and the modulus q.
 * @param rop Initialized mpz receiving the plaintext.
 * @param ct  Ciphertext to decrypt.
 */
void egcs_decrypt(egcs_private_key *vk, mpz_t rop, egcs_cipher *ct) {
    mpz_t exponent;
    mpz_init(exponent);
    /* exponent = (q - 1) - x */
    mpz_sub_ui(exponent, vk->q, 1);
    mpz_sub(exponent, exponent, vk->x);
    /* rop = c1^exponent * c2 mod q */
    mpz_powm(rop, ct->c1, exponent, vk->q);
    mpz_mul(rop, rop, ct->c2);
    mpz_mod(rop, rop, vk->q);
    mpz_clear(exponent);
}
/**
 * Reset both components of a ciphertext to zero (via the project's
 * mpz_zero helper) without releasing the mpz storage; the ciphertext
 * remains usable afterwards.
 */
void egcs_clear_cipher(egcs_cipher *ct) {
    mpz_zero(ct->c1);
    mpz_zero(ct->c2);
}
/**
 * Release all storage owned by a ciphertext, including the struct
 * itself. The pointer is invalid after this call.
 */
void egcs_free_cipher(egcs_cipher *ct) {
    mpz_clears(ct->c1, ct->c2, NULL);
    free(ct);
}
/**
 * Zero out all public-key values (via the project's mpz_zero helper)
 * without releasing the mpz storage; the key remains usable afterwards.
 */
void egcs_clear_public_key(egcs_public_key *pk) {
    mpz_zero(pk->g);
    mpz_zero(pk->q);
    mpz_zero(pk->h);
}
/**
 * Zero out the private-key values (secret exponent x and modulus q)
 * without releasing the mpz storage; the key remains usable afterwards.
 */
void egcs_clear_private_key(egcs_private_key *vk) {
    mpz_zero(vk->x);
    mpz_zero(vk->q);
}
/**
 * Release all storage owned by a public key, including the struct
 * itself. The pointer is invalid after this call.
 */
void egcs_free_public_key(egcs_public_key *pk) {
    mpz_clears(pk->g, pk->q, pk->h, NULL);
    free(pk);
}
/**
 * Release all storage owned by a private key, including the struct
 * itself. Note: the secret exponent is not zeroized before release —
 * consider wiping vk->x if secrets must not linger in freed memory.
 */
void egcs_free_private_key(egcs_private_key *vk) {
    mpz_clears(vk->x, vk->q, NULL);
    free(vk);
}
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImage method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A channel op.
%
% o value: A value value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-pixel scratch record used by EvaluateImages(): one double
  accumulator per pixel channel, wide enough for any channel layout.
*/
typedef struct _PixelChannels
{
  double
    channel[CompositePixelChannel];
} PixelChannels;
/*
  Release the per-thread pixel scratch rows allocated by
  AcquirePixelThreadSet(). Rows that were never allocated (NULL) are
  skipped, so this is safe to call on a partially built set. Always
  returns NULL for convenient pointer reassignment.
*/
static PixelChannels **DestroyPixelThreadSet(const Image *images,
  PixelChannels **pixels)
{
  size_t
    count;

  ssize_t
    n;

  assert(pixels != (PixelChannels **) NULL);
  /* Must match the row count used at acquisition time. */
  count=MagickMax(GetImageListLength(images),
    (size_t) GetMagickResourceLimit(ThreadResource));
  for (n=0; n < (ssize_t) count; n++)
    if (pixels[n] != (PixelChannels *) NULL)
      pixels[n]=(PixelChannels *) RelinquishMagickMemory(pixels[n]);
  return((PixelChannels **) RelinquishMagickMemory(pixels));
}
static PixelChannels **AcquirePixelThreadSet(const Image *images)
{
const Image
*next;
PixelChannels
**pixels;
register ssize_t
i;
size_t
columns,
rows;
rows=MagickMax(GetImageListLength(images),
(size_t) GetMagickResourceLimit(ThreadResource));
pixels=(PixelChannels **) AcquireQuantumMemory(rows,sizeof(*pixels));
if (pixels == (PixelChannels **) NULL)
return((PixelChannels **) NULL);
(void) memset(pixels,0,rows*sizeof(*pixels));
columns=MagickMax(GetImageListLength(images),MaxPixelChannels);
for (next=images; next != (Image *) NULL; next=next->next)
columns=MagickMax(next->columns,columns);
for (i=0; i < (ssize_t) rows; i++)
{
register ssize_t
j;
pixels[i]=(PixelChannels *) AcquireQuantumMemory(columns,sizeof(**pixels));
if (pixels[i] == (PixelChannels *) NULL)
return(DestroyPixelThreadSet(images,pixels));
for (j=0; j < (ssize_t) columns; j++)
{
register ssize_t
k;
for (k=0; k < MaxPixelChannels; k++)
pixels[i][j].channel[k]=0.0;
}
}
return(pixels);
}
/* Return the larger of two doubles (either argument on a tie). */
static inline double EvaluateMax(const double x,const double y)
{
  return(x < y ? y : x);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator for PixelChannels records: orders by the signed sum
  of per-channel differences, i.e. effectively by total channel
  intensity. All MaxPixelChannels slots are summed, so unused slots must
  be zeroed (EvaluateImages guarantees this). Note the accumulation of
  signed differences is done in floating point; do not reorder the sum.
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelChannels
    *color_1,
    *color_2;

  double
    distance;

  register ssize_t
    i;

  color_1=(const PixelChannels *) x;
  color_2=(const PixelChannels *) y;
  distance=0.0;
  for (i=0; i < MaxPixelChannels; i++)
    distance+=color_1->channel[i]-(double) color_2->channel[i];
  return(distance < 0 ? -1 : distance > 0 ? 1 : 0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Apply a single evaluate operator to one quantum. `pixel` is the current
  channel sample, `value` the user-supplied operand (for the accumulating
  operators Mean/Median/Sum/RootMeanSquare, `value` carries the running
  total and the caller performs the final divide/sqrt). The result is a
  raw double; the caller clamps it to the quantum range.
*/
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
  const MagickEvaluateOperator op,const double value)
{
  double
    result;

  register ssize_t
    i;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(double) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a positive
        result. It differs from % or fmod() that returns a 'truncated modulus'
        result, where floor() is replaced by trunc() and could return a
        negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      /* Bitwise ops round the operand to the nearest integer first. */
      result=(double) ((ssize_t) pixel & (ssize_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* Guard against division by zero by treating 0 as 1. */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        GaussianNoise,value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
        value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      /* Shift by repeated doubling so fractional overflow is not UB. */
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result*=2.0;
      break;
    }
    case LogEvaluateOperator:
    {
      /* Only defined for non-trivially positive pixels. */
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
          1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(double) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* Partial sum; the caller divides by the sample count. */
      result=(double) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      /* Accumulate; median selection happens in EvaluateImages(). */
      result=(double) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(double) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(double) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel | (ssize_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
        value);
      break;
    }
    case PowEvaluateOperator:
    {
      /* Preserve sign for negative (HDRI) pixel values. */
      if (pixel < 0)
        result=(double) -(QuantumRange*pow((double) -(QuantumScale*pixel),
          (double) value));
      else
        result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),
          (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result/=2.0;
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      /* Partial sum of squares; caller divides and takes sqrt(). */
      result=(double) (pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(double) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(double) (((double) pixel > value) ? QuantumRange : pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
        value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
      break;
    }
  }
  return(result);
}
/*
  Build the output canvas for EvaluateImages(): clone the image in the
  list with the most channels, resized to the maximum columns and rows
  found anywhere in the list. Returns NULL on clone failure.
*/
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  const Image
    *best,
    *scan;

  size_t
    max_columns,
    max_rows;

  best=images;
  max_columns=images->columns;
  max_rows=images->rows;
  for (scan=images; scan != (Image *) NULL; scan=scan->next)
  {
    if (scan->number_channels > best->number_channels)
      best=scan;
    if (scan->columns > max_columns)
      max_columns=scan->columns;
    if (scan->rows > max_rows)
      max_rows=scan->rows;
  }
  return(CloneImage(best,max_columns,max_rows,MagickTrue,exception));
}
/*
  Reduce an image list to one image by applying `op` across the lists'
  corresponding pixels. Two paths: a median path that gathers one sample
  per image per pixel and sorts, and a general path that accumulates
  row-by-row via ApplyEvaluateOperator() and post-processes (divide for
  mean, scale for multiply, sqrt for RMS). Returns a new image, or NULL
  on failure.
*/
MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag  "Evaluate/Image"

  CacheView
    *evaluate_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict evaluate_pixels;

  RandomInfo
    **magick_restrict random_info;

  size_t
    number_images;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Canvas: largest geometry / most channels in the list. */
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  /* One scratch row of accumulators per worker thread. */
  evaluate_pixels=AcquirePixelThreadSet(images);
  if (evaluate_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      /* key == ~0UL means unseeded randomness: safe to thread. */
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView
          *image_view;

        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        register PixelChannels
          *evaluate_pixel;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            j,
            k;

          /* One accumulator record per image; reset before gathering. */
          for (j=0; j < (ssize_t) number_images; j++)
            for (k=0; k < MaxPixelChannels; k++)
              evaluate_pixel[j].channel[k]=0.0;
          next=images;
          for (j=0; j < (ssize_t) number_images; j++)
          {
            register const Quantum
              *p;

            register ssize_t
              i;

            image_view=AcquireVirtualCacheView(next,exception);
            p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
            if (p == (const Quantum *) NULL)
              {
                image_view=DestroyCacheView(image_view);
                break;
              }
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait))
                continue;
              if ((traits & UpdatePixelTrait) == 0)
                continue;
              evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p),op,
                evaluate_pixel[j].channel[i]);
            }
            image_view=DestroyCacheView(image_view);
            next=GetNextImageInList(next);
          }
          /* Sort the per-image samples by intensity and take the middle
             one. NOTE(review): j == number_images here unless the gather
             loop broke early on a cache miss, in which case j/2 indexes
             the median of the partial set — confirm this is intended. */
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          for (k=0; k < (ssize_t) GetPixelChannels(image); k++)
            q[k]=ClampToQuantum(evaluate_pixel[j/2].channel[k]);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView
          *image_view;

        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        register ssize_t
          i,
          x;

        register PixelChannels
          *evaluate_pixel;

        register Quantum
          *magick_restrict q;

        ssize_t
          j;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* One accumulator per output column this time (whole-row pass). */
        evaluate_pixel=evaluate_pixels[id];
        for (j=0; j < (ssize_t) image->columns; j++)
          for (i=0; i < MaxPixelChannels; i++)
            evaluate_pixel[j].channel[i]=0.0;
        next=images;
        for (j=0; j < (ssize_t) number_images; j++)
        {
          register const Quantum
            *p;

          image_view=AcquireVirtualCacheView(next,exception);
          p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
            exception);
          if (p == (const Quantum *) NULL)
            {
              image_view=DestroyCacheView(image_view);
              break;
            }
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            register ssize_t
              i;

            for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait))
                continue;
              if ((traits & UpdatePixelTrait) == 0)
                continue;
              /* First image seeds the accumulator (Add), later images
                 apply the requested operator against it. */
              evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p),j == 0 ?
                AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
            }
            p+=GetPixelChannels(next);
          }
          image_view=DestroyCacheView(image_view);
          next=GetNextImageInList(next);
        }
        /* Finalize accumulating operators. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          switch (op)
          {
            case MeanEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]/=(double) number_images;
              break;
            }
            case MultiplyEvaluateOperator:
            {
              /* Rescale: each extra multiply added a factor of
                 QuantumRange, undo it. */
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                register ssize_t
                  j;

                for (j=0; j < (ssize_t) (number_images-1); j++)
                  evaluate_pixel[x].channel[i]*=QuantumScale;
              }
              break;
            }
            case RootMeanSquareEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
                  number_images);
              break;
            }
            default:
              break;
          }
        }
        /* Clamp accumulators into the output row. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
  Apply `op` with operand `value` to every updatable channel of every
  pixel of `image`, in place. Rows are processed in parallel when the
  randomness is unseeded. Returns MagickTrue on success.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* key == ~0UL means unseeded randomness: parallel rows are safe. */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        result;

      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
        /* Mean is a two-sample average here: (pixel+value)/2. */
        if (op == MeanEvaluateOperator)
          result/=2.0;
        q[i]=ClampToQuantum(result);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the FunctionImage method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Evaluate one of the FunctionImage() channel functions on a single
  quantum and clamp the result to the quantum range. Missing parameters
  fall back to the per-function defaults noted in each case.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  double
    result;

  register ssize_t
    i;

  (void) exception;
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+
        c1*x^2+c2*x+c3). Evaluated by Horner's scheme on the normalized pixel.
      */
      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel+parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      double
        amplitude,
        bias,
        frequency,
        phase;

      /*
        Sinusoid: frequency, phase, amplitude, bias.
        Defaults: 1.0, 0.0, 0.5, 0.5. Phase is in degrees (divided by 360).
      */
      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
      break;
    }
    case ArcsinFunction:
    {
      double
        bias,
        center,
        range,
        width;

      /*
        Arcsin (peged at range limits for invalid results): width, center,
        range, and bias. Defaults: 1.0, 0.5, 1.0, 0.5.
      */
      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=2.0/width*(QuantumScale*pixel-center);
      if ( result <= -1.0 )
        result=bias-range/2.0;
      else
        if (result >= 1.0)
          result=bias+range/2.0;
        else
          result=(double) (range/MagickPI*asin((double) result)+bias);
      result*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      double
        center,
        bias,
        range,
        slope;

      /*
        Arctan: slope, center, range, and bias.
        Defaults: 1.0, 0.5, 1.0, 0.5.
      */
      slope=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(double) (QuantumRange*(range/MagickPI*atan((double)
        result)+bias));
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}
/*
  Apply a channel function (polynomial, sinusoid, arcsin, arctan) to
  every updatable channel of every pixel, in place. Delegates to the
  OpenCL path when available; otherwise processes rows in parallel via
  ApplyFunction(). Returns MagickTrue on success.
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag  "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Accelerated path handles the whole image; fall through on failure. */
  if (AccelerateFunctionImage(image,function,number_parameters,parameters,
        exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
          exception);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageEntropy method is:
%
% MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Report the composite-channel entropy of the image, computed via
  GetImageStatistics(). Returns MagickFalse if statistics could not be
  gathered, in which case *entropy is left untouched.
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *entropy=statistics[CompositePixelChannel].entropy;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageExtrema method is:
%
% MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
% size_t *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Report the channel extrema of the image as rounded integer quantum
  values: *minima gets the minimum (rounded up from min-0.5), *maxima
  the maximum (rounded down from max+0.5). Returns the status of the
  underlying GetImageRange() call.
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  double
    maximum,
    minimum;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageRange(image,&minimum,&maximum,exception);
  *minima=(size_t) ceil(minimum-0.5);
  *maxima=(size_t) floor(maximum+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageKurtosis() returns the kurtosis and skewness of one or more image
% channels.
%
% The format of the GetImageKurtosis method is:
%
% MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
% double *skewness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Report the composite-channel kurtosis and skewness of the image,
  computed via GetImageStatistics(). Returns MagickFalse if statistics
  could not be gathered; the outputs are untouched in that case.
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *kurtosis=statistics[CompositePixelChannel].kurtosis;
  *skewness=statistics[CompositePixelChannel].skewness;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMean() returns the mean and standard deviation of one or more image
% channels.
%
% The format of the GetImageMean method is:
%
% MagickBooleanType GetImageMean(const Image *image,double *mean,
% double *standard_deviation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The composite slot of the statistics array aggregates every updatable
    channel; report its mean and standard deviation.
  */
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *mean=statistics[CompositePixelChannel].mean;
  *standard_deviation=statistics[CompositePixelChannel].standard_deviation;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageMoments method is:
%
% ChannelMoments *GetImageMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
  register ssize_t
    i;

  size_t
    count;

  /*
    Count the channels that participate in updates; report at least one so
    callers may safely divide by the result.
  */
  count=0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits != UndefinedPixelTrait) &&
        ((traits & UpdatePixelTrait) != 0))
      count++;
  }
  return((size_t) (count == 0 ? 1 : count));
}
MagickExport ChannelMoments *GetImageMoments(const Image *image,
  ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8
  CacheView
    *image_view;
  ChannelMoments
    *channel_moments;
  /*
    Raw/central image moments Mpq.  Each array holds one slot per pixel
    channel plus a final slot (index MaxPixelChannels) that accumulates the
    sum over all updatable channels.
  */
  double
    M00[MaxPixelChannels+1],
    M01[MaxPixelChannels+1],
    M02[MaxPixelChannels+1],
    M03[MaxPixelChannels+1],
    M10[MaxPixelChannels+1],
    M11[MaxPixelChannels+1],
    M12[MaxPixelChannels+1],
    M20[MaxPixelChannels+1],
    M21[MaxPixelChannels+1],
    M22[MaxPixelChannels+1],
    M30[MaxPixelChannels+1];
  PointInfo
    centroid[MaxPixelChannels+1];
  ssize_t
    channel,
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
    sizeof(*channel_moments));
  if (channel_moments == (ChannelMoments *) NULL)
    return(channel_moments);
  (void) memset(channel_moments,0,(MaxPixelChannels+1)*
    sizeof(*channel_moments));
  (void) memset(centroid,0,sizeof(centroid));
  (void) memset(M00,0,sizeof(M00));
  (void) memset(M01,0,sizeof(M01));
  (void) memset(M02,0,sizeof(M02));
  (void) memset(M03,0,sizeof(M03));
  (void) memset(M10,0,sizeof(M10));
  (void) memset(M11,0,sizeof(M11));
  (void) memset(M12,0,sizeof(M12));
  (void) memset(M20,0,sizeof(M20));
  (void) memset(M21,0,sizeof(M21));
  (void) memset(M22,0,sizeof(M22));
  (void) memset(M30,0,sizeof(M30));
  image_view=AcquireVirtualCacheView(image,exception);
  /*
    First pass over the pixels: accumulate the raw moments M00, M10 and M01
    needed to locate each channel's center of mass.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    /*
      Compute center of mass (centroid).
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        M00[channel]+=QuantumScale*p[i];
        M00[MaxPixelChannels]+=QuantumScale*p[i];
        M10[channel]+=x*QuantumScale*p[i];
        M10[MaxPixelChannels]+=x*QuantumScale*p[i];
        M01[channel]+=y*QuantumScale*p[i];
        M01[MaxPixelChannels]+=y*QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
  }
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute center of mass (centroid).
    */
    if (M00[channel] < MagickEpsilon)
      {
        /*
          Near-empty channel: avoid dividing by ~0 and fall back to the
          geometric center of the image.
        */
        M00[channel]+=MagickEpsilon;
        centroid[channel].x=(double) image->columns/2.0;
        centroid[channel].y=(double) image->rows/2.0;
        continue;
      }
    M00[channel]+=MagickEpsilon;
    centroid[channel].x=M10[channel]/M00[channel];
    centroid[channel].y=M01[channel]/M00[channel];
  }
  /*
    Second pass: accumulate the central moments (second and third order)
    about each channel's centroid.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    /*
      Compute the image moments.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          QuantumScale*p[i];
        M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          QuantumScale*p[i];
        M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
        M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
        M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (x-centroid[channel].x)*QuantumScale*p[i];
        M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (x-centroid[channel].x)*QuantumScale*p[i];
        M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
  }
  /*
    The composite slot summed every channel; average it over the number of
    participating channels.
  */
  M00[MaxPixelChannels]/=GetImageChannels(image);
  M01[MaxPixelChannels]/=GetImageChannels(image);
  M02[MaxPixelChannels]/=GetImageChannels(image);
  M03[MaxPixelChannels]/=GetImageChannels(image);
  M10[MaxPixelChannels]/=GetImageChannels(image);
  M11[MaxPixelChannels]/=GetImageChannels(image);
  M12[MaxPixelChannels]/=GetImageChannels(image);
  M20[MaxPixelChannels]/=GetImageChannels(image);
  M21[MaxPixelChannels]/=GetImageChannels(image);
  M22[MaxPixelChannels]/=GetImageChannels(image);
  M30[MaxPixelChannels]/=GetImageChannels(image);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute elliptical angle, major and minor axes, eccentricity, & intensity.
    */
    channel_moments[channel].centroid=centroid[channel];
    channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])*
      ((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+
      (M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])*
      ((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+
      (M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0*
      M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon)));
    /*
      atan() only resolves the angle modulo 180 degrees; the sign of M11 and
      the sign of (M20-M02) pick the correct quadrant offset below.
    */
    if (fabs(M11[channel]) < MagickEpsilon)
      {
        if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
          channel_moments[channel].ellipse_angle+=0.0;
        else
          if ((M20[channel]-M02[channel]) < 0.0)
            channel_moments[channel].ellipse_angle+=90.0;
          else
            channel_moments[channel].ellipse_angle+=0.0;
      }
    else
      if (M11[channel] < 0.0)
        {
          if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
            channel_moments[channel].ellipse_angle+=0.0;
          else
            if ((M20[channel]-M02[channel]) < 0.0)
              channel_moments[channel].ellipse_angle+=90.0;
            else
              channel_moments[channel].ellipse_angle+=180.0;
        }
      else
        {
          if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
            channel_moments[channel].ellipse_angle+=0.0;
          else
            if ((M20[channel]-M02[channel]) < 0.0)
              channel_moments[channel].ellipse_angle+=90.0;
            else
              channel_moments[channel].ellipse_angle+=0.0;
        }
    channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
      channel_moments[channel].ellipse_axis.y/
      (channel_moments[channel].ellipse_axis.x+MagickEpsilon)));
    channel_moments[channel].ellipse_intensity=M00[channel]/
      (MagickPI*channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.y+MagickEpsilon);
  }
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Normalize image moments.
    */
    /* Scale-invariant normalization: eta(p,q) = mu(p,q)/mu(0,0)^(1+(p+q)/2). */
    M10[channel]=0.0;
    M01[channel]=0.0;
    M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
    M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
    M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
    M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
    M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
    M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
    M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
    M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
    M00[channel]=1.0;
  }
  image_view=DestroyCacheView(image_view);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute Hu invariant moments.
    */
    channel_moments[channel].invariant[0]=M20[channel]+M02[channel];
    channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
    channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
      (3.0*M21[channel]-M03[channel]);
    channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]);
    channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])*
      ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
      (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
      4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
    channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+
      M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
      (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
      (M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
  }
  /*
    If either pixel pass broke out early (pixel read failure), release the
    result; RelinquishMagickMemory() yields NULL, signalling failure.
  */
  if (y < (ssize_t) image->rows)
    channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
  return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   G e t I m a g e P e r c e p t u a l H a s h                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImagePerceptualHash method is:
%
% ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *perceptual_hash;
  char
    *colorspaces,
    *q;
  const char
    *artifact;
  MagickBooleanType
    status;
  register char
    *p;
  register ssize_t
    i;
  /*
    Allocate one hash record per pixel channel plus the composite slot.
  */
  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    MaxPixelChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    return((ChannelPerceptualHash *) NULL);
  /*
    The "phash:colorspaces" artifact selects the colorspaces to hash in;
    default to sRGB and HCLp.
  */
  artifact=GetImageArtifact(image,"phash:colorspaces");
  if (artifact != NULL)
    colorspaces=AcquireString(artifact);
  else
    colorspaces=AcquireString("sRGB,HCLp");
  perceptual_hash[0].number_colorspaces=0;
  perceptual_hash[0].number_channels=0;
  q=colorspaces;
  for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
  {
    ChannelMoments
      *moments;
    Image
      *hash_image;
    size_t
      j;
    ssize_t
      channel,
      colorspace;
    if (i >= MaximumNumberOfPerceptualColorspaces)
      break;
    colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
    if (colorspace < 0)
      break;
    perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
    /*
      Hash a lightly blurred, 8-bit clone of the image in the requested
      colorspace.
    */
    hash_image=BlurImage(image,0.0,1.0,exception);
    if (hash_image == (Image *) NULL)
      break;
    hash_image->depth=8;
    status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
      exception);
    if (status == MagickFalse)
      {
        /*
          Bug fix: release the blurred clone before bailing out; previously
          hash_image leaked when the colorspace transform failed.
        */
        hash_image=DestroyImage(hash_image);
        break;
      }
    moments=GetImageMoments(hash_image,exception);
    perceptual_hash[0].number_colorspaces++;
    perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
    hash_image=DestroyImage(hash_image);
    if (moments == (ChannelMoments *) NULL)
      break;
    /*
      The perceptual hash is the negated log10 of each Hu invariant moment.
    */
    for (channel=0; channel <= MaxPixelChannels; channel++)
      for (j=0; j < MaximumNumberOfImageMoments; j++)
        perceptual_hash[channel].phash[i][j]=
          (-MagickLog10(moments[channel].invariant[j]));
    moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  }
  colorspaces=DestroyString(colorspaces);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageRange() returns the range of one or more image channels.
%
% The format of the GetImageRange method is:
%
% MagickBooleanType GetImageRange(const Image *image,double *minima,
% double *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
  double *maxima,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    initialize,
    status;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  /* initialize stays true until the first row's extrema seed the result. */
  initialize=MagickTrue;
  *maxima=0.0;
  *minima=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,initialize) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /*
      Each row computes private extrema, then merges them into the shared
      result inside a critical section — a manual min/max reduction.
    */
    double
      row_maxima = 0.0,
      row_minima = 0.0;
    MagickBooleanType
      row_initialize;
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    row_initialize=MagickTrue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        /* Only channels flagged for update participate in the range. */
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (row_initialize != MagickFalse)
          {
            /* Seed the row extrema with the first participating sample. */
            row_minima=(double) p[i];
            row_maxima=(double) p[i];
            row_initialize=MagickFalse;
          }
        else
          {
            if ((double) p[i] < row_minima)
              row_minima=(double) p[i];
            if ((double) p[i] > row_maxima)
              row_maxima=(double) p[i];
          }
      }
      p+=GetPixelChannels(image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetImageRange)
#endif
    {
      /* Merge this row's extrema into the shared result. */
      if (initialize != MagickFalse)
        {
          *minima=row_minima;
          *maxima=row_maxima;
          initialize=MagickFalse;
        }
      else
        {
          if (row_minima < *minima)
            *minima=row_minima;
          if (row_maxima > *maxima)
            *maxima=row_maxima;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageStatistics() returns statistics for each channel in the image. The
% statistics include the channel depth, its minima, maxima, mean, standard
% deviation, kurtosis and skewness. You can access the red channel mean, for
% example, like this:
%
% channel_statistics=GetImageStatistics(image,exception);
% red_mean=channel_statistics[RedPixelChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageStatistics method is:
%
% ChannelStatistics *GetImageStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;
  double
    area,
    *histogram,
    standard_deviation;
  MagickStatusType
    status;
  QuantumAny
    range;
  register ssize_t
    i;
  size_t
    depth;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The histogram (one bin row per map entry, one column per channel) feeds
    the entropy computation below.
  */
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
    MaxPixelChannels+1,sizeof(*channel_statistics));
  if ((channel_statistics == (ChannelStatistics *) NULL) ||
      (histogram == (double *) NULL))
    {
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (channel_statistics != (ChannelStatistics *) NULL)
        channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          channel_statistics);
      return(channel_statistics);
    }
  (void) memset(channel_statistics,0,(MaxPixelChannels+1)*
    sizeof(*channel_statistics));
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /* Seed extrema so any real sample replaces them on first comparison. */
    channel_statistics[i].depth=1;
    channel_statistics[i].maxima=(-MagickMaximumValue);
    channel_statistics[i].minima=MagickMaximumValue;
  }
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    /*
      Compute pixel statistics.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      /* Pixels masked out of reading do not contribute. */
      if (GetPixelReadMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
          {
            /*
              Estimate the channel depth: if the sample does not survive a
              round-trip through the current depth's range, bump the depth
              and retry this same sample (hence the i--).
            */
            depth=channel_statistics[channel].depth;
            range=GetQuantumRange(depth);
            status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
              range) ? MagickTrue : MagickFalse;
            if (status != MagickFalse)
              {
                channel_statistics[channel].depth++;
                i--;
                continue;
              }
          }
        if ((double) p[i] < channel_statistics[channel].minima)
          channel_statistics[channel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[channel].maxima)
          channel_statistics[channel].maxima=(double) p[i];
        /* Power sums up to the fourth moment, per channel and composite. */
        channel_statistics[channel].sum+=p[i];
        channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
        channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
        channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
          p[i];
        channel_statistics[channel].area++;
        if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
          channel_statistics[CompositePixelChannel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
          channel_statistics[CompositePixelChannel].maxima=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum((double) p[i]))+i]++;
        channel_statistics[CompositePixelChannel].sum+=(double) p[i];
        channel_statistics[CompositePixelChannel].sum_squared+=(double)
          p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_cubed+=(double)
          p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
          p[i]*p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].area++;
      }
      p+=GetPixelChannels(image);
    }
  }
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Normalize pixel statistics.
    */
    area=PerceptibleReciprocal(channel_statistics[i].area);
    channel_statistics[i].sum*=area;
    channel_statistics[i].sum_squared*=area;
    channel_statistics[i].sum_cubed*=area;
    channel_statistics[i].sum_fourth_power*=area;
    channel_statistics[i].mean=channel_statistics[i].sum;
    channel_statistics[i].variance=channel_statistics[i].sum_squared;
    /* Sample standard deviation: Bessel's n/(n-1) correction applied. */
    standard_deviation=sqrt(channel_statistics[i].variance-
      (channel_statistics[i].mean*channel_statistics[i].mean));
    standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
      1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
    channel_statistics[i].standard_deviation=standard_deviation;
  }
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      number_bins;
    register ssize_t
      j;
    /*
      Compute pixel entropy.
    */
    PixelChannel channel = GetPixelChannelChannel(image,i);
    /* Count the occupied histogram bins; log(number_bins) normalizes. */
    number_bins=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
      if (histogram[GetPixelChannels(image)*j+i] > 0.0)
        number_bins++;
    area=PerceptibleReciprocal(channel_statistics[channel].area);
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        count;
      count=area*histogram[GetPixelChannels(image)*j+i];
      channel_statistics[channel].entropy+=-count*MagickLog10(count)*
        PerceptibleReciprocal(MagickLog10(number_bins));
      channel_statistics[CompositePixelChannel].entropy+=-count*
        MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/
        GetPixelChannels(image);
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Compute kurtosis & skewness statistics.
    */
    /* Standard moment formulas expanded in terms of the power sums. */
    standard_deviation=PerceptibleReciprocal(
      channel_statistics[i].standard_deviation);
    channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
      channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation);
    channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
      channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*1.0*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation*standard_deviation)-3.0;
  }
  /*
    Recompute the composite mean/deviation/entropy as the average over the
    per-channel slots (excluding the composite slot itself).
  */
  channel_statistics[CompositePixelChannel].mean=0.0;
  channel_statistics[CompositePixelChannel].standard_deviation=0.0;
  channel_statistics[CompositePixelChannel].entropy=0.0;
  for (i=0; i < (ssize_t) MaxPixelChannels; i++)
  {
    channel_statistics[CompositePixelChannel].mean+=
      channel_statistics[i].mean;
    channel_statistics[CompositePixelChannel].standard_deviation+=
      channel_statistics[i].standard_deviation;
    channel_statistics[CompositePixelChannel].entropy+=
      channel_statistics[i].entropy;
  }
  channel_statistics[CompositePixelChannel].mean/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].standard_deviation/=(double)
    GetImageChannels(image);
  channel_statistics[CompositePixelChannel].entropy/=(double)
    GetImageChannels(image);
  /*
    If the pixel pass broke out early (read failure), release the result;
    RelinquishMagickMemory() yields NULL, signalling failure.
  */
  if (y < (ssize_t) image->rows)
    channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
      channel_statistics);
  return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"
  CacheView
    *polynomial_view;
  Image
    *image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelChannels
    **magick_restrict polynomial_pixels;
  size_t
    number_images;
  ssize_t
    y;
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  /* One floating-point accumulation row per worker thread. */
  polynomial_pixels=AcquirePixelThreadSet(images);
  if (polynomial_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels.
  */
  status=MagickTrue;
  progress=0;
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;
    const Image
      *next;
    const int
      id = GetOpenMPThreadId();
    register ssize_t
      i,
      x;
    register PixelChannels
      *polynomial_pixel;
    register Quantum
      *magick_restrict q;
    ssize_t
      j;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Reset this thread's accumulation row for the current scanline. */
    polynomial_pixel=polynomial_pixels[id];
    for (j=0; j < (ssize_t) image->columns; j++)
      for (i=0; i < MaxPixelChannels; i++)
        polynomial_pixel[j].channel[i]=0.0;
    /*
      Accumulate coefficient*pow(sample,degree) for each image in the
      sequence; terms[] holds (coefficient,degree) pairs, one per image.
    */
    next=images;
    for (j=0; j < (ssize_t) number_images; j++)
    {
      register const Quantum
        *p;
      /* Images beyond the supplied term list contribute nothing. */
      if (j >= (ssize_t) number_terms)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        register ssize_t
          i;
        for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
        {
          MagickRealType
            coefficient,
            degree;
          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(next,channel);
          PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
          if ((traits == UndefinedPixelTrait) ||
              (polynomial_traits == UndefinedPixelTrait))
            continue;
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          /* terms[2*j] is the coefficient, terms[2*j+1] the degree. */
          coefficient=(MagickRealType) terms[2*j];
          degree=(MagickRealType) terms[(j << 1)+1];
          polynomial_pixel[x].channel[i]+=coefficient*
            pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
        }
        p+=GetPixelChannels(next);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    /* Clamp the accumulated polynomial back into quantum range. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,PolynomialImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* One node of the intensity skip-list: one node per distinct 16-bit value. */
typedef struct _SkipNode
{
size_t
next[9], /* forward links, one per skip-list level (at most 9 levels) */
count, /* occurrences of this intensity in the current neighborhood */
signature; /* node is live when this matches PixelList.signature */
} SkipNode;
/* Skip-list over 16-bit intensities; nodes[65536] serves as the sentinel. */
typedef struct _SkipList
{
ssize_t
level; /* highest level currently in use (0-based) */

SkipNode
*nodes; /* 65537 nodes: 65536 intensity buckets + 1 sentinel */
} SkipList;
/* Per-thread multiset of neighborhood samples, backed by a skip-list. */
typedef struct _PixelList
{
size_t
length, /* number of samples per neighborhood (width*height) */
seed; /* LCG state used to pick random node levels */

SkipList
skip_list;

size_t
signature; /* generation counter; bumping it lazily empties the list */
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  /*
    Release a pixel list and its aligned skip-list node array.  Accepts and
    returns NULL so callers may write list=DestroyPixelList(list).
  */
  if (pixel_list != (PixelList *) NULL)
    {
      if (pixel_list->skip_list.nodes != (SkipNode *) NULL)
        pixel_list->skip_list.nodes=(SkipNode *)
          RelinquishAlignedMemory(pixel_list->skip_list.nodes);
      pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
    }
  return(pixel_list);
}
/*
  Destroy every per-thread pixel list, then the containing array; returns
  NULL for convenient assignment.  Fix: the original re-queried
  GetMagickResourceLimit(ThreadResource) on every loop iteration even
  though the value is loop-invariant; it is now read once up front.
*/
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  assert(pixel_list != (PixelList **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (pixel_list[i] != (PixelList *) NULL)
      pixel_list[i]=DestroyPixelList(pixel_list[i]);
  pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list);
  return(pixel_list);
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) memset((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->skip_list.nodes));
if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) memset(pixel_list->skip_list.nodes,0,65537UL*
sizeof(*pixel_list->skip_list.nodes));
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
/* Insert a new intensity node into the skip-list (first occurrence of
`color' in this signature generation). */
static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
register SkipList
*p;
register ssize_t
level;
size_t
search,
update[9];
/*
Initialize the node.
*/
p=(&pixel_list->skip_list);
p->nodes[color].signature=pixel_list->signature;
p->nodes[color].count=1;
/*
Determine where it belongs in the list: starting at the 65536 sentinel,
record for each level the last node whose key precedes `color'.
*/
search=65536UL;
for (level=p->level; level >= 0; level--)
{
while (p->nodes[search].next[level] < color)
search=p->nodes[search].next[level];
update[level]=search;
}
/*
Generate a pseudo-random level for this node: the LCG keeps growing the
level only while seed bits 8 and 9 are both set (probability 1/4 per step).
*/
for (level=0; ; level++)
{
pixel_list->seed=(pixel_list->seed*42893621L)+1L;
if ((pixel_list->seed & 0x300) != 0x300)
break;
}
/* Clamp: at most 9 levels, and never more than 2 above the current top. */
if (level > 8)
level=8;
if (level > (p->level+2))
level=p->level+2;
/*
If we're raising the list's level, link back to the root node.
*/
while (level > p->level)
{
p->level++;
update[p->level]=65536UL;
}
/*
Link the node into the skip-list at every level up to `level'.
*/
do
{
p->nodes[color].next[level]=p->nodes[update[level]].next[level];
p->nodes[update[level]].next[level]=color;
} while (level-- > 0);
}
static inline void GetMaximumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    largest,
    node;

  ssize_t
    remaining;

  /*
    Level-0 links visit the intensities in ascending order; track the
    largest key seen until every sample is accounted for.
  */
  list=(&pixel_list->skip_list);
  node=65536L;
  largest=list->nodes[node].next[0];
  remaining=(ssize_t) pixel_list->length;
  do
  {
    node=list->nodes[node].next[0];
    if (node > largest)
      largest=node;
    remaining-=(ssize_t) list->nodes[node].count;
  } while (remaining > 0);
  *pixel=ScaleShortToQuantum((unsigned short) largest);
}
static inline void GetMeanPixelList(PixelList *pixel_list,Quantum *pixel)
{
  double
    total;

  register SkipList
    *list;

  size_t
    node;

  ssize_t
    remaining;

  /*
    Average the neighborhood: each level-0 node contributes its intensity
    weighted by its occurrence count.
  */
  list=(&pixel_list->skip_list);
  node=65536L;
  total=0.0;
  remaining=(ssize_t) pixel_list->length;
  do
  {
    node=list->nodes[node].next[0];
    total+=(double) list->nodes[node].count*node;
    remaining-=(ssize_t) list->nodes[node].count;
  } while (remaining > 0);
  total/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) total);
}
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    node;

  ssize_t
    midpoint,
    seen;

  /*
    Walk level 0 (ascending intensities) until more than half of the
    samples have been passed; that node's key is the median.
  */
  list=(&pixel_list->skip_list);
  midpoint=(ssize_t) (pixel_list->length >> 1);
  node=65536L;
  seen=0;
  do
  {
    node=list->nodes[node].next[0];
    seen+=list->nodes[node].count;
  } while (seen <= midpoint);
  *pixel=ScaleShortToQuantum((unsigned short) node);
}
static inline void GetMinimumPixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    node,
    smallest;

  ssize_t
    remaining;

  /*
    Level-0 links are sorted ascending, so the first node already holds the
    minimum; the walk still consumes every sample to mirror the sibling
    accessors.
  */
  list=(&pixel_list->skip_list);
  node=65536UL;
  smallest=list->nodes[node].next[0];
  remaining=(ssize_t) pixel_list->length;
  do
  {
    node=list->nodes[node].next[0];
    if (node < smallest)
      smallest=node;
    remaining-=(ssize_t) list->nodes[node].count;
  } while (remaining > 0);
  *pixel=ScaleShortToQuantum((unsigned short) smallest);
}
static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
  register SkipList
    *list;

  size_t
    best,
    best_count,
    node;

  ssize_t
    remaining;

  /*
    Make each pixel the 'predominant color' of the specified neighborhood:
    choose the intensity whose node carries the highest occurrence count.
  */
  list=(&pixel_list->skip_list);
  node=65536L;
  best=node;
  best_count=list->nodes[best].count;
  remaining=(ssize_t) pixel_list->length;
  do
  {
    node=list->nodes[node].next[0];
    if (list->nodes[node].count > best_count)
      {
        best=node;
        best_count=list->nodes[best].count;
      }
    remaining-=(ssize_t) list->nodes[node].count;
  } while (remaining > 0);
  *pixel=ScaleShortToQuantum((unsigned short) best);
}
/* Median-like accessor that avoids returning an end-of-list value. */
static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
register SkipList
*p;
size_t
color,
next,
previous;
ssize_t
count;
/*
Finds the non peak value for each of the colors: walk to the median node
while remembering its level-0 neighbors.
*/
p=(&pixel_list->skip_list);
color=65536L;
next=p->nodes[color].next[0];
count=0;
do
{
previous=color;
color=next;
next=p->nodes[color].next[0];
count+=p->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
/* If exactly one neighbor is the 65536 sentinel, the median sits at an end
of the list; step one node inward instead. */
if ((previous == 65536UL) && (next != 65536UL))
color=next;
else
if ((previous != 65536UL) && (next == 65536UL))
color=previous;
*pixel=ScaleShortToQuantum((unsigned short) color);
}
/*
  Find the root mean square value for each of the color.

  Fix: the squared term is now accumulated in floating point.  The original
  computed count*color*color entirely in size_t; with 16-bit intensities
  color*color can reach ~4.29e9, which wraps a 32-bit size_t (ILP32
  targets) before the cast to double.
*/
static inline void GetRootMeanSquarePixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  double
    sum;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  do
  {
    color=p->nodes[color].next[0];
    /* promote to double BEFORE multiplying to avoid size_t overflow */
    sum+=(double) p->nodes[color].count*((double) color*color);
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(sum));
}
/*
  Find the standard-deviation value for each of the color.

  Fixes: (1) the original accumulated color*color once per occurrence in an
  O(count) inner loop; multiplying by the node count is equivalent and
  removes the quadratic work for large neighborhoods.  (2) floating-point
  rounding can make the computed variance fractionally negative, sending a
  NaN through sqrt(); clamp it at zero first.
*/
static inline void GetStandardDeviationPixelList(PixelList *pixel_list,
  Quantum *pixel)
{
  double
    sum,
    sum_squared,
    variance;

  register SkipList
    *p;

  size_t
    color;

  ssize_t
    count;

  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  sum=0.0;
  sum_squared=0.0;
  do
  {
    color=p->nodes[color].next[0];
    sum+=(double) p->nodes[color].count*color;
    sum_squared+=(double) p->nodes[color].count*((double) color*color);
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  sum/=pixel_list->length;
  sum_squared/=pixel_list->length;
  variance=sum_squared-(sum*sum);
  if (variance < 0.0)
    variance=0.0;
  *pixel=ScaleShortToQuantum((unsigned short) sqrt(variance));
}
static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
  unsigned short
    key;

  /*
    Bucket the sample by its 16-bit intensity: bump the count if the node
    is already live in this signature generation, otherwise link a fresh
    node into the skip-list.
  */
  key=ScaleQuantumToShort(pixel);
  if (pixel_list->skip_list.nodes[key].signature == pixel_list->signature)
    {
      pixel_list->skip_list.nodes[key].count++;
      return;
    }
  AddNodePixelList(pixel_list,key);
}
static void ResetPixelList(PixelList *pixel_list)
{
  register SkipList
    *list;

  register SkipNode
    *sentinel;

  register int
    level;

  /*
    Empty the list in O(1): point every level of the 65536 sentinel back at
    itself and bump the signature so all existing nodes become stale.
  */
  list=(&pixel_list->skip_list);
  sentinel=list->nodes+65536UL;
  list->level=0;
  for (level=0; level < 9; level++)
    sentinel->next[level]=65536UL;
  pixel_list->seed=pixel_list->signature++;
}
/* Replace each pixel with the selected statistic (min/max/median/mode/...)
of its width x height neighborhood; returns a new image or NULL. */
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"
CacheView
*image_view,
*statistic_view;
Image
*statistic_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelList
**magick_restrict pixel_list;
ssize_t
center,
y;
/*
Initialize statistics image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
statistic_image=CloneImage(image,0,0,MagickTrue,
exception);
if (statistic_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(statistic_image,DirectClass,exception);
if (status == MagickFalse)
{
statistic_image=DestroyImage(statistic_image);
return((Image *) NULL);
}
/* One PixelList per worker thread; width/height are clamped to >= 1. */
pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
if (pixel_list == (PixelList **) NULL)
{
statistic_image=DestroyImage(statistic_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Make each pixel the min / max / median / mode / etc. of the neighborhood.
`center' is the offset (in Quantum units) of the neighborhood's center
pixel within the padded virtual view fetched per row below.
*/
center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
(MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
for (y=0; y < (ssize_t) statistic_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
/* The virtual view covers this row plus the neighborhood margin. */
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
(ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
MagickMax(height,1),exception);
q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) statistic_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
Quantum
pixel;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(statistic_traits == UndefinedPixelTrait))
continue;
/* Copy-through channels and write-masked pixels keep the center value. */
if (((statistic_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p) <= (QuantumRange/2)))
{
SetPixelChannel(statistic_image,channel,p[center+i],q);
continue;
}
if ((statistic_traits & UpdatePixelTrait) == 0)
continue;
/* Load this channel's width x height neighborhood into the thread's
pixel list, then compute the requested statistic from it. */
pixels=p;
ResetPixelList(pixel_list[id]);
for (v=0; v < (ssize_t) MagickMax(height,1); v++)
{
for (u=0; u < (ssize_t) MagickMax(width,1); u++)
{
InsertPixelList(pixels[i],pixel_list[id]);
pixels+=GetPixelChannels(image);
}
pixels+=GetPixelChannels(image)*image->columns;
}
switch (type)
{
case GradientStatistic:
{
double
maximum,
minimum;
GetMinimumPixelList(pixel_list[id],&pixel);
minimum=(double) pixel;
GetMaximumPixelList(pixel_list[id],&pixel);
maximum=(double) pixel;
pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
break;
}
case MaximumStatistic:
{
GetMaximumPixelList(pixel_list[id],&pixel);
break;
}
case MeanStatistic:
{
GetMeanPixelList(pixel_list[id],&pixel);
break;
}
case MedianStatistic:
default:
{
GetMedianPixelList(pixel_list[id],&pixel);
break;
}
case MinimumStatistic:
{
GetMinimumPixelList(pixel_list[id],&pixel);
break;
}
case ModeStatistic:
{
GetModePixelList(pixel_list[id],&pixel);
break;
}
case NonpeakStatistic:
{
GetNonpeakPixelList(pixel_list[id],&pixel);
break;
}
case RootMeanSquareStatistic:
{
GetRootMeanSquarePixelList(pixel_list[id],&pixel);
break;
}
case StandardDeviationStatistic:
{
GetStandardDeviationPixelList(pixel_list[id],&pixel);
break;
}
}
SetPixelChannel(statistic_image,channel,pixel,q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(statistic_image);
}
if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
statistic_view=DestroyCacheView(statistic_view);
image_view=DestroyCacheView(image_view);
pixel_list=DestroyPixelListThreadSet(pixel_list);
if (status == MagickFalse)
statistic_image=DestroyImage(statistic_image);
return(statistic_image);
}
|
demo.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* OpenMP demo: shows that private(a) gives each thread its own copy. */
int main(void)
{
int a = 2;
/* private(a): each thread receives an UNINITIALIZED private copy; the
assignment a = 3 below never touches the outer a. */
#pragma omp parallel private(a)
{
int numThreads = omp_get_num_threads();
a = 3;
printf("Hello from %d/%d thread\n", omp_get_thread_num(), numThreads);
}
/* Outside the parallel region omp_get_thread_num() returns 0 and the
outer a still holds 2 (the private copies were discarded). */
printf("[%d]: %d\n", omp_get_thread_num(), a);
return 0;
} |
matMul.c | /*
Test and timing harness program for developing a dense matrix multiplication
routine for the CS3014 module.
Authors: Basil L. Contovounesios
Ben Lynch
Simon Markham
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#include <omp.h>
#include <xmmintrin.h>
#include <pthread.h>
/*
Uncomment the following to use sysconf(_SC_NPROCESSORS_ONLN) to determine
number of online cores on the system.
*/
// #include <unistd.h>
/*
The following two definitions of DEBUGGING control whether or not debugging
information is written out. Defining DEBUG, e.g. via preprocessor options
at compilation, puts the program into debugging mode.
*/
#ifdef DEBUG
#define DEBUGGING(_x) _x
#else
#define DEBUGGING(_x)
#endif
/*
The following definition reflects the number of online cores on the system
and determines the maximum number of pthreads created. It defaults to 64,
which is the number of cores on the target machine stoker. It is intended to
be defined at compilation via a preprocessor option if run on a different
target.
*/
#ifndef NCORES
#define NCORES 64
#endif
/*
Complex number unit stored in matrices.
*/
struct complex {
float real; /* real component */
float imag; /* imaginary component */
};
/*
Write matrix to stdout.
*/
/*
Write matrix to stdout, one row per line; entries are printed as
"real + imagi" separated by spaces.
*/
void write_out(struct complex ** a, int dim1, int dim2) {
  for (int row = 0; row < dim1; row++) {
    // All but the last column get a trailing space; the last gets a newline.
    for (int col = 0; col < dim2 - 1; col++)
      printf("%f + %fi ", a[row][col].real, a[row][col].imag);
    printf("%f + %fi\n", a[row][dim2 - 1].real, a[row][dim2 - 1].imag);
  }
}
/*
Create new empty matrix.
*/
/*
Create new empty matrix: a header of dim1 row pointers into one contiguous
dim1*dim2 element block (which is why free_matrix frees matrix[0] and then
matrix).

Fix: the original never checked either malloc result and would fault on
out-of-memory; allocation failure now aborts with a diagnostic.
*/
struct complex ** new_empty_matrix(int dim1, int dim2) {
  struct complex ** result = malloc(sizeof(struct complex *) * dim1);
  struct complex * new_matrix = malloc(sizeof(struct complex) * dim1 * dim2);
  if (result == NULL || new_matrix == NULL) {
    fputs("FATAL: out of memory allocating matrix\n", stderr);
    exit(EXIT_FAILURE);
  }
  for (int i = 0; (i < dim1); i++) {
    result[i] = &new_matrix[i * dim2];
  }
  return result;
}
/*
Free matrix.
*/
void free_matrix(struct complex ** matrix) {
free (matrix[0]); // Free the contents: one contiguous block holding every row
free (matrix); // Free the header: the array of row pointers
}
/*
Take a copy of the matrix and return in a newly allocated matrix.
*/
/*
Take a copy of the matrix and return it in a newly allocated matrix.
*/
struct complex ** copy_matrix(struct complex ** source_matrix,
                              int dim1, int dim2) {
  struct complex ** duplicate = new_empty_matrix(dim1, dim2);
  for (int row = 0; row < dim1; row++)
    for (int col = 0; col < dim2; col++)
      duplicate[row][col] = source_matrix[row][col];
  return duplicate;
}
/*
Create a matrix and fill it with random numbers.
*/
/*
Create a matrix and fill it with pseudo-random values in the open range
(-(random_range - 1), random_range - 1).
*/
struct complex ** gen_random_matrix(int dim1, int dim2) {
  const int random_range = 512;
  struct timeval seedtime;
  int seed;
  struct complex ** result = new_empty_matrix(dim1, dim2);

  // Seed the PRNG from the microsecond part of the current wall-clock time.
  gettimeofday(&seedtime, NULL);
  seed = seedtime.tv_usec;
  srandom(seed);

  for (int row = 0; row < dim1; row++) {
    for (int col = 0; col < dim2; col++) {
      // Evenly generate magnitudes in [0, random_range - 1)...
      result[row][col].real = (float)(random() % random_range);
      result[row][col].imag = (float)(random() % random_range);
      // ...then flip the sign of each component with probability 1/2.
      if (random() & 1) result[row][col].real = -result[row][col].real;
      if (random() & 1) result[row][col].imag = -result[row][col].imag;
    }
  }
  return result;
}
/*
Check the sum of absolute differences is within reasonable epsilon.
*/
/*
Check the sum of absolute differences is within reasonable epsilon; warn on
stderr otherwise.

Fix: the original called abs() -- the INTEGER absolute value -- on float
differences, implicitly truncating each difference toward int, so any
fractional discrepancy below 1.0 was silently discarded.  The absolute
value is now computed in floating point (without needing <math.h>).
*/
void check_result(struct complex ** result, struct complex ** control,
                  int dim1, int dim2) {
  double diff = 0.0;
  double sum_abs_diff = 0.0;
  const double EPSILON = 0.0625;
  for (int i = 0; (i < dim1); i++) {
    for (int j = 0; (j < dim2); j++) {
      diff = (double)control[i][j].real - (double)result[i][j].real;
      sum_abs_diff += (diff < 0.0) ? -diff : diff;
      diff = (double)control[i][j].imag - (double)result[i][j].imag;
      sum_abs_diff += (diff < 0.0) ? -diff : diff;
    }
  }
  if (sum_abs_diff > EPSILON) {
    fprintf(stderr, "WARNING: sum of absolute differences (%f) > EPSILON (%f)\n",
            sum_abs_diff, EPSILON);
  }
}
/*
Multiply matrix A times matrix B and put result in matrix C.
*/
void matmul(struct complex ** A, struct complex ** B, struct complex ** C,
int a_dim1, int a_dim2, int b_dim2) {
struct complex sum;
for (int i = 0; (i < a_dim1); i++) {
for(int j = 0; (j < b_dim2); j++) {
sum = (struct complex){0.0, 0.0};
for (int k = 0; (k < a_dim2); k++) {
// The following code does: sum += A[i][k] * B[k][j];
sum.real += A[i][k].real * B[k][j].real - A[i][k].imag * B[k][j].imag;
sum.imag += A[i][k].real * B[k][j].imag + A[i][k].imag * B[k][j].real;
}
C[i][j] = sum;
}
}
}
/*
The fast version of matmul() written by the team.
*/
void team_matmul(struct complex ** A, struct complex ** B, struct complex ** C,
int a_dim1, int a_dim2, int b_dim2) {
struct complex sum;
#pragma omp parallel for if (a_dim1 >= NCORES)
for (int i = 0; i < a_dim1; i++) {
#pragma omp parallel for if (b_dim2 >= NCORES) private(sum)
for(int j = 0; j < b_dim2; j++) {
sum = (struct complex){0.0, 0.0};
for (int k = 0; k < a_dim2; k++) {
// The following code does: sum += A[i][k] * B[k][j];
sum.real += A[i][k].real * B[k][j].real - A[i][k].imag * B[k][j].imag;
sum.imag += A[i][k].real * B[k][j].imag + A[i][k].imag * B[k][j].real;
}
C[i][j] = sum;
}
}
}
/*
Returns the difference, in microseconds, between the two given times.
*/
/*
Returns the difference, in microseconds, between the two given times.
*/
long long time_diff(struct timeval * start, struct timeval *end) {
  long long diff_us = (end->tv_sec - start->tv_sec) * 1000000L;
  diff_us += end->tv_usec - start->tv_usec;
  return diff_us;
}
/*
Main harness.
*/
/* Test and timing harness: runs the reference matmul() as the control,
times team_matmul(), reports the speedup and checks the results agree. */
int main(int argc, char ** argv) {
struct complex ** A, ** B, ** C;
struct complex ** ctrl_matrix;
long long ctrl_time, mult_time;
int a_dim1, a_dim2, b_dim1, b_dim2;
struct timeval time0, time1, time2;
double speedup;
if (argc != 5) {
fputs("Usage: matMul <A nrows> <A ncols> <B nrows> <B ncols>\n", stderr);
exit(1);
} else {
a_dim1 = atoi(argv[1]);
a_dim2 = atoi(argv[2]);
b_dim1 = atoi(argv[3]);
b_dim2 = atoi(argv[4]);
}
// Check the matrix sizes are compatible
if (a_dim2 != b_dim1) {
fprintf(stderr, "FATAL number of columns of A (%d) does not "
"match number of rows of B (%d)\n", a_dim2, b_dim1);
exit(1);
}
// Allocate the matrices
A = gen_random_matrix(a_dim1, a_dim2);
B = gen_random_matrix(b_dim1, b_dim2);
C = new_empty_matrix(a_dim1, b_dim2);
ctrl_matrix = new_empty_matrix(a_dim1, b_dim2);
DEBUGGING({
puts("Matrix A:");
write_out(A, a_dim1, a_dim2);
puts("\nMatrix B:");
write_out(B, b_dim1, b_dim2);
puts("");
})
// Record control start time
gettimeofday(&time0, NULL);
// Use a simple matmul routine to produce control result
matmul(A, B, ctrl_matrix, a_dim1, a_dim2, b_dim2);
// NOTE(review): in DEBUG builds the write_out below is included in
// ctrl_time (time0..time1), which skews the reported speedup.
DEBUGGING( {
puts("Resultant matrix:");
write_out(ctrl_matrix, a_dim1, b_dim2);
} )
// Record start time
gettimeofday(&time1, NULL);
// Perform matrix multiplication
team_matmul(A, B, C, a_dim1, a_dim2, b_dim2);
// Record finishing time
gettimeofday(&time2, NULL);
// Compute elapsed times and speedup factor
ctrl_time = time_diff(&time0, &time1);
mult_time = time_diff(&time1, &time2);
speedup = (float)ctrl_time / mult_time;
printf("Control time: %lld μs\n", ctrl_time);
printf("Matmul time: %lld μs\n", mult_time);
if ((mult_time > 0) && (ctrl_time > 0)) {
printf("Speedup: %.2fx\n", speedup);
}
// Now check that team_matmul() gives the same answer as the control
check_result(C, ctrl_matrix, a_dim1, b_dim2);
DEBUGGING({
puts("Resultant matrix:");
write_out(C, a_dim1, b_dim2);
})
// Free all matrices
free_matrix(A);
free_matrix(B);
free_matrix(C);
free_matrix(ctrl_matrix);
return EXIT_SUCCESS;
}
|
convlayer.h | #ifndef CONVLAYER_H
#define CONVLAYER_H
#include "layer.h"
#include "vol.h"
#include "utils.h"
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/map.hpp>
#include <boost/serialization/utility.hpp>
#include <iostream>
#include <iomanip>
#include <algorithm>
#include <string>
#include <random>
#include <cmath>
#include <ctime>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/legacy/compat.hpp>
#ifndef NOT_USE
#define NOT_USE 2<<20
#endif
using namespace std;
// Convolutional layer: out_depth filters of size sx*sy*in_depth are slid
// over the input volume with the given stride and zero padding.
// NOTE(review): dtor() deletes in_act, which forward() points at the
// caller-supplied input volume -- ownership looks shared; verify callers
// before reuse.
template < typename FP >
class ConvLayer : public Layer<FP>{
private:
public:
int out_depth; // number of filters == output channels
int out_sx; // output width (computed in the constructor)
int out_sy; // output height (computed in the constructor)
int sx; // filter width
int sy; // filter height
int in_depth; // input channels
int in_sx; // input width
int in_sy; // input height
int stride; // step of the sliding window
int pad; // zero padding added on each border
FP l1_decay_mul; // L1 weight-decay multiplier for this layer
FP l2_decay_mul; // L2 weight-decay multiplier for this layer
string layer_type; // always "conv"
FP bias; // initial bias value (bias_pref)
Vol<FP>* biases; // 1 x 1 x out_depth bias volume
vector<Vol<FP>* > filters; // one sx*sy*in_depth weight volume per output channel
//Out:{d,x,y} In:{d,x,y} Conv:{stride,pad,l1_decay,l2_decay}
ConvLayer(int out_depth,int sx,int sy,int in_depth,int in_sx,int in_sy,int stride=1,int pad=0,FP l1_decay_mul=FP(0),FP l2_decay_mul=FP(1),FP bias_pref=FP(0)):sx(sx),sy(sy),in_depth(in_depth),in_sx(in_sx),in_sy(in_sy),stride(stride),pad(pad),l1_decay_mul(l1_decay_mul),l2_decay_mul(l2_decay_mul),layer_type("conv"),bias(bias_pref),biases(NULL){
this->in_act=NULL;
this->out_act=NULL;
//cout << "cv 0" << endl;
// Standard conv output size: floor((in + 2*pad - filter) / stride) + 1.
this->out_sx = floor( (this->in_sx + this->pad * 2 - this->sx) / this->stride + 1 );
this->out_sy = floor( (this->in_sy + this->pad * 2 - this->sy) / this->stride + 1 );
//cout << "cv 1" << endl;
this->bias = bias_pref;
//cout << "cv 2" << endl;
this->out_depth=out_depth;
for(int i=0;i<this->out_depth;i++){
//cout << "cv 2.1" << endl;
this->filters.push_back(new Vol<FP>(this->sx,this->sy,this->in_depth));//cout << "cv 2.2" << endl;
}
//cout << "cv 3" << endl;
this->biases = new Vol<FP>(1, 1, this->out_depth , FP(bias) );
//cout << "cv 4" << endl;
}
// Free all owned volumes: biases, filters, and the cached in_act/out_act.
void dtor(){
cout << "clearrr" << endl;
if(this->biases != NULL){delete this->biases;this->biases=NULL;}
//cout << "clearrr1" << endl;
for(int i=0;i<this->filters.size();i++)
delete this->filters[i];
//cout << "clearrr2" << endl;
this->filters.clear();
//cout << "clearrr3" << endl;
if(this->in_act != NULL){delete this->in_act;this->in_act=NULL;}
//cout << "clearrr5" << endl;
if(this->out_act != NULL){delete this->out_act;this->out_act=NULL;}
//cout << "clearrr4" << endl;
}
~ConvLayer(){
dtor();
}
// Serialize all trainable weights into one flat vector: every filter's
// weights in order, followed by the biases.
vector<FP> get_all_w(){
vector<FP> out;
Vol<FP>* V;
vector< Vol<FP>* > list;
for(int q=0;q<this->filters.size();q++)
list.push_back(this->filters[q]);
list.push_back(this->biases);
for(int z=0;z<list.size();z++){
V=list[z];
int size=V->w.size();
//cout << size << endl;
for(int q=0;q<size;q++){
out.push_back(V->w[q]);
}
}
return out;
}
// Inverse of get_all_w(): restore filters then biases from a flat vector
// (aw must have the layout produced by get_all_w()).
void set_all_w(vector<FP> aw){
Vol<FP>* V;
vector< Vol<FP>* > list;
vector<int> slist;
int as=0;
for(int q=0;q<this->filters.size();q++){
list.push_back(this->filters[q]);
slist.push_back(this->filters[q]->w.size());
as+=this->filters[q]->w.size();
}
slist.push_back(this->biases->w.size());
list.push_back(this->biases);
as+=this->biases->w.size();
for(int i=0,q=0;i<slist.size();i++){
V = list[i];
for(int j=0;j<slist[i];j++,q++){
V->w[j]=aw[q];
}
}
}
// Forward pass: correlate every filter with the (zero-padded) input volume
// and add the per-filter bias.  Caches V as in_act and the result as
// out_act; any previous out_act is freed.
Vol<FP>* forward(Vol<FP>* V,bool is_training=false){
//cout << "feed a" << endl;
this->in_act = V;
//cout << "feed b" << endl;
Vol<FP>* A = new Vol<FP>(this->out_sx,this->out_sy,this->out_depth,FP(0.0));
//cout << "feed c" << endl;
int V_sx = V->sx;
int V_sy = V->sy;
int V_depth=V->depth;
int xy_stride = this->stride;
//cout << "feed ddd" << endl;
clock_t begin_time = clock();
//#pragma omp parallel for
for(int d=0;d<this->out_depth;d++){
Vol<FP>* f = this->filters[d];
// (x,y) track the top-left corner of the window in input coordinates,
// starting at -pad; (ax,ay) index the output volume.
int x = -this->pad;
int y = -this->pad;
int f_sx=f->sx;
int f_depth=f->depth;
for(int ay=0;ay<this->out_sy;y+=xy_stride,ay++){
x = -this->pad;
for(int ax=0;ax<this->out_sx;x+=xy_stride,ax++){
FP a(0);
for(int fy=0;fy<f->sy;fy++){
int oy = y+fy;
for(int fx=0;fx<f->sx;fx++){
int ox = x+fx;
// Skip taps that fall into the zero padding.
if(oy>=0 && oy<V_sy && ox>=0 && ox<V_sx){
for(int fd=0;fd<f->depth;fd++){
a += f->w[((f_sx * fy)+fx)*f_depth + fd] * V->w[((V_sx * oy)+ox)*V_depth + fd];
}
}
}
}
a += this->biases->w[d];
A->set(ax,ay,d,a);
}
}
}
//std::cout << "ConvLayer : " << float( clock () - begin_time ) / CLOCKS_PER_SEC << endl;
//cout << "feed e" << endl;
if(this->out_act != NULL){delete this->out_act;this->out_act=NULL;}
//cout << "feed f" << endl;
this->out_act = A;
//cout << "feed g" << endl;
//cout << "feed h" << endl;
return this->out_act;
}
// Backward pass: using the gradients stored in out_act->dw, accumulate
// filter/bias gradients and write the input gradient into in_act->dw.
void backward(int tmpy=0){
Vol<FP>* V = this->in_act;
Utils<FP> ut;
V->dw = ut.zeros(V->w.size());
int V_sx = V->sx;
int V_sy = V->sy;
int xy_stride = this->stride;
for(int d=0;d<this->out_depth;d++){
Vol<FP>* f = this->filters[d];
int x = -this->pad;
int y = -this->pad;
for(int ay=0;ay<this->out_sy;y+=xy_stride,ay++){
x = -this->pad;
for(int ax=0;ax<this->out_sx;x+=xy_stride,ax++){
FP chain_grad = this->out_act->get_grad(ax,ay,d);
//if(abs(chain_grad)>0.001)cout << chain_grad << endl;
for(int fy=0;fy<f->sy;fy++){
int oy=y+fy;
for(int fx=0;fx<f->sx;fx++){
int ox=x+fx;
if(oy>=0 && oy<V_sy && ox>=0 && ox<V_sx){
for(int fd=0;fd<f->depth;fd++){
int ix1 = ((V_sx * oy)+ox)*V->depth + fd;
int ix2 = ((f->sx * fy)+fx)*f->depth + fd;
f->dw[ix2] += V->w[ix1]*chain_grad;
V->dw[ix1] += f->w[ix2]*chain_grad;
}
}
}
}
this->biases->dw[d] += chain_grad;
}
}
}
}
// Expose pointers to each parameter vector and its gradient vector (one
// map per filter, plus one for the biases) for the trainer.
vector< map<string,vector<FP>* > > getParamsAndGrads(){
vector< map<string,vector<FP>* > > v;//cout << "!1" << endl;
/*for(int d=0;d<this->filters.size();d++){
for(int l=0;l<this->filters[d]->w.size();l++){
this->filters[d]->w[l]=FP(0.1);
}
}*/
for(int i=0;i<this->out_depth;i++){
map<string,vector<FP>* > m;
m["params"] = &this->filters[i]->w;
/*
vector<FP>& ww= *tt;
for(int l=0;l<ww.size();l++){
ww[l]=FP(0.1);
}*/
m["grads"] = &this->filters[i]->dw;
v.push_back(m);
}//cout << "!4" << endl;
map<string,vector<FP>* > m;
m["params"] = &this->biases->w;
m["grads"] = &this->biases->dw;
v.push_back(m);
return v;
}
string get_layer_type(){
return this->layer_type;
}
Vol<FP>* get_in_act(){
return this->in_act;
}
Vol<FP>* get_out_act(){
return this->out_act;
}
};
/**
* Mat::data Specification
* 2x2 1 channel
* [ R , R ;
* R , R ]
*
* 2x2 2 channel
* [ R , G , R , G ;
* R , G , R , G ]
*
* 2x2 3 channel
* [ R , G , B , R , G , B ;
* R , G , B , R , G , B ]
*/
#endif
|
c3_fmt.c | /*
* Generic crypt(3) support, as well as support for glibc's crypt_r(3) and
* Solaris' MT-safe crypt(3C) with OpenMP parallelization.
*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2009-2015 by Solar Designer
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*/
#if AC_BUILT
#include "autoconfig.h"
#endif
#if HAVE_CRYPT
#undef _XOPEN_SOURCE
#undef _XOPEN_SOURCE_EXTENDED
#undef _XOPEN_VERSION
#undef _XPG4_2
#undef _GNU_SOURCE
#define _XOPEN_SOURCE 4 /* for crypt(3) */
#define _XOPEN_SOURCE_EXTENDED 1 /* for OpenBSD */
#define _XOPEN_VERSION 4
#define _XPG4_2
#define _GNU_SOURCE 1 /* for crypt_r(3) */
#include <stdio.h>
#if !AC_BUILT
#include <string.h>
#ifndef _MSC_VER
#include <strings.h>
#endif
#ifdef __CYGWIN__
#include <crypt.h>
#endif
#if defined(_OPENMP) && defined(__GLIBC__)
#include <crypt.h>
#else
#if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER
#include <unistd.h>
#endif
#endif
#endif
#if STRING_WITH_STRINGS
#include <string.h>
#include <strings.h>
#elif HAVE_STRING_H
#include <string.h>
#elif HAVE_STRINGS_H
#include <strings.h>
#endif
#if (!AC_BUILT && defined(HAVE_CRYPT))
#undef HAVE_CRYPT_H
#define HAVE_CRYPT_H 1
#endif
#if HAVE_CRYPT_H
#include <crypt.h>
#endif
#if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER
#include <unistd.h>
#endif
#if defined(_OPENMP)
#include <omp.h> /* for omp_get_thread_num() */
#endif
#include "options.h"
#include "arch.h"
#include "misc.h"
#include "params.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "loader.h"
#include "john.h"
#ifdef HAVE_MPI
#include "john-mpi.h"
#endif
#include "memdbg.h"
#define FORMAT_LABEL "crypt"
#define FORMAT_NAME "generic crypt(3)"
#define ALGORITHM_NAME "?/" ARCH_BITS_STR
#define BENCHMARK_COMMENT " DES"
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 72
#define BINARY_SIZE 128
#define BINARY_ALIGN 1
#define SALT_SIZE BINARY_SIZE
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 96
#define MAX_KEYS_PER_CRYPT 96
/* Built-in self-test vectors: traditional DES-based crypt(3) hashes
(13-character encodings; the first two characters are the salt). */
static struct fmt_tests tests[] = {
{"CCNf8Sbh3HDfQ", "U*U*U*U*"},
{"CCX.K.MFy4Ois", "U*U***U"},
{"CC4rMpbg9AMZ.", "U*U***U*"},
{"XXxzOu6maQKqQ", "*U*U*U*U"},
{"SDbsugeBiC58A", ""},
{NULL}
};
/* Candidate plaintexts for the current batch (one per key slot). */
static char saved_key[MAX_KEYS_PER_CRYPT][PLAINTEXT_LENGTH + 1];
/* Salt currently being tested. */
static char saved_salt[SALT_SIZE];
/* crypt(3) output per candidate. */
static char crypt_out[MAX_KEYS_PER_CRYPT][BINARY_SIZE];
#if defined(_OPENMP) && defined(__GLIBC__)
#define MAX_THREADS MAX_KEYS_PER_CRYPT
/* We assume that this is zero-initialized (all NULL pointers); one
crypt_r(3) state per OpenMP thread. */
static struct crypt_data *crypt_data[MAX_THREADS];
#endif
/*
 * Format init hook. When --subformat=NAME was given, install self-test
 * vectors and a benchmark salt for that crypt(3) hash type, then probe the
 * system's crypt(3)/crypt_r(3) to confirm the type actually works here.
 */
static void init(struct fmt_main *self)
{
	if (options.subformat) {
		int i;
		char *salt = tests[0].ciphertext;
#if defined(_OPENMP) && defined(__GLIBC__)
		struct crypt_data data;
		data.initialized = 0;
#endif
/*
 * Allow
 * ./john --list=format-tests --format=crypt --subformat=md5crypt
 * in addition to
 * ./john --test --format=crypt --subformat=md5crypt
 *
 * That's why, don't require FLG_TEST_CHK to be set.
 */
		if (options.flags & FLG_PASSWD) {
			fprintf(stderr,
			    "\n%s: --subformat option is only for --test or --list=format-tests\n", FORMAT_LABEL);
			error();
		}
		if (!strcmp(options.subformat, "?")) {
			fprintf(stderr, "Subformat may either be a verbatim salt, or: descrypt, md5crypt, bcrypt, sha256crypt, sha512crypt, sun-md5\n\n");
			error();
		} else if (!strcasecmp(options.subformat, "md5crypt") ||
		    !strcasecmp(options.subformat, "md5")) {
			/* The inner `tests` arrays below shadow the file-scope one and
			 * become the self-test vectors for the chosen subformat. */
			static struct fmt_tests tests[] = {
				{"$1$12345678$aIccj83HRDBo6ux1bVx7D1", "0123456789ABCDE"},
				{"$1$12345678$f8QoJuo0DpBRfQSD0vglc1", "12345678"},
				{"$1$$qRPK7m23GJusamGpoGLby/", ""},
				{NULL} };
			self->params.tests = tests;
			self->params.benchmark_comment = " MD5";
			salt = "$1$dXc3I7Rw$";
		} else if (!strcasecmp(options.subformat, "sunmd5") ||
		    !strcasecmp(options.subformat, "sun-md5")) {
			static struct fmt_tests tests[] = {
				{"$md5$rounds=904$Vc3VgyFx44iS8.Yu$Scf90iLWN6O6mT9TA06NK/", "test"},
				{"$md5$rounds=904$ZZZig8GS.S0pRNhc$dw5NMYJoxLlnFq4E.phLy.", "Don41dL33"},
				{"$md5$rounds=904$zSuVTn567UJLv14u$q2n2ZBFwKg2tElFBIzUq/0", "J4ck!3Wood"},
				{NULL} };
			self->params.tests = tests;
			self->params.benchmark_comment = " SunMD5";
			salt = "$md5$rounds=904$Vc3VgyFx44iS8.Yu$dummy";
		} else if ((!strcasecmp(options.subformat, "sha256crypt")) ||
		    (!strcasecmp(options.subformat, "sha-256")) ||
		    (!strcasecmp(options.subformat, "sha256"))) {
			static struct fmt_tests tests[] = {
				{"$5$LKO/Ute40T3FNF95$U0prpBQd4PloSGU0pnpM4z9wKn4vZ1.jsrzQfPqxph9", "U*U*U*U*"},
				{"$5$LKO/Ute40T3FNF95$fdgfoJEBoMajNxCv3Ru9LyQ0xZgv0OBMQoq80LQ/Qd.", "U*U***U"},
				{"$5$LKO/Ute40T3FNF95$8Ry82xGnnPI/6HtFYnvPBTYgOL23sdMXn8C29aO.x/A", "U*U***U*"},
				{NULL} };
			self->params.tests = tests;
			self->params.benchmark_comment = " SHA-256 rounds=5000";
			salt = "$5$LKO/Ute40T3FNF95$";
		} else if ((!strcasecmp(options.subformat, "sha512crypt")) ||
		    (!strcasecmp(options.subformat, "sha-512")) ||
		    (!strcasecmp(options.subformat, "sha512"))) {
			static struct fmt_tests tests[] = {
				{"$6$LKO/Ute40T3FNF95$6S/6T2YuOIHY0N3XpLKABJ3soYcXD9mB7uVbtEZDj/LNscVhZoZ9DEH.sBciDrMsHOWOoASbNLTypH/5X26gN0", "U*U*U*U*"},
				{"$6$LKO/Ute40T3FNF95$wK80cNqkiAUzFuVGxW6eFe8J.fSVI65MD5yEm8EjYMaJuDrhwe5XXpHDJpwF/kY.afsUs1LlgQAaOapVNbggZ1", "U*U***U"},
				{"$6$LKO/Ute40T3FNF95$YS81pp1uhOHTgKLhSMtQCr2cDiUiN03Ud3gyD4ameviK1Zqz.w3oXsMgO6LrqmIEcG3hiqaUqHi/WEE2zrZqa/", "U*U***U*"},
				{NULL} };
			self->params.tests = tests;
			self->params.benchmark_comment = " SHA-512 rounds=5000";
			salt = "$6$LKO/Ute40T3FNF95$";
		} else if ((!strcasecmp(options.subformat, "bf")) ||
		    (!strcasecmp(options.subformat, "blowfish")) ||
		    (!strcasecmp(options.subformat, "bcrypt"))) {
			static struct fmt_tests tests[] = {
				{"$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW","U*U"},
				{"$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK","U*U*"},
				{"$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a","U*U*U"},
				{NULL} };
			self->params.tests = tests;
			self->params.benchmark_comment = " BF x32";
			salt = "$2a$05$AD6y0uWY62Xk2TXZ";
		} else if (!strcasecmp(options.subformat, "descrypt") ||
		    !strcasecmp(options.subformat, "des")) {
			salt = "CC";
		} else {
			/* Unknown keyword: treat the argument as a verbatim salt. */
			char *p = mem_alloc_tiny(strlen(options.subformat) + 2,
			    MEM_ALIGN_NONE);
			strcpy(p, " ");
			strcat(p, options.subformat);
			self->params.benchmark_comment = p;
			salt = options.subformat;
/* turn off many salts test, since we are not updating the */
/* params.tests structure data. */
			self->params.benchmark_length = -1;
		}
		/* Probe crypt(3) with the five file-scope reference plaintexts;
		 * any failure means this hash type is unusable here. */
		for (i = 0; i < 5; i++)
		{
			char *c;
#if defined(_OPENMP) && defined(__GLIBC__)
			c = crypt_r(tests[i].plaintext, salt, &data);
#else
			c = crypt(tests[i].plaintext, salt);
#endif
			if (c && strlen(c) >= 7)
				tests[i].ciphertext = strdup(c);
			else {
				fprintf(stderr, "%s not supported on this system\n",
				    options.subformat);
				error();
			}
		}
		/* A 13-char result means crypt(3) silently fell back to DES even
		 * though a non-DES subformat was requested. */
		if (strlen(tests[0].ciphertext) == 13 &&
		    strcasecmp(options.subformat, "descrypt") &&
		    strcasecmp(options.subformat, "des")) {
			fprintf(stderr, "%s not supported on this system\n",
			    options.subformat);
			error();
		}
	}
}
/*
 * Decide whether a ciphertext looks like something this system's crypt(3)
 * can verify. Results are cached per encoding length and per coarse type
 * id in the static sup_length[]/sup_id[] tables:
 * +1 = known supported, -1 = known unsupported, 0 = not probed yet.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	int length, count_base64, count_base64_2, id, pw_length;
	char pw[PLAINTEXT_LENGTH + 1], *new_ciphertext;
/* We assume that these are zero-initialized */
	static char sup_length[BINARY_SIZE], sup_id[0x80];

	/* Count base-64 chars overall and past the 2-char salt prefix. */
	length = count_base64 = count_base64_2 = 0;
	while (ciphertext[length]) {
		if (atoi64[ARCH_INDEX(ciphertext[length])] != 0x7F) {
			count_base64++;
			if (length >= 2)
				count_base64_2++;
		}
		length++;
	}

	if (length < 13 || length >= BINARY_SIZE)
		return 0;

	/* Classify the encoding into a coarse type id for the cache. */
	id = 0;
	if (length == 13 && count_base64 == 13) /* valid salt */
		id = 1;
	else
	if (length == 13 && count_base64_2 == 11) /* invalid salt */
		id = 2;
	else
	if (length >= 13 &&
	    count_base64_2 >= length - 2 && /* allow for invalid salt */
	    (length - 2) % 11 == 0)
		id = 3;
	else
	if (length == 20 && count_base64 == 19 && ciphertext[0] == '_')
		id = 4;
	else
	if (ciphertext[0] == '$') {
		/* use the tag character after '$' itself as the id */
		id = (unsigned char)ciphertext[1];
		if (id <= 0x20 || id >= 0x80)
			id = 9;
	} else
	if (ciphertext[0] == '*' || ciphertext[0] == '!') /* likely locked */
		id = 10;

/* Previously detected as supported */
	if (sup_length[length] > 0 && sup_id[id] > 0)
		return 1;

/* Previously detected as unsupported */
	if (sup_length[length] < 0 && sup_id[id] < 0)
		return 0;

	/* Derive a throwaway plaintext from the ciphertext itself and run it
	 * through crypt to see whether this hash type round-trips. */
	pw_length = ((length - 2) / 11) << 3;
	if (pw_length >= sizeof(pw))
		pw_length = sizeof(pw) - 1;
	memcpy(pw, ciphertext, pw_length); /* reuse the string, why not? */
	pw[pw_length] = 0;

#if defined(_OPENMP) && defined(__GLIBC__)
/*
 * Let's use crypt_r(3) just like we will in crypt_all() below.
 * It is possible that crypt(3) and crypt_r(3) differ in their supported hash
 * types on a given system.
 */
	{
		struct crypt_data **data = &crypt_data[0];
		if (!*data) {
/*
 * **data is not exactly tiny, but we use mem_alloc_tiny() for its alignment
 * support and error checking. We do not need to free() this memory anyway.
 *
 * The page alignment is to keep different threads' data on different pages.
 */
			*data = mem_alloc_tiny(sizeof(**data), MEM_ALIGN_PAGE);
			memset(*data, 0, sizeof(**data));
		}
		new_ciphertext = crypt_r(pw, ciphertext, *data);
	}
#else
	new_ciphertext = crypt(pw, ciphertext);
#endif

	/* Supported iff the output has the same length and same 2-char prefix. */
	if (new_ciphertext && strlen(new_ciphertext) == length &&
	    !strncmp(new_ciphertext, ciphertext, 2)) {
		sup_length[length] = 1;
		sup_id[id] = 1;
		return 1;
	}

	if (id != 10 && !ldr_in_pot)
	if (john_main_process)
		fprintf(stderr, "Warning: "
		    "hash encoding string length %d, type id %c%c\n"
		    "appears to be unsupported on this system; "
		    "will not load such hashes.\n",
		    length, id > 0x20 ? '$' : '#', id > 0x20 ? id : '0' + id);

	if (!sup_length[length])
		sup_length[length] = -1;
	if (!sup_id[id])
		sup_id[id] = -1;
	return 0;
}
static void *binary(char *ciphertext)
{
static char out[BINARY_SIZE];
strncpy(out, ciphertext, sizeof(out)); /* NUL padding is required */
return out;
}
/*
 * Extract the salt portion of a ciphertext into a static, NUL-padded
 * SALT_SIZE buffer. The switch cuts the encoding down to just its salt so
 * hashes sharing a salt are recognized as such; with no match, the whole
 * encoding acts as its own salt.
 */
static void *salt(char *ciphertext)
{
	static char out[SALT_SIZE];
	int cut = sizeof(out);

#if 1
/* This piece is optional, but matching salts are not detected without it */
	int length = strlen(ciphertext);

	switch (length) {
	case 13: /* traditional DES crypt: 2-char salt */
	case 24:
		cut = 2;
		break;

	case 20:
		if (ciphertext[0] == '_') cut = 9;
		break;

	case 35:
	case 46:
	case 57:
		if (ciphertext[0] != '$') cut = 2;
		/* fall through */

	default:
		if ((length >= 26 && length <= 34 &&
		    !strncmp(ciphertext, "$1$", 3)) ||
		    (length >= 47 && !strncmp(ciphertext, "$5$", 3)) ||
		    (length >= 90 && !strncmp(ciphertext, "$6$", 3))) {
			/* md5crypt/sha256crypt/sha512crypt: salt runs up to the
			 * last '$' before the hash proper. */
			char *p = strrchr(ciphertext + 3, '$');
			if (p) cut = p - ciphertext;
		} else
		if (length == 59 && !strncmp(ciphertext, "$2$", 3))
			cut = 28;
		else
		if (length == 60 &&
		    (!strncmp(ciphertext, "$2a$", 4) ||
		    !strncmp(ciphertext, "$2b$", 4) ||
		    !strncmp(ciphertext, "$2x$", 4) ||
		    !strncmp(ciphertext, "$2y$", 4)))
			cut = 29; /* bcrypt: "$2?$NN$" prefix plus 22 salt chars */
		else
		if (length >= 27 &&
		    (!strncmp(ciphertext, "$md5$", 5) ||
		    !strncmp(ciphertext, "$md5,", 5))) {
			char *p = strrchr(ciphertext + 4, '$');
			if (p) {
/* NUL padding is required */
				memset(out, 0, sizeof(out));
				memcpy(out, ciphertext, ++p - ciphertext);
/*
 * Workaround what looks like a bug in sunmd5.c: crypt_genhash_impl() where it
 * takes a different substring as salt depending on whether the optional
 * existing hash encoding is present after the salt or not. Specifically, the
 * last '$' delimiter is included into the salt when there's no existing hash
 * encoding after it, but is omitted from the salt otherwise.
 */
				out[p - ciphertext] = 'x';
				return out;
			}
		}
	}
#endif

/* NUL padding is required */
	memset(out, 0, sizeof(out));
	memcpy(out, ciphertext, cut);
	return out;
}
/*
 * Cheap hash over the tail of an encoding string: mixes the base-64 value
 * of character i with the raw character before it.
 */
#define H(s, i) \
	((int)(unsigned char)(atoi64[ARCH_INDEX((s)[(i)])] ^ (s)[(i) - 1]))

/* H0..H4: bodies for the binary_hash_*() / get_hash_*() functions below.
 * Each level mixes more trailing characters to produce a wider hash,
 * masked to the respective PH_MASK_* width. Strings shorter than the
 * level needs hash to 0. */
#define H0(s) \
	int i = strlen(s) - 2; \
	return i > 0 ? H((s), i) & PH_MASK_0 : 0
#define H1(s) \
	int i = strlen(s) - 2; \
	return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 4)) & PH_MASK_1 : 0
#define H2(s) \
	int i = strlen(s) - 2; \
	return i > 2 ? (H((s), i) ^ (H((s), i - 2) << 6)) & PH_MASK_2 : 0
#define H3(s) \
	int i = strlen(s) - 2; \
	return i > 4 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \
	    (H((s), i - 4) << 10)) & PH_MASK_3 : 0
#define H4(s) \
	int i = strlen(s) - 2; \
	return i > 6 ? (H((s), i) ^ (H((s), i - 2) << 5) ^ \
	    (H((s), i - 4) << 10) ^ (H((s), i - 6) << 15)) & PH_MASK_4 : 0
/* Hash functions over the stored "binary" (the full encoding string),
 * implemented by the H0..H4 macros above. */
static int binary_hash_0(void *binary)
{
	H0((char *)binary);
}

static int binary_hash_1(void *binary)
{
	H1((char *)binary);
}

static int binary_hash_2(void *binary)
{
	H2((char *)binary);
}

static int binary_hash_3(void *binary)
{
	H3((char *)binary);
}

static int binary_hash_4(void *binary)
{
	H4((char *)binary);
}

/* Same hash functions, applied to the batch's computed crypt output. */
static int get_hash_0(int index)
{
	H0(crypt_out[index]);
}

static int get_hash_1(int index)
{
	H1(crypt_out[index]);
}

static int get_hash_2(int index)
{
	H2(crypt_out[index]);
}

static int get_hash_3(int index)
{
	H3(crypt_out[index]);
}

static int get_hash_4(int index)
{
	H4(crypt_out[index]);
}
/*
 * Hash a salt string for the salt hash table: mixes the last two relevant
 * characters (for salts longer than two chars, the final character is
 * skipped) through the base-64 decode table.
 * NOTE(review): reads salt[i - 1], so this assumes the salt is at least
 * two characters long (salt() above always cuts at >= 2) -- confirm.
 */
static int salt_hash(void *salt)
{
	int i, h;

	i = strlen((char *)salt) - 1;
	if (i > 1) i--; /* longer salts: use the two chars before the last */

	h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])];
	h ^= ((unsigned char *)salt)[i - 1];
	h <<= 6;
	h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i - 1])];
	h ^= ((unsigned char *)salt)[i];

	return h & (SALT_HASH_SIZE - 1);
}
/* Install the current salt (a NUL-terminated string produced by salt()). */
static void set_salt(void *salt)
{
	strcpy(saved_salt, salt);
}

/* Store a candidate plaintext at `index`, truncated to PLAINTEXT_LENGTH. */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}

/* Return the stored candidate plaintext at `index`. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Hash all queued candidates against the current salt via the system
 * crypt(3). With glibc + OpenMP, uses reentrant crypt_r() with one
 * lazily-allocated crypt_data per thread so threads hash in parallel.
 * Returns the number of computed hashes (stored in crypt_out[]).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	static int warned = 0;
	int count = *pcount;
	int index;

#if defined(_OPENMP) && defined(__GLIBC__)
#pragma omp parallel for default(none) private(index) shared(warned, count, crypt_out, saved_key, saved_salt, crypt_data, stderr)
	for (index = 0; index < count; index++) {
		char *hash;
		int t = omp_get_thread_num();
		if (t < MAX_THREADS) {
			struct crypt_data **data = &crypt_data[t];
			if (!*data) {
/* Stagger the structs to reduce their competition for the same cache lines */
				size_t mask = MEM_ALIGN_PAGE, shift = 0;
				while (t) {
					mask >>= 1;
					if (mask < MEM_ALIGN_CACHE)
						break;
					if (t & 1)
						shift += mask;
					t >>= 1;
				}
				*data = (void *)((char *)
				    mem_alloc_tiny(sizeof(**data) +
				    shift, MEM_ALIGN_PAGE) + shift);
				memset(*data, 0, sizeof(**data));
			}
			hash = crypt_r(saved_key[index], saved_salt, *data);
		} else { /* should not happen */
			/* fall back to a stack-local crypt_data */
			struct crypt_data data;
			memset(&data, 0, sizeof(data));
			hash = crypt_r(saved_key[index], saved_salt, &data);
		}
		if (!hash) {
#pragma omp critical
			if (!warned) {
				fprintf(stderr,
				    "Warning: crypt_r() returned NULL\n");
				warned = 1;
			}
			hash = ""; /* store an empty string so cmp_*() can't match */
		}
		strnzcpy(crypt_out[index], hash, BINARY_SIZE);
	}
#else
#if defined(_OPENMP) && defined(__sun)
/*
 * crypt(3C) is MT-safe on Solaris. For traditional DES-based hashes, this is
 * implemented with locking (hence there's no speedup from the use of multiple
 * threads, and the per-thread performance is extremely poor anyway). For
 * modern hash types, the function is actually able to compute multiple hashes
 * in parallel by different threads (and the performance for some hash types is
 * reasonable). Overall, this code is reasonable to use for SHA-crypt and
 * SunMD5 hashes, which are not yet supported by non-jumbo John natively.
 */
#pragma omp parallel for /* default(none) private(index) shared(warned, count, crypt_out, saved_key, saved_salt, stderr) or __iob */
#endif
	for (index = 0; index < count; index++) {
		char *hash = crypt(saved_key[index], saved_salt);
		if (!hash) {
#if defined(_OPENMP) && defined(__sun)
#pragma omp critical
#endif
			if (!warned) {
				fprintf(stderr,
				    "Warning: crypt() returned NULL\n");
				warned = 1;
			}
			hash = "";
		}
		strnzcpy(crypt_out[index], hash, BINARY_SIZE);
	}
#endif

	return count;
}
/* Return nonzero if any computed hash in the batch matches `binary`. */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (!strcmp((char *)binary, crypt_out[index]))
			return 1;

	return 0;
}

/* Compare one specific computed hash against `binary`. */
static int cmp_one(void *binary, int index)
{
	return !strcmp((char *)binary, crypt_out[index]);
}

/* Full strings already compared in cmp_one(); nothing more to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
* For generic crypt(3), the algorithm is returned as the first "tunable cost":
* 0: unknown (shouldn't happen
* 1: descrypt
* 2: md5crypt
* 3: sunmd5
* 4: bcrypt
* 5: sha256crypt
* 6: sha512crypt
* New subformats should be added to the end of the list.
* Otherwise, restored sessions might contine cracking different hashes
* if the (not yet implemented) option --cost= had been used
* when starting that session.
*/
/*
 * Identify the crypt(3) subformat from a salt string, per the numbering
 * documented above (0 unknown, 1 descrypt, 2 md5crypt, 3 sunmd5,
 * 4 bcrypt, 5 sha256crypt, 6 sha512crypt).
 *
 * Fix: salt() above accepts the "$2b$"/"$2x$"/"$2y$" bcrypt variants, but
 * this function only recognized "$2a$"; the variants now classify as
 * bcrypt (4) too, for consistency.
 */
static unsigned int c3_subformat_algorithm(void *salt)
{
	char *c3_salt = salt;

	if (!c3_salt[0] || !c3_salt[1])
		return 0;
	if (!c3_salt[2])
		return 1; /* bare two-character salt: descrypt */
	if (c3_salt[0] != '$')
		return 0;
	if (c3_salt[1] == '1')
		return 2; /* md5crypt */
	if (c3_salt[1] == 'm')
		return 3; /* sunmd5: "$md5$..." / "$md5,..." */
	if (c3_salt[1] == '2' &&
	    (c3_salt[2] == 'a' || c3_salt[2] == 'b' ||
	     c3_salt[2] == 'x' || c3_salt[2] == 'y'))
		return 4; /* bcrypt, any modern variant */
	if (c3_salt[1] == '5')
		return 5; /* sha256crypt */
	if (c3_salt[1] == '6')
		return 6; /* sha512crypt */
	return 0;
}

/*
 * Second tunable cost: algorithm-specific iteration count parsed from the
 * salt. Algorithms 0-2 report a fixed cost of 1 (no tunable parameter).
 *
 * Fixes: the original had unreachable `case 1`/`case 2` branches shadowed
 * by the `algorithm < 3` early return (dead code removed, behavior kept);
 * it also scanned into an `unsigned int` with "%d" and left `rounds`
 * uninitialized when sscanf failed -- now "%u" with a checked return.
 */
static unsigned int c3_algorithm_specific_cost1(void *salt)
{
	unsigned int algorithm, rounds;
	char *c3_salt;

	c3_salt = salt;
	algorithm = c3_subformat_algorithm(salt);

	if (algorithm < 3)
		/* no tunable cost parameters */
		return 1;

	switch (algorithm) {
	case 3: /* sunmd5: "rounds=N" is added to the 4096 base rounds */
		c3_salt = strstr(c3_salt, "rounds=");
		if (!c3_salt || sscanf(c3_salt, "rounds=%u", &rounds) != 1)
			return 904 + 4096; /* default */
		return rounds + 4096;
	case 4: /* bcrypt: "$2?$NN$..." -- NN is the log2 cost */
		c3_salt += 4;
		if (sscanf(c3_salt, "%u", &rounds) != 1)
			return 1; /* malformed cost field */
		return rounds;
	case 5:
	case 6:
		/* sha256crypt and sha512crypt handled the same:
		 * $x$rounds=xxxx$salt$hash (or $x$salt$hash for the
		 * 5000-round default) */
		c3_salt += 3;
		if (strncmp(c3_salt, "rounds=", 7))
			return 5000; /* default */
		if (sscanf(c3_salt, "rounds=%u", &rounds) != 1)
			return 5000;
		return rounds;
	}
	return 1;
}
/*
 * Format descriptor wiring the functions above into John's format
 * interface (slot order defined by struct fmt_main in formats.h).
 */
struct fmt_main fmt_crypt = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
/*
 * use algorithm as first tunable cost:
 * (0: unknown)
 * descrypt, md5crypt, sunmd5, bcrypt, sha512crypt, sha256crypt
 */
			"algorithm [1:descrypt 2:md5crypt 3:sunmd5 4:bcrypt 5:sha256crypt 6:sha512crypt]",
			"algorithm specific iterations",
		},
		{ NULL },
		tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		binary,
		salt,
		/* tunable cost extractors */
		{
			c3_subformat_algorithm,
#if 1
			c3_algorithm_specific_cost1
#endif
		},
		fmt_default_source,
		/* binary hash functions (levels 5-6 unused) */
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			NULL,
			NULL
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		/* computed-hash lookups matching the binary hashes above */
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			NULL,
			NULL
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif // HAVE_CRYPT
|
3777.c |
/*
* Compile using the command:
* `cc 27Stencil.c -o oa -fopenmp -lm`
*/
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENACC
#include <openacc.h>
#endif
#define DEFAULT_DATASIZE 1048576 /* Default datasize. */
#define DEFAULT_REPS 10 /* Default repetitions. */
#define CONF95 1.96
#define ITERATIONS 10
#define FAC (1./26)
#define TOLERANCE 1.0e-15
extern int reps; /* Repetitions. */
extern double *times; /* Array to store results in. */
extern int flag; /* Flag to set CPU or GPU invocation. */
extern unsigned int datasize; /* Datasize passed to benchmark functions. */
unsigned int datasize = -1; /* Datasize for tests in bytes. */
int reps = -1; /* Repetitions. */
double *times; /* Array of doubles storing the benchmark times in microseconds. */
double testtime; /* The average test time in microseconds for reps runs. */
double testsd; /* The standard deviation in the test time in microseconds for reps runs. */
int flag = 0; /* 0 indicates CPU. */
/*
* Function prototypes for common functions.
*/
void init(int argc, char **argv);
void finalisetest(char *);
void finalise(void);
void benchmark(char *, double (*test)(void));
void print_results(char *, double, double);
/* Forward Declarations of utility functions*/
double max_diff(double *, double *, int);
void wul();
/* Print the accepted command-line options together with their defaults. */
void usage(char *argv[]) {
	printf("Usage: %s \n"
	       "\t--reps <repetitions> (default %d)\n"
	       "\t--datasize <datasize> (default %d bytes)\n",
	       argv[0], DEFAULT_REPS, DEFAULT_DATASIZE);
}
/*
* This function parses the parameters from the command line.
*/
/*
 * Parse --reps / --datasize / -h into the global configuration.
 * Exits with a usage message on malformed input.
 *
 * Fix: the original read argv[++arg] without checking argc, so a flag
 * given as the last argument passed argv[argc] (NULL) to atoi -- UB.
 */
void parse_args(int argc, char *argv[]) {
	int arg;
	for (arg = 1; arg < argc; arg++) {
		if (strcmp(argv[arg], "--reps") == 0) {
			if (arg + 1 >= argc) {
				printf("Missing value for --reps\n");
				usage(argv);
				exit(EXIT_FAILURE);
			}
			reps = atoi(argv[++arg]);
			if (reps == 0) {
				printf("Invalid integer:--reps: %s\n", argv[arg]);
				usage(argv);
				exit(EXIT_FAILURE);
			}
		} else if (strcmp(argv[arg], "--datasize") == 0) {
			if (arg + 1 >= argc) {
				printf("Missing value for --datasize\n");
				usage(argv);
				exit(EXIT_FAILURE);
			}
			datasize = atoi(argv[++arg]);
			if (datasize == 0) {
				printf("Invalid integer:--datasize: %s\n", argv[arg]);
				usage(argv);
				exit(EXIT_FAILURE);
			}
		} else if (strcmp(argv[arg], "-h") == 0) {
			usage(argv);
			exit(EXIT_SUCCESS);
		} else {
			printf("Invalid parameters: %s\n", argv[arg]);
			usage(argv);
			exit(EXIT_FAILURE);
		}
	}
}
/*
 * Compute mean and standard deviation over the recorded times, skipping
 * entries equal to 0 (the sentinel for a failed repetition).
 *
 * Fix: if every repetition failed, good_reps is 0 and the original
 * divided by zero; report 0 mean / 0 sd instead.
 */
void stats(double *mtp, double *sdp) {
	double meantime, totaltime, sumsq, mintime, maxtime, sd;
	int i, good_reps;

	mintime = 1.0e10;
	maxtime = 0.;
	totaltime = 0.;
	good_reps = 0;

	for (i = 0; i < reps; i++) {
		/* Skip entries where times is 0, this indicates an error occured */
		if (times[i] != 0) {
			mintime = (mintime < times[i]) ? mintime : times[i];
			maxtime = (maxtime > times[i]) ? maxtime : times[i];
			totaltime += times[i];
			good_reps++;
		}
	}

	if (good_reps == 0) {
		*mtp = 0.;
		*sdp = 0.;
		return;
	}

	meantime = totaltime / good_reps;

	sumsq = 0;
	for (i = 0; i < reps; i++) {
		if (times[i] != 0) {
			sumsq += (times[i] - meantime) * (times[i] - meantime);
		}
	}
	sd = sqrt(sumsq / good_reps);

	*mtp = meantime;
	*sdp = sd;
}
/*
* This function prints the results of the tests.
* If you use a compiler which sets a different preprocessor flag
* you may wish to add it here.
*/
/*
 * Report a benchmark result. Only the mean time (in microseconds) is
 * printed; the compiler tag, name and confidence interval feed the
 * commented-out verbose output line.
 */
void print_results(char *name, double testtime, double testsd) {
	char compiler[20];

	/* Default identifier, overridden by known preprocessor flags. */
	sprintf(compiler, "COMPILER");
#ifdef __PGI
	sprintf(compiler, "PGI");
#endif
#ifdef __HMPP
	sprintf(compiler, "CAPS");
#endif

	//printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6);
	printf("%f\n", testtime * 1e6);
}
/*
* This function initialises the storage for the test results and set the defaults.
*/
/*
 * Initialise the harness: parse command-line arguments, apply defaults,
 * and allocate the per-repetition timing array.
 *
 * Fix: the original did not check the malloc() result and would later
 * write through a NULL `times` pointer; fail fast instead.
 */
void init(int argc, char **argv)
{
	parse_args(argc, argv);
	if (reps == -1) {
		reps = DEFAULT_REPS;
	}
	if (datasize == (unsigned int)-1) {
		datasize = DEFAULT_DATASIZE;
	}

	times = (double *)malloc((reps) * sizeof(double));
	if (times == NULL) {
		printf("Unable to allocate memory for timing results.\n");
		exit(EXIT_FAILURE);
	}
	/*
	#ifdef __PGI
	acc_init(acc_device_nvidia);
	//	printf("PGI INIT\n");
	#endif
	#ifdef __HMPP
	int a[5] = {1,2,3,4,5};
	#pragma acc data copyin(a[0:5])
	{}
	#endif
	#ifdef _CRAYC
	int a[5] = {1,2,3,4,5};
	#pragma acc data copyin(a[0:5])
	{}
	#endif
	*/
}
/* Release the per-repetition timing array allocated in init(). */
void finalise(void) {
	free(times);
}
/*
* This function runs the benchmark specified.
*/
/*
 * Run one benchmark function `reps` times, recording each run's elapsed
 * time. Test functions signal failure through sentinel return values:
 * -10000 = memory allocation failure, -11000 = CPU/GPU result mismatch.
 * Failed runs are stored as 0 and skipped by stats().
 */
void benchmark(char *name, double (*test)(void))
{
	int i = 0;
	double tmp = 0;

	for (i=0; i<reps; i++) {
		tmp = test();
		/* Exact floating-point comparison is intentional here: the
		 * sentinels are returned verbatim, never computed. */
		if (tmp == -10000){
			printf("Memory allocation failure in %s\n", name);
			times[i] = 0;
		}
		else if (tmp == -11000){
			printf("CPU/GPU mismatch in %s\n", name);
			times[i] = 0;
		}
		else{
			times[i] = tmp;
		}
	}

	stats(&testtime, &testsd);
	//printf("in benchmark\n");
	print_results(name, testtime, testsd);
	//printf("printed result\n");
}
/*
 * 27-point stencil benchmark: runs ITERATIONS sweeps of a 26-neighbour
 * average (factor FAC = 1/26) over a cubic grid on the host, repeats the
 * same computation under the accelerator pragmas, and compares results.
 * Returns the device elapsed time in seconds, or a sentinel:
 * -10000 = allocation failure, -11000 = host/device mismatch.
 */
double stencil()
{
	extern unsigned int datasize;
	/* Cube edge chosen so two double arrays fit in `datasize` bytes. */
	int sz = cbrt((datasize/sizeof(double))/2);
	int i, j, k, iter;
	int n = sz-2; /* interior extent; one-cell halo on each face */
	double fac = FAC;
	double t1, t2;
	double md;
	//printf("size = %d\n", sz);

/* Work buffers, with halos */
	double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
	double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
	double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
	double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
	double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);

	if(a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL){
/* Something went wrong in the memory allocation here, fail gracefully */
		return(-10000);
	}

/* initialize input array a0 */

/* zero all of array (including halos) */
	//printf("size = %d\n", sz);
	for (i = 0; i < sz; i++) {
		for (j = 0; j < sz; j++) {
			for (k = 0; k < sz; k++) {
				a0[i*sz*sz+j*sz+k] = 0.0;
				//printf("%d\t", (i*sz*sz+j*sz+k));
			}
		}
	}
	//printf("\n");
	//int size_of_a0 = sizeof(a0) / sizeof(*a0);
	//printf("size of a0 = %d\n", size_of_a0);

/* use random numbers to fill interior */
	for (i = 1; i < n+1; i++) {
		for (j = 1; j < n+1; j++) {
			for (k = 1; k < n+1; k++) {
				a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX);
			}
		}
	}

/* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */
/* save initial input array for later GPU run */
	for (i = 0; i < sz; i++) {
		for (j = 0; j < sz; j++) {
			for (k = 0; k < sz; k++) {
				a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
			}
		}
	}

	//printf("Host computation\n");
/* run main computation on host: each interior cell becomes the mean of
 * its 26 neighbours, double-buffered through a1. */
	for (iter = 0; iter < ITERATIONS; iter++) {
		for (i = 1; i < n+1; i++) {
			for (j = 1; j < n+1; j++) {
				for (k = 1; k < n+1; k++) {
					a1[i*sz*sz+j*sz+k] = (
					    a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
					    a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
					    a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
					    a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
					    a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
					    a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
					    a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
					    a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
					    a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
					    a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
					    a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
					    a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
					    a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
					) * fac;
				}
			}
		}
		/* copy the new interior back into a0 for the next sweep */
		for (i = 1; i < n+1; i++) {
			for (j = 1; j < n+1; j++) {
				for (k = 1; k < n+1; k++) {
					a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
				}
			}
		}
	} /* end iteration loop */

/* save result */
/* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */
	for (i = 0; i < sz; i++) {
		for (j = 0; j < sz; j++) {
			for (k = 0; k < sz; k++) {
				host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
				//	printf("%lf\t", a0[i*sz*sz+j*sz+k]);
			}
		}
	}
	//int size = sizeof(host_result)/sizeof(host_result[0]);
	//for(i = 0; i < size; i++) {
	//	printf("%lf\t", host_result[i]);
	//}
	//printf("\n");

/* copy initial array back to a0 */
/* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */
	for (i = 0; i < sz; i++) {
		for (j = 0; j < sz; j++) {
			for (k = 0; k < sz; k++) {
				a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
			}
		}
	}

	//printf("Starting acc pragma code\n");
	/* Device run of the same computation, timed with omp_get_wtime(). */
	t1 = omp_get_wtime();
#pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
	{
		for (iter = 0; iter < ITERATIONS; iter++) {
/* NOTE(review): "#pragma omp p0/p1/p2 parallel for ..." is not valid OpenMP
 * syntax; compilers typically ignore these unknown pragmas, so this loop nest
 * likely runs serially inside the acc data region -- confirm intent. */
#pragma omp p0 parallel for schedule(dynamic, 28) simd num_threads(28)
			for (i = 1; i < n+1; i++) {
#pragma omp p1 parallel for schedule(dynamic, 28) simd num_threads(28)
				for (j = 1; j < n+1; j++) {
#pragma omp p2 parallel for schedule(dynamic, 28) simd num_threads(28)
					for (k = 1; k < n+1; k++) {
						a1[i*sz*sz+j*sz+k] = (
						    a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
						    a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
						    a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
						    a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
						    a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
						    a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
						    a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
						    a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
						    a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
						    a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
						    a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
						    a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
						    a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
						) * fac;
					}
				}
			}
#pragma acc parallel loop
			for (i = 1; i < n+1; i++) {
#pragma acc loop
				for (j = 1; j < n+1; j++) {
#pragma acc loop
					for (k = 1; k < n+1; k++) {
						a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
					}
				}
			}
		} /* end iteration loop */
	} /* end data region */
#pragma acc wait
	t2 = omp_get_wtime();

	memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz);

	/* Verify device output against the host reference. */
	md = max_diff(&host_result[0],&device_result[0], sz);

/* Free malloc'd memory to prevent leaks */
	free(a0);
	free(a0_init);
	free(a1);
	free(host_result);
	free(device_result);
	//printf("md: %lf \t tolerance: %lf", md, TOLERANCE);
	if (md < TOLERANCE ){
		//printf ("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE);
		return(t2 - t1);
	}
	else{
		//	printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md);
		return(-11000);
	}
}
/* Utility Functions */
/*
 * Largest absolute element-wise difference between two sz*sz*sz arrays,
 * taken over the interior only (the one-cell halo is excluded).
 */
double max_diff(double *array1, double *array2, int sz)
{
	double worst = 0.0;
	int x, y, z;

	for (x = 1; x <= sz - 2; x++) {
		for (y = 1; y <= sz - 2; y++) {
			for (z = 1; z <= sz - 2; z++) {
				int idx = (x * sz + y) * sz + z;
				double delta = fabs(array1[idx] - array2[idx]);
				if (delta > worst)
					worst = delta;
			}
		}
	}
	return worst;
}
/*
* This function ensures the device is awake.
* It is more portable than acc_init().
*/
/*
 * "Wake-up" routine: run a trivial kernel so the accelerator is
 * initialised before any timed benchmark. More portable than acc_init().
 *
 * Fix: on allocation failure the original printed a message but then
 * fell through and dereferenced the NULL pointer(s); now it cleans up
 * and returns early.
 */
void wul(){
	int data = 8192;
	double *arr_a = (double *)malloc(sizeof(double) * data);
	double *arr_b = (double *)malloc(sizeof(double) * data);
	int i = 0;
	if (arr_a==NULL||arr_b==NULL) {
		printf("Unable to allocate memory in wul.\n");
		free(arr_a);
		free(arr_b);
		return;
	}
	for (i=0;i<data;i++){
		arr_a[i] = (double) (rand()/(1.0+RAND_MAX));
	}
#pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
	{
#pragma acc parallel loop
		for (i=0;i<data;i++){
			arr_b[i] = arr_a[i] * 2;
		}
	}
	if (arr_a[0] < 0){
		printf("Error in WUL\n");
		/*
		 * This should never be called as rands should be in the range (0,1].
		 * This stops clever optimizers.
		 */
	}
	free(arr_a);
	free(arr_b);
}
/*
 * Entry point: parse arguments, wake the device, run the 27-point
 * stencil benchmark, print the mean time, then release storage.
 */
int main(int argc, char **argv) {
	char testName[32];

	//printf("compiler name datasize testtime*1e6 CONF95*testsd*1e6\n");
/* Initialise storage for test results & parse input arguements. */
	init(argc, argv);

/* Ensure device is awake. */
	wul();

	sprintf(testName, "27S");
	benchmark(testName, &stencil);

/* Print results & free results storage */
	finalise();

	return EXIT_SUCCESS;
}
|
GB_unop__identity_bool_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_fc64)
// op(A') function: GB (_unop_tran__identity_bool_fc64)
// C type: bool
// A type: GxB_FC64_t
// cast: bool cij = (creal (aij) != 0) || (cimag (aij) != 0)
// unaryop: cij = aij
// Type of A's entries
#define GB_ATYPE \
    GxB_FC64_t

// Type of C's entries
#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity)
#define GB_OP(z, x) \
    z = x ;

// casting: a complex value is "true" iff either component is nonzero
#define GB_CAST(z, aij) \
    bool z = (creal (aij) != 0) || (cimag (aij) != 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (creal (aij) != 0) || (cimag (aij) != 0) ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (false here: FC64 -> bool requires a cast)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with FC64 -> bool typecast over all anz
// entries: Cx [p] = (creal(Ax[p]) != 0) || (cimag(Ax[p]) != 0).
// Ab, when non-NULL, is the bitmap selecting which entries are present.
GrB_Info GB (_unop_apply__identity_bool_fc64)
(
    bool *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // no typecast: plain parallel memcpy (branch not taken here)
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            bool z = (creal (aij) != 0) || (cimag (aij) != 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            bool z = (creal (aij) != 0) || (cimag (aij) != 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose with typecast; the actual work is in the
// shared template GB_unop_transpose.c, driven by the GB_* macros above.
GrB_Info GB (_unop_tran__identity_bool_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
splitters.c |
#include <composer.h>
#include <MagickWand.h>
#include <stdio.h>
#define DEBUG 0
#define DBG(fmt, ...) \
do { if (DEBUG) fprintf(stderr, "%s:%d:%s(): " fmt "\n", __FILE__, \
__LINE__, __func__, __VA_ARGS__); } while (0)
struct WandSplit {
MagickWand *wand;
size_t width;
size_t height;
};
/*
 * Construct splitter state for row-wise image splitting and report the
 * number of splittable items (rows) through *items.
 *
 * Fix: the original dereferenced the malloc() result without checking
 * it; on OOM we now report zero items and return NULL.
 */
void* WandSplit_new(MagickWand **wand_to_split, struct WandSplit_init_args *_, int64_t *items) {
	struct WandSplit *splitter = (struct WandSplit *)malloc(sizeof(struct WandSplit));
	if (splitter == NULL) {
		*items = 0;
		return NULL;
	}
	splitter->wand = *wand_to_split;

	// We'll split the image by row, since there are nice methods for reconstructing an image
	// in this way that are builtin.
	splitter->width = MagickGetImageWidth(splitter->wand);
	splitter->height = MagickGetImageHeight(splitter->wand);

	*items = splitter->height;
	DBG("items: %ld", splitter->height);
	return (void *)splitter;
}
/*
 * Produce the piece covering rows [start, end) of the source image.
 * Returns SplitterFinished once start is at or past the last row; the
 * final region is clipped to the image height. The extracted strip is
 * written to *out as a newly allocated wand.
 * NOTE(review): height is size_t while start/end are int64_t; the
 * comparisons rely on start/end being non-negative -- confirm callers.
 */
SplitterStatus WandSplit_next(const void *s,
	int64_t start,
	int64_t end,
	MagickWand **out) {
	const struct WandSplit *splitter = (const struct WandSplit *)s;
	DBG("start: %ld end: %ld height: %ld", start, end, splitter->height);
	if (splitter->height <= start) {
		DBG("finished got range (%ld %ld)", start, end);
		return SplitterFinished;
	} else {
		size_t region_height = (end - start);
		if (splitter->height < end) {
			DBG("clipping region height by %ld", end - splitter->height);
			region_height = splitter->height - start;
		}
		DBG("range: %ld, %ld", start, start + region_height);
		/* full-width strip at x = 0, y = start */
		MagickWand *wand = MagickGetImageRegion(splitter->wand, splitter->width, region_height, 0, start);
		*out = wand;
		return SplitterContinue;
	}
}
/*
 * Sequentially re-assemble `count` row-strip wands into a single image:
 * append every piece onto an accumulator wand, then collapse it with
 * MagickAppendImages (stacked top-to-bottom).
 */
MagickWand *aggregate_seq(MagickWand **pieces, int64_t count) {
	MagickWand *accum = NewMagickWand();
	MagickResetIterator(accum);
	DBG("consturcted results image %p", accum);

	int idx;
	for (idx = 0; idx < count; idx++) {
		DBG("adding image %d", idx);
		fflush(stderr);
		MagickSetLastIterator(accum);
		MagickAddImage(accum, pieces[idx]);
	}

	MagickResetIterator(accum);
	MagickWand *joined = MagickAppendImages(accum, 1);
	DestroyMagickWand(accum);
	return joined;
}
/*
 * Parallel re-assembly: each of `threads` OpenMP workers appends its
 * contiguous slice of `pieces` into a private wand and collapses it with
 * MagickAppendImages; the per-thread partial images are then merged
 * sequentially into the final result.
 */
MagickWand *aggregate_par(MagickWand **pieces, int count, int threads) {
	// Holds aggregation state.
	MagickWand **results = (MagickWand **)malloc(sizeof(MagickWand *) * threads);
	for (int i = 0; i < threads; i++) {
		results[i] = NewMagickWand();
		MagickResetIterator(results[i]);
	}

	int values_per_thread = count / threads;
	printf("values per piece: %d\n", values_per_thread);

#pragma omp parallel for
	for (int i = 0; i < threads; i++) {
		int start = i * values_per_thread;
		int end = (i + 1) * values_per_thread;
		if (i == threads - 1) {
			/* last worker also takes the division remainder */
			end = count;
		}
		MagickWand *result = results[i];
		// printf("thread %d: %d->%d\n", omp_get_thread_num(), start, end);
		for (int j = start; j < end; j++) {
			MagickSetLastIterator(result);
			MagickAddImage(result, pieces[j]);
		}
		MagickResetIterator(result);
		MagickWand *final = MagickAppendImages(result, 1);
		result = DestroyMagickWand(result);
		results[i] = final;
	}

	/* Merge the per-thread partial images, in order, into one image. */
	MagickWand *final_iterator = NewMagickWand();
	MagickResetIterator(final_iterator);
	for (int i = 0; i < threads; i++) {
		MagickSetLastIterator(final_iterator);
		MagickAddImage(final_iterator, results[i]);
	}
	MagickResetIterator(final_iterator);
	MagickWand *final = MagickAppendImages(final_iterator, 1);
	for (int i = 0; i < threads; i++) {
		DestroyMagickWand(results[i]);
	}
	free(results);
	return final;
}
void *WandSplit_merge(const void *s, int64_t length, int64_t threads) {
MagickWand *final;
MagickWand **pieces = (MagickWand **)s;
if (length == 1) {
DBG("only one item: returning it %d", 0);
return ((MagickWand **)s)[0];
}
MagickWand *results = NewMagickWand();
MagickResetIterator(results);
for (int i = 0; i < length; i++) {
MagickSetLastIterator(results);
MagickAddImage(results, pieces[i]);
}
MagickResetIterator(results);
final = MagickAppendImages(results, 1);
DestroyMagickWand(results);
// DBG("aggregate_seq: %p", aggregate_seq);
// final = aggregate_seq(pieces, length);
return (void *)final;
}
|
DenseTensor.h | //=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseTensor.h
// \brief Header file for the OpenMP-based dense tensor SMP implementation
//
// Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved
// Copyright (C) 2018 Hartmut Kaiser - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_TENSOR_MATH_SMP_OPENMP_DENSETENSOR_H_
#define _BLAZE_TENSOR_MATH_SMP_OPENMP_DENSETENSOR_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <blaze/math/Aliases.h>
#include <blaze/math/AlignmentFlag.h>
#include <blaze/math/StorageOrder.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/functors/AddAssign.h>
#include <blaze/math/functors/Assign.h>
#include <blaze/math/functors/MultAssign.h>
#include <blaze/math/functors/SchurAssign.h>
#include <blaze/math/functors/SubAssign.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/smp/ThreadMapping.h>
#include <blaze/math/typetraits/IsSIMDCombinable.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Submatrix.h>
#include <blaze/system/SMP.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/FunctionTrace.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
#include <blaze/util/algorithms/Min.h>
#include <omp.h>
#include <blaze_tensor/math/expressions/DenseTensor.h>
#include <blaze_tensor/math/smp/TensorThreadMapping.h>
#include <blaze_tensor/math/typetraits/IsDenseTensor.h>
#include <blaze_tensor/math/views/PageSlice.h>
namespace blaze {
//=================================================================================================
//
// OPENMP-BASED ASSIGNMENT KERNELS
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense tensor to a dense tensor.
// \ingroup math
//
// \param lhs The target left-hand side dense tensor.
// \param rhs The right-hand side dense tensor to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// tensor to a dense tensor.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense tensor
, typename MT2 // Type of the right-hand side dense tensor
, typename OP > // Type of the assignment operation
void openmpAssign( DenseTensor<MT1>& lhs, const DenseTensor<MT2>& rhs, OP op )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
using ET1 = ElementType_t<MT1>;
using ET2 = ElementType_t<MT2>;
// SIMD is only usable if both tensors are vectorizable and their element
// types can be combined into a common SIMD type.
constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable_v<ET1,ET2> );
constexpr size_t SIMDSIZE( SIMDTrait< ElementType_t<MT1> >::size );
const bool lhsAligned( (~lhs).isAligned() );
const bool rhsAligned( (~rhs).isAligned() );
// Partition the row/column space of each page across the OpenMP team.
const int threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
// Rows per thread: the equal share is rounded up (addon1); with SIMD it is
// additionally rounded up to a multiple of SIMDSIZE so chunk boundaries
// stay on SIMD lanes.
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
// Same chunking scheme for the column direction.
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
// Each iteration i corresponds to one (row-block, column-block) tile of
// the thread mapping; dynamic scheduling balances uneven tile costs.
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
// Skip tiles that fall completely outside the tensor extents.
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
// Apply the tile assignment page by page.
for (size_t k = 0; k != (~rhs).pages(); ++k)
{
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
auto lhs_slice = pageslice( ~lhs, k );
auto rhs_slice = pageslice( ~rhs, k );
// Dispatch on the runtime alignment of both operands so that aligned
// submatrix views (and thus aligned SIMD loads/stores) are used
// whenever possible.
if( simdEnabled && lhsAligned && rhsAligned ) {
auto target( submatrix<aligned> ( ~lhs_slice, row, column, m, n ) );
const auto source( submatrix<aligned> ( ~rhs_slice, row, column, m, n ) );
op( target, source );
}
else if( simdEnabled && lhsAligned ) {
auto target( submatrix<aligned> ( ~lhs_slice, row, column, m, n ) );
const auto source( submatrix<unaligned>( ~rhs_slice, row, column, m, n ) );
op( target, source );
}
else if( simdEnabled && rhsAligned ) {
auto target( submatrix<unaligned>( ~lhs_slice, row, column, m, n ) );
const auto source( submatrix<aligned> ( ~rhs_slice, row, column, m, n ) );
op( target, source );
}
else {
auto target( submatrix<unaligned>( ~lhs_slice, row, column, m, n ) );
const auto source( submatrix<unaligned>( ~rhs_slice, row, column, m, n ) );
op( target, source );
}
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense tensor.
// \ingroup smp
//
// \param lhs The target left-hand side dense tensor.
// \param rhs The right-hand side tensor to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense tensor. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense tensor
, typename MT2 > // Type of the right-hand side dense tensor
inline EnableIf_t< IsDenseTensor_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) >
smpAssign( Tensor<MT1>& lhs, const Tensor<MT2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_INTERNAL_ASSERT( (~lhs).pages() == (~rhs).pages(), "Invalid number of pages" );
// At least one operand is not SMP-assignable (see the EnableIf guard),
// so delegate directly to the serial assignment kernel.
assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense tensor.
// \ingroup math
//
// \param lhs The target left-hand side dense tensor.
// \param rhs The right-hand side tensor to be assigned.
// \return void
//
// This function implements the OpenMP-based SMP assignment to a dense tensor. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense tensor
, typename MT2 > // Type of the right-hand side dense tensor
inline EnableIf_t< IsDenseTensor_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> >
smpAssign( Tensor<MT1>& lhs, const Tensor<MT2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_INTERNAL_ASSERT( (~lhs).pages() == (~rhs).pages(), "Invalid number of pages" );
BLAZE_PARALLEL_SECTION
{
// Serial path: either a serial section is enforced or the right-hand
// side expression cannot be evaluated thread-parallel.
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
assign( ~lhs, ~rhs );
}
else {
// Spawn the OpenMP team; every thread runs openmpAssign and picks its
// tiles via the "#pragma omp for" contained therein.
#pragma omp parallel shared( lhs, rhs )
openmpAssign( ~lhs, ~rhs, Assign() );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense tensor.
// \ingroup smp
//
// \param lhs The target left-hand side dense tensor.
// \param rhs The right-hand side tensor to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense tensor.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense tensor
, typename MT2 > // Type of the right-hand side dense tensor
inline EnableIf_t< IsDenseTensor_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) >
smpAddAssign( Tensor<MT1>& lhs, const Tensor<MT2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_INTERNAL_ASSERT( (~lhs).pages() == (~rhs).pages(), "Invalid number of pages" );
// At least one operand is not SMP-assignable (see the EnableIf guard),
// so delegate directly to the serial addition-assignment kernel.
addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense tensor.
// \ingroup math
//
// \param lhs The target left-hand side dense tensor.
// \param rhs The right-hand side tensor to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense tensor. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense tensor
, typename MT2 > // Type of the right-hand side dense tensor
inline EnableIf_t< IsDenseTensor_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> >
smpAddAssign( Tensor<MT1>& lhs, const Tensor<MT2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_INTERNAL_ASSERT( (~lhs).pages() == (~rhs).pages(), "Invalid number of pages" );
BLAZE_PARALLEL_SECTION
{
// Serial path: either a serial section is enforced or the right-hand
// side expression cannot be evaluated thread-parallel.
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
addAssign( ~lhs, ~rhs );
}
else {
// Parallel path: the generic OpenMP kernel applies AddAssign per tile.
#pragma omp parallel shared( lhs, rhs )
openmpAssign( ~lhs, ~rhs, AddAssign() );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense tensor.
// \ingroup smp
//
// \param lhs The target left-hand side dense tensor.
// \param rhs The right-hand side tensor to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment to a dense tensor.
// Due to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense tensor
, typename MT2 > // Type of the right-hand side dense tensor
inline EnableIf_t< IsDenseTensor_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) >
smpSubAssign( Tensor<MT1>& lhs, const Tensor<MT2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_INTERNAL_ASSERT( (~lhs).pages() == (~rhs).pages(), "Invalid number of pages" );
// At least one operand is not SMP-assignable (see the EnableIf guard),
// so delegate directly to the serial subtraction-assignment kernel.
subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense tensor.
// \ingroup smp
//
// \param lhs The target left-hand side dense tensor.
// \param rhs The right-hand side tensor to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment of a tensor to a
// dense tensor. Due to the explicit application of the SFINAE principle, this function can only
// be selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense tensor
, typename MT2 > // Type of the right-hand side dense tensor
inline EnableIf_t< IsDenseTensor_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> >
smpSubAssign( Tensor<MT1>& lhs, const Tensor<MT2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_INTERNAL_ASSERT( (~lhs).pages() == (~rhs).pages(), "Invalid number of pages" );
BLAZE_PARALLEL_SECTION
{
// Serial path: either a serial section is enforced or the right-hand
// side expression cannot be evaluated thread-parallel.
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
subAssign( ~lhs, ~rhs );
}
else {
// Parallel path: the generic OpenMP kernel applies SubAssign per tile.
#pragma omp parallel shared( lhs, rhs )
openmpAssign( ~lhs, ~rhs, SubAssign() );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SCHUR PRODUCT ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP Schur product assignment to a dense tensor.
// \ingroup smp
//
// \param lhs The target left-hand side dense tensor.
// \param rhs The right-hand side tensor for the Schur product.
// \return void
//
// This function implements the default OpenMP-based SMP Schur product assignment to a dense
// tensor. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense tensor
, typename MT2 > // Type of the right-hand side dense tensor
inline EnableIf_t< IsDenseTensor_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) >
smpSchurAssign( Tensor<MT1>& lhs, const Tensor<MT2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_INTERNAL_ASSERT( (~lhs).pages() == (~rhs).pages(), "Invalid number of pages" );
// At least one operand is not SMP-assignable (see the EnableIf guard),
// so delegate directly to the serial Schur-product-assignment kernel.
schurAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP Schur product assignment to a dense tensor.
// \ingroup math
//
// \param lhs The target left-hand side dense tensor.
// \param rhs The right-hand side tensor for the Schur product.
// \return void
//
// This function implements the OpenMP-based SMP Schur product assignment to a dense tensor. Due
// to the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense tensor
, typename MT2 > // Type of the right-hand side dense tensor
inline EnableIf_t< IsDenseTensor_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> >
smpSchurAssign( Tensor<MT1>& lhs, const Tensor<MT2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_INTERNAL_ASSERT( (~lhs).pages() == (~rhs).pages(), "Invalid number of pages" );
BLAZE_PARALLEL_SECTION
{
// Serial path: either a serial section is enforced or the right-hand
// side expression cannot be evaluated thread-parallel.
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
schurAssign( ~lhs, ~rhs );
}
else {
// Parallel path: the generic OpenMP kernel applies SchurAssign per tile.
#pragma omp parallel shared( lhs, rhs )
openmpAssign( ~lhs, ~rhs, SchurAssign() );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense tensor.
// \ingroup smp
//
// \param lhs The target left-hand side dense tensor.
// \param rhs The right-hand side tensor to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// tensor.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense tensor
, typename MT2 > // Type of the right-hand side dense tensor
inline EnableIf_t< IsDenseTensor_v<MT1> >
smpMultAssign( Tensor<MT1>& lhs, const Tensor<MT2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_INTERNAL_ASSERT( (~lhs).pages() == (~rhs).pages(), "Invalid number of pages" );
// Multiplication assignment has no parallel kernel here; always serial.
multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );
}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
displacement_contact_criteria.h | // KRATOS ______ __ __ _____ __ __ __
// / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ /
// / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ /
// / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / /
// \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS
//
// License: BSD License
// license: ContactStructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "utilities/color_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* @details This class implements a convergence control based on nodal displacement (for penalty contact)
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( ROTATION_DOF_IS_CONSIDERED );
/// The base class definition
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
/// The definition of the current class
typedef DisplacementContactCriteria< TSparseSpace, TDenseSpace > ClassType;
/// The dofs array type
typedef typename BaseType::DofsArrayType DofsArrayType;
/// The sparse matrix type
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
/// The dense vector type
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor.
*/
explicit DisplacementContactCriteria()
: BaseType()
{
// Tolerances and flags keep their member defaults; callers are expected
// to configure the criteria afterwards (e.g. via AssignSettings).
}
/**
* @brief Default constructor. (with parameters)
* @param ThisParameters The configuration parameters
*/
explicit DisplacementContactCriteria(Kratos::Parameters ThisParameters)
: BaseType()
{
// Validate and assign defaults: merge the user settings with
// GetDefaultParameters() before configuring the members.
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
}
/**
* @brief Default constructor.
* @param DispRatioTolerance Relative tolerance for displacement error
* @param DispAbsTolerance Absolute tolerance for displacement error
* @param RotRatioTolerance Relative tolerance for rotation error
* @param RotAbsTolerance Absolute tolerance for rotation error
* @param pTable The pointer to the output table
* @param PrintingOutput If the output is going to be printed in a txt file
*/
explicit DisplacementContactCriteria(
    const double DispRatioTolerance,
    const double DispAbsTolerance,
    const double RotRatioTolerance,
    const double RotAbsTolerance,
    const bool PrintingOutput = false
    )
    : BaseType()
{
    // Convergence tolerances for the displacement DoFs
    mDispRatioTolerance = DispRatioTolerance;
    mDispAbsTolerance = DispAbsTolerance;

    // Convergence tolerances for the rotation DoFs
    mRotRatioTolerance = RotRatioTolerance;
    mRotAbsTolerance = RotAbsTolerance;

    // Initialize the local status flags; the output table is not yet built
    // and rotation DoFs are assumed absent until detected later
    mOptions.Set(DisplacementContactCriteria::PRINTING_OUTPUT, PrintingOutput);
    mOptions.Set(DisplacementContactCriteria::TABLE_IS_INITIALIZED, false);
    mOptions.Set(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
}
// Copy constructor.
// Copy constructor: replicates the flags and all four tolerances.
DisplacementContactCriteria( DisplacementContactCriteria const& rOther )
:BaseType(rOther)
,mOptions(rOther.mOptions)
,mDispRatioTolerance(rOther.mDispRatioTolerance)
,mDispAbsTolerance(rOther.mDispAbsTolerance)
,mRotRatioTolerance(rOther.mRotRatioTolerance)
,mRotAbsTolerance(rOther.mRotAbsTolerance)
{
}
/// Destructor.
~DisplacementContactCriteria() override = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
 * @brief Create method (factory).
 * @param ThisParameters The configuration parameters
 * @return Shared pointer to a newly created criteria of this type
 */
typename BaseType::Pointer Create(Parameters ThisParameters) const override
{
    return Kratos::make_shared<ClassType>(ThisParameters);
}
/**
 * @brief Compute relative and absolute error.
 * @details Accumulates the L2 norms of the solution and its increment over
 * the free displacement (and, when present, rotation) DoFs, prints the
 * result, and checks both DoF families against their tolerances.
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables)
 * @param rb RHS vector (residual, unused)
 * @return true if convergence is achieved, false otherwise
 */
bool PostCriteria(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    if (SparseSpaceType::Size(rDx) != 0) { // If we are solving for something
        // Norm accumulators for displacement and rotation DoFs
        double disp_solution_norm = 0.0, disp_increase_norm = 0.0;
        IndexType disp_dof_num(0);
        double rot_solution_norm = 0.0, rot_increase_norm = 0.0;
        IndexType rot_dof_num(0);
        // First iterator
        const auto it_dof_begin = rDofSet.begin();
        // DoF-family predicate: when rotation DoFs exist we must discriminate
        // displacement variables explicitly; otherwise every free DoF counts
        // as a displacement DoF
        const std::function<bool(const VariableData&)> check_without_rot =
            [](const VariableData& rCurrVar) -> bool {return true;};
        const std::function<bool(const VariableData&)> check_with_rot =
            [](const VariableData& rCurrVar) -> bool {return ((rCurrVar == DISPLACEMENT_X) || (rCurrVar == DISPLACEMENT_Y) || (rCurrVar == DISPLACEMENT_Z));};
        const auto* p_check_disp = (mOptions.Is(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED)) ? &check_with_rot : &check_without_rot;
        // Loop over DoFs
        // NOTE: dof_id/dof_value/dof_incr are per-iteration scratch values and
        // are declared inside the loop body (thread-private). The previous
        // version listed them in the reduction clause, which needlessly
        // accumulated meaningless sums across threads.
        #pragma omp parallel for reduction(+:disp_solution_norm,disp_increase_norm,disp_dof_num,rot_solution_norm,rot_increase_norm,rot_dof_num)
        for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
            auto it_dof = it_dof_begin + i;
            if (it_dof->IsFree()) {
                const std::size_t dof_id = it_dof->EquationId();
                const double dof_value = it_dof->GetSolutionStepValue(0);
                const double dof_incr = rDx[dof_id];
                const auto& r_curr_var = it_dof->GetVariable();
                if ((*p_check_disp)(r_curr_var)) {
                    disp_solution_norm += std::pow(dof_value, 2);
                    disp_increase_norm += std::pow(dof_incr, 2);
                    ++disp_dof_num;
                } else {
                    KRATOS_DEBUG_ERROR_IF_NOT((r_curr_var == ROTATION_X) || (r_curr_var == ROTATION_Y) || (r_curr_var == ROTATION_Z)) << "Variable must be a ROTATION and it is: " << r_curr_var.Name() << std::endl;
                    rot_solution_norm += std::pow(dof_value, 2);
                    rot_increase_norm += std::pow(dof_incr, 2);
                    ++rot_dof_num;
                }
            }
        }
        // Avoid 0/0 in the ratio computations
        if(disp_increase_norm == 0.0) disp_increase_norm = 1.0;
        if(disp_solution_norm == 0.0) disp_solution_norm = 1.0;
        if(rot_increase_norm == 0.0) rot_increase_norm = 1.0;
        if(rot_solution_norm == 0.0) rot_solution_norm = 1.0;
        const double disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm);
        // Guard the absolute norms against empty DoF families (previously this
        // divided by zero, yielding inf); with zero DoFs of a family there is
        // nothing left to converge, so the absolute error is 0
        const double disp_abs = disp_dof_num > 0 ? std::sqrt(disp_increase_norm)/static_cast<double>(disp_dof_num) : 0.0;
        const double rot_ratio = std::sqrt(rot_increase_norm/rot_solution_norm);
        const double rot_abs = rot_dof_num > 0 ? std::sqrt(rot_increase_norm)/static_cast<double>(rot_dof_num) : 0.0;
        // The process info of the model part
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        // We print the results // TODO: Replace for the new log
        if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
            if (r_process_info.Has(TABLE_UTILITY)) {
                std::cout.precision(4);
                TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                auto& Table = p_table->GetTable();
                if (mOptions.Is(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                    Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << rot_ratio << mRotRatioTolerance << rot_abs << mRotAbsTolerance;
                } else {
                    Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance;
                }
            } else {
                std::cout.precision(4);
                if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT)) {
                    // BUG FIX: message said "ONVERGENCE" instead of "CONVERGENCE"
                    KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("DoF CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
                    KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                    if (mOptions.Is(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("\tROTATION: RATIO = ") << rot_ratio << BOLDFONT(" EXP.RATIO = ") << mRotRatioTolerance << BOLDFONT(" ABS = ") << rot_abs << BOLDFONT(" EXP.ABS = ") << mRotAbsTolerance << std::endl;
                    }
                } else {
                    KRATOS_INFO("DisplacementContactCriteria") << "DoF CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl;
                    KRATOS_INFO("DisplacementContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                    if (mOptions.Is(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
                        KRATOS_INFO("DisplacementContactCriteria") << "\tROTATION: RATIO = " << rot_ratio << " EXP.RATIO = " << mRotRatioTolerance << " ABS = " << rot_abs << " EXP.ABS = " << mRotAbsTolerance << std::endl;
                    }
                }
            }
        }
        // We check if converged: each family passes on either its relative or
        // absolute tolerance; rotations are only checked when they exist
        const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance);
        const bool rot_converged = mOptions.Is(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED) ? (rot_ratio <= mRotRatioTolerance || rot_abs <= mRotAbsTolerance) : true;
        if (disp_converged && rot_converged) {
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& table = p_table->GetTable();
                    if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT))
                        table << BOLDFONT(FGRN("       Achieved"));
                    else
                        table << "Achieved";
                } else {
                    if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT))
                        KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                    else
                        KRATOS_INFO("DisplacementContactCriteria") << "\tDoF convergence is achieved" << std::endl;
                }
            }
            return true;
        } else {
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& table = p_table->GetTable();
                    if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT))
                        table << BOLDFONT(FRED("   Not achieved"));
                    else
                        table << "Not achieved";
                } else {
                    if (mOptions.IsNot(DisplacementContactCriteria::PRINTING_OUTPUT))
                        KRATOS_INFO("DisplacementContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                    else
                        KRATOS_INFO("DisplacementContactCriteria") << "\tDoF convergence is not achieved" << std::endl;
                }
            }
            return false;
        }
    }
    else // In this case all the displacements are imposed!
        return true;
}
/**
 * @brief This function initializes the convergence criteria.
 * @details Detects whether the model part carries rotation DoFs and, when a
 * TABLE_UTILITY is present in the ProcessInfo, registers the output table
 * columns (only once, guarded by TABLE_IS_INITIALIZED).
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 */
void Initialize( ModelPart& rModelPart ) override
{
    // Mark the base criteria as initialized
    BaseType::mConvergenceCriteriaIsInitialized = true;
    // Check rotation dof
    mOptions.Set(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED, ContactUtilities::CheckModelPartHasRotationDoF(rModelPart));
    // Initialize the output table header (only once per criteria instance)
    ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementContactCriteria::TABLE_IS_INITIALIZED)) {
        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
        auto& r_table = p_table->GetTable();
        r_table.AddColumn("DP RATIO", 10);
        r_table.AddColumn("EXP. RAT", 10);
        r_table.AddColumn("ABS", 10);
        r_table.AddColumn("EXP. ABS", 10);
        // Rotation columns are only added when rotation DoFs exist
        if (mOptions.Is(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED)) {
            r_table.AddColumn("RT RATIO", 10);
            r_table.AddColumn("EXP. RAT", 10);
            r_table.AddColumn("ABS", 10);
            r_table.AddColumn("EXP. ABS", 10);
        }
        r_table.AddColumn("CONVERGENCE", 15);
        mOptions.Set(DisplacementContactCriteria::TABLE_IS_INITIALIZED, true);
    }
}
/**
 * @brief This method provides the defaults parameters to avoid conflicts between the different constructors.
 * @details The defaults below are merged (recursively) with the base class
 * defaults before being returned.
 * @return The default parameters
 */
Parameters GetDefaultParameters() const override
{
    Parameters default_parameters = Parameters(R"(
    {
        "name"                             : "displacement_contact_criteria",
        "ensure_contact"                   : false,
        "print_convergence_criterion"      : false,
        "displacement_relative_tolerance"  : 1.0e-4,
        "displacement_absolute_tolerance"  : 1.0e-9,
        "rotation_relative_tolerance"      : 1.0e-4,
        "rotation_absolute_tolerance"      : 1.0e-9
    })");
    // Getting base class default parameters
    const Parameters base_default_parameters = BaseType::GetDefaultParameters();
    default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
    return default_parameters;
}
/**
 * @brief Returns the name of the class as used in the settings (snake_case format)
 * @return The name of the class (matches the "name" entry of GetDefaultParameters())
 */
static std::string Name()
{
    return "displacement_contact_criteria";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
    return "DisplacementContactCriteria";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
    rOStream << Info();
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief This method assigns settings to member variables.
 * @details Reads the tolerances from the (already validated) parameters and
 * resets the local flags; ROTATION_DOF_IS_CONSIDERED is re-detected later in
 * Initialize().
 * @param ThisParameters Parameters that are assigned to the member variables
 */
void AssignSettings(const Parameters ThisParameters) override
{
    BaseType::AssignSettings(ThisParameters);
    // The displacement solution tolerances
    mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble();
    mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble();
    // The rotation solution tolerances
    mRotRatioTolerance = ThisParameters["rotation_relative_tolerance"].GetDouble();
    mRotAbsTolerance = ThisParameters["rotation_absolute_tolerance"].GetDouble();
    // Set local flags
    mOptions.Set(DisplacementContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
    mOptions.Set(DisplacementContactCriteria::TABLE_IS_INITIALIZED, false);
    mOptions.Set(DisplacementContactCriteria::ROTATION_DOF_IS_CONSIDERED, false);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags
double mDispRatioTolerance; /// The ratio threshold for the norm of the displacement
double mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement
double mRotRatioTolerance; /// The ratio threshold for the norm of the rotation
double mRotAbsTolerance; /// The absolute value threshold for the norm of the rotation
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementContactCriteria
///@name Local flags creation
///@{
/// Local Flags
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementContactCriteria<TSparseSpace, TDenseSpace>::ROTATION_DOF_IS_CONSIDERED(Kratos::Flags::Create(3));
}
#endif /* KRATOS_DISPLACEMENT_CONTACT_CRITERIA_H */
|
openmpLBM.c | // http://www.caam.rice.edu/~timwar/CAAM210/Flows.html
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include "png_util.h"
#include <omp.h>
#define dfloat double
#define FLUID 0
#define WALL 1
#define NSPECIES 9
// loop up 1D array index from 2D node coordinates
// Map 2D node coordinates (n,m) on an (N+2)-wide padded lattice to the
// flattened, row-major 1D storage index.
int idx(int N, int n, int m){
  return m*(N+2) + n;
}
// Read a PNG, embed it in a padded lattice, and derive the node-type map.
// On return: *outN/*outM hold the padded lattice dimensions, *rgb/*alpha are
// replaced with padded buffers (the originals are freed), and *nodeType is a
// freshly calloc'd (Npad+2) x (Mpad+2) FLUID/WALL map owned by the caller.
void lbmInput(const char *imageFileName,
              dfloat threshold,
              int *outN,
              int *outM,
              unsigned char **rgb,
              unsigned char **alpha,
              int **nodeType){

  int n,m, N,M;

  // read png file
  read_png(imageFileName, &N, &M, rgb, alpha);

  // pad to guarantee space around obstacle and extend the wake
  int Npad = 3*N;
  int Mpad = 2*M;

  // NOTE(review): this clamp shrinks the padding, but the centering offsets
  // (N/4, M/2) below still use the unclamped image size; for images with
  // 3*N or 2*M > 8192 the computed ids could fall outside the padded
  // arrays — confirm inputs stay below the clamp.
  if(Npad>8192) Npad = 8192;
  if(Mpad>8192) Mpad = 8192;

  // threshold walls based on gray scale
  *nodeType = (int*) calloc((Npad+2)*(Mpad+2), sizeof(int));

  // mark pixels by gray scale intensity
  unsigned char *rgbPad = (unsigned char*) calloc(3*(Npad+2)*(Mpad+2), sizeof(unsigned char));
  unsigned char *alphaPad = (unsigned char*) calloc((Npad+2)*(Mpad+2), sizeof(unsigned char));

  int wallCount = 0;
  for(m=1;m<=M;++m){
    for(n=1;n<=N;++n){
      int offset = ((n-1)+(m-1)*N);
      dfloat r = (*rgb)[3*offset+0];
      dfloat g = (*rgb)[3*offset+1];
      dfloat b = (*rgb)[3*offset+2];
      // fully transparent pixels count as fluid regardless of color
      dfloat a = (*alpha) ? (*alpha)[offset]:255;

      // center image in padded region (including halo zone)
      int id = idx(Npad,n+(N/4),m+(M/2));
      if(a==0)
        (*nodeType)[id] = FLUID;
      else
        (*nodeType)[id] = WALL*(sqrt(r*r+g*g+b*b)<threshold);
      wallCount += (*nodeType)[id];
      rgbPad[3*id+0] = r;
      rgbPad[3*id+1] = g;
      rgbPad[3*id+2] = b;
      alphaPad[id] = 255;
    }
  }

  // top and bottom lattice rows are solid walls
  for(n=1;n<=Npad;++n){
    (*nodeType)[idx(Npad,n,1)] = WALL;
    (*nodeType)[idx(Npad,n,Mpad)] = WALL;
  }

  // hand the padded buffers back to the caller
  free(*rgb); free(*alpha);
  *rgb = rgbPad;
  *alpha = alphaPad;

  printf("wallCount = %d (%g percent of %d x %d nodes)\n", wallCount, 100.*((dfloat)wallCount/((Npad+2)*(Mpad+2))), Npad, Mpad);

  *outN = Npad;
  *outM = Mpad;
}
// Render the vorticity of the fluid region into the rgb/alpha buffers and
// write the frame as a PNG file named fname.
void lbmOutput(const char *fname,
               const int *nodeType,
               unsigned char *rgb,
               unsigned char *alpha,
               const dfloat c,
               const dfloat dx,
               int N,
               int M,
               const dfloat *f){

  int n,m,s;

  // BUG FIX: PNG is a binary format; open with "wb" so bytes are not mangled
  // by text-mode newline translation on Windows. Also check for failure
  // instead of handing a NULL stream to write_png.
  FILE *bah = fopen(fname, "wb");
  if(bah == NULL){
    printf("lbmOutput: unable to open %s for writing\n", fname);
    return;
  }

  // compute macroscopic velocities (halo entries stay zero from calloc)
  dfloat *Ux = (dfloat*) calloc((N+2)*(M+2), sizeof(dfloat));
  dfloat *Uy = (dfloat*) calloc((N+2)*(M+2), sizeof(dfloat));
  dfloat fnm[NSPECIES];

  for(m=1;m<=M;++m){
    for(n=1;n<=N;++n){
      int base = idx(N, n, m);
      for(s=0;s<NSPECIES;++s)
        fnm[s] = f[base+s*(N+2)*(M+2)];

      // macroscopic density
      const dfloat rho = fnm[0]+fnm[1]+fnm[2]+fnm[3]+fnm[4]+fnm[5]+fnm[6]+fnm[7]+fnm[8];

      // macroscopic momentum
      Ux[base] = (fnm[1] - fnm[3] + fnm[5] - fnm[6] - fnm[7] + fnm[8])*c/rho;
      Uy[base] = (fnm[2] - fnm[4] + fnm[5] + fnm[6] - fnm[7] - fnm[8])*c/rho;
    }
  }

  // map vorticity to a gray scale in [plotMin, plotMax]
  dfloat plotMin = -4, plotMax = 4;
  for(m=1;m<=M;++m){
    for(n=1;n<=N;++n){
      int id = idx(N,n,m);

      // over write pixels in fluid region
      if(nodeType[id]==FLUID){
        unsigned char r,g,b,a;

        // reconstruct macroscopic density
        dfloat rho = 0;
        for(s=0;s<NSPECIES;++s)
          rho += f[id+s*(N+2)*(M+2)];
        rho = ((rho-plotMin)/(plotMax-plotMin)); // rescale

        // centered-difference vorticity (curl of the velocity field)
        dfloat dUxdy = (Ux[idx(N,n,m+1)]-Ux[idx(N,n,m-1)])/(2.*dx);
        dfloat dUydx = (Uy[idx(N,n+1,m)]-Uy[idx(N,n-1,m)])/(2.*dx);
        dfloat curlU = dUydx-dUxdy;
        curlU = ((curlU-plotMin)/(plotMax-plotMin));

        r = 255*curlU;
        g = 255*curlU;
        b = 255*curlU;
        a = 255;

        rgb[idx(N,n,m)*3+0] = r;
        rgb[idx(N,n,m)*3+1] = g;
        rgb[idx(N,n,m)*3+2] = b;
        alpha[idx(N,n,m)] = a;
      }
    }
  }

  write_png(bah, N+2, M+2, rgb, alpha);

  fclose(bah);
  free(Ux);
  free(Uy);
}
// weights used to compute equilibrium distribution (post collision)
const dfloat w0 = 4.f/9.f, w1 = 1.f/9.f, w2 = 1.f/9.f, w3 = 1.f/9.f;
const dfloat w4 = 1.f/9.f, w5 = 1.f/36.f, w6 = 1.f/36.f, w7 = 1.f/36.f, w8 = 1.f/36.f;

// Evaluate the D2Q9 equilibrium distribution for the given macroscopic
// density rho and velocity (Ux,Uy), writing the nine populations into feq.
void lbmEquilibrium(const dfloat c,
                    const dfloat rho,
                    const dfloat Ux,
                    const dfloat Uy,
                    dfloat * feq){

  const dfloat U2 = Ux*Ux+Uy*Uy;

  // resolve macroscopic velocity into lattice particle velocity directions
  const dfloat v[NSPECIES] = {
    0,
    +Ux/c,      +Uy/c,      -Ux/c,      -Uy/c,
    (+Ux+Uy)/c, (-Ux+Uy)/c, (-Ux-Uy)/c, (+Ux-Uy)/c
  };
  const dfloat w[NSPECIES] = {w0, w1, w2, w3, w4, w5, w6, w7, w8};

  // second-order truncated Maxwell-Boltzmann expansion per direction
  int s;
  for(s=0;s<NSPECIES;++s)
    feq[s] = rho*w[s]*(1.f + 3.f*v[s] + 4.5f*v[s]*v[s] - 1.5f*U2/(c*c));
}
// perform lattice streaming and collision steps
// perform lattice streaming and collision steps for one time step.
// Reads f, writes fnew (ping-pong buffers). The halo (n==0, m==0, m==M+1) is
// not updated; column n==N+1 is treated as an outflow boundary.
void lbmUpdate(const int N,          // number of nodes in x
               const int M,          // number of nodes in y
               const dfloat c,       // speed of sound
               const dfloat *tau,    // relaxation rate
               const int * nodeType, // (N+2) x (M+2) node types
               const dfloat * f,     // (N+2) x (M+2) x 9 fields before streaming and collisions
               dfloat * fnew){       // (N+2) x (M+2) x 9 fields after streaming and collisions

  // number of nodes in whole array including halo
  const int Nall = (N+2)*(M+2);

  // loop over all non-halo nodes in lattice.
  // BUG FIX: both loop counters are now declared inside their for-statements.
  // Previously `int n` was declared at function scope, which made it SHARED
  // between OpenMP threads under "#pragma omp parallel for" — a data race
  // (only the parallelized loop's own variable m was implicitly private).
#pragma omp parallel for
  for(int m=1;m<M+1;++m){
    for(int n=1;n<=N+1;++n){

      // physics parameters
      dfloat tauinv = 1.f/tau[idx(N,n,m)];

      // discover type of node (WALL or FLUID)
      const int nt = nodeType[idx(N,n,m)];
      dfloat fnm[NSPECIES];

      // OUTFLOW: rightmost column; east-bound populations are copied from
      // the same column rather than streamed from (out-of-range) n+1
      if(n==N+1){
        fnm[0] = f[idx(N,n,  m)   + 0*Nall]; // stationary
        fnm[1] = f[idx(N,n-1,m)   + 1*Nall]; // E bound from W
        fnm[2] = f[idx(N,n,m-1)   + 2*Nall]; // N bound from S
        fnm[3] = f[idx(N,n,m)     + 3*Nall]; // W bound from E
        fnm[4] = f[idx(N,n,m+1)   + 4*Nall]; // S bound from N
        fnm[5] = f[idx(N,n-1,m-1) + 5*Nall]; // NE bound from SW
        fnm[6] = f[idx(N,n,m-1)   + 6*Nall]; // NW bound from SE
        fnm[7] = f[idx(N,n,m+1)   + 7*Nall]; // SW bound from NE
        fnm[8] = f[idx(N,n-1,m+1) + 8*Nall]; // SE bound from NW
      }
      else if(nt == FLUID){
        // interior fluid node: standard D2Q9 streaming from neighbors
        fnm[0] = f[idx(N,n,  m)   + 0*Nall]; // stationary
        fnm[1] = f[idx(N,n-1,m)   + 1*Nall]; // E bound from W
        fnm[2] = f[idx(N,n,m-1)   + 2*Nall]; // N bound from S
        fnm[3] = f[idx(N,n+1,m)   + 3*Nall]; // W bound from E
        fnm[4] = f[idx(N,n,m+1)   + 4*Nall]; // S bound from N
        fnm[5] = f[idx(N,n-1,m-1) + 5*Nall]; // NE bound from SW
        fnm[6] = f[idx(N,n+1,m-1) + 6*Nall]; // NW bound from SE
        fnm[7] = f[idx(N,n+1,m+1) + 7*Nall]; // SW bound from NE
        fnm[8] = f[idx(N,n-1,m+1) + 8*Nall]; // SE bound from NW
      }
      else{
        // WALL reflects particles (bounce-back: swap opposite directions)
        fnm[0] = f[idx(N,n,m) + 0*Nall]; // stationary
        fnm[1] = f[idx(N,n,m) + 3*Nall]; // E bound from W
        fnm[2] = f[idx(N,n,m) + 4*Nall]; // N bound from S
        fnm[3] = f[idx(N,n,m) + 1*Nall]; // W bound from E
        fnm[4] = f[idx(N,n,m) + 2*Nall]; // S bound from N
        fnm[5] = f[idx(N,n,m) + 7*Nall]; // NE bound from SW
        fnm[6] = f[idx(N,n,m) + 8*Nall]; // NW bound from SE
        fnm[7] = f[idx(N,n,m) + 5*Nall]; // SW bound from NE
        fnm[8] = f[idx(N,n,m) + 6*Nall]; // SE bound from NW
      }

      // macroscopic density
      const dfloat rho = fnm[0]+fnm[1]+fnm[2]+fnm[3]+fnm[4]+fnm[5]+fnm[6]+fnm[7]+fnm[8];
      if(rho<1e-4){ printf("rho(%d,%d)=%g\n", n,m,rho); exit(-1); }

      // macroscopic momentum (delta2 regularizes the division near rho=0)
      const dfloat delta2 = 1e-5;
      const dfloat Ux = (fnm[1] - fnm[3] + fnm[5] - fnm[6] - fnm[7] + fnm[8])*c/sqrt(rho*rho+delta2);
      const dfloat Uy = (fnm[2] - fnm[4] + fnm[5] + fnm[6] - fnm[7] - fnm[8])*c/sqrt(rho*rho+delta2);

      // compute equilibrium distribution
      dfloat feq[NSPECIES];
      lbmEquilibrium(c, rho, Ux, Uy, feq);

      // MRT stabilization
      const dfloat g0 = 1.f, g1 = -2.f, g2 = -2.f, g3 = -2.f, g4 = -2.f;
      const dfloat g5 =  4.f, g6 =  4.f, g7 =  4.f, g8 =  4.f;
      const dfloat R = g0*fnm[0] + g1*fnm[1] + g2*fnm[2]+ g3*fnm[3] + g4*fnm[4] + g5*fnm[5] + g6*fnm[6] + g7*fnm[7] + g8*fnm[8];

      // relax towards post collision densities
      fnm[0] -= tauinv*(fnm[0]-feq[0]) + (1.f-tauinv)*w0*g0*R*0.25f;
      fnm[1] -= tauinv*(fnm[1]-feq[1]) + (1.f-tauinv)*w1*g1*R*0.25f;
      fnm[2] -= tauinv*(fnm[2]-feq[2]) + (1.f-tauinv)*w2*g2*R*0.25f;
      fnm[3] -= tauinv*(fnm[3]-feq[3]) + (1.f-tauinv)*w3*g3*R*0.25f;
      fnm[4] -= tauinv*(fnm[4]-feq[4]) + (1.f-tauinv)*w4*g4*R*0.25f;
      fnm[5] -= tauinv*(fnm[5]-feq[5]) + (1.f-tauinv)*w5*g5*R*0.25f;
      fnm[6] -= tauinv*(fnm[6]-feq[6]) + (1.f-tauinv)*w6*g6*R*0.25f;
      fnm[7] -= tauinv*(fnm[7]-feq[7]) + (1.f-tauinv)*w7*g7*R*0.25f;
      fnm[8] -= tauinv*(fnm[8]-feq[8]) + (1.f-tauinv)*w8*g8*R*0.25f;

      // store new densities
      const int base = idx(N,n,m);
      fnew[base+0*Nall] = fnm[0];
      fnew[base+1*Nall] = fnm[1];
      fnew[base+2*Nall] = fnm[2];
      fnew[base+3*Nall] = fnm[3];
      fnew[base+4*Nall] = fnm[4];
      fnew[base+5*Nall] = fnm[5];
      fnew[base+6*Nall] = fnm[6];
      fnew[base+7*Nall] = fnm[7];
      fnew[base+8*Nall] = fnm[8];
    }
  }
}
// Abort with a diagnostic if any lattice entry has become NaN.
// BUG FIX: scan the full padded array, NSPECIES*(N+2)*(M+2) entries, to match
// the allocation in main(); the previous bound NSPECIES*N*M silently skipped
// the halo/padding entries.
void lbmCheck(int N, int M, dfloat *f){
  int n;
  int nanCount = 0;
  const int total = NSPECIES*(N+2)*(M+2);
  for(n=0;n<total;++n){
    nanCount += isnan(f[n]);
  }
  if(nanCount){ printf("found %d nans\n", nanCount); exit(-1); }
}
// set initial conditions: the inflow column (n==0) gets the uniform-flow
// equilibrium; every other node (walls included) starts from the
// zero-velocity equilibrium. nodeType is accepted for interface symmetry
// with the other lbm* functions but is not read here.
// FIXES: removed an inner "int s;" that shadowed the outer declaration, and
// the computed-but-unused "base" index is now actually used.
void lbmInitialConditions(dfloat c, int N, int M, int *nodeType, dfloat *f){

  int n,m,s;
  dfloat feqIC[NSPECIES];
  dfloat feqWALL[NSPECIES];
  dfloat rhoIC = 1.;
  dfloat UxIC = 1.;
  dfloat UyIC = 0.;

  lbmEquilibrium(c, rhoIC, UxIC, UyIC, feqIC);
  lbmEquilibrium(c, rhoIC, 0., 0., feqWALL);

  const int Nall = (N+2)*(M+2);
  for(m=0;m<=M+1;++m){
    for(n=0;n<=N+1;++n){
      const int base = idx(N, n, m);
      const dfloat *feq = (n==0) ? feqIC : feqWALL;
      for(s=0;s<NSPECIES;++s)
        f[base+s*Nall] = feq[s];
    }
  }
}
// March the LBM solver forward in time, alternating the f/fnew ping-pong
// buffers (two updates per iteration so the result lands back in f), with a
// NaN check every iteration and a PNG snapshot every iostep iterations.
void lbmRun(int N,
            int M,
            unsigned char *rgb,
            unsigned char *alpha,
            dfloat c,
            dfloat dx,
            dfloat *h_tau,
            int *nodeType,
            dfloat *f,
            dfloat *fnew){

  int Nsteps = 300000/2, tstep = 0, iostep = 100;

  // time step
  for(tstep=0;tstep<Nsteps;++tstep){

    // perform two updates
    lbmUpdate(N, M, c, h_tau, nodeType, f, fnew);
    lbmUpdate(N, M, c, h_tau, nodeType, fnew, f);

    // check for nans
    lbmCheck(N, M, f);

    // output an image every iostep iterations (including tstep 0)
    if(!(tstep%iostep)){
      printf("tstep = %d\n", tstep);
      char fname[BUFSIZ];
      // snprintf instead of sprintf: bound the write to the buffer
      snprintf(fname, sizeof(fname), "bah%06d.png", tstep/iostep);
      lbmOutput(fname, nodeType, rgb, alpha, c, dx, N, M, f);
    }
  }
}
// Entry point: read the obstacle image, set up the lattice, run the solver,
// and write the final frame.
int main(int argc, char **argv){

  if(argc!=3){
    printf("usage: ./lbm foo.png threshold\n");
    exit(-1);
  }

  int N, M; // size of lattice
  int n,m;

  // read threshold
  dfloat threshold = atof(argv[2]);

  // BUG FIX: previously strdup(argv[1]) was called without #include
  // <string.h> (an implicit declaration) and the copy was never freed.
  // argv[1] remains valid for the program's lifetime, so use it directly.
  const char *imageFileName = argv[1];

  unsigned char *rgb, *alpha;
  int *nodeType;
  lbmInput(imageFileName, threshold, &N, &M, &rgb, &alpha, &nodeType);

  printf("N=%d, M=%d\n", N, M);

  // physical parameters
  dfloat dx = .01;    // lattice node spacings in x
  dfloat dt = dx*.1;  // time step (also determines Mach number)
  dfloat c = dx/dt;   // speed of sound
  dfloat tau = .61;   // relaxation rate
  dfloat Reynolds = 2./((tau-.5)*c*c*dt/3.);
  printf("Reynolds number %g\n", Reynolds);

  // create lattice storage
  dfloat *f = (dfloat*) calloc((N+2)*(M+2)*NSPECIES, sizeof(dfloat));
  dfloat *fnew = (dfloat*) calloc((N+2)*(M+2)*NSPECIES, sizeof(dfloat));
  dfloat *h_tau = (dfloat*) calloc((N+2)*(M+2), sizeof(dfloat));

  // set tau based on n index (relaxation grows smoothly past x=xo to damp
  // the wake near the outflow)
  dfloat xo = .9;
  for(m=0;m<=M+1;++m){
    for(n=0;n<=N+1;++n){
      dfloat x = ((dfloat)n)/N;
      dfloat taunm = tau*(1 + 4*(1+tanh(10*(x-xo))));
      h_tau[idx(N,n,m)] = taunm;
    }
  }

  // set initial flow densities
  lbmInitialConditions(c, N, M, nodeType, f);
  lbmInitialConditions(c, N, M, nodeType, fnew);

  // time step the LBM solver
  lbmRun(N, M, rgb, alpha, c, dx, h_tau, nodeType, f, fnew);

  // output result as image
  lbmOutput("bahFinal.png", nodeType, rgb, alpha, c, dx, N, M, f);

  // release lattice storage (previously skipped via exit(0), which also made
  // the trailing return unreachable)
  free(f);
  free(fnew);
  free(h_tau);
  free(nodeType);
  free(rgb);
  free(alpha);

  return 0;
}
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/compare.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/statistic.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImageChannels() compares one or more image channels of an image
% to a reconstructed image and returns the difference image.
%
% The format of the CompareImageChannels method is:
%
% Image *CompareImageChannels(const Image *image,
% const Image *reconstruct_image,const ChannelType channel,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o channel: the channel.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: compare all composite channels by delegating to
    CompareImageChannels() with CompositeChannels.
  */
  return(CompareImageChannels(image,reconstruct_image,CompositeChannels,
    metric,distortion,exception));
}
static size_t GetNumberChannels(const Image *image,const ChannelType channel)
{
size_t
channels;
channels=0;
if ((channel & RedChannel) != 0)
channels++;
if ((channel & GreenChannel) != 0)
channels++;
if ((channel & BlueChannel) != 0)
channels++;
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
channels++;
if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
channels++;
return(channels == 0 ? 1UL : channels);
}
static inline MagickBooleanType ValidateImageMorphology(
  const Image *magick_restrict image,
  const Image *magick_restrict reconstruct_image)
{
  /*
    The morphologies match when both images carry the same number of
    (default) channels.
  */
  if (GetNumberChannels(image,DefaultChannels) ==
      GetNumberChannels(reconstruct_image,DefaultChannels))
    return(MagickTrue);
  return(MagickFalse);
}
MagickExport Image *CompareImageChannels(Image *image,
const Image *reconstruct_image,const ChannelType channel,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
CacheView
*highlight_view,
*image_view,
*reconstruct_view;
const char
*artifact;
double
fuzz;
Image
*clone_image,
*difference_image,
*highlight_image;
MagickBooleanType
status;
MagickPixelPacket
highlight,
lowlight,
zero;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (metric != PerceptualHashErrorMetric)
if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
ThrowImageException(ImageError,"ImageMorphologyDiffers");
status=GetImageChannelDistortion(image,reconstruct_image,channel,metric,
distortion,exception);
if (status == MagickFalse)
return((Image *) NULL);
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageMask(clone_image,(Image *) NULL);
difference_image=CloneImage(clone_image,0,0,MagickTrue,exception);
clone_image=DestroyImage(clone_image);
if (difference_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel);
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (highlight_image == (Image *) NULL)
{
difference_image=DestroyImage(difference_image);
return((Image *) NULL);
}
if (SetImageStorageClass(highlight_image,DirectClass) == MagickFalse)
{
InheritException(exception,&highlight_image->exception);
difference_image=DestroyImage(difference_image);
highlight_image=DestroyImage(highlight_image);
return((Image *) NULL);
}
(void) SetImageMask(highlight_image,(Image *) NULL);
(void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel);
(void) QueryMagickColor("#f1001ecc",&highlight,exception);
artifact=GetImageArtifact(image,"compare:highlight-color");
if (artifact != (const char *) NULL)
(void) QueryMagickColor(artifact,&highlight,exception);
(void) QueryMagickColor("#ffffffcc",&lowlight,exception);
artifact=GetImageArtifact(image,"compare:lowlight-color");
if (artifact != (const char *) NULL)
(void) QueryMagickColor(artifact,&lowlight,exception);
if (highlight_image->colorspace == CMYKColorspace)
{
ConvertRGBToCMYK(&highlight);
ConvertRGBToCMYK(&lowlight);
}
/*
Generate difference image.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
GetMagickPixelPacket(image,&zero);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,highlight_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel,
reconstruct_pixel;
const IndexPacket
*magick_restrict indexes,
*magick_restrict reconstruct_indexes;
const PixelPacket
*magick_restrict p,
*magick_restrict q;
IndexPacket
*magick_restrict highlight_indexes;
PixelPacket
*magick_restrict r;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(q == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
highlight_indexes=GetCacheViewAuthenticIndexQueue(highlight_view);
pixel=zero;
reconstruct_pixel=zero;
for (x=0; x < (ssize_t) columns; x++)
{
MagickStatusType
difference;
SetMagickPixelPacket(image,p,indexes+x,&pixel);
SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x,
&reconstruct_pixel);
difference=MagickFalse;
if (channel == CompositeChannels)
{
if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse)
difference=MagickTrue;
}
else
{
double
Da,
distance,
pixel,
Sa;
Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
(QuantumRange-OpaqueOpacity));
Da=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(q) :
(QuantumRange-OpaqueOpacity));
if ((channel & RedChannel) != 0)
{
pixel=Sa*GetPixelRed(p)-Da*GetPixelRed(q);
distance=pixel*pixel;
if (distance >= fuzz)
difference=MagickTrue;
}
if ((channel & GreenChannel) != 0)
{
pixel=Sa*GetPixelGreen(p)-Da*GetPixelGreen(q);
distance=pixel*pixel;
if (distance >= fuzz)
difference=MagickTrue;
}
if ((channel & BlueChannel) != 0)
{
pixel=Sa*GetPixelBlue(p)-Da*GetPixelBlue(q);
distance=pixel*pixel;
if (distance >= fuzz)
difference=MagickTrue;
}
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
{
pixel=(double) GetPixelOpacity(p)-GetPixelOpacity(q);
distance=pixel*pixel;
if (distance >= fuzz)
difference=MagickTrue;
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
pixel=Sa*indexes[x]-Da*reconstruct_indexes[x];
distance=pixel*pixel;
if (distance >= fuzz)
difference=MagickTrue;
}
}
if (difference != MagickFalse)
SetPixelPacket(highlight_image,&highlight,r,highlight_indexes+x);
else
SetPixelPacket(highlight_image,&lowlight,r,highlight_indexes+x);
p++;
q++;
r++;
}
sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
highlight_view=DestroyCacheView(highlight_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
(void) CompositeImage(difference_image,image->compose,highlight_image,0,0);
highlight_image=DestroyImage(highlight_image);
if (status == MagickFalse)
difference_image=DestroyImage(difference_image);
return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDistortion() compares one or more image channels of an image
% to a reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageChannelDistortion method is:
%
% MagickBooleanType GetImageChannelDistortion(const Image *image,
% const Image *reconstruct_image,const ChannelType channel,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o channel: the channel.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDistortion() measures the difference between image and
  reconstruct_image with the given metric.  It is a convenience wrapper that
  forwards to GetImageChannelDistortion() with CompositeChannels so all
  channels participate; the result is returned in *distortion.
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  return(GetImageChannelDistortion(image,reconstruct_image,CompositeChannels,
    metric,distortion,exception));
}
/*
  GetAbsoluteDistortion() counts, per channel, the pixels whose alpha-weighted
  squared difference meets or exceeds the fuzz threshold; the
  CompositeChannels slot counts pixels that differ in at least one requested
  channel.  Rows are processed in parallel: each thread tallies into a private
  channel_distortion[] and merges it into distortion[] inside a critical
  section.

  Returns MagickFalse if any pixel row could not be read (distortion[] may
  then be partially updated).

  Fix: Da (the alpha weight for the reconstruct pixel q) is now derived from
  reconstruct_image->matte rather than image->matte, consistent with every
  other distortion method in this module.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  /*
    Scan the union of both geometries; virtual views supply pixels outside
    either image's bounds.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        distance,
        pixel,
        Sa;

      MagickBooleanType
        difference;

      difference=MagickFalse;
      /*
        Images without a matte channel are treated as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          pixel=Sa*GetPixelRed(p)-Da*GetPixelRed(q);
          distance=pixel*pixel;
          if (distance >= fuzz)
            {
              channel_distortion[RedChannel]++;
              difference=MagickTrue;
            }
        }
      if ((channel & GreenChannel) != 0)
        {
          pixel=Sa*GetPixelGreen(p)-Da*GetPixelGreen(q);
          distance=pixel*pixel;
          if (distance >= fuzz)
            {
              channel_distortion[GreenChannel]++;
              difference=MagickTrue;
            }
        }
      if ((channel & BlueChannel) != 0)
        {
          pixel=Sa*GetPixelBlue(p)-Da*GetPixelBlue(q);
          distance=pixel*pixel;
          if (distance >= fuzz)
            {
              channel_distortion[BlueChannel]++;
              difference=MagickTrue;
            }
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          pixel=(double) GetPixelOpacity(p)-GetPixelOpacity(q);
          distance=pixel*pixel;
          if (distance >= fuzz)
            {
              channel_distortion[OpacityChannel]++;
              difference=MagickTrue;
            }
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          pixel=Sa*indexes[x]-Da*reconstruct_indexes[x];
          distance=pixel*pixel;
          if (distance >= fuzz)
            {
              channel_distortion[BlackChannel]++;
              difference=MagickTrue;
            }
        }
      if (difference != MagickFalse)
        channel_distortion[CompositeChannels]++;
      p++;
      q++;
    }
    /*
      Merge this row's private tallies into the shared accumulator.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  GetFuzzDistortion() computes a normalized root-mean-squared difference
  between the two images.  Per-channel sums of squared, alpha-weighted,
  QuantumScale-normalized differences are accumulated in parallel; the sums
  are then averaged over the pixel area, and the composite entry is divided
  by the active channel count and square-rooted.

  Returns MagickFalse if any pixel row could not be read.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /*
    Scan the union of both geometries via virtual cache views.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* thread-private partial sums */

    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Images without a matte channel are treated as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /*
        Opacity differences count when either image carries a matte channel;
        a missing channel contributes OpaqueOpacity.
      */
      if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) ||
          (reconstruct_image->matte != MagickFalse)))
        {
          distance=QuantumScale*((image->matte != MagickFalse ?
            GetPixelOpacity(p) : OpaqueOpacity)-
            (reconstruct_image->matte != MagickFalse ?
            GetPixelOpacity(q): OpaqueOpacity));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /*
        Black (index) channel only when both images are CMYK.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(Sa*GetPixelIndex(indexes+x)-
            Da*GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      p++;
      q++;
    }
    /*
      Merge this row's partial sums into the shared accumulator.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Average over the pixel area, then reduce the composite entry to RMS.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) columns*rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion() computes the mean absolute error: the average
  of |Sa*channel(p)-Da*channel(q)|, QuantumScale-normalized, over all pixels
  for each requested channel.  The composite entry is additionally divided by
  the number of active channels.

  Returns MagickFalse if any pixel row could not be read.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /*
    Scan the union of both geometries via virtual cache views.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* thread-private partial sums */

    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Images without a matte channel are treated as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelRed(p)-Da*
            GetPixelRed(q)));
          channel_distortion[RedChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelGreen(p)-Da*
            GetPixelGreen(q)));
          channel_distortion[GreenChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelBlue(p)-Da*
            GetPixelBlue(q)));
          channel_distortion[BlueChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      /*
        Opacity is compared without alpha weighting.
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*fabs((double) (GetPixelOpacity(p)-(double)
            GetPixelOpacity(q)));
          channel_distortion[OpacityChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x)));
          channel_distortion[BlackChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      p++;
      q++;
    }
    /*
      Merge this row's partial sums into the shared accumulator.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize sums to per-pixel averages.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) columns*rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}
/*
  GetMeanErrorPerPixel() accumulates raw (un-averaged) absolute channel
  differences into distortion[] and, as a side effect, fills in the image's
  error statistics: mean_error_per_pixel, normalized_mean_error, and
  normalized_maximum_error.  The scan is serial (no OpenMP) because the
  running mean_error/maximum_error/area accumulators are shared.

  Returns MagickFalse if any pixel row could not be read.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  MagickRealType
    area,           /* number of channel samples compared */
    gamma,
    maximum_error,  /* largest single channel difference seen */
    mean_error;     /* sum of squared differences */

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  /*
    Scan the union of both geometries via virtual cache views.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        break;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Images without a matte channel are treated as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=fabs((double) (Sa*GetPixelRed(p)-Da*GetPixelRed(q)));
          distortion[RedChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=fabs((double) (Sa*GetPixelGreen(p)-Da*GetPixelGreen(q)));
          distortion[GreenChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=fabs((double) (Sa*GetPixelBlue(p)-Da*GetPixelBlue(q)));
          distortion[BlueChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=fabs((double) (GetPixelOpacity(p)-
            (double) GetPixelOpacity(q)));
          distortion[OpacityChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      /*
        Black (index) channel only when both images are CMYK.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs((double) (Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x)));
          distortion[BlackChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Convert raw accumulators into the image's error statistics; the
    reciprocal guards against a zero sample area.
  */
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*distortion[CompositeChannels];
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion() computes the mean squared error: the average of
  the squared, alpha-weighted, QuantumScale-normalized channel differences
  over all pixels.  The composite entry is additionally divided by the number
  of active channels.

  Returns MagickFalse if any pixel row could not be read.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /*
    Scan the union of both geometries via virtual cache views.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* thread-private partial sums */

    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Images without a matte channel are treated as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /*
        Opacity is compared without alpha weighting.
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*(GetPixelOpacity(p)-(MagickRealType)
            GetPixelOpacity(q));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /*
        Black (index) channel only when both images are CMYK.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      p++;
      q++;
    }
    /*
      Merge this row's partial sums into the shared accumulator.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize sums to per-pixel averages.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) columns*rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion() computes the normalized cross
  correlation (NCC) of each requested channel: the mean-centered product of
  the two images' samples, averaged over the pixel area and divided by the
  product of their standard deviations.  The composite entry is the RMS of
  the per-channel correlations.

  Returns MagickFalse if the channel statistics could not be computed, a
  pixel row could not be read, or the progress monitor cancelled.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    area;  /* reciprocal of the pixel count */

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageChannelStatistics(image,exception);
  reconstruct_statistics=GetImageChannelStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* Release whichever statistics buffer was successfully acquired. */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]=0.0;
  /*
    Scan the union of both geometries via virtual cache views.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=1.0/((MagickRealType) columns*rows);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        Da,
        Sa;

      /*
        Images without a matte channel are treated as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        distortion[RedChannel]+=area*QuantumScale*(Sa*GetPixelRed(p)-
          image_statistics[RedChannel].mean)*(Da*GetPixelRed(q)-
          reconstruct_statistics[RedChannel].mean);
      if ((channel & GreenChannel) != 0)
        distortion[GreenChannel]+=area*QuantumScale*(Sa*GetPixelGreen(p)-
          image_statistics[GreenChannel].mean)*(Da*GetPixelGreen(q)-
          reconstruct_statistics[GreenChannel].mean);
      if ((channel & BlueChannel) != 0)
        distortion[BlueChannel]+=area*QuantumScale*(Sa*GetPixelBlue(p)-
          image_statistics[BlueChannel].mean)*(Da*GetPixelBlue(q)-
          reconstruct_statistics[BlueChannel].mean);
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        distortion[OpacityChannel]+=area*QuantumScale*(
          GetPixelOpacity(p)-image_statistics[OpacityChannel].mean)*
          (GetPixelOpacity(q)-reconstruct_statistics[OpacityChannel].mean);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        distortion[BlackChannel]+=area*QuantumScale*(Sa*
          GetPixelIndex(indexes+x)-image_statistics[BlackChannel].mean)*(Da*
          GetPixelIndex(reconstruct_indexes+x)-
          reconstruct_statistics[BlackChannel].mean);
      p++;
      q++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): this loop runs serially (no parallel-for above); the
          atomic pragma is harmless here but looks like a leftover — confirm.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    double
      gamma;

    gamma=image_statistics[i].standard_deviation*
      reconstruct_statistics[i].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);  /* guard against zero deviation */
    distortion[i]=QuantumRange*gamma*distortion[i];
  }
  /*
    Composite correlation: RMS of the active per-channel correlations.
  */
  distortion[CompositeChannels]=0.0;
  if ((channel & RedChannel) != 0)
    distortion[CompositeChannels]+=distortion[RedChannel]*
      distortion[RedChannel];
  if ((channel & GreenChannel) != 0)
    distortion[CompositeChannels]+=distortion[GreenChannel]*
      distortion[GreenChannel];
  if ((channel & BlueChannel) != 0)
    distortion[CompositeChannels]+=distortion[BlueChannel]*
      distortion[BlueChannel];
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[CompositeChannels]+=distortion[OpacityChannel]*
      distortion[OpacityChannel];
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    distortion[CompositeChannels]+=distortion[BlackChannel]*
      distortion[BlackChannel];
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]/
    GetNumberChannels(image,channel));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion() records, per channel, the largest single
  QuantumScale-normalized absolute difference found anywhere in the images
  (peak absolute error).  Each thread tracks row-local maxima and folds them
  into distortion[] inside a critical section.

  Returns MagickFalse if any pixel row could not be read.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /*
    Scan the union of both geometries via virtual cache views.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* thread-private maxima */

    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Images without a matte channel are treated as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelRed(p)-Da*
            GetPixelRed(q)));
          if (distance > channel_distortion[RedChannel])
            channel_distortion[RedChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelGreen(p)-Da*
            GetPixelGreen(q)));
          if (distance > channel_distortion[GreenChannel])
            channel_distortion[GreenChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelBlue(p)-Da*
            GetPixelBlue(q)));
          if (distance > channel_distortion[BlueChannel])
            channel_distortion[BlueChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      /*
        Opacity is compared without alpha weighting.
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*fabs((double) (GetPixelOpacity(p)-(double)
            GetPixelOpacity(q)));
          if (distance > channel_distortion[OpacityChannel])
            channel_distortion[OpacityChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      /*
        Black (index) channel only when both images are CMYK.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x)));
          if (distance > channel_distortion[BlackChannel])
            channel_distortion[BlackChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      p++;
      q++;
    }
    /*
      Fold this row's maxima into the shared result.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      if (channel_distortion[i] > distortion[i])
        distortion[i]=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
const Image *reconstruct_image,const ChannelType channel,
double *distortion,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
exception);
if ((channel & RedChannel) != 0)
{
if (fabs(distortion[RedChannel]) < MagickEpsilon)
distortion[RedChannel]=INFINITY;
else
distortion[RedChannel]=10.0*MagickLog10(1.0)-10.0*
MagickLog10(distortion[RedChannel]);
}
if ((channel & GreenChannel) != 0)
{
if (fabs(distortion[GreenChannel]) < MagickEpsilon)
distortion[GreenChannel]=INFINITY;
else
distortion[GreenChannel]=10.0*MagickLog10(1.0)-10.0*
MagickLog10(distortion[GreenChannel]);
}
if ((channel & BlueChannel) != 0)
{
if (fabs(distortion[BlueChannel]) < MagickEpsilon)
distortion[BlueChannel]=INFINITY;
else
distortion[BlueChannel]=10.0*MagickLog10(1.0)-10.0*
MagickLog10(distortion[BlueChannel]);
}
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
{
if (fabs(distortion[OpacityChannel]) < MagickEpsilon)
distortion[OpacityChannel]=INFINITY;
else
distortion[OpacityChannel]=10.0*MagickLog10(1.0)-10.0*
MagickLog10(distortion[OpacityChannel]);
}
if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
{
if (fabs(distortion[BlackChannel]) < MagickEpsilon)
distortion[BlackChannel]=INFINITY;
else
distortion[BlackChannel]=10.0*MagickLog10(1.0)-10.0*
MagickLog10(distortion[BlackChannel]);
}
if (fabs(distortion[CompositeChannels]) < MagickEpsilon)
distortion[CompositeChannels]=INFINITY;
else
distortion[CompositeChannels]=10.0*MagickLog10(1.0)-10.0*
MagickLog10(distortion[CompositeChannels]);
return(status);
}
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *image_phash,
    *reconstruct_phash;

  double
    difference;

  ssize_t
    i;

  /*
    Accumulate the squared image-moment differences of both images: first
    over the sRGB-colorspace hash (P), then over the second-colorspace
    hash (Q).  The two passes are kept separate to preserve the exact
    floating-point summation order.
  */
  image_phash=GetImageChannelPerceptualHash(image,exception);
  if (image_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImageChannelPerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash);
      return(MagickFalse);
    }
#define AccumulateHashDistortion(id,moments) \
  do \
  { \
    difference=reconstruct_phash[id].moments[i]-image_phash[id].moments[i]; \
    distortion[id]+=difference*difference; \
    distortion[CompositeChannels]+=difference*difference; \
  } while(0)
  for (i=0; i < MaximumNumberOfImageMoments; i++)
  {
    if ((channel & RedChannel) != 0)
      AccumulateHashDistortion(RedChannel,P);
    if ((channel & GreenChannel) != 0)
      AccumulateHashDistortion(GreenChannel,P);
    if ((channel & BlueChannel) != 0)
      AccumulateHashDistortion(BlueChannel,P);
    if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse) &&
        (reconstruct_image->matte != MagickFalse))
      AccumulateHashDistortion(OpacityChannel,P);
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) &&
        (reconstruct_image->colorspace == CMYKColorspace))
      AccumulateHashDistortion(IndexChannel,P);
  }
  for (i=0; i < MaximumNumberOfImageMoments; i++)
  {
    if ((channel & RedChannel) != 0)
      AccumulateHashDistortion(RedChannel,Q);
    if ((channel & GreenChannel) != 0)
      AccumulateHashDistortion(GreenChannel,Q);
    if ((channel & BlueChannel) != 0)
      AccumulateHashDistortion(BlueChannel,Q);
    if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse) &&
        (reconstruct_image->matte != MagickFalse))
      AccumulateHashDistortion(OpacityChannel,Q);
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) &&
        (reconstruct_image->colorspace == CMYKColorspace))
      AccumulateHashDistortion(IndexChannel,Q);
  }
#undef AccumulateHashDistortion
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash);
  return(MagickTrue);
}
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    RMSE is the square root of the per-channel mean squared error.
  */
  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
#define ApplySquareRoot(id)  distortion[id]=sqrt(distortion[id])
  if ((channel & RedChannel) != 0)
    ApplySquareRoot(RedChannel);
  if ((channel & GreenChannel) != 0)
    ApplySquareRoot(GreenChannel);
  if ((channel & BlueChannel) != 0)
    ApplySquareRoot(BlueChannel);
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    ApplySquareRoot(OpacityChannel);
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    ApplySquareRoot(BlackChannel);  /* index (black) distortion slot */
  ApplySquareRoot(CompositeChannels);
#undef ApplySquareRoot
  return(status);
}
/*
  GetImageChannelDistortion() compares the selected channels of an image to a
  reconstructed image, stores the composite distortion in *distortion, and
  records it as the "distortion" image property.  Returns the status of the
  underlying metric computation.
*/
MagickExport MagickBooleanType GetImageChannelDistortion(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  /*
    FIX: previously this trace event was emitted twice per call; log once.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The perceptual-hash metric tolerates differing image geometry; all other
    metrics require matching morphology.
  */
  if (metric != PerceptualHashErrorMetric)
    if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
      ThrowBinaryException(ImageError,"ImageMorphologyDiffers",image->filename);
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      /* NCC doubles as the fallback for unrecognized metric values. */
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositeChannels];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDistortions() compares the image channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageChannelDistortions method is:
%
%      double *GetImageChannelDistortions(Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageChannelDistortions() computes the requested distortion metric for
  every channel (CompositeChannels) and returns a newly-allocated array of
  CompositeChannels+1 doubles; the caller owns and must relinquish it.
  Returns NULL on failure.
*/
MagickExport double *GetImageChannelDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /*
    FIX: previously this trace event was emitted twice per call; log once.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    NOTE(review): the morphology error is raised into image->exception, not
    the exception parameter — existing callers may depend on this; confirm
    before changing.
  */
  if (metric != PerceptualHashErrorMetric)
    if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
      {
        (void) ThrowMagickException(&image->exception,GetMagickModule(),
          ImageError,"ImageMorphologyDiffers","`%s'",image->filename);
        return((double *) NULL);
      }
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      /* NCC doubles as the fallback for unrecognized metric values. */
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(Image *image,
% const Image *reconstruct_image)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
*/
/*
  IsImagesEqual() accumulates per-channel absolute pixel differences of the
  two images into image->error and returns MagickTrue only when the mean
  error per pixel is exactly zero (i.e. the images match).
*/
MagickExport MagickBooleanType IsImagesEqual(Image *image,
  const Image *reconstruct_image)
{
  CacheView
    *image_view,
    *reconstruct_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    area,                 /* number of channel samples compared */
    gamma,                /* 1/area (safe reciprocal) */
    maximum_error,
    mean_error,           /* sum of squared differences */
    mean_error_per_pixel; /* sum of absolute differences */

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  exception=(&image->exception);
  if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
    ThrowBinaryException(ImageError,"ImageMorphologyDiffers",image->filename);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  /*
    Iterate over the larger of the two geometries; virtual views return
    pixels even outside an image's actual bounds.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance;

      /* Red channel difference. */
      distance=fabs((double) (GetPixelRed(p)-(double) GetPixelRed(q)));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      /* Green channel difference. */
      distance=fabs((double) (GetPixelGreen(p)-(double) GetPixelGreen(q)));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      /* Blue channel difference. */
      distance=fabs((double) (GetPixelBlue(p)-(double) GetPixelBlue(q)));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      /* Opacity channel, only when this image carries a matte. */
      if (image->matte != MagickFalse)
        {
          distance=fabs((double) (GetPixelOpacity(p)-(double)
            GetPixelOpacity(q)));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      /* Black (index) channel, only when both images are CMYK. */
      if ((image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs((double) (GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reconstruct_indexes+x)));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize by the number of samples (area counts channels, not pixels). */
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
%    o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *crop;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  /*
    Crop a reference-sized window at the given offset and measure its
    distortion against the reference image; 0.0 when the crop fails.
  */
  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  crop=CropImage(image,&geometry,exception);
  if (crop == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(crop,reference,metric,&distortion,exception);
  (void) status;
  crop=DestroyImage(crop);
  return(distortion);
}
MagickExport Image *SimilarityImage(Image *image,const Image *reference,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: delegate to SimilarityMetricImage() with the
    default root-mean-squared-error metric.
  */
  return(SimilarityMetricImage(image,reference,RootMeanSquaredErrorMetric,
    offset,similarity_metric,exception));
}
/*
  SimilarityMetricImage() slides the reference image over every offset of
  the target image, records the best (smallest) metric value and its offset,
  and returns a similarity map image (white = exact match).  The scan may
  terminate early once the metric drops to or below the
  "compare:similarity-threshold" artifact.
*/
MagickExport Image *SimilarityMetricImage(Image *image,const Image *reference,
  const MetricType metric,RectangleInfo *offset,double *similarity_metric,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  const char
    *artifact;

  double
    similarity_threshold;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  if (ValidateImageMorphology(image,reference) == MagickFalse)
    ThrowImageException(ImageError,"ImageMorphologyDiffers");
  /*
    The similarity map has one pixel per candidate offset.
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(similarity_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&similarity_image->exception);
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel);
  /*
    Measure similarity of reference image against image.
  */
  similarity_threshold=(-1.0);
  artifact=GetImageArtifact(image,"compare:similarity-threshold");
  if (artifact != (const char *) NULL)
    similarity_threshold=StringToDouble(artifact,(char **) NULL);
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireVirtualCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    ssize_t
      x;

    PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /*
        For correlation-style metrics a larger value is better, so invert.
      */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      /*
        BUG FIX: the critical section must guard the read-compare-update of
        the shared minimum (*similarity_metric and *offset).  Previously it
        only covered the metric normalization above, so concurrent threads
        raced on the minimum update.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          *similarity_metric=similarity;
          offset->x=x;
          offset->y=y;
        }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      SetPixelRed(q,ClampToQuantum(QuantumRange-QuantumRange*similarity));
      SetPixelGreen(q,GetPixelRed(q));
      SetPixelBlue(q,GetPixelRed(q));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
dpado.202001292043.MAX_UNWEIGHTED_DIST_define.h | //
// Created by Zhen Peng on 1/6/20.
//
#ifndef PADO_DPADO_H
#define PADO_DPADO_H
#include <vector>
//#include <unordered_map>
#include <map>
#include <algorithm>
#include <iostream>
#include <limits.h>
//#include <xmmintrin.h>
#include <immintrin.h>
#include <bitset>
#include <math.h>
#include <fstream>
#include <omp.h>
#include "globals.h"
#include "dglobals.h"
#include "dgraph.h"
namespace PADO {
template <VertexID BATCH_SIZE = 1024>
class DistBVCPLL {
private:
static const VertexID BITPARALLEL_SIZE = 50;
const inti THRESHOLD_PARALLEL = 0;
    // Structure for the type of label.
    // A vertex's complete 2-hop label set: fixed-size bit-parallel labels
    // (bp_dist, bp_sets) plus normal labels organized as
    // batches -> distances -> vertices.
    struct IndexType {
        // Locates one batch's labels inside the distances array:
        // distances[start_index .. start_index+size).
        struct Batch {
            // VertexID batch_id; // Batch ID
            VertexID start_index; // Index to the array distances where the batch starts
            VertexID size; // Number of distances element in this batch

            Batch() = default;
            Batch(VertexID start_index_, VertexID size_):
                start_index(start_index_), size(size_)
            { }
            // Batch(VertexID batch_id_, VertexID start_index_, VertexID size_):
            //     batch_id(batch_id_), start_index(start_index_), size(size_)
            // { }
        };

        // Groups the label vertices that share one distance value:
        // vertices[start_index .. start_index+size) all lie at dist.
        struct DistanceIndexType {
            VertexID start_index; // Index to the array vertices where the same-distance vertices start
            VertexID size; // Number of the same-distance vertices
            UnweightedDist dist; // The real distance

            DistanceIndexType() = default;
            DistanceIndexType(VertexID start_index_, VertexID size_, UnweightedDist dist_):
                start_index(start_index_), size(size_), dist(dist_)
            { }
        };

        // Bit-parallel Labels
        UnweightedDist bp_dist[BITPARALLEL_SIZE];
        uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0}

        std::vector<Batch> batches; // Batch info
        std::vector<DistanceIndexType> distances; // Distance info
        std::vector<VertexID> vertices; // Vertices in the label, presented as temporary ID

        // Approximate memory footprint of this label entry.
        // NOTE(review): the batches vector is excluded (see commented line) —
        // presumably deliberate; confirm before relying on this figure.
        size_t get_size_in_bytes() const
        {
            return sizeof(bp_dist) +
                sizeof(bp_sets) +
                // batches.size() * sizeof(Batch) +
                distances.size() * sizeof(DistanceIndexType) +
                vertices.size() * sizeof(VertexID);
        }

        // Release all label storage by swapping with empty vectors
        // (clear() alone would keep the capacity allocated).
        void clean_all_indices()
        {
            std::vector<Batch>().swap(batches);
            std::vector<DistanceIndexType>().swap(distances);
            std::vector<VertexID>().swap(vertices);
        }
    }; //__attribute__((aligned(64)));
    // Per-vertex scratch state used while processing one batch of roots:
    // which roots have been seen (indicator) and which are current
    // candidates (candidates_que / is_candidate).
    struct ShortIndex {
        // I use BATCH_SIZE + 1 bit for indicator bit array.
        // The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already.
        // In this way, it helps update_label_indices() and can be reset along with other indicator elements.
        // std::bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already
        // If the Batch structure is not used, the indicator could just be BATCH_SIZE long.
        // std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE, 0);
        std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE + 1, 0);

        // Use a queue to store candidates
        std::vector<VertexID> candidates_que = std::vector<VertexID>(BATCH_SIZE); // queue of candidate root IDs
        VertexID end_candidates_que = 0; // number of valid entries in candidates_que
        std::vector<uint8_t> is_candidate = std::vector<uint8_t>(BATCH_SIZE, 0); // membership flags for candidates_que

        // Clear every indicator flag (including the BATCH_SIZE sentinel slot).
        void indicator_reset()
        {
            std::fill(indicator.begin(), indicator.end(), 0);
        }
    }; //__attribute__((aligned(64)));
    // Type of Bit-Parallel Label.
    // Zero-initialized standalone copy of a root's bit-parallel label
    // (distances plus the S^{-1}/S^{0} bitmasks).
    struct BPLabelType {
        UnweightedDist bp_dist[BITPARALLEL_SIZE] = { 0 };
        uint64_t bp_sets[BITPARALLEL_SIZE][2] = { {0} }; // [0]: S^{-1}, [1]: S^{0}
    };
    // Type of Label Message Unit, for initializing distance table.
    // One (root, label, distance) triple exchanged between hosts.
    struct LabelTableUnit {
        VertexID root_id;         // root the label belongs to
        VertexID label_global_id; // global ID of the labeled vertex
        UnweightedDist dist;      // distance from root to label vertex

        LabelTableUnit() = default;
        LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
            root_id(r), label_global_id(l), dist(d) {}
    };
    // Type of BitParallel Label Message Unit for initializing bit-parallel labels.
    // Carries one root's full bit-parallel label across hosts; the
    // constructor deep-copies the fixed-size arrays.
    struct MsgBPLabel {
        VertexID r_root_id; // root whose bit-parallel label this is
        UnweightedDist bp_dist[BITPARALLEL_SIZE];
        uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0}

        MsgBPLabel() = default;
        MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
            : r_root_id(r)
        {
            memcpy(bp_dist, dist, sizeof(bp_dist));
            memcpy(bp_sets, sets, sizeof(bp_sets));
        }
    };
VertexID num_v = 0;
VertexID num_masters = 0;
// VertexID BATCH_SIZE = 0;
int host_id = 0;
int num_hosts = 0;
MPI_Datatype V_ID_Type;
std::vector<IndexType> L;
inline void bit_parallel_push_labels(
const DistGraph &G,
VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
std::vector<VertexID> &tmp_q,
VertexID &size_tmp_q,
std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
VertexID &size_tmp_sibling_es,
std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
VertexID &size_tmp_child_es,
const VertexID &offset_tmp_q,
std::vector<UnweightedDist> &dists,
UnweightedDist iter);
inline void bit_parallel_labeling(
const DistGraph &G,
std::vector<uint8_t> &used_bp_roots);
// inline void bit_parallel_push_labels(
// const DistGraph &G,
// VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
// std::vector<UnweightedDist> &dists,
// UnweightedDist iter);
// inline void bit_parallel_labeling(
// const DistGraph &G,
//// std::vector<IndexType> &L,
// std::vector<uint8_t> &used_bp_roots);
inline void batch_process(
const DistGraph &G,
// const VertexID b_id,
const VertexID roots_start,
const VertexID roots_size,
const std::vector<uint8_t> &used_bp_roots,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<ShortIndex> &short_index,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
std::vector<uint8_t> &got_candidates,
// std::vector<bool> &got_candidates,
std::vector<uint8_t> &is_active,
// std::vector<bool> &is_active,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated);
// std::vector<bool> &once_candidated);
inline VertexID initialization(
const DistGraph &G,
std::vector<ShortIndex> &short_index,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
// std::vector<bool> &once_candidated,
// VertexID b_id,
VertexID roots_start,
VertexID roots_size,
// std::vector<VertexID> &roots_master_local,
const std::vector<uint8_t> &used_bp_roots);
// inline void push_single_label(
// VertexID v_head_global,
// VertexID label_root_id,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter);
inline void schedule_label_pushing_para(
const DistGraph &G,
const VertexID roots_start,
const std::vector<uint8_t> &used_bp_roots,
const std::vector<VertexID> &active_queue,
const VertexID global_start,
const VertexID global_size,
const VertexID local_size,
// const VertexID start_active_queue,
// const VertexID size_active_queue,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<ShortIndex> &short_index,
const std::vector<BPLabelType> &bp_labels_table,
std::vector<uint8_t> &got_candidates,
std::vector<uint8_t> &is_active,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const UnweightedDist iter);
inline void local_push_labels_seq(
VertexID v_head_global,
EdgeID start_index,
EdgeID bound_index,
VertexID roots_start,
const std::vector<VertexID> &labels_buffer,
const DistGraph &G,
std::vector<ShortIndex> &short_index,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<uint8_t> &got_candidates,
// std::vector<bool> &got_candidates,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
// std::vector<bool> &once_candidated,
const std::vector<BPLabelType> &bp_labels_table,
const std::vector<uint8_t> &used_bp_roots,
const UnweightedDist iter);
// Parallel edge-relaxation for one head vertex: pushes candidate labels from
// v_head_global's edges in [start_index, bound_index) into the calling
// thread's slice (starting at offset_tmp_queue) of the temporary queues,
// which are merged into the shared queues afterwards.
inline void local_push_labels_para(
const VertexID v_head_global,
const EdgeID start_index,
const EdgeID bound_index,
const VertexID roots_start,
const std::vector<VertexID> &labels_buffer,
const DistGraph &G,
std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
std::vector<VertexID> &tmp_got_candidates_queue,
VertexID &size_tmp_got_candidates_queue,
const VertexID offset_tmp_queue,
std::vector<uint8_t> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
std::vector<VertexID> &tmp_once_candidated_queue,
VertexID &size_tmp_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const std::vector<BPLabelType> &bp_labels_table,
const std::vector<uint8_t> &used_bp_roots,
const UnweightedDist iter);
// inline void local_push_labels(
// VertexID v_head_local,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter);
// Parallel scheduling of label insertion over a range of this iteration's
// candidate queue; verified candidates are inserted and their owners
// (re)activated. See definition for exact distance-check semantics.
inline void schedule_label_inserting_para(
const DistGraph &G,
const VertexID roots_start,
const VertexID roots_size,
std::vector<ShortIndex> &short_index,
const std::vector< std::vector<UnweightedDist> > &dist_table,
const std::vector<VertexID> &got_candidates_queue,
const VertexID start_got_candidates_queue,
const VertexID size_got_candidates_queue,
std::vector<uint8_t> &got_candidates,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<uint8_t> &is_active,
std::vector< std::pair<VertexID, VertexID> > &buffer_send,
const VertexID iter);
// Distance query for candidate pruning: returns whether an existing label
// already covers (cand_root_id + roots_start, v_id) within distance iter —
// see definition for the exact check.
inline bool distance_query(
VertexID cand_root_id,
VertexID v_id,
VertexID roots_start,
// const std::vector<IndexType> &L,
const std::vector< std::vector<UnweightedDist> > &dist_table,
UnweightedDist iter);
// Sequential label insertion; records the inserted (root, vertex) pair in
// buffer_send, presumably for propagation to other hosts — see definition.
inline void insert_label_only_seq(
VertexID cand_root_id,
// VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
VertexID roots_size,
const DistGraph &G,
// std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::pair<VertexID, VertexID> > &buffer_send);
// UnweightedDist iter);
// Parallel counterpart of insert_label_only_seq: writes into the calling
// thread's slice of tmp_buffer_send starting at offset_tmp_buffer_send.
inline void insert_label_only_para(
VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
VertexID roots_size,
const DistGraph &G,
// std::vector< std::pair<VertexID, VertexID> > &buffer_send)
std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
EdgeID &size_tmp_buffer_send,
const EdgeID offset_tmp_buffer_send);
// Post-insertion bookkeeping for v_id after inserted_count labels were added
// at level iter (updates the short index — see definition).
inline void update_label_indices(
const VertexID v_id,
const VertexID inserted_count,
// std::vector<IndexType> &L,
std::vector<ShortIndex> &short_index,
// VertexID b_id,
const UnweightedDist iter);
// End-of-batch reset of per-batch structures (dist_table, recved_dist_table,
// bp_labels_table, and the once-candidated bookkeeping).
inline void reset_at_end(
const DistGraph &G,
// VertexID roots_start,
// const std::vector<VertexID> &roots_master_local,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
const std::vector<VertexID> &once_candidated_queue,
const VertexID end_once_candidated_queue);
// template <typename E_T, typename F>
// inline void every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun);
// Host `root` broadcasts its buffer_send; the content arrives in buffer_recv.
// NOTE(review): behavior on the root itself (self-copy vs. empty) is not
// visible here — see definition.
template <typename E_T>
inline void one_host_bcasts_buffer_to_buffer(
int root,
std::vector<E_T> &buffer_send,
std::vector<E_T> &buffer_recv);
// // Function: get the destination host id which is i hop from this host.
// // For example, 1 hop from host 2 is host 0 (assume total 3 hosts);
// // -1 hop from host 0 is host 2.
// int hop_2_me_host_id(int hop) const
// {
// assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
// return (host_id + hop + num_hosts) % num_hosts;
// }
// // Function: get the destination host id which is i hop from the root.
// // For example, 1 hop from host 2 is host 0 (assume total 3 hosts);
// // -1 hop from host 0 is host 2.
// int hop_2_root_host_id(int hop, int root) const
// {
// assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
// assert(root >= 0 && root < num_hosts);
// return (root + hop + num_hosts) % num_hosts;
// }
// Total memory footprint, in bytes, of the label index held by this host
// (only local masters have entries in L).
size_t get_index_size()
{
size_t total = 0;
VertexID v = 0;
while (v < num_masters) {
total += L[v].get_size_in_bytes();
++v;
}
return total;
}
// Test only
// uint64_t normal_hit_count = 0;
// uint64_t bp_hit_count = 0;
// uint64_t total_check_count = 0;
// uint64_t normal_check_count = 0;
// uint64_t total_candidates_num = 0;
// uint64_t set_candidates_num = 0;
// double initializing_time = 0;
// double candidating_time = 0;
// double adding_time = 0;
// double distance_query_time = 0;
// double init_index_time = 0;
// double init_dist_matrix_time = 0;
// double init_start_reset_time = 0;
// double init_indicators_time = 0;
//L2CacheMissRate cache_miss;
// double message_time = 0;
// double bp_labeling_time = 0;
// double initializing_time = 0;
// double scatter_time = 0;
// double gather_time = 0;
// double clearup_time = 0;
// TotalInstructsExe candidating_ins_count;
// TotalInstructsExe adding_ins_count;
// TotalInstructsExe bp_labeling_ins_count;
// TotalInstructsExe bp_checking_ins_count;
// TotalInstructsExe dist_query_ins_count;
// uint64_t caller_line = 0;
// End test
public:
// std::pair<uint64_t, uint64_t> length_larger_than_16 = std::make_pair(0, 0);
// Default-constructed object holds no labels.
DistBVCPLL() = default;
// Builds the complete distributed labeling for G: bit-parallel labels first,
// then batched vertex-centric labeling; prints size/timing statistics.
explicit DistBVCPLL(
const DistGraph &G);
// UnweightedDist dist_distance_query_pair(
// VertexID a_global,
// VertexID b_global,
// const DistGraph &G);
}; // class DistBVCPLL
// Constructor: builds the whole distributed 2-hop labeling for graph G.
// Phase 1 builds the bit-parallel labels (their roots are excluded from later
// batches via used_bp_roots); phase 2 runs batch_process over the vertices in
// batches of BATCH_SIZE roots. Afterwards, label counts and labeling time are
// aggregated with MPI and printed by host 0.
template <VertexID BATCH_SIZE>
DistBVCPLL<BATCH_SIZE>::
DistBVCPLL(
const DistGraph &G)
{
// Cache graph metadata in members.
num_v = G.num_v;
assert(num_v >= BATCH_SIZE);
num_masters = G.num_masters;
host_id = G.host_id;
// {
// if (1 == host_id) {
// volatile int i = 0;
// while (i == 0) {
// sleep(5);
// }
// }
// }
num_hosts = G.num_hosts;
V_ID_Type = G.V_ID_Type;
// L.resize(num_v);
// The label index only stores entries for this host's master vertices.
L.resize(num_masters);
// NOTE(review): "remainer" is a typo for "remainder" (vertices left over
// after the full batches); name kept unchanged in this doc-only pass.
VertexID remainer = num_v % BATCH_SIZE;
VertexID b_i_bound = num_v / BATCH_SIZE;
std::vector<uint8_t> used_bp_roots(num_v, 0);
//cache_miss.measure_start();
double time_labeling = -WallTimer::get_time_mark();
// bp_labeling_time -= WallTimer::get_time_mark();
// Phase 1: bit-parallel labeling; marks its roots in used_bp_roots.
bit_parallel_labeling(G,
used_bp_roots);
// bp_labeling_time += WallTimer::get_time_mark();
{//test
//#ifdef DEBUG_MESSAGES_ON
if (0 == host_id) {
printf("host_id: %u bp_labeling_finished.\n", host_id);
}
//#endif
}
// Working set shared by all batches (allocated once, reset between batches).
std::vector<VertexID> active_queue(num_masters); // Any vertex v who is active should be put into this queue.
VertexID end_active_queue = 0;
std::vector<uint8_t> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue.
// std::vector<bool> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue.
std::vector<VertexID> got_candidates_queue(num_masters); // Any vertex v who got candidates should be put into this queue.
VertexID end_got_candidates_queue = 0;
std::vector<uint8_t> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
// std::vector<bool> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
std::vector<ShortIndex> short_index(num_masters);
std::vector< std::vector<UnweightedDist> > dist_table(BATCH_SIZE, std::vector<UnweightedDist>(num_v, MAX_UNWEIGHTED_DIST));
std::vector<VertexID> once_candidated_queue(num_masters); // if short_index[v].indicator.any() is true, v is in the queue.
// Used mainly for resetting short_index[v].indicator.
VertexID end_once_candidated_queue = 0;
std::vector<uint8_t> once_candidated(num_masters, false);
// std::vector<bool> once_candidated(num_masters, false);
std::vector< std::vector<VertexID> > recved_dist_table(BATCH_SIZE); // Some distances are from other hosts. This is used to reset the dist_table.
std::vector<BPLabelType> bp_labels_table(BATCH_SIZE); // All roots' bit-parallel labels
//printf("b_i_bound: %u\n", b_i_bound);//test
// Phase 2: full batches of BATCH_SIZE roots each.
for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
// {
////#ifdef DEBUG_MESSAGES_ON
// if (b_i % 4000 == 0 && 0 == host_id) {
if (0 == host_id) {
printf("b_i: %u\n", b_i);//test
}
////#endif
// }
batch_process(
G,
// b_i,
b_i * BATCH_SIZE,
BATCH_SIZE,
// L,
used_bp_roots,
active_queue,
end_active_queue,
got_candidates_queue,
end_got_candidates_queue,
short_index,
dist_table,
recved_dist_table,
bp_labels_table,
got_candidates,
is_active,
once_candidated_queue,
end_once_candidated_queue,
once_candidated);
// exit(EXIT_SUCCESS); //test
}
// Final partial batch when num_v is not a multiple of BATCH_SIZE.
if (remainer != 0) {
{
//#ifdef DEBUG_MESSAGES_ON
if (0 == host_id) {
printf("b_i: %u\n", b_i_bound);//test
}
//#endif
}
batch_process(
G,
// b_i_bound,
b_i_bound * BATCH_SIZE,
remainer,
// L,
used_bp_roots,
active_queue,
end_active_queue,
got_candidates_queue,
end_got_candidates_queue,
short_index,
dist_table,
recved_dist_table,
bp_labels_table,
got_candidates,
is_active,
once_candidated_queue,
end_once_candidated_queue,
once_candidated);
}
time_labeling += WallTimer::get_time_mark();
//cache_miss.measure_stop();
// Test
// Reporting: aggregate label counts and labeling time across hosts.
setlocale(LC_NUMERIC, "");
if (0 == host_id) {
printf("BATCH_SIZE: %u ", BATCH_SIZE);
printf("BP_Size: %u THRESHOLD_PARALLEL: %u\n", BITPARALLEL_SIZE, THRESHOLD_PARALLEL);
}
{// Total Number of Labels
EdgeID local_num_labels = 0;
for (VertexID v_global = 0; v_global < num_v; ++v_global) {
if (G.get_master_host_id(v_global) != host_id) {
continue;
}
local_num_labels += L[G.get_local_vertex_id(v_global)].vertices.size();
}
EdgeID global_num_labels;
MPI_Allreduce(&local_num_labels,
&global_num_labels,
1,
MPI_Instance::get_mpi_datatype<EdgeID>(),
MPI_SUM,
MPI_COMM_WORLD);
// printf("host_id: %u local_num_labels: %lu %.2f%%\n", host_id, local_num_labels, 100.0 * local_num_labels / global_num_labels);
MPI_Barrier(MPI_COMM_WORLD);
if (0 == host_id) {
// NOTE(review): %lu assumes EdgeID is unsigned long — confirm it matches
// EdgeID's actual definition on all target platforms.
printf("Global_num_labels: %lu average: %f\n", global_num_labels, 1.0 * global_num_labels / num_v);
}
// VertexID local_num_batches = 0;
// VertexID local_num_distances = 0;
//// double local_avg_distances_per_batches = 0;
// for (VertexID v_global = 0; v_global < num_v; ++v_global) {
// if (G.get_master_host_id(v_global) != host_id) {
// continue;
// }
// VertexID v_local = G.get_local_vertex_id(v_global);
// local_num_batches += L[v_local].batches.size();
// local_num_distances += L[v_local].distances.size();
//// double avg_d_p_b = 0;
//// for (VertexID i_b = 0; i_b < L[v_local].batches.size(); ++i_b) {
//// avg_d_p_b += L[v_local].batches[i_b].size;
//// }
//// avg_d_p_b /= L[v_local].batches.size();
//// local_avg_distances_per_batches += avg_d_p_b;
// }
//// local_avg_distances_per_batches /= num_masters;
//// double local_avg_batches = local_num_batches * 1.0 / num_masters;
//// double local_avg_distances = local_num_distances * 1.0 / num_masters;
// uint64_t global_num_batches = 0;
// uint64_t global_num_distances = 0;
// MPI_Allreduce(
// &local_num_batches,
// &global_num_batches,
// 1,
// MPI_UINT64_T,
// MPI_SUM,
// MPI_COMM_WORLD);
//// global_avg_batches /= num_hosts;
// MPI_Allreduce(
// &local_num_distances,
// &global_num_distances,
// 1,
// MPI_UINT64_T,
// MPI_SUM,
// MPI_COMM_WORLD);
//// global_avg_distances /= num_hosts;
// double global_avg_d_p_b = global_num_distances * 1.0 / global_num_batches;
// double global_avg_l_p_d = global_num_labels * 1.0 / global_num_distances;
// double global_avg_batches = global_num_batches / num_v;
// double global_avg_distances = global_num_distances / num_v;
//// MPI_Allreduce(
//// &local_avg_distances_per_batches,
//// &global_avg_d_p_b,
//// 1,
//// MPI_DOUBLE,
//// MPI_SUM,
//// MPI_COMM_WORLD);
//// global_avg_d_p_b /= num_hosts;
// MPI_Barrier(MPI_COMM_WORLD);
// if (0 == host_id) {
// printf("global_avg_batches: %f "
// "global_avg_distances: %f "
// "global_avg_distances_per_batch: %f "
// "global_avg_labels_per_distance: %f\n",
// global_avg_batches,
// global_avg_distances,
// global_avg_d_p_b,
// global_avg_l_p_d);
// }
}
// printf("BP_labeling: %f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100);
// printf("Initializing: %f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100);
// printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100);
// printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100);
// printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, init_indicators_time / init_index_time * 100);
// printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100);
// printf("Candidating: %f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100);
// printf("Adding: %f %.2f%%\n", adding_time, adding_time / time_labeling * 100);
// printf("distance_query_time: %f %.2f%%\n", distance_query_time, distance_query_time / time_labeling * 100);
// uint64_t total_check_count = bp_hit_count + normal_check_count;
// printf("total_check_count: %'llu\n", total_check_count);
// printf("bp_hit_count: %'llu %.2f%%\n",
// bp_hit_count,
// bp_hit_count * 100.0 / total_check_count);
// printf("normal_check_count: %'llu %.2f%%\n", normal_check_count, normal_check_count * 100.0 / total_check_count);
// printf("total_candidates_num: %'llu set_candidates_num: %'llu %.2f%%\n",
// total_candidates_num,
// set_candidates_num,
// set_candidates_num * 100.0 / total_candidates_num);
// printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n",
// normal_hit_count,
// normal_hit_count * 100.0 / total_check_count,
// normal_hit_count * 100.0 / (total_check_count - bp_hit_count));
//cache_miss.print();
// printf("Candidating: "); candidating_ins_count.print();
// printf("Adding: "); adding_ins_count.print();
// printf("BP_Labeling: "); bp_labeling_ins_count.print();
// printf("BP_Checking: "); bp_checking_ins_count.print();
// printf("distance_query: "); dist_query_ins_count.print();
// if (0 == host_id) {
// printf("num_hosts: %u host_id: %u\n"
// "Local_labeling_time: %.2f seconds\n"
// "bp_labeling_time: %.2f %.2f%%\n"
// "initializing_time: %.2f %.2f%%\n"
// "scatter_time: %.2f %.2f%%\n"
// "gather_time: %.2f %.2f%%\n"
// "clearup_time: %.2f %.2f%%\n"
// "message_time: %.2f %.2f%%\n",
// num_hosts, host_id,
// time_labeling,
// bp_labeling_time, 100.0 * bp_labeling_time / time_labeling,
// initializing_time, 100.0 * initializing_time / time_labeling,
// scatter_time, 100.0 * scatter_time / time_labeling,
// gather_time, 100.0 * gather_time / time_labeling,
// clearup_time, 100.0 * clearup_time / time_labeling,
// message_time, 100.0 * message_time / time_labeling);
// }
// The reported labeling time is the slowest host's (MPI_MAX).
double global_time_labeling;
MPI_Allreduce(&time_labeling,
&global_time_labeling,
1,
MPI_DOUBLE,
MPI_MAX,
MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
if (0 == host_id) {
printf("num_hosts: %d "
"num_threads: %d "
"Global_labeling_time: %.2f seconds\n",
num_hosts,
NUM_THREADS,
global_time_labeling);
}
// End test
}
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_labeling(
// const DistGraph &G,
// std::vector<uint8_t> &used_bp_roots)
//{
//// VertexID num_v = G.num_v;
// EdgeID num_e = G.num_e;
//
// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v
// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
// std::vector<VertexID> que(num_v); // active queue
// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0)
// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1.
//
// VertexID r = 0; // root r
// for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
// while (r < num_v && used_bp_roots[r]) {
// ++r;
// }
// if (r == num_v) {
// for (VertexID v = 0; v < num_v; ++v) {
// L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
// }
// continue;
// }
// used_bp_roots[r] = true;
//
// fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
// fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
//
// VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
// que[que_h++] = r;
// tmp_d[r] = 0;
// que_t1 = que_h;
//
// int ns = 0; // number of selected neighbor, default 64
// // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward
// // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF.
//// VertexID i_bound = G.vertices[r] - 1;
//// VertexID i_start = i_bound + G.out_degrees[r];
//// for (VertexID i = i_start; i > i_bound; --i) {
// //int i_bound = G.vertices[r];
// //int i_start = i_bound + G.out_degrees[r] - 1;
// //for (int i = i_start; i >= i_bound; --i) {
// VertexID d_i_bound = G.local_out_degrees[r];
// EdgeID i_start = G.vertices_idx[r] + d_i_bound - 1;
// for (VertexID d_i = 0; d_i < d_i_bound; ++d_i) {
// EdgeID i = i_start - d_i;
// VertexID v = G.out_edges[i];
// if (!used_bp_roots[v]) {
// used_bp_roots[v] = true;
// // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set)
// que[que_h++] = v;
// tmp_d[v] = 1;
// tmp_s[v].first = 1ULL << ns;
// if (++ns == 64) break;
// }
// }
// //}
//// }
//
// for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
// VertexID num_sibling_es = 0, num_child_es = 0;
//
// for (VertexID que_i = que_t0; que_i < que_t1; ++que_i) {
// VertexID v = que[que_i];
//// bit_parallel_push_labels(G,
//// v,
//// que,
//// que_h,
//// sibling_es,
//// num_sibling_es,
//// child_es,
//// num_child_es,
//// tmp_d,
//// d);
// EdgeID i_start = G.vertices_idx[v];
// EdgeID i_bound = i_start + G.local_out_degrees[v];
// for (EdgeID i = i_start; i < i_bound; ++i) {
// VertexID tv = G.out_edges[i];
// UnweightedDist td = d + 1;
//
// if (d > tmp_d[tv]) {
// ;
// }
// else if (d == tmp_d[tv]) {
// if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph.
// sibling_es[num_sibling_es].first = v;
// sibling_es[num_sibling_es].second = tv;
// ++num_sibling_es;
// }
// } else { // d < tmp_d[tv]
// if (tmp_d[tv] == MAX_UNWEIGHTED_DIST) {
// que[que_h++] = tv;
// tmp_d[tv] = td;
// }
// child_es[num_child_es].first = v;
// child_es[num_child_es].second = tv;
// ++num_child_es;
// }
// }
// }
//
// for (VertexID i = 0; i < num_sibling_es; ++i) {
// VertexID v = sibling_es[i].first, w = sibling_es[i].second;
// tmp_s[v].second |= tmp_s[w].first;
// tmp_s[w].second |= tmp_s[v].first;
// }
// for (VertexID i = 0; i < num_child_es; ++i) {
// VertexID v = child_es[i].first, c = child_es[i].second;
// tmp_s[c].first |= tmp_s[v].first;
// tmp_s[c].second |= tmp_s[v].second;
// }
//
// {// test
// printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es);
//// if (4 == d) {
//// exit(EXIT_SUCCESS);
//// }
// }
//
// que_t0 = que_t1;
// que_t1 = que_h;
// }
//
// for (VertexID v = 0; v < num_v; ++v) {
// L[v].bp_dist[i_bpspt] = tmp_d[v];
// L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1}
// L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1}
// }
// }
//
//}
// Relax all local out-edges of v_global at level `iter` of a bit-parallel
// BFS (iter is v_global's distance from the current BP root). Callers run
// this from an OpenMP parallel loop; each call writes into its own disjoint
// slice of the shared temporary arrays, starting at offset_tmp_q, with the
// per-call counts returned through the size_* reference parameters.
// - A neighbor at the same level produces a sibling-edge record.
// - A neighbor one level deeper produces a child-edge record; if it is still
//   undiscovered, its distance is claimed via CAS so that exactly one thread
//   enqueues it for the next level.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_push_labels(
const DistGraph &G,
const VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
std::vector<VertexID> &tmp_q, // out: vertices discovered for the next level
VertexID &size_tmp_q, // out: how many were written into tmp_q's slice
std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
VertexID &size_tmp_sibling_es,
std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
VertexID &size_tmp_child_es,
const VertexID &offset_tmp_q, // start of this call's slice in all tmp arrays
std::vector<UnweightedDist> &dists, // distances, indexed by LOCAL vertex id
const UnweightedDist iter)
{
EdgeID i_start = G.vertices_idx[v_global];
EdgeID i_bound = i_start + G.local_out_degrees[v_global];
// {//test
// printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]);
// }
for (EdgeID i = i_start; i < i_bound; ++i) {
VertexID tv_global = G.out_edges[i];
VertexID tv_local = G.get_local_vertex_id(tv_global);
UnweightedDist td = iter + 1;
if (iter > dists[tv_local]) {
// Neighbor is strictly closer to the root: nothing to record.
;
} else if (iter == dists[tv_local]) {
// Same level: record each sibling edge once (only from the
// smaller-id endpoint, since the graph is undirected).
if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph.
tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].first = v_global;
tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].second = tv_global;
++size_tmp_sibling_es;
// sibling_es[num_sibling_es].first = v_global;
// sibling_es[num_sibling_es].second = tv_global;
// ++num_sibling_es;
}
} else { // iter < dists[tv]
// Deeper level: tv is a child of v. If undiscovered, claim its
// distance atomically; the winning thread enqueues it.
if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
if (CAS(dists.data() + tv_local, MAX_UNWEIGHTED_DIST, td)) {
tmp_q[offset_tmp_q + size_tmp_q++] = tv_global;
}
}
// if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
// tmp_que[end_tmp_que++] = tv_global;
// dists[tv_local] = td;
// }
// The child edge is recorded whether or not the CAS above won.
tmp_child_es[offset_tmp_q + size_tmp_child_es].first = v_global;
tmp_child_es[offset_tmp_q + size_tmp_child_es].second = tv_global;
++size_tmp_child_es;
// child_es[num_child_es].first = v_global;
// child_es[num_child_es].second = tv_global;
// ++num_child_es;
}
}
}
// Build BITPARALLEL_SIZE bit-parallel shortest-path trees over the
// distributed graph (Akiba-style bit-parallel labels). For each tree, host 0
// picks an unused root r and up to 64 of r's unused neighbors; a distributed
// level-synchronous BFS then records, for every local master v, its distance
// from r (bp_dist) and two 64-bit sets (bp_sets): S_r^{-1} (selected
// neighbors one step ahead of v on a shortest path) and S_r^{0} (those at
// the same distance). Roots and selected neighbors are marked in
// used_bp_roots so the batched labeling skips them.
//
// BUGFIXES vs. previous version:
// 1) When no unused root remains, host 0 used to `continue` WITHOUT calling
//    MPI_Bcast while every other host was already blocked in MPI_Bcast —
//    deadlock. Now r_global (num_v acts as a "no root" sentinel) is always
//    broadcast before any host may skip the round.
// 2) The "no root" branch wrote L[0..num_v), but L holds only num_masters
//    entries (resized in the constructor) — out-of-bounds. Now it fills
//    exactly the num_masters local entries.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_labeling(
const DistGraph &G,
std::vector<uint8_t> &used_bp_roots)
{
// Message unit broadcast per active vertex: its id plus both bit sets.
struct MsgUnitBP {
VertexID v_global;
uint64_t S_n1;
uint64_t S_0;
MsgUnitBP() = default;
MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0)
: v_global(v), S_n1(sn1), S_0(s0) { }
};
EdgeID local_num_edges = G.num_edges_local;
std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every local master
std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
std::vector<VertexID> que(num_masters); // current level's active queue
VertexID end_que = 0;
std::vector<VertexID> tmp_que(num_masters); // next level's queue, swapped with que
VertexID end_tmp_que = 0;
std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // edges between equidistant endpoints
std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // parent-child edges (distance differs by 1)
VertexID r_global = 0; // root r
for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
// Host 0 scans forward for the next unused root.
if (0 == host_id) {
while (r_global < num_v && used_bp_roots[r_global]) {
++r_global;
}
}
// Broadcast the chosen root to every host; num_v means "no root left".
// The broadcast is a collective, so it must run on all hosts before any
// host may skip the round (see BUGFIX 1 above).
MPI_Bcast(&r_global,
1,
V_ID_Type,
0,
MPI_COMM_WORLD);
if (r_global == num_v) {
// No unused root remains: mark this BP slot unreachable for every
// local master (L is indexed by LOCAL id — see BUGFIX 2 above).
for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
L[v_local].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
}
continue;
}
used_bp_roots[r_global] = 1;
fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
// Seed the BFS with the root on its master host.
if (G.get_master_host_id(r_global) == host_id) {
tmp_d[G.get_local_vertex_id(r_global)] = 0;
que[end_que++] = r_global;
}
// Select up to 64 of the root's neighbors; centralized on host 0 since
// the root's adjacency is distributed among hosts.
{
// Collect r_global's local neighbors; edge lists are ordered
// decreasingly by rank, so traverse backwards (lower rank first).
VertexID local_degree = G.local_out_degrees[r_global];
std::vector<VertexID> buffer_send(local_degree);
if (local_degree) {
EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1;
for (VertexID d_i = 0; d_i < local_degree; ++d_i) {
EdgeID e_i = e_i_start - d_i;
buffer_send[d_i] = G.out_edges[e_i];
}
}
std::vector<VertexID> selected_nbrs;
if (0 != host_id) {
// Non-root hosts ship their neighbor lists to host 0 ...
MPI_Instance::send_buffer_2_dst(buffer_send,
0,
SENDING_ROOT_NEIGHBORS,
SENDING_SIZE_ROOT_NEIGHBORS);
// ... and get back the chosen (up to 64) neighbors.
MPI_Instance::recv_buffer_from_src(selected_nbrs,
0,
SENDING_SELECTED_NEIGHBORS,
SENDING_SIZE_SELETED_NEIGHBORS);
} else {
// Host 0 merges every host's sorted neighbor list into all_nbrs.
std::vector<VertexID> all_nbrs(buffer_send);
std::vector<VertexID > buffer_recv;
for (int loc = 0; loc < num_hosts - 1; ++loc) {
MPI_Instance::recv_buffer_from_any(buffer_recv,
SENDING_ROOT_NEIGHBORS,
SENDING_SIZE_ROOT_NEIGHBORS);
if (buffer_recv.empty()) {
continue;
}
buffer_send.resize(buffer_send.size() + buffer_recv.size());
std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin());
all_nbrs.resize(buffer_send.size());
all_nbrs.assign(buffer_send.begin(), buffer_send.end());
}
assert(all_nbrs.size() == G.get_global_out_degree(r_global));
// Pick the first 64 (or fewer) neighbors not already used as roots.
VertexID ns = 0; // number of selected neighbors, up to 64
for (VertexID v_global : all_nbrs) {
if (used_bp_roots[v_global]) {
continue;
}
used_bp_roots[v_global] = 1;
selected_nbrs.push_back(v_global);
if (++ns == 64) {
break;
}
}
// Distribute the selection to the other hosts.
for (int dest = 1; dest < num_hosts; ++dest) {
MPI_Instance::send_buffer_2_dst(selected_nbrs,
dest,
SENDING_SELECTED_NEIGHBORS,
SENDING_SIZE_SELETED_NEIGHBORS);
}
}
// Synchronize used_bp_roots across hosts.
for (VertexID v_global : selected_nbrs) {
used_bp_roots[v_global] = 1;
}
// Each selected neighbor starts at distance 1 with its own bit set
// in S_r^{-1}; only its master host enqueues it (into next level).
for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) {
VertexID v_global = selected_nbrs[v_i];
if (host_id != G.get_master_host_id(v_global)) {
continue;
}
tmp_que[end_tmp_que++] = v_global;
tmp_d[G.get_local_vertex_id(v_global)] = 1;
tmp_s[v_global].first = 1ULL << v_i;
}
}
// Level-synchronous distributed BFS.
VertexID global_num_actives = 1; // any host still has active vertices?
UnweightedDist d = 0; // current BFS level
while (global_num_actives) {
VertexID num_sibling_es = 0, num_child_es = 0;
// Broadcast this level's active masters (with their bit sets) and let
// every host relax its local out-edges of them in parallel.
{
std::vector<MsgUnitBP> buffer_send(end_que);
for (VertexID que_i = 0; que_i < end_que; ++que_i) {
VertexID v_global = que[que_i];
buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second);
}
for (int root = 0; root < num_hosts; ++root) {
std::vector<MsgUnitBP> buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
// Per-message offsets (prefix sums of local degrees) give every
// OpenMP iteration a disjoint slice of the temporary arrays.
VertexID size_buffer_recv = buffer_recv.size();
std::vector<VertexID> offsets_tmp_q(size_buffer_recv);
#pragma omp parallel for
for (VertexID i_q = 0; i_q < size_buffer_recv; ++i_q) {
offsets_tmp_q[i_q] = G.local_out_degrees[buffer_recv[i_q].v_global];
}
VertexID num_neighbors = PADO::prefix_sum_for_offsets(offsets_tmp_q);
std::vector<VertexID> tmp_q(num_neighbors);
std::vector<VertexID> sizes_tmp_q(size_buffer_recv, 0);
std::vector< std::pair<VertexID, VertexID> > tmp_sibling_es(num_neighbors);
std::vector<VertexID> sizes_tmp_sibling_es(size_buffer_recv, 0);
std::vector< std::pair<VertexID, VertexID> > tmp_child_es(num_neighbors);
std::vector<VertexID> sizes_tmp_child_es(size_buffer_recv, 0);
#pragma omp parallel for
for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
const MsgUnitBP &m = buffer_recv[i_m];
VertexID v_global = m.v_global;
if (!G.local_out_degrees[v_global]) {
continue;
}
tmp_s[v_global].first = m.S_n1;
tmp_s[v_global].second = m.S_0;
// Relax v_global's local out-edges into this slice.
bit_parallel_push_labels(
G,
v_global,
tmp_q,
sizes_tmp_q[i_m],
tmp_sibling_es,
sizes_tmp_sibling_es[i_m],
tmp_child_es,
sizes_tmp_child_es[i_m],
offsets_tmp_q[i_m],
tmp_d,
d);
}
{// Compact tmp_sibling_es into sibling_es
idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_sibling_es);
PADO::collect_into_queue(
tmp_sibling_es,
offsets_tmp_q,
sizes_tmp_sibling_es,
total_size_tmp,
sibling_es,
num_sibling_es);
}
{// Compact tmp_child_es into child_es
idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_child_es);
PADO::collect_into_queue(
tmp_child_es,
offsets_tmp_q,
sizes_tmp_child_es,
total_size_tmp,
child_es,
num_child_es);
}
{// Compact tmp_q into tmp_que (the next level's queue)
idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_q);
PADO::collect_into_queue(
tmp_q,
offsets_tmp_q,
sizes_tmp_q,
total_size_tmp,
tmp_que,
end_tmp_que);
}
}
}
// Update the bit sets along sibling and child edges, exchanging the
// refreshed S^0 sets among hosts in between.
{
#pragma omp parallel for
for (VertexID i = 0; i < num_sibling_es; ++i) {
VertexID v = sibling_es[i].first, w = sibling_es[i].second;
__atomic_or_fetch(&tmp_s[v].second, tmp_s[w].first, __ATOMIC_SEQ_CST);
__atomic_or_fetch(&tmp_s[w].second, tmp_s[v].first, __ATOMIC_SEQ_CST);
}
// Pack the updated S^0 sets for broadcasting to the other hosts.
std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es);
#pragma omp parallel for
for (VertexID i = 0; i < num_sibling_es; ++i) {
VertexID v = sibling_es[i].first;
VertexID w = sibling_es[i].second;
buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second);
buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second);
}
for (int root = 0; root < num_hosts; ++root) {
std::vector< std::pair<VertexID, uint64_t> > buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
size_t i_m_bound = buffer_recv.size();
#pragma omp parallel for
for (size_t i_m = 0; i_m < i_m_bound; ++i_m) {
const auto &m = buffer_recv[i_m];
__atomic_or_fetch(&tmp_s[m.first].second, m.second, __ATOMIC_SEQ_CST);
}
}
// Propagate both sets from parents to children.
#pragma omp parallel for
for (VertexID i = 0; i < num_child_es; ++i) {
VertexID v = child_es[i].first, c = child_es[i].second;
__atomic_or_fetch(&tmp_s[c].first, tmp_s[v].first, __ATOMIC_SEQ_CST);
__atomic_or_fetch(&tmp_s[c].second, tmp_s[v].second, __ATOMIC_SEQ_CST);
}
}
// Advance to the next level.
tmp_que.swap(que);
end_que = end_tmp_que;
end_tmp_que = 0;
// MPI_MAX suffices: the result is only ever tested against zero.
MPI_Allreduce(&end_que,
&global_num_actives,
1,
V_ID_Type,
MPI_MAX,
MPI_COMM_WORLD);
++d;
}
// Write this tree's results into the local masters' labels.
#pragma omp parallel for
for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
VertexID v_global = G.get_global_vertex_id(v_local);
L[v_local].bp_dist[i_bpspt] = tmp_d[v_local];
L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1}
L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1}
}
}
}
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//bit_parallel_push_labels(
// const DistGraph &G,
// const VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
// std::vector<UnweightedDist> &dists,
// const UnweightedDist iter)
//{
// EdgeID i_start = G.vertices_idx[v_global];
// EdgeID i_bound = i_start + G.local_out_degrees[v_global];
//// {//test
//// printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]);
//// }
// for (EdgeID i = i_start; i < i_bound; ++i) {
// VertexID tv_global = G.out_edges[i];
// VertexID tv_local = G.get_local_vertex_id(tv_global);
// UnweightedDist td = iter + 1;
//
// if (iter > dists[tv_local]) {
// ;
// } else if (iter == dists[tv_local]) {
// if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph.
// sibling_es[num_sibling_es].first = v_global;
// sibling_es[num_sibling_es].second = tv_global;
// ++num_sibling_es;
// }
// } else { // iter < dists[tv]
// if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
// tmp_que[end_tmp_que++] = tv_global;
// dists[tv_local] = td;
// }
// child_es[num_child_es].first = v_global;
// child_es[num_child_es].second = tv_global;
// ++num_child_es;
//// {
//// printf("host_id: %u num_child_es: %u v_global: %u tv_global: %u\n", host_id, num_child_es, v_global, tv_global);//test
//// }
// }
// }
//
//}
//
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//bit_parallel_labeling(
// const DistGraph &G,
//// std::vector<IndexType> &L,
// std::vector<uint8_t> &used_bp_roots)
//{
// // Class type of Bit-Parallel label message unit.
// struct MsgUnitBP {
// VertexID v_global;
// uint64_t S_n1;
// uint64_t S_0;
//
// MsgUnitBP() = default;
//// MsgUnitBP(MsgUnitBP&& other) = default;
//// MsgUnitBP(MsgUnitBP& other) = default;
//// MsgUnitBP& operator=(const MsgUnitBP& other) = default;
//// MsgUnitBP& operator=(MsgUnitBP&& other) = default;
// MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0)
// : v_global(v), S_n1(sn1), S_0(s0) { }
// };
//// VertexID num_v = G.num_v;
//// EdgeID num_e = G.num_e;
// EdgeID local_num_edges = G.num_edges_local;
//
// std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v
// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
// std::vector<VertexID> que(num_masters); // active queue
// VertexID end_que = 0;
// std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que
// VertexID end_tmp_que = 0;
// std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0)
// std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1.
//
//// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v
//// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
//// std::vector<VertexID> que(num_v); // active queue
//// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0)
//// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1.
//
// VertexID r_global = 0; // root r
// for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
// // Select the root r_global
// if (0 == host_id) {
// while (r_global < num_v && used_bp_roots[r_global]) {
// ++r_global;
// }
// if (r_global == num_v) {
// for (VertexID v = 0; v < num_v; ++v) {
// L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
// }
// continue;
// }
// }
// // Broadcast the r here.
// message_time -= WallTimer::get_time_mark();
// MPI_Bcast(&r_global,
// 1,
// V_ID_Type,
// 0,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// used_bp_roots[r_global] = 1;
//#ifdef DEBUG_MESSAGES_ON
// {//test
// if (0 == host_id) {
// printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt);
// }
// }
//#endif
//
//// VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
// fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
// fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
//
// // Mark the r_global
// if (G.get_master_host_id(r_global) == host_id) {
// tmp_d[G.get_local_vertex_id(r_global)] = 0;
// que[end_que++] = r_global;
// }
// // Select the r_global's 64 neighbors
// {
// // Get r_global's neighbors into buffer_send, rank from low to high.
// VertexID local_degree = G.local_out_degrees[r_global];
// std::vector<VertexID> buffer_send(local_degree);
// if (local_degree) {
// EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1;
// for (VertexID d_i = 0; d_i < local_degree; ++d_i) {
// EdgeID e_i = e_i_start - d_i;
// buffer_send[d_i] = G.out_edges[e_i];
// }
// }
//
// // Get selected neighbors (up to 64)
// std::vector<VertexID> selected_nbrs;
// if (0 != host_id) {
// // Every host other than 0 sends neighbors to host 0
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::send_buffer_2_dst(buffer_send,
// 0,
// SENDING_ROOT_NEIGHBORS,
// SENDING_SIZE_ROOT_NEIGHBORS);
// // Receive selected neighbors from host 0
// MPI_Instance::recv_buffer_from_src(selected_nbrs,
// 0,
// SENDING_SELECTED_NEIGHBORS,
// SENDING_SIZE_SELETED_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
// } else {
// // Host 0
// // Host 0 receives neighbors from others
// std::vector<VertexID> all_nbrs(buffer_send);
// std::vector<VertexID > buffer_recv;
// for (int loc = 0; loc < num_hosts - 1; ++loc) {
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::recv_buffer_from_any(buffer_recv,
// SENDING_ROOT_NEIGHBORS,
// SENDING_SIZE_ROOT_NEIGHBORS);
//// MPI_Instance::receive_dynamic_buffer_from_any(buffer_recv,
//// num_hosts,
//// SENDING_ROOT_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
// if (buffer_recv.empty()) {
// continue;
// }
//
// buffer_send.resize(buffer_send.size() + buffer_recv.size());
// std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin());
// all_nbrs.resize(buffer_send.size());
// all_nbrs.assign(buffer_send.begin(), buffer_send.end());
// }
// assert(all_nbrs.size() == G.get_global_out_degree(r_global));
// // Select 64 (or less) neighbors
// VertexID ns = 0; // number of selected neighbor, default 64
// for (VertexID v_global : all_nbrs) {
// if (used_bp_roots[v_global]) {
// continue;
// }
// used_bp_roots[v_global] = 1;
// selected_nbrs.push_back(v_global);
// if (++ns == 64) {
// break;
// }
// }
// // Send selected neighbors to other hosts
// message_time -= WallTimer::get_time_mark();
// for (int dest = 1; dest < num_hosts; ++dest) {
// MPI_Instance::send_buffer_2_dst(selected_nbrs,
// dest,
// SENDING_SELECTED_NEIGHBORS,
// SENDING_SIZE_SELETED_NEIGHBORS);
// }
// message_time += WallTimer::get_time_mark();
// }
//// {//test
//// printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size());
//// }
//
// // Synchronize the used_bp_roots.
// for (VertexID v_global : selected_nbrs) {
// used_bp_roots[v_global] = 1;
// }
//
// // Mark selected neighbors
// for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) {
// VertexID v_global = selected_nbrs[v_i];
// if (host_id != G.get_master_host_id(v_global)) {
// continue;
// }
// tmp_que[end_tmp_que++] = v_global;
// tmp_d[G.get_local_vertex_id(v_global)] = 1;
// tmp_s[v_global].first = 1ULL << v_i;
// }
// }
//
// // Reduce the global number of active vertices
// VertexID global_num_actives = 1;
// UnweightedDist d = 0;
// while (global_num_actives) {
//// for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
// VertexID num_sibling_es = 0, num_child_es = 0;
//
//
// // Send active masters to mirrors
// {
// std::vector<MsgUnitBP> buffer_send(end_que);
// for (VertexID que_i = 0; que_i < end_que; ++que_i) {
// VertexID v_global = que[que_i];
// buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second);
// }
//// {// test
//// printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size());
//// }
//
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<MsgUnitBP> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const MsgUnitBP &m : buffer_recv) {
// VertexID v_global = m.v_global;
// if (!G.local_out_degrees[v_global]) {
// continue;
// }
// tmp_s[v_global].first = m.S_n1;
// tmp_s[v_global].second = m.S_0;
// // Push labels
// bit_parallel_push_labels(G,
// v_global,
// tmp_que,
// end_tmp_que,
// sibling_es,
// num_sibling_es,
// child_es,
// num_child_es,
// tmp_d,
// d);
// }
//// {// test
//// printf("host_id: %u root: %u done push.\n", host_id, root);
//// }
// }
// }
//
// // Update the sets in tmp_s
// {
//
// for (VertexID i = 0; i < num_sibling_es; ++i) {
// VertexID v = sibling_es[i].first, w = sibling_es[i].second;
// tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!!
// tmp_s[w].second |= tmp_s[v].first;
//
// }
// // Put into the buffer sending to others
// std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es);
//// std::vector< std::vector<MPI_Request> > requests_list(num_hosts - 1);
// for (VertexID i = 0; i < num_sibling_es; ++i) {
// VertexID v = sibling_es[i].first;
// VertexID w = sibling_es[i].second;
//// buffer_send.emplace_back(v, tmp_s[v].second);
//// buffer_send.emplace_back(w, tmp_s[w].second);
// buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second);
// buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second);
// }
// // Send the messages
// for (int root = 0; root < num_hosts; ++root) {
// std::vector< std::pair<VertexID, uint64_t> > buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const std::pair<VertexID, uint64_t> &m : buffer_recv) {
// tmp_s[m.first].second |= m.second;
// }
// }
// for (VertexID i = 0; i < num_child_es; ++i) {
// VertexID v = child_es[i].first, c = child_es[i].second;
// tmp_s[c].first |= tmp_s[v].first;
// tmp_s[c].second |= tmp_s[v].second;
// }
// }
////#ifdef DEBUG_MESSAGES_ON
// {// test
// VertexID global_num_sibling_es;
// VertexID global_num_child_es;
// MPI_Allreduce(&num_sibling_es,
// &global_num_sibling_es,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// MPI_Allreduce(&num_child_es,
// &global_num_child_es,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// if (0 == host_id) {
// printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es);
// }
// }
////#endif
//
// // Swap que and tmp_que
// tmp_que.swap(que);
// end_que = end_tmp_que;
// end_tmp_que = 0;
// MPI_Allreduce(&end_que,
// &global_num_actives,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
//
//// }
// ++d;
// }
//
// for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
// VertexID v_global = G.get_global_vertex_id(v_local);
// L[v_local].bp_dist[i_bpspt] = tmp_d[v_local];
// L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1}
// L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1}
// }
// }
//}
//// Function bit parallel checking:
//// return false if shortest distance exits in bp labels, return true if bp labels cannot cover the distance
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline bool DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_checking(
// VertexID v_id,
// VertexID w_id,
// const std::vector<IndexType> &L,
// UnweightedDist iter)
//{
// // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already
// const IndexType &Lv = L[v_id];
// const IndexType &Lw = L[w_id];
//
// _mm_prefetch(&Lv.bp_dist[0], _MM_HINT_T0);
// _mm_prefetch(&Lv.bp_sets[0][0], _MM_HINT_T0);
// _mm_prefetch(&Lw.bp_dist[0], _MM_HINT_T0);
// _mm_prefetch(&Lw.bp_sets[0][0], _MM_HINT_T0);
// for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// VertexID td = Lv.bp_dist[i] + Lw.bp_dist[i]; // Use type VertexID in case of addition of two INF.
// if (td - 2 <= iter) {
// td +=
// (Lv.bp_sets[i][0] & Lw.bp_sets[i][0]) ? -2 :
// ((Lv.bp_sets[i][0] & Lw.bp_sets[i][1]) |
// (Lv.bp_sets[i][1] & Lw.bp_sets[i][0]))
// ? -1 : 0;
// if (td <= iter) {
//// ++bp_hit_count;
// return false;
// }
// }
// }
// return true;
//}
// Function for initializing at the beginning of a batch.
// For a batch, initialize the temporary labels and real labels of roots;
// traverse roots' labels to initialize the distance buffer;
// unset flag arrays is_active and got_labels.
//
// Parameters (queues/tables are caller-owned scratch reused across batches):
//   G                  : distributed graph (provides master-host lookup and
//                        global<->local vertex ID mapping).
//   short_index        : per-local-vertex short index; indicator bits of all
//                        previously-candidated vertices are reset here, then each
//                        local root's own indicator bit is set.
//   dist_table         : dist_table[root_id][label_global_id] distance entries,
//                        filled from every host's root labels via broadcast rounds.
//   recved_dist_table  : per-root list of which dist_table entries were written,
//                        kept so the caller can reset them later.
//   bp_labels_table    : bit-parallel labels of this batch's roots, gathered from
//                        all hosts via broadcast rounds.
//   active_queue / end_active_queue
//                      : output — local roots enqueued as the initially active set.
//   once_candidated_queue / end_once_candidated_queue / once_candidated
//                      : vertices touched in the previous batch; cleared here.
//   roots_start, roots_size
//                      : this batch's roots are global IDs [roots_start, roots_start + roots_size).
//   used_bp_roots      : vertices already consumed as bit-parallel roots; such
//                        roots are excluded from the batch.
// Returns: MPI_Allreduce of end_active_queue with MPI_MAX, i.e. the maximum
//   per-host active count. NOTE(review): the commented-out MPI_SUM suggests this
//   once returned the global total — confirm callers only test the value against
//   zero (as a loop-termination condition) rather than using it as a true count.
template <VertexID BATCH_SIZE>
inline VertexID DistBVCPLL<BATCH_SIZE>::
initialization(
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
//        VertexID b_id,
        VertexID roots_start,
        VertexID roots_size,
//        std::vector<VertexID> &roots_master_local,
        const std::vector<uint8_t> &used_bp_roots)
{
    // Get the roots_master_local, containing all local roots: local IDs of the
    // batch roots whose master copy lives on this host and which were not already
    // used as bit-parallel roots.
    std::vector<VertexID> roots_master_local;
    VertexID size_roots_master_local;
    VertexID roots_bound = roots_start + roots_size;
    try {
        for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
            if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
                roots_master_local.push_back(G.get_local_vertex_id(r_global));
//                {//test
//                    if (1024 == roots_start && 7 == host_id && 31600 == *roots_master_local.rbegin()) {
//                        printf("S0.0 host_id: %d "
//                               "31600 YES!\n",
//                               host_id);
//                    }
//                }
            }
        }
        size_roots_master_local = roots_master_local.size();
    }
    catch (const std::bad_alloc &) {
        // Out of memory: report index size and system memory for diagnosis, then abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("initialization_roots_master_local: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
    // Short_index: reset the indicator of every vertex candidated in the previous
    // batch, then mark each local root's own position in its indicator.
    // Both phases use OpenMP only above THRESHOLD_PARALLEL to avoid overhead on
    // small batches.
    {
        if (end_once_candidated_queue >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        } else {
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        }
        end_once_candidated_queue = 0;
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
//                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
//                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
            }
        }
    }
//
    // Real Index: each local root labels itself at distance 0 — one new batch
    // entry, one distance entry (dist 0), and one vertex entry (its global ID).
    // Parallel iterations touch distinct L[r_local] entries, so no synchronization
    // is needed here.
    try
    {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                IndexType &Lr = L[r_local];
                Lr.batches.emplace_back(
//                        b_id, // Batch ID
                        Lr.distances.size(), // start_index
                        1); // size
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local));
//                Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                IndexType &Lr = L[r_local];
                Lr.batches.emplace_back(
//                        b_id, // Batch ID
                        Lr.distances.size(), // start_index
                        1); // size
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local));
//                Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
            }
        }
    }
    catch (const std::bad_alloc &) {
        // Out of memory: report index size and system memory for diagnosis, then abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("initialization_real_index: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
    // Dist Table: flatten every local root's existing labels (batches ->
    // distances -> vertices) into buffer_send, then every host in turn broadcasts
    // its buffer; all hosts scatter the received (root_id, label, dist) triples
    // into dist_table and remember them in recved_dist_table for later reset.
    try
    {
//        struct LabelTableUnit {
//            VertexID root_id;
//            VertexID label_global_id;
//            UnweightedDist dist;
//
//            LabelTableUnit() = default;
//
//            LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
//                    root_id(r), label_global_id(l), dist(d) {}
//        };
        std::vector<LabelTableUnit> buffer_send; // buffer for sending
        // Dist_matrix
        {
            // Deprecated Old method: unpack the IndexType structure before sending.
            // Okay, it's back.
            if (size_roots_master_local >= THRESHOLD_PARALLEL) {
                // Offsets for adding labels to buffer_send in parallel.
                // First collect per-root label counts, turn them into prefix-sum
                // offsets (prefix_sum_for_offsets presumably also returns the
                // total — inferred from its use to size buffer_send; confirm),
                // then each thread writes its root's labels at its own offset.
                std::vector<VertexID> offsets_beffer_send(size_roots_master_local);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    offsets_beffer_send[i_r] = L[r_local].vertices.size();
                }
                EdgeID size_labels = PADO::prefix_sum_for_offsets(offsets_beffer_send);
                buffer_send.resize(size_labels);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    VertexID top_location = 0;
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
                    VertexID b_i_bound = Lr.batches.size();
                    _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse batches array
                    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//                        VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
                        VertexID dist_start_index = Lr.batches[b_i].start_index;
                        VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
                        // Traverse distances array
                        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
//                        VertexID dist_bound_index = Lr.distances.size();
//                        for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
                            VertexID v_start_index = Lr.distances[dist_i].start_index;
                            VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                            UnweightedDist dist = Lr.distances[dist_i].dist;
                            // Traverse vertices array
                            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                                // Write into the dist_table
//                                buffer_send[offsets_beffer_send[i_r] + top_location++] =
//                                        LabelTableUnit(r_root_id, Lr.vertices[v_i] + id_offset, dist);
                                buffer_send[offsets_beffer_send[i_r] + top_location++] =
                                        LabelTableUnit(r_root_id, Lr.vertices[v_i], dist);
                            }
                        }
                    }
                }
            } else {
                for (VertexID r_local : roots_master_local) {
                    // The distance table.
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
                    VertexID b_i_bound = Lr.batches.size();
                    _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse batches array
                    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//                        VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
                        VertexID dist_start_index = Lr.batches[b_i].start_index;
                        VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
                        // Traverse distances array
                        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
//                        VertexID dist_bound_index = Lr.distances.size();
//                        for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
                            VertexID v_start_index = Lr.distances[dist_i].start_index;
                            VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                            UnweightedDist dist = Lr.distances[dist_i].dist;
                            // Traverse vertices array
                            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                                // Write into the dist_table
                                buffer_send.emplace_back(r_root_id, Lr.vertices[v_i],
                                                         dist); // buffer for sending
//                                buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
//                                                         dist); // buffer for sending
                            }
                        }
                    }
                }
            }
        }
        // Broadcast local roots labels: every host takes a turn as broadcast root.
        for (int root = 0; root < num_hosts; ++root) {
            std::vector<LabelTableUnit> buffer_recv;
            one_host_bcasts_buffer_to_buffer(root,
                    buffer_send,
                    buffer_recv);
            if (buffer_recv.empty()) {
                continue;
            }
            EdgeID size_buffer_recv = buffer_recv.size();
            if (size_buffer_recv >= THRESHOLD_PARALLEL) {
                // Parallel path: three passes.
                // 1) write dist_table and atomically count labels per root;
                // 2) size each recved_dist_table[root_id] and zero the counters
                //    (reused as enqueue cursors);
                // 3) thread-safe enqueue of label IDs into recved_dist_table.
                std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
                for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                    const LabelTableUnit &l = buffer_recv[i_l];
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    UnweightedDist dist = l.dist;
                    dist_table[root_id][label_global_id] = dist;
                    // Record root_id's number of its received label, for later adding to recved_dist_table
                    __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
//                    recved_dist_table[root_id].push_back(label_global_id);
                }
                // Record the received label in recved_dist_table, for later reset
#pragma omp parallel for
                for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
                    VertexID &size = sizes_recved_root_labels[root_id];
                    if (size) {
                        recved_dist_table[root_id].resize(size);
                        size = 0;
                    }
                }
#pragma omp parallel for
                for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                    const LabelTableUnit &l = buffer_recv[i_l];
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], label_global_id);
                }
            } else {
                for (const LabelTableUnit &l : buffer_recv) {
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    UnweightedDist dist = l.dist;
                    dist_table[root_id][label_global_id] = dist;
                    // Record the received label in recved_dist_table, for later reset
                    recved_dist_table[root_id].push_back(label_global_id);
                }
            }
        }
    }
    catch (const std::bad_alloc &) {
        // Out of memory: report index size and system memory for diagnosis, then abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("initialization_dist_table: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
    // Build the Bit-Parallel Labels Table: pack the BP labels of this host's
    // master roots into buffer_send, then broadcast rounds fill
    // bp_labels_table[r_root] on every host.
    try
    {
//        struct MsgBPLabel {
//            VertexID r_root_id;
//            UnweightedDist bp_dist[BITPARALLEL_SIZE];
//            uint64_t bp_sets[BITPARALLEL_SIZE][2];
//
//            MsgBPLabel() = default;
//            MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
//                    : r_root_id(r)
//            {
//                memcpy(bp_dist, dist, sizeof(bp_dist));
//                memcpy(bp_sets, sets, sizeof(bp_sets));
//            }
//        };
//        std::vector<MPI_Request> requests_send(num_hosts - 1);
        std::vector<MsgBPLabel> buffer_send;
        std::vector<VertexID> roots_queue;
        // Note: unlike roots_master_local above, this list does NOT skip
        // used_bp_roots — presumably intentional so every root's BP label is
        // distributed; confirm against the consumer of bp_labels_table.
        for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
            if (G.get_master_host_id(r_global) != host_id) {
                continue;
            }
            roots_queue.push_back(r_global);
        }
        VertexID size_roots_queue = roots_queue.size();
        if (size_roots_queue >= THRESHOLD_PARALLEL) {
            buffer_send.resize(size_roots_queue);
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_queue; ++i_r) {
                VertexID r_global = roots_queue[i_r];
                VertexID r_local = G.get_local_vertex_id(r_global);
                VertexID r_root = r_global - roots_start;
                // Prepare for sending
//                buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
                buffer_send[i_r] = MsgBPLabel(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
            }
        } else {
//            for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
//                if (G.get_master_host_id(r_global) != host_id) {
//                    continue;
//                }
            for (VertexID r_global : roots_queue) {
                VertexID r_local = G.get_local_vertex_id(r_global);
                VertexID r_root = r_global - roots_start;
                // Local roots
//                memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
//                memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                // Prepare for sending
                buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
            }
        }
        // Every host broadcasts its packed BP labels in turn; all hosts copy the
        // received labels into their local bp_labels_table.
        for (int root = 0; root < num_hosts; ++root) {
            std::vector<MsgBPLabel> buffer_recv;
            one_host_bcasts_buffer_to_buffer(root,
                    buffer_send,
                    buffer_recv);
            if (buffer_recv.empty()) {
                continue;
            }
            VertexID size_buffer_recv = buffer_recv.size();
            if (size_buffer_recv >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
                for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
                    const MsgBPLabel &m = buffer_recv[i_m];
                    VertexID r_root = m.r_root_id;
                    memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
                    memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                }
            } else {
                for (const MsgBPLabel &m : buffer_recv) {
                    VertexID r_root = m.r_root_id;
                    memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
                    memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                }
            }
        }
    }
    catch (const std::bad_alloc &) {
        // Out of memory: report index size and system memory for diagnosis, then abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("initialization_bp_labels_table: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
    // Active_queue: enqueue all local roots as the initial active set.
    VertexID global_num_actives = 0; // global number of active vertices.
    {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                active_queue[i_r] = r_local;
            }
            end_active_queue = size_roots_master_local;
        } else {
            for (VertexID r_local : roots_master_local) {
                active_queue[end_active_queue++] = r_local;
            }
            {
            } // (leftover empty scope)
        }
        // Get the global number of active vertices;
        // NOTE(review): MPI_MAX yields the maximum per-host count, not the global
        // total (the commented-out MPI_SUM would) — adequate as a nonzero test.
//        message_time -= WallTimer::get_time_mark();
        MPI_Allreduce(&end_active_queue,
                &global_num_actives,
                1,
                V_ID_Type,
//                MPI_SUM,
                MPI_MAX,
                MPI_COMM_WORLD);
//        message_time += WallTimer::get_time_mark();
    }
    return global_num_actives;
}
// Sequential Version
//// Function for initializing at the begin of a batch
//// For a batch, initialize the temporary labels and real labels of roots;
//// traverse roots' labels to initialize distance buffer;
//// unset flag arrays is_active and got_labels
//template <VertexID BATCH_SIZE>
//inline VertexID DistBVCPLL<BATCH_SIZE>::
//initialization(
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector< std::vector<UnweightedDist> > &dist_table,
// std::vector< std::vector<VertexID> > &recved_dist_table,
// std::vector<BPLabelType> &bp_labels_table,
// std::vector<VertexID> &active_queue,
// VertexID &end_active_queue,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<uint8_t> &once_candidated,
// VertexID b_id,
// VertexID roots_start,
// VertexID roots_size,
//// std::vector<VertexID> &roots_master_local,
// const std::vector<uint8_t> &used_bp_roots)
//{
// // Get the roots_master_local, containing all local roots.
// std::vector<VertexID> roots_master_local;
// VertexID roots_bound = roots_start + roots_size;
// for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
// if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
// roots_master_local.push_back(G.get_local_vertex_id(r_global));
// }
// }
// // Short_index
// {
// for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
// VertexID v_local = once_candidated_queue[v_i];
// short_index[v_local].indicator_reset();
// once_candidated[v_local] = 0;
// }
// end_once_candidated_queue = 0;
// for (VertexID r_local : roots_master_local) {
// short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
// short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
//// short_index[r_local].indicator.set(G.get_global_vertex_id(r_local) - roots_start); // v itself
//// short_index[r_local].indicator.set(BATCH_SIZE); // v got labels
// }
// }
////
// // Real Index
// {
// for (VertexID r_local : roots_master_local) {
// IndexType &Lr = L[r_local];
// Lr.batches.emplace_back(
// b_id, // Batch ID
// Lr.distances.size(), // start_index
// 1); // size
// Lr.distances.emplace_back(
// Lr.vertices.size(), // start_index
// 1, // size
// 0); // dist
// Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
// }
// }
//
// // Dist Table
// {
//// struct LabelTableUnit {
//// VertexID root_id;
//// VertexID label_global_id;
//// UnweightedDist dist;
////
//// LabelTableUnit() = default;
////
//// LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
//// root_id(r), label_global_id(l), dist(d) {}
//// };
// std::vector<LabelTableUnit> buffer_send; // buffer for sending
// // Dist_matrix
// {
// // Deprecated Old method: unpack the IndexType structure before sending.
// for (VertexID r_local : roots_master_local) {
// // The distance table.
// IndexType &Lr = L[r_local];
// VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
// VertexID b_i_bound = Lr.batches.size();
// _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
// _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
// _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
// // Traverse batches array
// for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
// VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
// VertexID dist_start_index = Lr.batches[b_i].start_index;
// VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
// // Traverse distances array
// for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
// VertexID v_start_index = Lr.distances[dist_i].start_index;
// VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
// UnweightedDist dist = Lr.distances[dist_i].dist;
// // Traverse vertices array
// for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
// // Write into the dist_table
//// dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = dist; // distance table
// buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
// dist); // buffer for sending
// }
// }
// }
// }
// }
// // Broadcast local roots labels
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<LabelTableUnit> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const LabelTableUnit &l : buffer_recv) {
// VertexID root_id = l.root_id;
// VertexID label_global_id = l.label_global_id;
// UnweightedDist dist = l.dist;
// dist_table[root_id][label_global_id] = dist;
// // Record the received label in recved_dist_table, for later reset
// recved_dist_table[root_id].push_back(label_global_id);
// }
// }
// }
//
// // Build the Bit-Parallel Labels Table
// {
//// struct MsgBPLabel {
//// VertexID r_root_id;
//// UnweightedDist bp_dist[BITPARALLEL_SIZE];
//// uint64_t bp_sets[BITPARALLEL_SIZE][2];
////
//// MsgBPLabel() = default;
//// MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
//// : r_root_id(r)
//// {
//// memcpy(bp_dist, dist, sizeof(bp_dist));
//// memcpy(bp_sets, sets, sizeof(bp_sets));
//// }
//// };
//// std::vector<MPI_Request> requests_send(num_hosts - 1);
// std::vector<MsgBPLabel> buffer_send;
// for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
// if (G.get_master_host_id(r_global) != host_id) {
// continue;
// }
// VertexID r_local = G.get_local_vertex_id(r_global);
// VertexID r_root = r_global - roots_start;
// // Local roots
//// memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
//// memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
// // Prepare for sending
// buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
// }
//
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<MsgBPLabel> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const MsgBPLabel &m : buffer_recv) {
// VertexID r_root = m.r_root_id;
// memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
// memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
// }
// }
// }
//
// // TODO: parallel enqueue
// // Active_queue
// VertexID global_num_actives = 0; // global number of active vertices.
// {
// for (VertexID r_local : roots_master_local) {
// active_queue[end_active_queue++] = r_local;
// }
// // Get the global number of active vertices;
// message_time -= WallTimer::get_time_mark();
// MPI_Allreduce(&end_active_queue,
// &global_num_actives,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// }
//
// return global_num_actives;
//}
//// Function: push v_head_global's newly added labels to its all neighbors.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//push_single_label(
// VertexID v_head_global,
// VertexID label_root_id,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter)
//{
// const BPLabelType &L_label = bp_labels_table[label_root_id];
// VertexID label_global_id = label_root_id + roots_start;
// EdgeID e_i_start = G.vertices_idx[v_head_global];
// EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
// for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
// VertexID v_tail_global = G.out_edges[e_i];
// if (used_bp_roots[v_tail_global]) {
// continue;
// }
// if (v_tail_global < roots_start) { // all remaining v_tail_global has higher rank than any roots, then no roots can push new labels to it.
// return;
// }
//
// VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
// const IndexType &L_tail = L[v_tail_local];
// if (v_tail_global <= label_global_id) {
// // remaining v_tail_global has higher rank than the label
// return;
// }
// ShortIndex &SI_v_tail = short_index[v_tail_local];
// if (SI_v_tail.indicator[label_root_id]) {
// // The label is already selected before
// continue;
// }
// // Record label_root_id as once selected by v_tail_global
// SI_v_tail.indicator.set(label_root_id);
// // Add into once_candidated_queue
//
// if (!once_candidated[v_tail_local]) {
// // If v_tail_global is not in the once_candidated_queue yet, add it in
// once_candidated[v_tail_local] = true;
// once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
// }
// // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
// // ++total_check_count;
//// const IndexType &L_label = L[label_global_id];
//// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
//// bp_checking_ins_count.measure_start();
// bool no_need_add = false;
// for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
// if (td - 2 <= iter) {
// td +=
// (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
// ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
// (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
// ? -1 : 0;
// if (td <= iter) {
// no_need_add = true;
//// ++bp_hit_count;
// break;
// }
// }
// }
// if (no_need_add) {
//// bp_checking_ins_count.measure_stop();
// continue;
// }
//// bp_checking_ins_count.measure_stop();
// if (SI_v_tail.is_candidate[label_root_id]) {
// continue;
// }
// SI_v_tail.is_candidate[label_root_id] = true;
// SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
//
// if (!got_candidates[v_tail_local]) {
// // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
// got_candidates[v_tail_local] = true;
// got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
// }
// }
//// {// Just for the complain from the compiler
//// assert(iter >= iter);
//// }
//}
// Parallel scatter phase: broadcast this host's active masters' newly inserted
// labels to all hosts, then push every received label to the local neighbors
// of its owning vertex, gathering new candidate vertices.
// Overall flow:
//   1. Pack phase (only if local_size != 0): count each active vertex's new
//      labels, prefix-sum the counts into write offsets, then fill
//      buffer_send_indices (vertex, #labels) and buffer_send_labels
//      (batch-local label root IDs) in parallel.
//   2. Exchange phase: each host in turn broadcasts its two buffers; every
//      receiver pushes the labels via local_push_labels_para(), writing new
//      candidates into per-sender slices of temporary queues.
//   3. Merge phase: collect the temporary queues into got_candidates_queue
//      and once_candidated_queue.
// NOTE(review): global_start/global_size/local_size are presumed to describe
// this host's chunk of active_queue -- confirm against the caller.
template<VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_pushing_para(
const DistGraph &G,
const VertexID roots_start,
const std::vector<uint8_t> &used_bp_roots,
const std::vector<VertexID> &active_queue,
const VertexID global_start,
const VertexID global_size,
const VertexID local_size,
// const VertexID start_active_queue,
// const VertexID size_active_queue,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<ShortIndex> &short_index,
const std::vector<BPLabelType> &bp_labels_table,
std::vector<uint8_t> &got_candidates,
std::vector<uint8_t> &is_active,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const UnweightedDist iter)
{
std::vector<std::pair<VertexID, VertexID> > buffer_send_indices;
//.first: Vertex ID
//.second: size of labels
std::vector<VertexID> buffer_send_labels;
if (local_size) {
// This host still owns part of the chunk; take at most local_size entries.
const VertexID start_active_queue = global_start;
const VertexID size_active_queue = global_size <= local_size ?
global_size :
local_size;
const VertexID bound_active_queue = start_active_queue + size_active_queue;
buffer_send_indices.resize(size_active_queue);
// Prepare offset for inserting: first record every vertex's count of
// labels inserted in the most recent iteration.
std::vector<VertexID> offsets_buffer_locs(size_active_queue);
#pragma omp parallel for
for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
VertexID v_head_local = active_queue[i_q];
is_active[v_head_local] = 0; // reset is_active
const IndexType &Lv = L[v_head_local];
offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
}
// Counts become exclusive write offsets; the return value is the total.
EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
try {
buffer_send_labels.resize(size_buffer_send_labels);
}
catch (const std::bad_alloc &) {
// Out of memory: report host and sizes, then abort (no recovery here).
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("schedule_label_pushing_para.buffer_send_labels: bad_alloc "
"host_id: %d "
"size_buffer_send_labels: %lu "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
size_buffer_send_labels,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// Build buffer_send_labels by parallel inserting
#pragma omp parallel for
for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
VertexID v_head_local = active_queue[i_q];
is_active[v_head_local] = 0; // reset is_active (already cleared in the sizing loop above; harmless)
VertexID v_head_global = G.get_global_vertex_id(v_head_local);
const IndexType &Lv = L[v_head_local];
// Prepare the buffer_send_indices
VertexID tmp_i_q = i_q - start_active_queue;
buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
// These 2 index are used for traversing v_head's last inserted labels
VertexID l_i_start = Lv.distances.rbegin()->start_index;
VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
VertexID top_labels = offsets_buffer_locs[tmp_i_q];
for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// Labels are sent as batch-local root IDs (global ID minus roots_start).
VertexID label_root_id = Lv.vertices[l_i] - roots_start;
buffer_send_labels[top_labels++] = label_root_id;
// buffer_send_labels.push_back(label_root_id);
}
}
}
////////////////////////////////////////////////
////
// const VertexID bound_active_queue = start_active_queue + size_active_queue;
// std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(size_active_queue);
// //.first: Vertex ID
// //.second: size of labels
// std::vector<VertexID> buffer_send_labels;
// // Prepare masters' newly added labels for sending
// // Parallel Version
// // Prepare offset for inserting
// std::vector<VertexID> offsets_buffer_locs(size_active_queue);
//#pragma omp parallel for
// for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
// VertexID v_head_local = active_queue[i_q];
// is_active[v_head_local] = 0; // reset is_active
// const IndexType &Lv = L[v_head_local];
// offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
// }
// EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
//// {// test
//// if (0 == host_id) {
//// double memtotal = 0;
//// double memfree = 0;
//// double bytes_buffer_send_labels = size_buffer_send_labels * sizeof(VertexID);
//// PADO::Utils::system_memory(memtotal, memfree);
//// printf("bytes_buffer_send_labels: %fGB memtotal: %fGB memfree: %fGB\n",
//// bytes_buffer_send_labels / (1 << 30), memtotal / 1024, memfree / 1024);
//// }
//// }
// buffer_send_labels.resize(size_buffer_send_labels);
//// {// test
//// if (0 == host_id) {
//// printf("buffer_send_labels created.\n");
//// }
//// }
//
// // Build buffer_send_labels by parallel inserting
//#pragma omp parallel for
// for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
// VertexID tmp_i_q = i_q - start_active_queue;
// VertexID v_head_local = active_queue[i_q];
// is_active[v_head_local] = 0; // reset is_active
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// const IndexType &Lv = L[v_head_local];
// // Prepare the buffer_send_indices
// buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin()->start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
// VertexID top_labels = offsets_buffer_locs[tmp_i_q];
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// buffer_send_labels[top_labels++] = label_root_id;
//// buffer_send_labels.push_back(label_root_id);
// }
// }
//// end_active_queue = 0;
////
////////////////////////////////////////////////
// Exchange phase: every host broadcasts in turn; all hosts (including the
// sender itself) process the received labels against their local neighbors.
for (int root = 0; root < num_hosts; ++root) {
// Get the indices
std::vector<std::pair<VertexID, VertexID> > indices_buffer;
one_host_bcasts_buffer_to_buffer(root,
buffer_send_indices,
indices_buffer);
if (indices_buffer.empty()) {
continue;
}
// Get the labels
std::vector<VertexID> labels_buffer;
one_host_bcasts_buffer_to_buffer(root,
buffer_send_labels,
labels_buffer);
VertexID size_indices_buffer = indices_buffer.size();
// Prepare the offsets for reading indices_buffer: after the prefix sum,
// starts_locs_index[i] is the start of vertex i's labels in labels_buffer.
std::vector<EdgeID> starts_locs_index(size_indices_buffer);
#pragma omp parallel for
for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
starts_locs_index[i_i] = e.second;
}
EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index);
// Prepare the offsets for inserting v_tails into queue; each sender vertex
// can enqueue at most local_out_degrees[v] neighbors, so that bounds its slice.
std::vector<VertexID> offsets_tmp_queue(size_indices_buffer);
#pragma omp parallel for
for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
offsets_tmp_queue[i_i] = G.local_out_degrees[e.first];
}
EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue);
std::vector<VertexID> tmp_got_candidates_queue;
std::vector<VertexID> sizes_tmp_got_candidates_queue;
std::vector<VertexID> tmp_once_candidated_queue;
std::vector<VertexID> sizes_tmp_once_candidated_queue;
try {
tmp_got_candidates_queue.resize(num_ngbrs);
sizes_tmp_got_candidates_queue.resize(size_indices_buffer, 0);
tmp_once_candidated_queue.resize(num_ngbrs);
sizes_tmp_once_candidated_queue.resize(size_indices_buffer, 0);
}
catch (const std::bad_alloc &) {
// Out of memory: report host and sizes, then abort.
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("schedule_label_pushing_para.tmp_queues: bad_alloc "
"host_id: %d "
"num_ngbrs: %lu "
"size_indices_buffer: %u "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
num_ngbrs,
size_indices_buffer,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// Push every received vertex's labels to its local neighbors in parallel;
// each iteration writes only to its own slice of the temporary queues.
#pragma omp parallel for
for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
VertexID v_head_global = indices_buffer[i_i].first;
EdgeID start_index = starts_locs_index[i_i];
EdgeID bound_index = i_i != size_indices_buffer - 1 ?
starts_locs_index[i_i + 1] : total_recved_labels;
// Skip vertices with no locally stored out-edges.
if (G.local_out_degrees[v_head_global]) {
local_push_labels_para(
v_head_global,
start_index,
bound_index,
roots_start,
labels_buffer,
G,
short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
tmp_got_candidates_queue,
sizes_tmp_got_candidates_queue[i_i],
offsets_tmp_queue[i_i],
got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
tmp_once_candidated_queue,
sizes_tmp_once_candidated_queue[i_i],
once_candidated,
bp_labels_table,
used_bp_roots,
iter);
}
}
{// Collect elements from tmp_got_candidates_queue to got_candidates_queue
VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue);
PADO::collect_into_queue(
tmp_got_candidates_queue,
offsets_tmp_queue, // the locations for reading tmp_got_candidates_queue
sizes_tmp_got_candidates_queue, // the locations for writing got_candidates_queue
total_new,
got_candidates_queue,
end_got_candidates_queue);
}
{// Collect elements from tmp_once_candidated_queue to once_candidated_queue
VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue);
PADO::collect_into_queue(
tmp_once_candidated_queue,
offsets_tmp_queue, // the locations for reading tmp_once_candidated_queue
sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue
total_new,
once_candidated_queue,
end_once_candidated_queue);
}
}
}
// Function: pushes v_head's labels to v_head's every (master) neighbor.
// Thread-safe variant intended to run inside an OpenMP parallel-for, one call
// per received (v_head_global, label-range) pair.  All shared per-vertex flags
// (indicator, once_candidated, is_candidate, got_candidates) are claimed with
// compare-and-swap, and newly discovered vertices go into this call's private
// slice (starting at offset_tmp_queue) of the temporary queues; the caller
// later merges those slices into the global queues.
// Fix: the original mixed unqualified CAS with PADO::CAS; all three call sites
// are now uniformly qualified as PADO::CAS.
//
// v_head_global: global ID of the vertex whose labels are pushed.
// [start_index, bound_index): range in labels_buffer holding v_head's newly
//     inserted label root IDs (relative to roots_start).
// size_tmp_got_candidates_queue / size_tmp_once_candidated_queue: this call's
//     private output counters into the temporary queues.
// iter: current level; a label is pruned if the bit-parallel check proves a
//     distance <= iter already exists between label and tail.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_para(
    const VertexID v_head_global,
    const EdgeID start_index,
    const EdgeID bound_index,
    const VertexID roots_start,
    const std::vector<VertexID> &labels_buffer,
    const DistGraph &G,
    std::vector<ShortIndex> &short_index,
    std::vector<VertexID> &tmp_got_candidates_queue,
    VertexID &size_tmp_got_candidates_queue,
    const VertexID offset_tmp_queue,
    std::vector<uint8_t> &got_candidates,
    std::vector<VertexID> &tmp_once_candidated_queue,
    VertexID &size_tmp_once_candidated_queue,
    std::vector<uint8_t> &once_candidated,
    const std::vector<BPLabelType> &bp_labels_table,
    const std::vector<uint8_t> &used_bp_roots,
    const UnweightedDist iter)
{
    // Traverse v_head's every locally stored neighbor v_tail.
    EdgeID e_i_start = G.vertices_idx[v_head_global];
    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
        VertexID v_tail_global = G.out_edges[e_i];
        if (used_bp_roots[v_tail_global]) {
            continue;
        }
        if (v_tail_global < roots_start) {
            // Neighbors are rank-ordered: all remaining tails have higher
            // rank than any root, so no root can push new labels to them.
            return;
        }
        VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
        const IndexType &L_tail = L[v_tail_local];
        ShortIndex &SI_v_tail = short_index[v_tail_local];
        // Traverse v_head's last inserted labels.
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            VertexID label_root_id = labels_buffer[l_i];
            VertexID label_global_id = label_root_id + roots_start;
            if (v_tail_global <= label_global_id) {
                // v_tail_global has higher rank than the label.
                continue;
            }
            // Claim the indicator flag atomically: exactly one thread records
            // label_root_id as once selected by v_tail_global.
            if (!PADO::CAS(SI_v_tail.indicator.data() + label_root_id,
                           static_cast<uint8_t>(0),
                           static_cast<uint8_t>(1))) {
                // The label was already selected before.
                continue;
            }
            // Add v_tail into the once-candidated slice (CAS prevents
            // duplicate enqueues from concurrent pushers).
            if (!once_candidated[v_tail_local]) {
                if (PADO::CAS(once_candidated.data() + v_tail_local,
                              static_cast<uint8_t>(0),
                              static_cast<uint8_t>(1))) {
                    tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail_local;
                }
            }
            // Bit-parallel checking: prune the label if some BP root already
            // certifies dist(label_global_id, v_tail_global) <= iter.
            // NOTE(review): "td - 2 <= iter" appears to rely on unsigned
            // wrap-around when td < 2 (standard bit-parallel shortcut) --
            // confirm VertexID is unsigned.
            const BPLabelType &L_label = bp_labels_table[label_root_id];
            bool no_need_add = false;
            for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
                VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
                if (td - 2 <= iter) {
                    td +=
                        (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
                        ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
                         (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
                        ? -1 : 0;
                    if (td <= iter) {
                        no_need_add = true;
                        break;
                    }
                }
            }
            if (no_need_add) {
                continue;
            }
            // Record label_root_id as a candidate of v_tail; the CAS winner
            // enqueues it into the tail's candidates queue thread-safely.
            if (!SI_v_tail.is_candidate[label_root_id]) {
                if (PADO::CAS(SI_v_tail.is_candidate.data() + label_root_id,
                              static_cast<uint8_t>(0),
                              static_cast<uint8_t>(1))) {
                    PADO::TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id);
                }
            }
            // Add v_tail into the got-candidates slice (prevent duplicates).
            if (!got_candidates[v_tail_local]) {
                if (PADO::CAS(got_candidates.data() + v_tail_local,
                              static_cast<uint8_t>(0),
                              static_cast<uint8_t>(1))) {
                    tmp_got_candidates_queue[offset_tmp_queue + size_tmp_got_candidates_queue++] = v_tail_local;
                }
            }
        }
    }
}
// Function: pushes v_head's labels to v_head's every (master) neighbor.
// Sequential variant: offers each label in labels_buffer[start_index,
// bound_index) to every locally stored neighbor of v_head_global, records
// newly touched vertices in once_candidated_queue, and records surviving
// labels (not pruned by the bit-parallel check, not already candidates) in
// each tail's candidate queue plus got_candidates_queue.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_seq(
    VertexID v_head_global,
    EdgeID start_index,
    EdgeID bound_index,
    VertexID roots_start,
    const std::vector<VertexID> &labels_buffer,
    const DistGraph &G,
    std::vector<ShortIndex> &short_index,
    std::vector<VertexID> &got_candidates_queue,
    VertexID &end_got_candidates_queue,
    std::vector<uint8_t> &got_candidates,
    std::vector<VertexID> &once_candidated_queue,
    VertexID &end_once_candidated_queue,
    std::vector<uint8_t> &once_candidated,
    const std::vector<BPLabelType> &bp_labels_table,
    const std::vector<uint8_t> &used_bp_roots,
    const UnweightedDist iter)
{
    // Scan all locally stored out-edges of v_head_global.
    const EdgeID first_edge = G.vertices_idx[v_head_global];
    const EdgeID last_edge = first_edge + G.local_out_degrees[v_head_global];
    for (EdgeID edge_i = first_edge; edge_i < last_edge; ++edge_i) {
        const VertexID tail_global = G.out_edges[edge_i];
        if (used_bp_roots[tail_global]) {
            continue; // tail is covered by a bit-parallel root
        }
        if (tail_global < roots_start) {
            // Neighbors are rank-ordered: every remaining tail outranks all
            // roots, so no further label can be pushed to any of them.
            return;
        }
        // Offer each of v_head's freshly received labels to this tail.
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            const VertexID label_root_id = labels_buffer[l_i];
            if (tail_global <= label_root_id + roots_start) {
                continue; // tail outranks the label itself
            }
            const VertexID tail_local = G.get_local_vertex_id(tail_global);
            ShortIndex &si_tail = short_index[tail_local];
            if (si_tail.indicator[label_root_id]) {
                continue; // label was already offered to this tail before
            }
            si_tail.indicator[label_root_id] = 1; // mark as once-offered
            if (!once_candidated[tail_local]) {
                // First time this tail is touched: remember it for reset.
                once_candidated[tail_local] = 1;
                once_candidated_queue[end_once_candidated_queue++] = tail_local;
            }
            // Bit-parallel pruning: skip the label when some BP root already
            // certifies a path of length <= iter between label and tail.
            const IndexType &L_tail = L[tail_local];
            const BPLabelType &L_label = bp_labels_table[label_root_id];
            bool pruned = false;
            for (VertexID r = 0; r < BITPARALLEL_SIZE; ++r) {
                VertexID td = L_label.bp_dist[r] + L_tail.bp_dist[r];
                if (td - 2 <= iter) {
                    if (L_label.bp_sets[r][0] & L_tail.bp_sets[r][0]) {
                        td -= 2;
                    } else if ((L_label.bp_sets[r][0] & L_tail.bp_sets[r][1])
                            | (L_label.bp_sets[r][1] & L_tail.bp_sets[r][0])) {
                        td -= 1;
                    }
                    if (td <= iter) {
                        pruned = true;
                        break;
                    }
                }
            }
            if (pruned) {
                continue;
            }
            if (si_tail.is_candidate[label_root_id]) {
                continue; // already a candidate for this tail
            }
            si_tail.is_candidate[label_root_id] = 1;
            si_tail.candidates_que[si_tail.end_candidates_que++] = label_root_id;
            if (!got_candidates[tail_local]) {
                // Enqueue the tail once for the later insertion phase.
                got_candidates[tail_local] = 1;
                got_candidates_queue[end_got_candidates_queue++] = tail_local;
            }
        }
    }
}
//// Function: pushes v_head's labels to v_head's every (master) neighbor
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//local_push_labels(
// VertexID v_head_local,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter)
//{
// // The data structure of a message
//// std::vector< LabelUnitType > buffer_recv;
// const IndexType &Lv = L[v_head_local];
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin() -> start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin() -> size;
// // Traverse v_head's every neighbor v_tail
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// EdgeID e_i_start = G.vertices_idx[v_head_global];
// EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
// for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
// VertexID v_tail_global = G.out_edges[e_i];
// if (used_bp_roots[v_tail_global]) {
// continue;
// }
// if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
// return;
// }
//
// // Traverse v_head's last inserted labels
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// VertexID label_global_id = label_root_id + roots_start;
// if (v_tail_global <= label_global_id) {
// // v_tail_global has higher rank than the label
// continue;
// }
// VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
// const IndexType &L_tail = L[v_tail_local];
// ShortIndex &SI_v_tail = short_index[v_tail_local];
// if (SI_v_tail.indicator[label_root_id]) {
// // The label is already selected before
// continue;
// }
// // Record label_root_id as once selected by v_tail_global
// SI_v_tail.indicator.set(label_root_id);
// // Add into once_candidated_queue
//
// if (!once_candidated[v_tail_local]) {
// // If v_tail_global is not in the once_candidated_queue yet, add it in
// once_candidated[v_tail_local] = true;
// once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
// }
//
// // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
// // ++total_check_count;
//// const IndexType &L_label = L[label_global_id];
//// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
//// bp_checking_ins_count.measure_start();
// const BPLabelType &L_label = bp_labels_table[label_root_id];
// bool no_need_add = false;
// for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
// if (td - 2 <= iter) {
// td +=
// (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
// ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
// (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
// ? -1 : 0;
// if (td <= iter) {
// no_need_add = true;
//// ++bp_hit_count;
// break;
// }
// }
// }
// if (no_need_add) {
//// bp_checking_ins_count.measure_stop();
// continue;
// }
//// bp_checking_ins_count.measure_stop();
// if (SI_v_tail.is_candidate[label_root_id]) {
// continue;
// }
// SI_v_tail.is_candidate[label_root_id] = true;
// SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
//
// if (!got_candidates[v_tail_local]) {
// // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
// got_candidates[v_tail_local] = true;
// got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
// }
// }
// }
//
// {
// assert(iter >= iter);
// }
//}
//// DEPRECATED Function: in the scatter phase, synchronize local masters to mirrors on other hosts
//// Has some mysterious problem: when I call this function, some hosts will receive wrong messages; when I copy all
//// code of this function into the caller, all messages become right.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//sync_masters_2_mirrors(
// const DistGraph &G,
// const std::vector<VertexID> &active_queue,
// VertexID end_active_queue,
// std::vector< std::pair<VertexID, VertexID> > &buffer_send,
// std::vector<MPI_Request> &requests_send
//)
//{
//// std::vector< std::pair<VertexID, VertexID> > buffer_send;
// // pair.first: Owener vertex ID of the label
// // pair.first: label vertex ID of the label
// // Prepare masters' newly added labels for sending
// for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
// VertexID v_head_local = active_queue[i_q];
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// const IndexType &Lv = L[v_head_local];
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin()->start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// buffer_send.emplace_back(v_head_global, label_root_id);
//// {//test
//// if (1 == host_id) {
//// printf("@%u host_id: %u v_head_global: %u\n", __LINE__, host_id, v_head_global);//
//// }
//// }
// }
// }
// {
// if (!buffer_send.empty()) {
// printf("@%u host_id: %u sync_masters_2_mirrors: buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second);
// }
// assert(!requests_send.empty());
// }
//
// // Send messages
// for (int loc = 0; loc < num_hosts - 1; ++loc) {
// int dest_host_id = G.buffer_send_list_loc_2_master_host_id(loc);
// MPI_Isend(buffer_send.data(),
// MPI_Instance::get_sending_size(buffer_send),
// MPI_CHAR,
// dest_host_id,
// SENDING_MASTERS_TO_MIRRORS,
// MPI_COMM_WORLD,
// &requests_send[loc]);
// {
// if (!buffer_send.empty()) {
// printf("@%u host_id: %u dest_host_id: %u buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, dest_host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second);
// }
// }
// }
//}
// Parallel insert phase: process the chunk [start_got_candidates_queue,
// start_got_candidates_queue + size_got_candidates_queue) of
// got_candidates_queue.  For each candidate vertex, every candidate label is
// tested with distance_query(); passing labels are inserted via
// insert_label_only_para() and the vertex becomes active.  Labels inserted for
// vertices that are batch roots are also appended to buffer_send.  New active
// vertices and outgoing labels are first written into per-slot temporary
// buffers, then merged into active_queue / buffer_send at the end.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_inserting_para(
const DistGraph &G,
const VertexID roots_start,
const VertexID roots_size,
std::vector<ShortIndex> &short_index,
const std::vector< std::vector<UnweightedDist> > &dist_table,
const std::vector<VertexID> &got_candidates_queue,
const VertexID start_got_candidates_queue,
const VertexID size_got_candidates_queue,
std::vector<uint8_t> &got_candidates,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<uint8_t> &is_active,
std::vector< std::pair<VertexID, VertexID> > &buffer_send,
const VertexID iter)
{
const VertexID bound_got_candidates_queue = start_got_candidates_queue + size_got_candidates_queue;
std::vector<VertexID> offsets_tmp_active_queue;
std::vector<VertexID> tmp_active_queue;
std::vector<VertexID> sizes_tmp_active_queue;
std::vector<EdgeID> offsets_tmp_buffer_send;
std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send;
std::vector<EdgeID> sizes_tmp_buffer_send;
EdgeID total_send_labels;
try {
// Each queue slot can activate at most one vertex, so slot i writes at
// offset i of tmp_active_queue.
offsets_tmp_active_queue.resize(size_got_candidates_queue);
#pragma omp parallel for
for (VertexID i_q = 0; i_q < size_got_candidates_queue; ++i_q) {
offsets_tmp_active_queue[i_q] = i_q;
}
tmp_active_queue.resize(size_got_candidates_queue);
sizes_tmp_active_queue.resize(size_got_candidates_queue,
0); // Size will only be 0 or 1, but it will become offsets eventually.
// Prepare for parallel buffer_send
// std::vector<EdgeID> offsets_tmp_buffer_send(size_got_candidates_queue);
offsets_tmp_buffer_send.resize(size_got_candidates_queue);
#pragma omp parallel for
for (VertexID i_q = start_got_candidates_queue; i_q < bound_got_candidates_queue; ++i_q) {
VertexID v_id_local = got_candidates_queue[i_q];
VertexID v_global_id = G.get_global_vertex_id(v_id_local);
VertexID tmp_i_q = i_q - start_got_candidates_queue;
if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) {
// If v_global_id is root, its new labels should be put into buffer_send;
// reserve room for its entire candidate queue (upper bound).
offsets_tmp_buffer_send[tmp_i_q] = short_index[v_id_local].end_candidates_que;
} else {
offsets_tmp_buffer_send[tmp_i_q] = 0;
}
}
// Counts become exclusive write offsets; the return value is the total.
total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send);
tmp_buffer_send.resize(total_send_labels);
sizes_tmp_buffer_send.resize(size_got_candidates_queue, 0);
}
catch (const std::bad_alloc &) {
// Out of memory while sizing the temporaries: report and abort.
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("L%u_tmp_buffer_send: bad_alloc "
"host_id: %d "
"iter: %u "
"size_got_candidates_queue: %u "
"total_send_labels: %lu "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
__LINE__,
host_id,
iter,
size_got_candidates_queue,
total_send_labels,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// Main parallel insertion loop: each iteration touches only its own vertex
// and its own slices of the temporary buffers.
#pragma omp parallel for
for (VertexID i_queue = start_got_candidates_queue; i_queue < bound_got_candidates_queue; ++i_queue) {
VertexID v_id_local = got_candidates_queue[i_queue];
VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
got_candidates[v_id_local] = 0; // reset got_candidates
// Traverse v_id's all candidates
VertexID tmp_i_queue = i_queue - start_got_candidates_queue;
VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
short_index[v_id_local].is_candidate[cand_root_id] = 0; // reset for the next round
// Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
if (distance_query(
cand_root_id,
v_id_local,
roots_start,
// L,
dist_table,
iter)) {
if (!is_active[v_id_local]) {
// Vertex gains a new label: make it active for the next level.
is_active[v_id_local] = 1;
// active_queue[end_active_queue++] = v_id_local;
tmp_active_queue[tmp_i_queue + sizes_tmp_active_queue[tmp_i_queue]++] = v_id_local;
}
++inserted_count;
// The candidate cand_root_id needs to be added into v_id's label
insert_label_only_para(
cand_root_id,
v_id_local,
roots_start,
roots_size,
G,
tmp_buffer_send,
sizes_tmp_buffer_send[tmp_i_queue],
offsets_tmp_buffer_send[tmp_i_queue]);
// buffer_send);
}
}
short_index[v_id_local].end_candidates_que = 0; // all candidates consumed
if (0 != inserted_count) {
// Update other arrays in L[v_id] if new labels were inserted in this iteration
update_label_indices(
v_id_local,
inserted_count,
// L,
short_index,
// b_id,
iter);
}
}
{// Collect elements from tmp_active_queue to active_queue
VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue);
PADO::collect_into_queue(
tmp_active_queue,
offsets_tmp_active_queue,
sizes_tmp_active_queue,
total_new,
active_queue,
end_active_queue);
}
{// Collect elements from tmp_buffer_send to buffer_send
EdgeID old_size_buffer_send = buffer_send.size();
EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send);
try {
buffer_send.resize(total_new + old_size_buffer_send);
}
catch (const std::bad_alloc &) {
// Out of memory while growing buffer_send: report and abort.
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("L%u_buffer_send: bad_alloc "
"iter: %u "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
__LINE__,
iter,
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// EdgeID zero_size = 0;
// Append the new labels after buffer_send's pre-existing contents.
PADO::collect_into_queue(
tmp_buffer_send,
offsets_tmp_buffer_send,
sizes_tmp_buffer_send,
total_new,
buffer_send,
old_size_buffer_send);
// zero_size);
}
}
// Distance query: scan vertex v_id_local's existing labels and check, through
// dist_table, whether some hub already certifies a path of length <= iter
// between v_id_local and candidate root cand_root_id.
// Returns false if such a shorter-or-equal path already exists;
// returns true if cand_root_id may be inserted into v_id_local's labels.
template <VertexID BATCH_SIZE>
inline bool DistBVCPLL<BATCH_SIZE>::
distance_query(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        // const std::vector<IndexType> &L,
        const std::vector< std::vector<UnweightedDist> > &dist_table,
        UnweightedDist iter)
{
    const VertexID cand_real_id = cand_root_id + roots_start;
    const IndexType &labels = L[v_id_local];
    // Warm up the caches for the three label arrays before scanning.
    _mm_prefetch(&labels.batches[0], _MM_HINT_T0);
    _mm_prefetch(&labels.distances[0], _MM_HINT_T0);
    _mm_prefetch(&labels.vertices[0], _MM_HINT_T0);
    const VertexID num_batches = labels.batches.size();
    for (VertexID batch_i = 0; batch_i < num_batches; ++batch_i) {
        const VertexID dist_begin = labels.batches[batch_i].start_index;
        const VertexID dist_end = dist_begin + labels.batches[batch_i].size;
        // Traverse the distance elements belonging to this batch.
        for (VertexID dist_i = dist_begin; dist_i < dist_end; ++dist_i) {
            const UnweightedDist dist = labels.distances[dist_i].dist;
            // Within one batch the distance entries are increasingly ordered,
            // so once the half-path distance reaches iter, skip to next batch.
            // (Distances are NOT globally ordered across batches.)
            if (dist >= iter) {
                break;
            }
            const VertexID v_begin = labels.distances[dist_i].start_index;
            const VertexID v_end = v_begin + labels.distances[dist_i].size;
            _mm_prefetch(reinterpret_cast<const char *>(dist_table[cand_root_id].data()), _MM_HINT_T0);
            for (VertexID v_i = v_begin; v_i < v_end; ++v_i) {
                const VertexID hub = labels.vertices[v_i]; // hub is a label of v_id_local
                if (hub >= cand_real_id) {
                    // cand_real_id cannot have labels whose ranks are lower
                    // than itself, so dist_table[cand_root_id][hub] does not
                    // exist for such hubs.
                    continue;
                }
                const VertexID d_via_hub = dist + dist_table[cand_root_id][hub];
                if (d_via_hub <= iter) {
                    return false; // an existing path is already short enough
                }
            }
        }
    }
    return true;
}
//// Sequential version
// Appends candidate cand_root_id (converted to its global ID) to vertex
// v_id_local's label vertices array. If v_id_local is itself a root of this
// batch, the new label is also queued in buffer_send so the corresponding
// dist_table update can be broadcast to the other hosts later.
// Note: only the vertices array of L[v_id_local] is updated here; the index
// arrays are fixed up afterwards by update_label_indices().
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_seq(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
        std::vector< std::pair<VertexID, VertexID> > &buffer_send)
{
    try {
        const VertexID cand_real_id = cand_root_id + roots_start;
        L[v_id_local].vertices.push_back(cand_real_id);
        // If v_id is a root of this batch, record the (root, label) pair for sending.
        const VertexID v_id_global = G.get_global_vertex_id(v_id_local);
        const VertexID v_root_id = v_id_global - roots_start;
        const bool is_batch_root = v_id_global >= roots_start && v_root_id < roots_size;
        if (is_batch_root) {
            buffer_send.emplace_back(v_root_id, cand_real_id);
        }
    }
    catch (const std::bad_alloc &) {
        // Out of memory: report diagnostics and abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("insert_label_only_seq: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
}
//// Parallel Version
// Appends candidate cand_root_id (converted to its global ID) to vertex
// v_id_local's label vertices array. If v_id_local is a root of this batch,
// the new (root, label) pair is written into the caller's private slice of
// tmp_buffer_send — the slice starts at offset_tmp_buffer_send and
// size_tmp_buffer_send is advanced — so concurrent callers never collide.
// Only the vertices array of L[v_id_local] is updated here; index arrays are
// fixed up afterwards by update_label_indices().
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_para(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
        VertexID roots_size,
        const DistGraph &G,
        std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
        EdgeID &size_tmp_buffer_send,
        const EdgeID offset_tmp_buffer_send)
{
    try {
        const VertexID cand_real_id = cand_root_id + roots_start;
        L[v_id_local].vertices.push_back(cand_real_id);
        // If v_id is a root of this batch, record the update for later sending.
        const VertexID v_id_global = G.get_global_vertex_id(v_id_local);
        const VertexID v_root_id = v_id_global - roots_start;
        const bool is_batch_root = v_id_global >= roots_start && v_root_id < roots_size;
        if (is_batch_root) {
            tmp_buffer_send[offset_tmp_buffer_send + size_tmp_buffer_send++] =
                    std::make_pair(v_root_id, cand_real_id);
        }
    }
    catch (const std::bad_alloc &) {
        // Out of memory: report diagnostics and abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("insert_label_only_para: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
}
// Updates vertex v_id_local's label index arrays (batches and distances)
// after inserted_count new label vertices were appended to L[v_id_local]
// during the current iteration (distance iter).
// Precondition: the new vertices are the last inserted_count entries of
// Lv.vertices (see insert_label_only_seq/para).
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
update_label_indices(
        const VertexID v_id_local,
        const VertexID inserted_count,
        // std::vector<IndexType> &L,
        std::vector<ShortIndex> &short_index,
        // VertexID b_id,
        const UnweightedDist iter)
{
    try {
        IndexType &Lv = L[v_id_local];
        // indicator[BATCH_SIZE] is true, means v got some labels already in this batch
        // (NOTE: the slot index is BATCH_SIZE, one past the per-root slots).
        if (short_index[v_id_local].indicator[BATCH_SIZE]) {
            // Increase the batches' last element's size because a new distance element need to be added
            ++(Lv.batches.rbegin() -> size);
        } else {
            // First insertion for v in this batch: mark it and open a new batch entry.
            short_index[v_id_local].indicator[BATCH_SIZE] = 1;
            // short_index[v_id_local].indicator.set(BATCH_SIZE);
            // Insert a new Batch with start_index and size because a new distance element need to be added
            Lv.batches.emplace_back(
                    // b_id, // batch id
                    Lv.distances.size(), // start index
                    1); // size
        }
        // Insert a new distance element with start_index, size, and dist
        Lv.distances.emplace_back(
                Lv.vertices.size() - inserted_count, // start index
                inserted_count, // size
                iter); // distance
    }
    catch (const std::bad_alloc &) {
        // Out of memory: report diagnostics and abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("update_label_indices: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
}
// Function to reset dist_table the distance buffer to INF
// Only the entries recorded in recved_dist_table are reset (instead of the
// whole table), which reduces the cost of initialization of the next batch.
// Also clears the bit-parallel label table for every root of the batch.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
reset_at_end(
        const DistGraph &G,
        // VertexID roots_start,
        // const std::vector<VertexID> &roots_master_local,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        const std::vector<VertexID> &once_candidated_queue,
        const VertexID end_once_candidated_queue)
{
    // (A previous variant reset dist_table by traversing local masters' labels;
    // that code has been removed in favor of replaying recved_dist_table below.)
    // Reset dist_table according to received masters' labels from other hosts
    for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
        for (VertexID cand_real_id : recved_dist_table[r_root_id]) {
            dist_table[r_root_id][cand_real_id] = MAX_UNWEIGHTED_DIST;
        }
        recved_dist_table[r_root_id].clear();
    }
    // Reset bit-parallel labels table
    for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
        memset(bp_labels_table[r_root_id].bp_dist, 0, sizeof(bp_labels_table[r_root_id].bp_dist));
        memset(bp_labels_table[r_root_id].bp_sets, 0, sizeof(bp_labels_table[r_root_id].bp_sets));
    }
    // Remove labels of local minimum set
    // NOTE(review): this loop is currently a no-op — the cleanup call below is
    // commented out, so it only reads G.is_local_minimum. Confirm whether
    // clean_all_indices() should be re-enabled or the loop removed.
    for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
        VertexID v_local_id = once_candidated_queue[v_i];
        if (!G.is_local_minimum[v_local_id]) {
            continue;
        }
        // L[v_local_id].clean_all_indices();
    }
}
// Processes one batch of roots [roots_start, roots_start + roots_size):
// 1) initializes per-batch label structures and distance buffers;
// 2) iterates BFS-like rounds: push active vertices' newest labels as
//    candidates, distance-check and insert surviving candidates, then
//    synchronize roots' distance-table updates and the active count across
//    hosts (MPI) — until no host has active vertices;
// 3) resets the per-batch tables for the next batch.
// (Large commented-out legacy variants — chunked pushing/inserting and a
// per-thread insertion backup — have been removed from this body for clarity.)
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
batch_process(
        const DistGraph &G,
        // const VertexID b_id,
        const VertexID roots_start, // start id of roots
        const VertexID roots_size, // how many roots in the batch
        const std::vector<uint8_t> &used_bp_roots,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
        std::vector<uint8_t> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated)
{
    // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
    // global_num_actives is the Maximum of active vertices among hosts (see MPI_MAX below).
    VertexID global_num_actives = initialization(G,
                                                 short_index,
                                                 dist_table,
                                                 recved_dist_table,
                                                 bp_labels_table,
                                                 active_queue,
                                                 end_active_queue,
                                                 once_candidated_queue,
                                                 end_once_candidated_queue,
                                                 once_candidated,
                                                 // b_id,
                                                 roots_start,
                                                 roots_size,
                                                 used_bp_roots);
    UnweightedDist iter = 0; // The iterator, also the distance for current iteration
    while (global_num_actives) {
        ++iter;
        // Traverse active vertices to push their labels as candidates.
        // Send masters' newly added labels to other hosts.
        try
        {
            // Single pushing: process the whole active queue at once.
            schedule_label_pushing_para(
                    G,
                    roots_start,
                    used_bp_roots,
                    active_queue,
                    0,
                    global_num_actives,
                    end_active_queue,
                    got_candidates_queue,
                    end_got_candidates_queue,
                    short_index,
                    bp_labels_table,
                    got_candidates,
                    is_active,
                    once_candidated_queue,
                    end_once_candidated_queue,
                    once_candidated,
                    iter);
            end_active_queue = 0;
        }
        catch (const std::bad_alloc &) {
            // Out of memory during pushing: report diagnostics and abort.
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("pushing: bad_alloc "
                   "iter: %u "
                   "host_id: %d "
                   "global_num_actives: %u "
                   "L.size(): %.2fGB "
                   "memtotal: %.2fGB "
                   "memfree: %.2fGB\n",
                   iter,
                   host_id,
                   global_num_actives,
                   get_index_size() * 1.0 / (1 << 30),
                   memtotal / 1024,
                   memfree / 1024);
            exit(1);
        }
        // Traverse vertices in the got_candidates_queue to insert labels
        {
            std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table
            // pair.first: root id
            // pair.second: label (global) id of the root
            if (end_got_candidates_queue >= THRESHOLD_PARALLEL) {
                // Parallel checking/inserting.
                schedule_label_inserting_para(
                        G,
                        roots_start,
                        roots_size,
                        short_index,
                        dist_table,
                        got_candidates_queue,
                        0,
                        end_got_candidates_queue,
                        got_candidates,
                        active_queue,
                        end_active_queue,
                        is_active,
                        buffer_send,
                        iter);
            } else {
                // Sequential checking/inserting for small candidate queues.
                for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
                    VertexID v_id_local = got_candidates_queue[i_queue];
                    VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
                    got_candidates[v_id_local] = 0; // reset got_candidates
                    // Traverse v_id's all candidates
                    VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
                    for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
                        VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
                        short_index[v_id_local].is_candidate[cand_root_id] = 0;
                        // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
                        if (distance_query(
                                cand_root_id,
                                v_id_local,
                                roots_start,
                                // L,
                                dist_table,
                                iter)) {
                            if (!is_active[v_id_local]) {
                                is_active[v_id_local] = 1;
                                active_queue[end_active_queue++] = v_id_local;
                            }
                            ++inserted_count;
                            // The candidate cand_root_id needs to be added into v_id's label
                            insert_label_only_seq(
                                    cand_root_id,
                                    v_id_local,
                                    roots_start,
                                    roots_size,
                                    G,
                                    buffer_send);
                        }
                    }
                    short_index[v_id_local].end_candidates_que = 0;
                    if (0 != inserted_count) {
                        // Update other arrays in L[v_id] if new labels were inserted in this iteration
                        update_label_indices(
                                v_id_local,
                                inserted_count,
                                // L,
                                short_index,
                                // b_id,
                                iter);
                    }
                }
            }
            end_got_candidates_queue = 0; // Set the got_candidates_queue empty
            // Sync the dist_table: each host broadcasts its buffer_send in turn.
            for (int root = 0; root < num_hosts; ++root) {
                std::vector<std::pair<VertexID, VertexID>> buffer_recv;
                one_host_bcasts_buffer_to_buffer(root,
                                                 buffer_send,
                                                 buffer_recv);
                if (buffer_recv.empty()) {
                    continue;
                }
                EdgeID size_buffer_recv = buffer_recv.size();
                try {
                    if (size_buffer_recv >= THRESHOLD_PARALLEL) {
                        // Get label number for every root
                        std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
                        for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                            const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                            VertexID root_id = e.first;
                            // Atomic count: multiple threads may hit the same root.
                            __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
                        }
                        // Resize the recved_dist_table for every root
#pragma omp parallel for
                        for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
                            VertexID old_size = recved_dist_table[root_id].size();
                            VertexID tmp_size = sizes_recved_root_labels[root_id];
                            if (tmp_size) {
                                recved_dist_table[root_id].resize(old_size + tmp_size);
                                sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
                            }
                            // If tmp_size == 0, root_id has no received labels.
                        }
                        // Record received labels in recved_dist_table
#pragma omp parallel for
                        for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                            const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
                            VertexID root_id = e.first;
                            VertexID cand_real_id = e.second;
                            dist_table[root_id][cand_real_id] = iter;
                            PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id],
                                             cand_real_id);
                        }
                    } else {
                        for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
                            VertexID root_id = e.first;
                            VertexID cand_real_id = e.second;
                            dist_table[root_id][cand_real_id] = iter;
                            // Record the received element, for future reset
                            recved_dist_table[root_id].push_back(cand_real_id);
                        }
                    }
                }
                catch (const std::bad_alloc &) {
                    // Out of memory while recording received labels: report and abort.
                    double memtotal = 0;
                    double memfree = 0;
                    PADO::Utils::system_memory(memtotal, memfree);
                    printf("recved_dist_table: bad_alloc "
                           "host_id: %d "
                           "iter: %u "
                           "L.size(): %.2fGB "
                           "memtotal: %.2fGB "
                           "memfree: %.2fGB\n",
                           host_id,
                           iter,
                           get_index_size() * 1.0 / (1 << 30),
                           memtotal / 1024,
                           memfree / 1024);
                    exit(1);
                }
            }
            // Sync the global_num_actives.
            // MPI_MAX (not MPI_SUM): the loop continues while ANY host still
            // has active vertices, so all hosts keep participating in the
            // collective broadcasts above.
            MPI_Allreduce(&end_active_queue,
                          &global_num_actives,
                          1,
                          V_ID_Type,
                          MPI_MAX,
                          MPI_COMM_WORLD);
        }
    }
    // Reset the dist_table (and bit-parallel tables) for the next batch.
    reset_at_end(
            G,
            // roots_start,
            // roots_master_local,
            dist_table,
            recved_dist_table,
            bp_labels_table,
            once_candidated_queue,
            end_once_candidated_queue);
}
//// Sequential Version
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//batch_process(
// const DistGraph &G,
// VertexID b_id,
// VertexID roots_start, // start id of roots
// VertexID roots_size, // how many roots in the batch
// const std::vector<uint8_t> &used_bp_roots,
// std::vector<VertexID> &active_queue,
// VertexID &end_active_queue,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<ShortIndex> &short_index,
// std::vector< std::vector<UnweightedDist> > &dist_table,
// std::vector< std::vector<VertexID> > &recved_dist_table,
// std::vector<BPLabelType> &bp_labels_table,
// std::vector<uint8_t> &got_candidates,
//// std::vector<bool> &got_candidates,
// std::vector<uint8_t> &is_active,
//// std::vector<bool> &is_active,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<uint8_t> &once_candidated)
//// std::vector<bool> &once_candidated)
//{
// // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
// initializing_time -= WallTimer::get_time_mark();
// VertexID global_num_actives = initialization(G,
// short_index,
// dist_table,
// recved_dist_table,
// bp_labels_table,
// active_queue,
// end_active_queue,
// once_candidated_queue,
// end_once_candidated_queue,
// once_candidated,
// b_id,
// roots_start,
// roots_size,
//// roots_master_local,
// used_bp_roots);
// initializing_time += WallTimer::get_time_mark();
// UnweightedDist iter = 0; // The iterator, also the distance for current iteration
//// {//test
//// printf("host_id: %u initialization finished.\n", host_id);
//// }
//
//
// while (global_num_actives) {
////#ifdef DEBUG_MESSAGES_ON
//// {//
//// if (0 == host_id) {
//// printf("iter: %u global_num_actives: %u\n", iter, global_num_actives);
//// }
//// }
////#endif
// ++iter;
// // Traverse active vertices to push their labels as candidates
// // Send masters' newly added labels to other hosts
// {
// scatter_time -= WallTimer::get_time_mark();
// std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue);
// //.first: Vertex ID
// //.second: size of labels
// std::vector<VertexID> buffer_send_labels;
// // Prepare masters' newly added labels for sending
// for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
// VertexID v_head_local = active_queue[i_q];
// is_active[v_head_local] = 0; // reset is_active
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// const IndexType &Lv = L[v_head_local];
// // Prepare the buffer_send_indices
// buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin()->start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// buffer_send_labels.push_back(label_root_id);
// }
// }
// end_active_queue = 0;
//
// for (int root = 0; root < num_hosts; ++root) {
// // Get the indices
// std::vector< std::pair<VertexID, VertexID> > indices_buffer;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send_indices,
// indices_buffer);
// if (indices_buffer.empty()) {
// continue;
// }
// // Get the labels
// std::vector<VertexID> labels_buffer;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send_labels,
// labels_buffer);
// // Push those labels
// EdgeID start_index = 0;
// for (const std::pair<VertexID, VertexID> e : indices_buffer) {
// VertexID v_head_global = e.first;
// EdgeID bound_index = start_index + e.second;
// if (G.local_out_degrees[v_head_global]) {
// local_push_labels(
// v_head_global,
// start_index,
// bound_index,
// roots_start,
// labels_buffer,
// G,
// short_index,
// got_candidates_queue,
// end_got_candidates_queue,
// got_candidates,
// once_candidated_queue,
// end_once_candidated_queue,
// once_candidated,
// bp_labels_table,
// used_bp_roots,
// iter);
// }
// start_index = bound_index;
// }
// }
// scatter_time += WallTimer::get_time_mark();
// }
//
// // Traverse vertices in the got_candidates_queue to insert labels
// {
// gather_time -= WallTimer::get_time_mark();
// std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table
// // pair.first: root id
// // pair.second: label (global) id of the root
// for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
// VertexID v_id_local = got_candidates_queue[i_queue];
// VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
// got_candidates[v_id_local] = 0; // reset got_candidates
// // Traverse v_id's all candidates
// VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
// for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
// VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
// short_index[v_id_local].is_candidate[cand_root_id] = 0;
// // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
// if ( distance_query(
// cand_root_id,
// v_id_local,
// roots_start,
// // L,
// dist_table,
// iter) ) {
// if (!is_active[v_id_local]) {
// is_active[v_id_local] = 1;
// active_queue[end_active_queue++] = v_id_local;
// }
// ++inserted_count;
// // The candidate cand_root_id needs to be added into v_id's label
// insert_label_only(
// cand_root_id,
// v_id_local,
// roots_start,
// roots_size,
// G,
//// dist_table,
// buffer_send);
//// iter);
// }
// }
// short_index[v_id_local].end_candidates_que = 0;
// if (0 != inserted_count) {
// // Update other arrays in L[v_id] if new labels were inserted in this iteration
// update_label_indices(
// v_id_local,
// inserted_count,
// // L,
// short_index,
// b_id,
// iter);
// }
// }
//// {//test
//// printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send));
//// }
// end_got_candidates_queue = 0; // Set the got_candidates_queue empty
// // Sync the dist_table
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<std::pair<VertexID, VertexID>> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
// VertexID root_id = e.first;
// VertexID cand_real_id = e.second;
// dist_table[root_id][cand_real_id] = iter;
// // Record the received element, for future reset
// recved_dist_table[root_id].push_back(cand_real_id);
// }
// }
//
// // Sync the global_num_actives
// MPI_Allreduce(&end_active_queue,
// &global_num_actives,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// gather_time += WallTimer::get_time_mark();
// }
// }
//
// // Reset the dist_table
// clearup_time -= WallTimer::get_time_mark();
// reset_at_end(
//// G,
//// roots_start,
//// roots_master_local,
// dist_table,
// recved_dist_table,
// bp_labels_table);
// clearup_time += WallTimer::get_time_mark();
//}
//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// // Every host h_i broadcast to others
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<E_T> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
//// uint64_t size_buffer_send = buffer_send.size();
//// // Sync the size_buffer_send.
//// message_time -= WallTimer::get_time_mark();
//// MPI_Bcast(&size_buffer_send,
//// 1,
//// MPI_UINT64_T,
//// root,
//// MPI_COMM_WORLD);
//// message_time += WallTimer::get_time_mark();
////// {// test
////// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send);
////// }
//// if (!size_buffer_send) {
//// continue;
//// }
//// message_time -= WallTimer::get_time_mark();
//// std::vector<E_T> buffer_recv(size_buffer_send);
//// if (host_id == root) {
//// buffer_recv.assign(buffer_send.begin(), buffer_send.end());
//// }
//// uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
//// if (bytes_buffer_send < static_cast<size_t>(INT_MAX)) {
//// // Only need 1 broadcast
////
//// MPI_Bcast(buffer_recv.data(),
//// bytes_buffer_send,
//// MPI_CHAR,
//// root,
//// MPI_COMM_WORLD);
//// } else {
//// const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1;
//// const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1;
//// size_t offset = 0;
//// for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
////// size_t offset = b_i * unit_buffer_size;
//// size_t size_unit_buffer = b_i == num_unit_buffers - 1
//// ? size_buffer_send - offset
//// : unit_buffer_size;
//// MPI_Bcast(buffer_recv.data() + offset,
//// size_unit_buffer * ETypeSize,
//// MPI_CHAR,
//// root,
//// MPI_COMM_WORLD);
//// offset += unit_buffer_size;
//// }
//// }
//// message_time += WallTimer::get_time_mark();
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
//}
//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// // Host processes locally.
// for (const E_T &e : buffer_send) {
// fun(e);
// }
//
// // Every host sends to others
// for (int src = 0; src < num_hosts; ++src) {
// if (host_id == src) {
// // Send from src
// message_time -= WallTimer::get_time_mark();
// for (int hop = 1; hop < num_hosts; ++hop) {
// int dst = hop_2_root_host_id(hop, host_id);
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// }
// message_time += WallTimer::get_time_mark();
// } else {
// // Receive from src
// for (int hop = 1; hop < num_hosts; ++hop) {
// int dst = hop_2_root_host_id(hop, src);
// if (host_id == dst) {
// message_time -= WallTimer::get_time_mark();
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// message_time += WallTimer::get_time_mark();
// // Process
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
// }
// }
// }
//}
//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// // Host processes locally.
// for (const E_T &e : buffer_send) {
// fun(e);
// }
// // Every host sends (num_hosts - 1) times
// for (int hop = 1; hop < num_hosts; ++hop) {
// int src = hop_2_me_host_id(-hop);
// int dst = hop_2_me_host_id(hop);
// if (src != dst) { // Normal case
// // When host_id is odd, first receive, then send.
// if (static_cast<uint32_t>(host_id) & 1U) {
// message_time -= WallTimer::get_time_mark();
// // Receive first.
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u recved_from: %u\n", host_id, src);
// }
// // Send then.
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u send_to: %u\n", host_id, dst);
// }
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// } else { // When host_id is even, first send, then receive.
// // Send first.
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u send_to: %u\n", host_id, dst);
// }
// // Receive then.
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u recved_from: %u\n", host_id, src);
// }
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
// } else { // If host_id is higher than dst, first send, then receive
// // This is a special case. It only happens when the num_hosts is even and hop equals to num_hosts/2.
// if (host_id < dst) {
// // Send
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// // Receive
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// } else { // Otherwise, if host_id is lower than dst, first receive, then send
// // Receive
// message_time -= WallTimer::get_time_mark();
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// // Send
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
// }
// }
//}
//// DEPRECATED version Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// const uint32_t UNIT_BUFFER_SIZE = 16U << 20U;
// // Every host h_i broadcast to others
// for (int h_i = 0; h_i < num_hosts; ++h_i) {
// uint64_t size_buffer_send = buffer_send.size();
// // Sync the size_buffer_send.
// message_time -= WallTimer::get_time_mark();
// MPI_Bcast(&size_buffer_send,
// 1,
// MPI_UINT64_T,
// h_i,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
//// {// test
//// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send);
//// }
// if (!size_buffer_send) {
// continue;
// }
// uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE;
//
// // Broadcast the buffer_send
// for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
// // Prepare the unit buffer
// message_time -= WallTimer::get_time_mark();
// size_t offset = b_i * UNIT_BUFFER_SIZE;
// size_t size_unit_buffer = b_i == num_unit_buffers - 1
// ? size_buffer_send - offset
// : UNIT_BUFFER_SIZE;
// std::vector<E_T> unit_buffer(size_unit_buffer);
// // Copy the messages from buffer_send to unit buffer.
// if (host_id == h_i) {
// unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer);
// }
// // Broadcast the unit buffer
// MPI_Bcast(unit_buffer.data(),
// MPI_Instance::get_sending_size(unit_buffer),
// MPI_CHAR,
// h_i,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// // Process every element of unit_buffer
// for (const E_T &e : unit_buffer) {
// fun(e);
// }
// }
// }
//}
// Function: Host root broadcasts its sending buffer to a receiving buffer.
/**
 * Broadcast host `root`'s buffer_send into every host's buffer_recv.
 *
 * Collective: all hosts must call this with the same `root`. The element
 * count is broadcast first so non-root hosts can size buffer_recv.
 * On the root host, buffer_recv takes over buffer_send's storage via
 * swap, so the caller's send buffer is CONSUMED (it is left holding the
 * freshly resized placeholder vector).
 *
 * Payloads larger than INT_MAX bytes are split into chunks because
 * MPI_Bcast takes an `int` count.
 */
template <VertexID BATCH_SIZE>
template <typename E_T>
inline void DistBVCPLL<BATCH_SIZE>::
one_host_bcasts_buffer_to_buffer(
        int root,
        std::vector<E_T> &buffer_send,
        std::vector<E_T> &buffer_recv)
{
    const size_t ETypeSize = sizeof(E_T);
    // NOTE(review): `volatile` (and the C-style cast below) look like a
    // debugging workaround for a Bcast issue — confirm still needed.
    volatile uint64_t size_buffer_send = 0;
    if (host_id == root) {
        size_buffer_send = buffer_send.size();
    }
    // Sync the element count so every host can size its receive buffer.
    MPI_Bcast((void *) &size_buffer_send,
            1,
            MPI_UINT64_T,
            root,
            MPI_COMM_WORLD);
    try {
        buffer_recv.resize(size_buffer_send);
    }
    catch (const std::bad_alloc &) {
        // Allocation failure is fatal: report memory statistics and exit.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("one_host_bcasts_buffer_to_buffer: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
                host_id,
                get_index_size() * 1.0 / (1 << 30),
                memtotal / 1024,
                memfree / 1024);
        exit(1);
    }
    if (!size_buffer_send) {
        return;
    }
    // Broadcast the payload.
    if (host_id == root) {
        // Take over the send buffer's storage instead of copying it.
        buffer_recv.swap(buffer_send);
    }
    uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
    if (bytes_buffer_send <= static_cast<size_t>(INT_MAX)) {
        // Fits in a single broadcast.
        MPI_Bcast(buffer_recv.data(),
                bytes_buffer_send,
                MPI_CHAR,
                root,
                MPI_COMM_WORLD);
    } else {
        // MPI_Bcast takes an `int` byte count: cap every chunk at the
        // largest whole number of elements whose byte size fits INT_MAX.
        // (The previous ceil-division chunking could round a chunk up to
        // more than INT_MAX bytes in edge cases — e.g. ETypeSize == 8 and
        // exactly INT_MAX elements — overflowing the `int` count.)
        const uint64_t unit_buffer_size = static_cast<size_t>(INT_MAX) / ETypeSize;
        size_t offset = 0;
        while (offset < size_buffer_send) {
            size_t remaining = size_buffer_send - offset;
            size_t size_unit_buffer = remaining < unit_buffer_size
                    ? remaining
                    : unit_buffer_size;
            // Same chunk schedule on every host: size_buffer_send was
            // synced above and ETypeSize is identical everywhere.
            MPI_Bcast(buffer_recv.data() + offset,
                    size_unit_buffer * ETypeSize,
                    MPI_CHAR,
                    root,
                    MPI_COMM_WORLD);
            offset += size_unit_buffer;
        }
    }
}
}
#endif //PADO_DPADO_H
|
deconvolution_pack1to8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transposed convolution (deconvolution) kernel: input stored 1 float per
// channel slot, output packed 8 output-channels wide (one AVX register per
// output pixel). Uses a "gather" formulation: each output pixel accumulates
// every input sample whose forward-convolution footprint covers it.
static void deconvolution_pack1to8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// Effective kernel span once dilation is applied
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
const int maxk = kernel_w * kernel_h;
const float* bias_data_ptr = bias_data;
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
// Accumulator for 8 output channels of this pixel; seeded with bias
__m256 _sum = _mm256_setzero_ps();
if (bias_data_ptr)
{
_sum = _mm256_loadu_ps(bias_data_ptr + p * 8);
}
const float* kptr = weight_data_packed.channel(p);
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
for (int y = 0; y < kernel_h; y++)
{
// Map output row i back to the candidate input row: valid only if
// non-negative, on the stride grid, and inside the input.
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
const float* sptr = m.row(sy);
for (int x = 0; x < kernel_w; x++)
{
// Same mapping for the column coordinate.
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
float val = sptr[sx];
int k = y * kernel_w + x;
// Broadcast the scalar input and FMA against 8 packed weights.
// Aligned load: weight_data_packed is presumably 32-byte aligned
// (packed layout) — TODO confirm against the packing code.
__m256 _val = _mm256_set1_ps(val);
__m256 _w = _mm256_load_ps(kptr + k * 8);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
}
}
// Next input channel's weight block (maxk taps x 8 output channels)
kptr += maxk * 8;
}
_sum = activation_avx(_sum, activation_type, activation_params);
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
}
|
metricize_dgemm.c | #include "genmul.h"
#if defined __FAST_MATH__
void metricize_pure(double* d, double* d2, int n, int limit)
#else
void metricize(double* d, double* d2, int n, int limit)
#endif
{
    /* Relax the n x n distance matrix d toward its metric (triangle
     * inequality) closure: repeatedly compute one-hop shortest paths into
     * d2 via a (min,+) "matrix product", copy back, and stop once the
     * largest per-entry decrease drops below the tolerance or the
     * iteration limit (if positive) is exhausted. */
    const double tol = 10e-12; /* NOTE(review): 10e-12 == 1e-11; confirm 1e-12 was not intended */
    int i;
    int iter = 1;
    double max_drop = 1.0;

    while (max_drop > tol)
    {
        max_drop = 0;
        /* shortest paths into d2 */
#if defined __FAST_MATH__
        dgemm_pure(n, d, d2);
#else
        dgemm_nn(n, d, d2);
#endif
#pragma omp parallel for private(i) shared(d, d2) reduction(max:max_drop)
        for (i = 0; i < n*n; i++)
        {
            double drop = d[i] - d2[i];
            if (drop > max_drop)
                max_drop = drop;
            d[i] = d2[i];
        }
        if ((limit > 0) && (++iter > limit))
            break;
    }
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * The subtrahend *y is normalized IN PLACE (it is modified) so that the
 * resulting tv_usec comes out non-negative.
 * Returns 1 if the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds the other way. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* tv_usec is now guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative iff x predates (the normalized) y. */
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    /* Grid sizes include two halo points per dimension. Defaults are used
     * when arguments are missing; previously Nx/Ny/Nz/Nt were read
     * uninitialized in that case (undefined behavior). */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Allocate the two time planes A[0..1][Nz][Ny][Nx] */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for (m=0; m<2; m++) {
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for (i=0; i<Nz; i++) {
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for (j=0; j<Ny; j++) {
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    /* Allocate the 7 variable-coefficient arrays coef[0..6][Nz][Ny][Nx] */
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for (m=0; m<7; m++) {
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for (i=0; i<Nz; i++) {
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for (j=0; j<Ny; j++) {
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    /* Tile size information, including extra element to decide the list
     * length. The list is modified here before source-to-source
     * transformations. */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 32;
    tile_size[1] = 32;
    tile_size[2] = 16;
    tile_size[3] = 32;
    tile_size[4] = -1;

    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;

    /* Initialize the FULL arrays including the halo planes at index 0:
     * the stencil reads i-1/j-1/k-1, so starting these loops at 1 (as
     * before) left indeterminate values feeding the computation. A[1] is
     * zeroed for the same reason (its halo is read from t = 1 onward). */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
            }
        }
    }
    for (m=0; m<7; m++) {
        for (i=0; i<Nz; i++) {
            for (j=0; j<Ny; j++) {
                for (k=0; k<Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for (test=0; test<TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                              coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                              coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                              coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                              coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                              coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                              coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* Previously spelled `min(...)`, but only the MIN macro is
         * defined above — `min` was an undeclared identifier. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free allocated arrays (including the top-level pointers and the
     * tile-size list, which previously leaked). */
    for (i=0; i<Nz; i++) {
        for (j=0; j<Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for (m=0; m<7; m++) {
        for (i=0; i<Nz; i++) {
            for (j=0; j<Ny; j++) {
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    return 0;
}
|
random.h | /*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2019 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file random.h
* \brief Randomness-related functions
*/
#ifndef RANDOM_H_
#define RANDOM_H_
namespace qpp {
/**
* \brief Generates a random real number uniformly distributed in the interval
* [a, b)
*
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, does not belong to it
* \return Random real number (double) uniformly distributed in the interval
* [a, b)
*/
inline double rand(double a, double b) {
    // EXCEPTION CHECKS
    if (a >= b)
        throw exception::OutOfRange("qpp::rand()");
    // END EXCEPTION CHECKS

    // Pick the PRNG engine (thread-local unless NO_THREAD_LOCAL_ is set)
    auto& gen =
#ifdef NO_THREAD_LOCAL_
        RandomDevices::get_instance().get_prng();
#else
        RandomDevices::get_thread_local_instance().get_prng();
#endif
    // Single uniform draw from the half-open interval [a, b)
    std::uniform_real_distribution<> dist(a, b);
    return dist(gen);
}
/**
* \brief Generates a random big integer uniformly distributed in the interval
* [a, b]
*
* \note To avoid ambiguity with double qpp::rand(double, double) cast at least
* one of the arguments to qpp::bigint
*
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, belongs to it
* \return Random big integer uniformly distributed in the interval [a, b]
*/
inline bigint rand(bigint a, bigint b) {
    // EXCEPTION CHECKS
    if (a > b)
        throw exception::OutOfRange("qpp::rand()");
    // END EXCEPTION CHECKS

    // Pick the PRNG engine (thread-local unless NO_THREAD_LOCAL_ is set)
    auto& gen =
#ifdef NO_THREAD_LOCAL_
        RandomDevices::get_instance().get_prng();
#else
        RandomDevices::get_thread_local_instance().get_prng();
#endif
    // Single uniform draw from the closed interval [a, b]
    std::uniform_int_distribution<bigint> dist(a, b);
    return dist(gen);
}
/**
* \brief Generates a random index (idx) uniformly distributed in the interval
* [a, b]
*
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, belongs to it
* \return Random index (idx) uniformly distributed in the interval [a, b]
*/
inline idx randidx(idx a = std::numeric_limits<idx>::min(),
                   idx b = std::numeric_limits<idx>::max()) {
    // EXCEPTION CHECKS
    if (a > b)
        throw exception::OutOfRange("qpp::randidx()");
    // END EXCEPTION CHECKS

    // Pick the PRNG engine (thread-local unless NO_THREAD_LOCAL_ is set)
    auto& gen =
#ifdef NO_THREAD_LOCAL_
        RandomDevices::get_instance().get_prng();
#else
        RandomDevices::get_thread_local_instance().get_prng();
#endif
    // Single uniform draw from the closed interval [a, b]
    std::uniform_int_distribution<idx> dist(a, b);
    return dist(gen);
}
/**
* \brief Generates a random matrix with entries uniformly distributed in the
* interval [a, b)
*
* If complex, then both real and imaginary parts are uniformly distributed in
* [a, b)
*
* This is the generic version that always throws
* qpp::Exception::Type::UNDEFINED_TYPE. It is specialized only for qpp::dmat
* and qpp::cmat
*/
template <typename Derived>
Derived rand(idx rows QPP_UNUSED_, idx cols QPP_UNUSED_,
double a QPP_UNUSED_ = 0, double b QPP_UNUSED_ = 1) {
// Generic (non-specialized) version: deliberately always throws, since
// random matrices are only defined for the qpp::dmat and qpp::cmat
// specializations below.
throw exception::UndefinedType("qpp::rand()");
}
/**
* \brief Generates a random real matrix with entries uniformly distributed in
* the interval [a, b), specialization for double matrices (qpp::dmat)
*
* The template parameter cannot be automatically deduced and must be explicitly
* provided
*
* Example:
* \code
* // generates a 3 x 3 random Eigen::MatrixXd,
* // with entries uniformly distributed in [-1,1)
* dmat mat = rand<dmat>(3, 3, -1, 1);
* \endcode
*
* \param rows Number of rows of the random generated matrix
* \param cols Number of columns of the random generated matrix
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, does not belong to it
* \return Random real matrix
*/
template <>
inline dmat rand(idx rows, idx cols, double a, double b) {
    // EXCEPTION CHECKS
    if (rows == 0 || cols == 0)
        throw exception::ZeroSize("qpp::rand()");
    if (a >= b)
        throw exception::OutOfRange("qpp::rand()");
    // END EXCEPTION CHECKS

    // Fill a rows x cols matrix entry-wise with i.i.d. samples from [a, b)
    auto sampler = [a, b](double) { return rand(a, b); };
    return dmat::Zero(rows, cols).unaryExpr(sampler);
}
/**
* \brief Generates a random complex matrix with entries (both real and
* imaginary) uniformly distributed in the interval [a, b), specialization for
* complex matrices (qpp::cmat)
*
* The template parameter cannot be automatically deduced and must be explicitly
* provided
*
* Example:
* \code
* // generates a 3 x 3 random Eigen::MatrixXcd,
* // with entries (both real and imaginary) uniformly distributed in [-1,1)
* cmat mat = rand<cmat>(3, 3, -1, 1);
* \endcode
*
* \param rows Number of rows of the random generated matrix
* \param cols Number of columns of the random generated matrix
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, does not belong to it
* \return Random complex matrix
*/
template <>
inline cmat rand(idx rows, idx cols, double a, double b) {
    // EXCEPTION CHECKS
    if (rows == 0 || cols == 0)
        throw exception::ZeroSize("qpp::rand()");
    if (a >= b)
        throw exception::OutOfRange("qpp::rand()");
    // END EXCEPTION CHECKS

    // Real and imaginary parts are drawn independently, each uniform in [a, b)
    cmat re = rand<dmat>(rows, cols, a, b).cast<cplx>();
    cmat im = rand<dmat>(rows, cols, a, b).cast<cplx>();
    return re + 1_i * im;
}
/**
* \brief Generates a random matrix with entries normally distributed in
* N(mean, sigma)
*
* If complex, then both real and imaginary parts are normally distributed in
* N(mean, sigma)
*
* This is the generic version that always throws
* qpp::Exception::Type::UNDEFINED_TYPE. It is specialized only for qpp::dmat
* and qpp::cmat
*/
template <typename Derived>
Derived randn(idx rows QPP_UNUSED_, idx cols QPP_UNUSED_,
double mean QPP_UNUSED_ = 0, double sigma QPP_UNUSED_ = 1) {
// Generic (non-specialized) version: deliberately always throws, since
// Gaussian random matrices are only defined for the qpp::dmat and
// qpp::cmat specializations below.
throw exception::UndefinedType("qpp::randn()");
}
/**
* \brief Generates a random real matrix with entries normally distributed in
* N(mean, sigma), specialization for double matrices (qpp::dmat)
*
* The template parameter cannot be automatically deduced and must be explicitly
* provided
*
* Example:
* \code
* // generates a 3 x 3 random Eigen::MatrixXd,
* // with entries normally distributed in N(0,2)
* dmat mat = randn<dmat>(3, 3, 0, 2);
* \endcode
*
* \param rows Number of rows of the random generated matrix
* \param cols Number of columns of the random generated matrix
* \param mean Mean
* \param sigma Standard deviation
* \return Random real matrix
*/
template <>
inline dmat randn(idx rows, idx cols, double mean, double sigma) {
    // EXCEPTION CHECKS
    if (rows == 0 || cols == 0)
        throw exception::ZeroSize("qpp::randn()");
    // END EXCEPTION CHECKS

    // Pick the PRNG engine (thread-local unless NO_THREAD_LOCAL_ is set)
    auto& gen =
#ifdef NO_THREAD_LOCAL_
        RandomDevices::get_instance().get_prng();
#else
        RandomDevices::get_thread_local_instance().get_prng();
#endif
    // Fill a rows x cols matrix entry-wise with i.i.d. N(mean, sigma) samples
    std::normal_distribution<> dist(mean, sigma);
    return dmat::Zero(rows, cols).unaryExpr([&dist, &gen](double) {
        return dist(gen);
    });
}
/**
* \brief Generates a random complex matrix with entries (both real and
* imaginary) normally distributed in N(mean, sigma), specialization for complex
* matrices (qpp::cmat)
*
* The template parameter cannot be automatically deduced and must be explicitly
* provided
*
* Example:
* \code
* // generates a 3 x 3 random Eigen::MatrixXcd,
* // with entries (both real and imaginary) normally distributed in N(0,2)
* cmat mat = randn<cmat>(3, 3, 0, 2);
* \endcode
*
* \param rows Number of rows of the random generated matrix
* \param cols Number of columns of the random generated matrix
* \param mean Mean
* \param sigma Standard deviation
* \return Random complex matrix
*/
template <>
inline cmat randn(idx rows, idx cols, double mean, double sigma) {
    // EXCEPTION CHECKS
    if (rows == 0 || cols == 0)
        throw exception::ZeroSize("qpp::randn()");
    // END EXCEPTION CHECKS

    // Real and imaginary parts are drawn independently, each N(mean, sigma)
    cmat re = randn<dmat>(rows, cols, mean, sigma).cast<cplx>();
    cmat im = randn<dmat>(rows, cols, mean, sigma).cast<cplx>();
    return re + 1_i * im;
}
/**
* \brief Generates a random real number (double) normally distributed in
* N(mean, sigma)
*
* \param mean Mean
* \param sigma Standard deviation
* \return Random real number normally distributed in N(mean, sigma)
*/
inline double randn(double mean = 0, double sigma = 1) {
    // Pick the PRNG engine (thread-local unless NO_THREAD_LOCAL_ is set)
    auto& gen =
#ifdef NO_THREAD_LOCAL_
        RandomDevices::get_instance().get_prng();
#else
        RandomDevices::get_thread_local_instance().get_prng();
#endif
    // Single Gaussian draw from N(mean, sigma)
    std::normal_distribution<> dist(mean, sigma);
    return dist(gen);
}
/**
* \brief Generates a random unitary matrix
*
* \param D Dimension of the Hilbert space
* \return Random unitary
*/
inline cmat randU(idx D = 2)
// ~3 times slower than Toby Cubitt's MATLAB corresponding routine,
// because Eigen 3 QR algorithm is not parallelized
{
    // EXCEPTION CHECKS
    if (D == 0)
        throw exception::DimsInvalid("qpp::randU()");
    // END EXCEPTION CHECKS

    // Start from a matrix of i.i.d. standard complex Gaussians
    cmat G = 1 / std::sqrt(2.) * randn<cmat>(D, D);

    // Q of the QR decomposition is unitary...
    Eigen::HouseholderQR<cmat> qr(G);
    cmat Q = qr.householderQ();

    // ...but not yet Haar-distributed: multiply each column by a random
    // phase so the result is uniform according to the Haar measure
    Eigen::VectorXcd phases = (rand<dmat>(D, 1)).cast<cplx>();
    for (idx i = 0; i < static_cast<idx>(phases.rows()); ++i)
        phases(i) = std::exp(2 * pi * 1_i * phases(i));

    return Q * phases.asDiagonal();
}
/**
* \brief Generates a random isometry matrix
*
* \param Din Size of the input Hilbert space
* \param Dout Size of the output Hilbert space
* \return Random isometry matrix
*/
inline cmat randV(idx Din, idx Dout) {
// EXCEPTION CHECKS
if (Din == 0 || Dout == 0 || Din > Dout)
throw exception::DimsInvalid("qpp::randV()");
// END EXCEPTION CHECKS
// An isometry from a Din- to a Dout-dimensional space is exactly the
// first Din columns of a Haar-random Dout x Dout unitary
return randU(Dout).block(0, 0, Dout, Din);
}
/**
* \brief Generates a set of random Kraus operators
*
* \note The set of Kraus operators satisfy the closure condition
* \f$ \sum_i K_i^\dagger K_i = I\f$
*
* \param N Number of Kraus operators
* \param D Dimension of the Hilbert space
* \return Set of \a N Kraus operators satisfying the closure condition
*/
inline std::vector<cmat> randkraus(idx N, idx D = 2) {
    // EXCEPTION CHECKS
    if (N == 0)
        throw exception::OutOfRange("qpp::randkraus()");
    if (D == 0)
        throw exception::DimsInvalid("qpp::randkraus()");
    // END EXCEPTION CHECKS

    std::vector<cmat> result(N);
    for (idx i = 0; i < N; ++i)
        result[i] = cmat::Zero(D, D);

    // Dilation: a Haar-random unitary on the (N * D)-dimensional
    // system + environment space; the Kraus operators are carved out of
    // its entries as K_k(a, b) = U(a*N + k, b*N), which guarantees the
    // closure condition sum_k K_k^dagger K_k = I.
    // (An unused local `cmat Fk(D, D)` was removed here.)
    cmat U = randU(N * D);

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(3)
#endif // WITH_OPENMP_
    for (idx k = 0; k < N; ++k)
        for (idx a = 0; a < D; ++a)
            for (idx b = 0; b < D; ++b)
                result[k](a, b) = U(a * N + k, b * N);

    return result;
}
/**
 * \brief Generates a random Hermitian matrix
 *
 * \param D Dimension of the Hilbert space
 * \return Random Hermitian matrix
 */
inline cmat randH(idx D = 2) {
    // EXCEPTION CHECKS
    if (D == 0)
        throw exception::DimsInvalid("qpp::randH()");
    // END EXCEPTION CHECKS

    // entries uniform over a complex square centred at the origin;
    // K + K^dagger is Hermitian by construction
    cmat K = 2 * rand<cmat>(D, D) - (1. + 1_i) * cmat::Ones(D, D);
    return K + K.adjoint();
}
/**
 * \brief Generates a random normalized ket (pure state vector)
 *
 * \param D Dimension of the Hilbert space
 * \return Random normalized ket
 */
inline ket randket(idx D = 2) {
    // EXCEPTION CHECKS
    if (D == 0)
        throw exception::DimsInvalid("qpp::randket()");
    // END EXCEPTION CHECKS

    // normalize a vector of i.i.d. complex Gaussians; this is faster than
    // applying a Haar-random unitary to a fixed vector
    ket psi = randn<cmat>(D, 1);
    return psi / psi.norm();
}
/**
 * \brief Generates a random density matrix
 *
 * \param D Dimension of the Hilbert space
 * \return Random density matrix
 */
inline cmat randrho(idx D = 2) {
    // EXCEPTION CHECKS
    if (D == 0)
        throw exception::DimsInvalid("qpp::randrho()");
    // END EXCEPTION CHECKS

    // H * H^dagger is positive semi-definite; dividing by the trace
    // makes it a valid density matrix
    cmat H = 10 * randH(D);
    cmat rho = H * H.adjoint();
    return rho / rho.trace();
}
/**
 * \brief Generates a random uniformly distributed permutation
 *
 * Uses Knuth shuffle method (as implemented by std::shuffle), so that all
 * permutations are equally probable
 *
 * \param N Size of the permutation
 * \return Random permutation of size \a N
 */
inline std::vector<idx> randperm(idx N) {
    // EXCEPTION CHECKS
    if (N == 0)
        throw exception::PermInvalid("qpp::randperm()");
    // END EXCEPTION CHECKS

    // start from the identity permutation 0, 1, ..., N-1
    std::vector<idx> perm(N);
    std::iota(perm.begin(), perm.end(), 0);

    // shuffle with the library-wide PRNG
    auto& gen =
#ifdef NO_THREAD_LOCAL_
        RandomDevices::get_instance().get_prng();
#else
        RandomDevices::get_thread_local_instance().get_prng();
#endif
    std::shuffle(perm.begin(), perm.end(), gen);

    return perm;
}
/**
 * \brief Generates a random probability vector uniformly distributed over the
 * probability simplex
 *
 * \param N Size of the probability vector
 * \return Random probability vector
 */
inline std::vector<double> randprob(idx N) {
    // EXCEPTION CHECKS
    if (N == 0)
        throw exception::OutOfRange("qpp::randprob()");
    // END EXCEPTION CHECKS

    std::vector<double> p(N);

    // draw i.i.d. Exp(1) variates; normalizing them yields a point
    // uniformly distributed on the probability simplex
    std::exponential_distribution<> ed(1);
    auto& gen =
#ifdef NO_THREAD_LOCAL_
        RandomDevices::get_instance().get_prng();
#else
        RandomDevices::get_thread_local_instance().get_prng();
#endif
    for (idx i = 0; i < N; ++i)
        p[i] = ed(gen);

    // normalize to unit total probability
    double total = std::accumulate(p.begin(), p.end(), 0.0);
    for (idx i = 0; i < N; ++i)
        p[i] /= total;

    return p;
}
} /* namespace qpp */
#endif /* RANDOM_H_ */
|
multisort-omp-task-rama-cutoff-opt2.c | #include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "omp.h"
#include <sys/time.h>
/* Returns the current wall-clock time in microseconds as a double
 * (seconds * 1e6 + microseconds), for coarse interval timing. */
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec * (double)1e6 + (double)now.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6f\n",(_m), stamp);
// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;
long MIN_MERGE_SIZE;
int CUTOFF;
#define BLOCK_SIZE 1024L
#define T int
void basicsort(long n, T data[n]);
void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);
/* Merge left[] and right[] (each of n elements) into result[], covering the
 * output range [start, start+length). Recursively splits the output range in
 * half, creating OpenMP tasks until depth reaches CUTOFF; tasks at or below
 * the cut-off are "final", so their descendants execute inline.
 * NOTE: length is assumed to be a power of 2 (see file header), so the two
 * length/2 halves cover the whole range exactly. */
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length, int depth) {
if (length < MIN_MERGE_SIZE*2L) {
// Base case
basicmerge(n, left, right, result, start, length);
} else {
// Recursive decomposition
/* inside a final task omp_in_final() is true: recurse serially instead of
 * paying task-creation overhead for work that runs inline anyway */
if(!omp_in_final()){
#pragma omp task final (depth >= CUTOFF)
merge(n, left, right, result, start, length/2, depth+1 );
#pragma omp task final (depth >= CUTOFF)
merge(n, left, right, result, start + length/2, length/2, depth+1);
/* both half-merges must finish before returning to the caller */
#pragma omp taskwait
}else{
merge(n, left, right, result, start, length/2, depth+1);
merge(n, left, right, result, start + length/2, length/2, depth+1);
}
}
}
/* Sorts data[0..n-1] in place using tmp[] as scratch space.
 * Strategy: split into 4 quarters, sort each (as tasks), then merge pairwise:
 * quarters -> halves (into tmp), halves -> full (back into data).
 * depth counts recursion levels; "final(depth >= CUTOFF)" throttles task
 * creation, and the omp_in_final() test switches to plain serial recursion
 * once inside a final task. n is assumed to be a power of 2 (file header). */
void multisort(long n, T data[n], T tmp[n], int depth) {
if (n >= MIN_SORT_SIZE*4L) {
// Recursive decomposition
if(!omp_in_final()){
#pragma omp task final (depth >= CUTOFF)
multisort(n/4L, &data[0], &tmp[0], depth+1);
#pragma omp task final (depth >= CUTOFF)
multisort(n/4L, &data[n/4L], &tmp[n/4L], depth+1);
#pragma omp task final (depth >= CUTOFF)
multisort(n/4L, &data[n/2L], &tmp[n/2L], depth+1);
#pragma omp task final (depth >= CUTOFF)
multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L], depth+1);
/* all four quarters must be sorted before the first merge level */
#pragma omp taskwait
#pragma omp task final (depth >= CUTOFF)
merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L,0);
#pragma omp task final (depth >= CUTOFF)
merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L,0);
/* both halves must be in tmp before the final merge */
#pragma omp taskwait
#pragma omp task final (depth >= CUTOFF)
merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n,0);
#pragma omp taskwait
}else{
/* already inside a final task: recurse serially */
multisort(n/4L, &data[0], &tmp[0], depth+1);
multisort(n/4L, &data[n/4L], &tmp[n/4L], depth+1);
multisort(n/4L, &data[n/2L], &tmp[n/2L], depth+1);
multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L], depth+1);
merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L,0);
merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L,0);
merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n,0);
}
} else {
// Base case
basicsort(n, data);
}
}
/* Fill data[] with a deterministic pseudo-random sequence seeded by rand().
 * Each data[i] depends on data[i-1], so the loop is inherently sequential.
 * The previous "#pragma omp for ordered(1)" was removed: it had no matching
 * "ordered depend" construct (non-conforming per the OpenMP spec), and if the
 * function were ever invoked inside a parallel region, the worksharing loop
 * would race on the data[i-1] read. The function is only called from main,
 * outside any parallel region, so removing the pragma preserves behavior. */
static void initialize(long length, T data[length]) {
    for (long i = 0; i < length; i++) {
        if (i == 0) {
            data[i] = rand();
        } else {
            data[i] = ((data[i-1]+1) * i * 104723L) % N;
        }
    }
}
/* Zero out data[0..length-1]. The worksharing directive splits the loop
 * among threads when called from inside a parallel region; called from
 * main (outside any parallel region) it simply runs on one thread. */
static void clear(long length, T data[length]) {
#pragma omp for schedule (guided , 4)
    for (long pos = 0; pos < length; pos++) {
        data[pos] = 0;
    }
}
/* Verify data[] is sorted in non-decreasing order; print an error with the
 * number of out-of-order adjacent pairs if it is not. Silent on success. */
void check_sorted(long n, T data[n])
{
    int unsorted = 0;
    /* index must be long: n may exceed INT_MAX for large -n values,
     * and the previous "int i" would overflow before reaching n */
    for (long i = 1; i < n; i++)
        if (data[i-1] > data[i]) unsorted++;
    if (unsorted > 0)
        printf ("\nERROR: data is NOT properly sorted. There are %d unordered positions\n\n",unsorted);
}
/* Entry point: parse arguments, build the input, run the task-parallel
 * multisort and verify the result. Returns EXIT_FAILURE on bad usage or
 * allocation failure, 0 otherwise. */
int main(int argc, char **argv) {
    /* Defaults for command line arguments */
    N = 32768 * BLOCK_SIZE;
    MIN_SORT_SIZE = 32 * BLOCK_SIZE;
    MIN_MERGE_SIZE = 32 * BLOCK_SIZE;  /* stray ";;" removed */
    CUTOFF = 4;

    /* Process command-line arguments */
    for (int i=1; i<argc; i++) {
        if (strcmp(argv[i], "-n")==0) {
            N = atol(argv[++i]) * BLOCK_SIZE;
        }
        else if (strcmp(argv[i], "-s")==0) {
            MIN_SORT_SIZE = atol(argv[++i]) * BLOCK_SIZE;
        }
        else if (strcmp(argv[i], "-m")==0) {
            MIN_MERGE_SIZE = atol(argv[++i]) * BLOCK_SIZE;
        }
        else if (strcmp(argv[i], "-c")==0) {
            CUTOFF = atoi(argv[++i]);
        }
        else {
            /* usage line now also lists -c, which was accepted but missing
             * from the summary line */
            fprintf(stderr, "Usage: %s [-n vector_size -s MIN_SORT_SIZE -m MIN_MERGE_SIZE -c CUTOFF]\n", argv[0]);
            fprintf(stderr, "  -n to specify the size of the vector (in Kelements) to sort (default 32768)\n");
            fprintf(stderr, "  -s to specify the size of the vector (in Kelements) that breaks recursion in the sort phase (default 32)\n");
            fprintf(stderr, "  -m to specify the size of the vector (in Kelements) that breaks recursion in the merge phase (default 32)\n");
            fprintf(stderr, "  -c to specify the cut off recursion level to stop task generation in OpenMP (default 4)\n");
            return EXIT_FAILURE;
        }
    }

    fprintf(stdout, "Arguments (Kelements): N=%ld, MIN_SORT_SIZE=%ld, MIN_MERGE_SIZE=%ld\n", N/BLOCK_SIZE, MIN_SORT_SIZE/BLOCK_SIZE, MIN_MERGE_SIZE/BLOCK_SIZE);
    fprintf(stdout, " CUTOFF=%d\n", CUTOFF);

    T *data = malloc(N*sizeof(T));
    T *tmp = malloc(N*sizeof(T));
    /* fail fast on allocation failure instead of crashing in initialize() */
    if (data == NULL || tmp == NULL) {
        fprintf(stderr, "Error: unable to allocate the data and scratch vectors\n");
        free(data);
        free(tmp);
        return EXIT_FAILURE;
    }

    double stamp;
    START_COUNT_TIME;
    initialize(N, data);
    clear(N, tmp);
    STOP_COUNT_TIME("Initialization time in seconds");

    START_COUNT_TIME;
    /* one thread enters multisort; the tasks it spawns are executed by the
     * whole team created by the parallel region */
    #pragma omp parallel
    #pragma omp single
    multisort(N, data, tmp,0);
    STOP_COUNT_TIME("Multisort execution time");

    START_COUNT_TIME;
    check_sorted (N, data);
    STOP_COUNT_TIME("Check sorted data execution time");

    free(data);
    free(tmp);

    fprintf(stdout, "Multisort program finished\n");
    return 0;
}
|
3618.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization: fill A with the reproducible pattern (i + j) / nj. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;

  for (row = 0; row < ni; row++)
    for (col = 0; col < nj; col++)
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
}
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int r, c;

  for (r = 0; r < ni; r++)
    for (c = 0; c < nj; c++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[r][c]);
      /* line break every 20 elements of the flattened array */
      if ((r * NJ + c) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Applies a 3x3 convolution of A into B over the interior points
 * (boundary rows/columns of B are never written). The outer k loop repeats
 * the same computation 1000 times purely as a timing workload: A is never
 * updated, so every iteration recomputes the same B.
 * Rows are distributed statically in chunks of 8 across 28 threads and the
 * inner loop is SIMD-vectorized; j must be private since i is the
 * parallelized index. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j,k;
for (k=0; k<1000; k++) {
#pragma omp parallel for simd schedule(static, 8) num_threads(28) private(j)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
/* fixed 3x3 stencil weights applied to the 8-neighbourhood of (i,j) */
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
}
}
/* Driver: allocate A and B, initialize A, time kernel_conv2d, and pass B
 * through polybench_prevent_dce so the compiler cannot eliminate the
 * kernel as dead code. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
GB_unaryop__ainv_uint32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint32_uint64
// op(A') function: GB_tran__ainv_uint32_uint64
// C type: uint32_t
// A type: uint64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -((uint32_t) Ax [p]) for 0 <= p < anz, i.e. cast then negate
// (unsigned negation wraps mod 2^32). Auto-generated kernel; do not edit.
GrB_Info GB_unop__ainv_uint32_uint64
(
uint32_t *Cx, // Cx and Ax may be aliased
uint64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GB_control.h; caller uses the generic case
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while applying the cast-and-negate
// operator; the actual loops come from GB_unaryop_transpose.c (phase 2).
// Auto-generated kernel; do not edit.
GrB_Info GB_tran__ainv_uint32_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
PatchSelect_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC and Diamond Light Source Ltd.
*
* Copyright 2017 Daniil Kazantsev
* Copyright 2017 Srikanth Nagella, Edoardo Pasca
* Copyright 2018 Diamond Light Source Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "PatchSelect_core.h"
/* C-OMP implementation of non-local weight pre-calculation for non-local priors
* Weights and associated indices are stored into pre-allocated arrays and passed
* to the regulariser
*
*
* Input Parameters:
* 1. 2D/3D grayscale image/volume
* 2. Searching window (half-size of the main bigger searching window, e.g. 11)
* 3. Similarity window (half-size of the patch window, e.g. 2)
* 4. The number of neighbours to take (the most prominent after sorting neighbours will be taken)
* 5. noise-related parameter to calculate non-local weights
*
* Output [2D]:
* 1. AR_i - indeces of i neighbours
* 2. AR_j - indeces of j neighbours
* 3. Weights_ij - associated weights
*
* Output [3D]:
* 1. AR_i - indeces of i neighbours
* 2. AR_j - indeces of j neighbours
* 3. AR_k - indeces of j neighbours
* 4. Weights_ijk - associated weights
*/
/* Exchange the two floats pointed to by xp and yp. */
void swap(float *xp, float *yp)
{
    const float held = *xp;
    *xp = *yp;
    *yp = held;
}
/* Exchange the two unsigned shorts pointed to by xp and yp. */
void swapUS(unsigned short *xp, unsigned short *yp)
{
    const unsigned short held = *xp;
    *xp = *yp;
    *yp = held;
}
/**************************************************/
/* Pre-computes non-local weights for every pixel/voxel of A.
 * For each location the NumNeighb most similar patches inside the search
 * window are selected; their indices go to H_i/H_j (and H_k in 3D) and the
 * associated weights to Weights. All output arrays are pre-allocated by the
 * caller. dimZ == 0 selects the 2D path. h is the noise parameter; patch
 * distances are compared against h*h. Always returns 1. */
float PatchSelect_CPU_main(float *A, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, int dimX, int dimY, int dimZ, int SearchWindow, int SimilarWin, int NumNeighb, float h)
{
int counterG;
long i, j, k;
float *Eucl_Vec, h2;
h2 = h*h;
/****************2D INPUT ***************/
if (dimZ == 0) {
/* generate a 2D Gaussian kernel for NLM procedure */
Eucl_Vec = (float*) calloc ((2*SimilarWin+1)*(2*SimilarWin+1),sizeof(float));
counterG = 0;
for(i=-SimilarWin; i<=SimilarWin; i++) {
for(j=-SimilarWin; j<=SimilarWin; j++) {
Eucl_Vec[counterG] = (float)exp(-(pow(((float) i), 2) + pow(((float) j), 2))/(2*SimilarWin*SimilarWin));
counterG++;
}} /*main neighb loop */
/* for each pixel store indeces of the most similar neighbours (patches) */
#pragma omp parallel for shared (A, Weights, H_i, H_j) private(i,j)
for(j=0; j<(long)(dimY); j++) {
for(i=0; i<(long)(dimX); i++) {
Indeces2D(A, H_i, H_j, Weights, i, j, (long)(dimX), (long)(dimY), Eucl_Vec, NumNeighb, SearchWindow, SimilarWin, h2);
}}
}
else {
/****************3D INPUT ***************/
/* generate a 3D Gaussian kernel for NLM procedure */
/* NOTE(review): the denominator here uses SimilarWin^3 while the 2D branch
 * uses SimilarWin^2 -- presumably intentional, but worth confirming */
Eucl_Vec = (float*) calloc ((2*SimilarWin+1)*(2*SimilarWin+1)*(2*SimilarWin+1),sizeof(float));
counterG = 0;
for(i=-SimilarWin; i<=SimilarWin; i++) {
for(j=-SimilarWin; j<=SimilarWin; j++) {
for(k=-SimilarWin; k<=SimilarWin; k++) {
Eucl_Vec[counterG] = (float)exp(-(pow(((float) i), 2) + pow(((float) j), 2) + pow(((float) k), 2))/(2*SimilarWin*SimilarWin*SimilarWin));
counterG++;
}}} /*main neighb loop */
/* for each voxel store indeces of the most similar neighbours (patches) */
/* NOTE(review): Indeces3D's parameter list declares dimY before dimX;
 * verify it matches the (dimX, dimY, dimZ) order passed here */
#pragma omp parallel for shared (A, Weights, H_i, H_j, H_k) private(i,j,k)
for(k=0; k<dimZ; k++) {
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
Indeces3D(A, H_i, H_j, H_k, Weights, i, j, k, (long)(dimX), (long)(dimY), (long)(dimZ), Eucl_Vec, NumNeighb, SearchWindow, SimilarWin, h2);
}}}
}
free(Eucl_Vec);
return 1;
}
/* For pixel (i,j): scan the (2*SearchWindow+1)^2 neighbourhood, compute a
 * Gaussian-weighted patch distance for each candidate centre, sort the
 * resulting weights in decreasing order, and store the NumNeighb strongest
 * neighbours (indices into H_i/H_j, weights into Weights).
 * Output layout: NumNeighb consecutive planes of dimX*dimY values.
 * Always returns 1. */
float Indeces2D(float *Aorig, unsigned short *H_i, unsigned short *H_j, float *Weights, long i, long j, long dimX, long dimY, float *Eucl_Vec, int NumNeighb, int SearchWindow, int SimilarWin, float h2)
{
long i1, j1, i_m, j_m, i_c, j_c, i2, j2, i3, j3, counter, x, y, index, sizeWin_tot, counterG;
float *Weight_Vec, normsum;
unsigned short *ind_i, *ind_j;
sizeWin_tot = (2*SearchWindow + 1)*(2*SearchWindow + 1);
/* scratch buffers for all candidate weights/indices of this pixel */
Weight_Vec = (float*) calloc(sizeWin_tot, sizeof(float));
ind_i = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
ind_j = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
counter = 0;
for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) {
for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) {
i1 = i+i_m;
j1 = j+j_m;
/* candidate patch centre must lie inside the image */
if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY))) {
/* Gaussian-weighted SSD between the patches at (i,j) and (i1,j1);
 * pixels falling outside the image are simply skipped */
normsum = 0.0f; counterG = 0;
for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) {
for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) {
i2 = i1 + i_c;
j2 = j1 + j_c;
i3 = i + i_c;
j3 = j + j_c;
if (((i2 >= 0) && (i2 < dimX)) && ((j2 >= 0) && (j2 < dimY))) {
if (((i3 >= 0) && (i3 < dimX)) && ((j3 >= 0) && (j3 < dimY))) {
normsum += Eucl_Vec[counterG]*powf(Aorig[j3*dimX + (i3)] - Aorig[j2*dimX + (i2)], 2);
counterG++;
}}
}}
/* writing temporarily into vectors */
/* candidates with near-zero distance (e.g. the pixel itself) are dropped */
if (normsum > EPS) {
Weight_Vec[counter] = expf(-normsum/h2);
ind_i[counter] = i1;
ind_j[counter] = j1;
counter++;
}
}
}}
/* do sorting to choose the most prominent weights [HIGH to LOW] */
/* and re-arrange indeces accordingly */
/* bubble sort: after pass x the x largest weights occupy the front */
for (x = 0; x < counter-1; x++) {
for (y = 0; y < counter-x-1; y++) {
if (Weight_Vec[y] < Weight_Vec[y+1]) {
swap(&Weight_Vec[y], &Weight_Vec[y+1]);
swapUS(&ind_i[y], &ind_i[y+1]);
swapUS(&ind_j[y], &ind_j[y+1]);
}
}
}
/*sorting loop finished*/
/*now select the NumNeighb more prominent weights and store into pre-allocated arrays */
for(x=0; x < NumNeighb; x++) {
index = (dimX*dimY*x) + j*dimX+i;
H_i[index] = ind_i[x];
H_j[index] = ind_j[x];
Weights[index] = Weight_Vec[x];
}
free(ind_i);
free(ind_j);
free(Weight_Vec);
return 1;
}
/* For voxel (i,j,k): scan the search window, compute Gaussian-weighted patch
 * distances, sort weights in decreasing order and store the NumNeighb most
 * similar neighbours (indices into H_i/H_j/H_k, weights into Weights).
 * Output layout: NumNeighb consecutive volumes of dimX*dimY*dimZ values.
 * Always returns 1.
 *
 * Fixes over the previous revision:
 *  - parameters are declared in the order the caller
 *    (PatchSelect_CPU_main) passes them: (dimX, dimY, dimZ). They were
 *    previously declared (dimY, dimX, dimZ), which silently swapped the
 *    X/Y bounds checks and strides for non-cubic volumes;
 *  - the weight sorting is now the same bubble sort used in Indeces2D.
 *    The old loop compared element x while swapping y and y+1 (not a valid
 *    sort) and accessed Weight_Vec[y+1] at y == counter-1, one past the
 *    last valid entry. */
float Indeces3D(float *Aorig, unsigned short *H_i, unsigned short *H_j, unsigned short *H_k, float *Weights, long i, long j, long k, long dimX, long dimY, long dimZ, float *Eucl_Vec, int NumNeighb, int SearchWindow, int SimilarWin, float h2)
{
    long i1, j1, k1, i_m, j_m, k_m, i_c, j_c, k_c, i2, j2, k2, i3, j3, k3, counter, x, y, index, sizeWin_tot, counterG;
    float *Weight_Vec, normsum, temp;
    unsigned short *ind_i, *ind_j, *ind_k, temp_i, temp_j, temp_k;

    sizeWin_tot = (2*SearchWindow + 1)*(2*SearchWindow + 1)*(2*SearchWindow + 1);
    /* scratch buffers for all candidate weights/indices of this voxel */
    Weight_Vec = (float*) calloc(sizeWin_tot, sizeof(float));
    ind_i = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
    ind_j = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));
    ind_k = (unsigned short*) calloc(sizeWin_tot, sizeof(unsigned short));

    counter = 0l;
    for(i_m=-SearchWindow; i_m<=SearchWindow; i_m++) {
        for(j_m=-SearchWindow; j_m<=SearchWindow; j_m++) {
            for(k_m=-SearchWindow; k_m<=SearchWindow; k_m++) {
                i1 = i+i_m;
                j1 = j+j_m;
                k1 = k+k_m;
                /* candidate patch centre must lie inside the volume */
                if (((i1 >= 0) && (i1 < dimX)) && ((j1 >= 0) && (j1 < dimY)) && ((k1 >= 0) && (k1 < dimZ))) {
                    /* Gaussian-weighted SSD between patches at (i,j,k) and (i1,j1,k1) */
                    normsum = 0.0f; counterG = 0l;
                    for(i_c=-SimilarWin; i_c<=SimilarWin; i_c++) {
                        for(j_c=-SimilarWin; j_c<=SimilarWin; j_c++) {
                            for(k_c=-SimilarWin; k_c<=SimilarWin; k_c++) {
                                i2 = i1 + i_c;
                                j2 = j1 + j_c;
                                k2 = k1 + k_c;
                                i3 = i + i_c;
                                j3 = j + j_c;
                                k3 = k + k_c;
                                if (((i2 >= 0) && (i2 < dimX)) && ((j2 >= 0) && (j2 < dimY)) && ((k2 >= 0) && (k2 < dimZ))) {
                                    if (((i3 >= 0) && (i3 < dimX)) && ((j3 >= 0) && (j3 < dimY)) && ((k3 >= 0) && (k3 < dimZ))) {
                                        normsum += Eucl_Vec[counterG]*pow(Aorig[(dimX*dimY*k3) + j3*dimX + (i3)] - Aorig[(dimX*dimY*k2) + j2*dimX + (i2)], 2);
                                        counterG++;
                                    }}
                            }}}
                    /* writing temporarily into vectors; near-zero distances
                     * (e.g. the voxel itself) are dropped */
                    if (normsum > EPS) {
                        Weight_Vec[counter] = expf(-normsum/h2);
                        ind_i[counter] = i1;
                        ind_j[counter] = j1;
                        ind_k[counter] = k1;
                        counter++;
                    }
                }
            }}}
    /* bubble sort weights HIGH to LOW, keeping indices in sync
     * (same scheme as Indeces2D) */
    for (x = 0; x < counter-1; x++) {
        for (y = 0; y < counter-x-1; y++) {
            if (Weight_Vec[y] < Weight_Vec[y+1]) {
                temp = Weight_Vec[y]; Weight_Vec[y] = Weight_Vec[y+1]; Weight_Vec[y+1] = temp;
                temp_i = ind_i[y]; ind_i[y] = ind_i[y+1]; ind_i[y+1] = temp_i;
                temp_j = ind_j[y]; ind_j[y] = ind_j[y+1]; ind_j[y+1] = temp_j;
                temp_k = ind_k[y]; ind_k[y] = ind_k[y+1]; ind_k[y+1] = temp_k;
            }
        }
    }
    /*sorting loop finished*/
    /*now select the NumNeighb more prominent weights and store into arrays */
    for(x=0; x < NumNeighb; x++) {
        index = dimX*dimY*dimZ*x + (dimX*dimY*k) + j*dimX+i;
        H_i[index] = ind_i[x];
        H_j[index] = ind_j[x];
        H_k[index] = ind_k[x];
        Weights[index] = Weight_Vec[x];
    }
    free(ind_i);
    free(ind_j);
    free(ind_k);
    free(Weight_Vec);
    return 1;
}
|
GB_binop__rdiv_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_int16)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__rdiv_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int16)
// A*D function (colscale): GB (_AxD__rdiv_int16)
// D*A function (rowscale): GB (_DxB__rdiv_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int16)
// C=scalar+B GB (_bind1st__rdiv_int16)
// C=scalar+B' GB (_bind1st_tran__rdiv_int16)
// C=A+scalar GB (_bind2nd__rdiv_int16)
// C=A'+scalar GB (_bind2nd_tran__rdiv_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 16)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IDIV_SIGNED (y, x, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_INT16 || GxB_NO_RDIV_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B with all three matrices dense (auto-generated; do not edit).
// Compiled unconditionally: this kernel has no GB_DISABLE guard.
void GB (_Cdense_ewise3_accum__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with all three matrices dense (auto-generated; do not edit).
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GB_control.h; caller uses the generic case
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix
// (auto-generated; do not edit).
GrB_Info GB (_Cdense_accumB__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix
// (auto-generated; do not edit).
GrB_Info GB (_Cdense_accumb__rdiv_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the inner block always returns above
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by the diagonal matrix D
// (auto-generated; do not edit).
GrB_Info GB (_AxD__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by the diagonal matrix D
// (auto-generated; do not edit).
GrB_Info GB (_DxB__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B; the loops come from GB_add_template.c
// (auto-generated; do not edit).
GrB_Info GB (_AaddB__rdiv_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspace, freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (general case, method 01);
// the loops come from GB_emult_01_meta.c (auto-generated; do not edit).
GrB_Info GB (_AemultB_01__rdiv_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full (auto-generated; do not edit). GB_BINOP_FLIP is 0 for rdiv
// (the flip was resolved by swapping to the dual operator), so only the
// unflipped template branch below is compiled.
GrB_Info GB (_AemultB_02__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full (auto-generated; do not edit).
GrB_Info GB (_AemultB_03__rdiv_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap
// (auto-generated; do not edit).
GrB_Info GB (_AemultB_bitmap__rdiv_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st with rdiv: Cx [p] = Bx [p] / x for every entry present in Bb
// (GB_IDIV_SIGNED is the library's safe signed-division macro; see GB.h).
// Auto-generated kernel; do not edit.
GrB_Info GB (_bind1st__rdiv_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap (GBB is always true if Bb is NULL)
if (!GBB (Bb, p)) continue ;
int16_t bij = Bx [p] ;
Cx [p] = GB_IDIV_SIGNED (bij, x, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = rdiv (Ax [p], y) = y / Ax [p] for every entry present in A.
// Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__rdiv_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    const int16_t y = (*((int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            // rdiv: the matrix entry is the denominator
            int16_t a = Ax [p] ;
            Cx [p] = GB_IDIV_SIGNED (y, a, 16) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.  For bind1st with
// rdiv, cij = rdiv (x, aij) = aij / x, hence the argument order.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = GB_IDIV_SIGNED (aij, x, 16) ; \
}

// C = op (x, A'): transpose A and apply rdiv with scalar x bound first.
GrB_Info GB (_bind1st_tran__rdiv_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code generated after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.  For bind2nd with
// rdiv, cij = rdiv (aij, y) = y / aij, hence the argument order.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = GB_IDIV_SIGNED (y, aij, 16) ; \
}

// C = op (A', y): transpose A and apply rdiv with scalar y bound second.
GrB_Info GB (_bind2nd_tran__rdiv_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
deconvolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// Deconvolution (transposed convolution) 3x3, stride 1, NEON-accelerated.
// Each input pixel (i,j) is scattered into the 3x3 output window starting
// at (i,j): output rows i, i+1, i+2 are accumulated with kernel rows
// k0, k1, k2 respectively.  Output channels are computed in parallel.
// NOTE(review): assumes the caller allocated top_blob for stride-1 3x3
// deconvolution (presumably outw == w + 2, outh == h + 2) -- verify.
static void deconv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);
        // initialize the whole output channel with its bias (0 if none)
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        for (int q=0; q<inch; q++)
        {
            const float* img0 = bottom_blob.channel(q);
            // 3x3 kernel for (out channel p, in channel q): 9 consecutive floats
            const float* kernel0 = kernel + p*inch*9 + q*9;
            const float* r0 = img0;
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;
#if __ARM_NEON
            // vld1q_f32 loads 4 floats but only lanes 0..2 of each _k* are
            // used below; the 4th lane reads one float past the 3 kernel
            // values (NOTE(review): relies on slack in the weight blob for
            // the very last kernel -- confirm allocation).
            float32x4_t _k0 = vld1q_f32(k0);
            float32x4_t _k1 = vld1q_f32(k1);
            float32x4_t _k2 = vld1q_f32(k2);
#endif // __ARM_NEON
            for (int i = 0; i < h; i++)
            {
                float* outptr = out.data + out.w * i;
                float* outptr0 = outptr;           // output row i
                float* outptr1 = outptr + outw;    // output row i+1
                float* outptr2 = outptr + outw*2;  // output row i+2
                int j = 0;
#if __ARM_NEON
                // vectorized: 4 input pixels per iteration
                for (; j+3 < w; j+=4)
                {
                    float32x4_t _v = vld1q_f32(r0);
#if 0 // bad compiler generate slow instructions :(
                    // disabled alternative that avoids overlapping stores by
                    // shuffling partial sums with vext; kept for reference
                    // 0
                    float32x4_t _out00 = vld1q_f32(outptr0 + 0);
                    _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
                    float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1);
                    // ext
                    float32x4_t _zero_out01 = vdupq_n_f32(0.f);
                    _zero_out01 = vextq_f32(_zero_out01, _out01, 3);
                    _out00 = vaddq_f32(_out00, _zero_out01);
                    //
                    float32x2_t _out00low = vget_low_f32(_out00);
                    float32x2_t _out00high = vget_high_f32(_out00);
                    _out00high = vmla_lane_f32(_out00high, vget_low_f32(_v), vget_high_f32(_k0), 0);
                    _out00 = vcombine_f32(_out00low, _out00high);
                    vst1q_f32(outptr0 + 0, _out00);
                    //
                    float32x2_t _out02high = vld1_f32(outptr0 + 4);
                    float32x2_t _out01_zero = vext_f32(vget_high_f32(_out01), vget_low_f32(_zero_out01), 1);
                    _out02high = vadd_f32(_out02high, _out01_zero);
                    _out02high = vmla_lane_f32(_out02high, vget_high_f32(_v), vget_high_f32(_k0), 0);
                    vst1_f32(outptr0 + 4, _out02high);
                    // 1
                    float32x4_t _out10 = vld1q_f32(outptr1 + 0);
                    _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
                    float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1);
                    // ext
                    float32x4_t _zero_out11 = vdupq_n_f32(0.f);
                    _zero_out11 = vextq_f32(_zero_out11, _out11, 3);
                    _out10 = vaddq_f32(_out10, _zero_out11);
                    //
                    float32x2_t _out10low = vget_low_f32(_out10);
                    float32x2_t _out10high = vget_high_f32(_out10);
                    _out10high = vmla_lane_f32(_out10high, vget_low_f32(_v), vget_high_f32(_k1), 0);
                    _out10 = vcombine_f32(_out10low, _out10high);
                    vst1q_f32(outptr1 + 0, _out10);
                    //
                    float32x2_t _out12high = vld1_f32(outptr1 + 4);
                    float32x2_t _out11_zero = vext_f32(vget_high_f32(_out11), vget_low_f32(_zero_out11), 1);
                    _out12high = vadd_f32(_out12high, _out11_zero);
                    _out12high = vmla_lane_f32(_out12high, vget_high_f32(_v), vget_high_f32(_k1), 0);
                    vst1_f32(outptr1 + 4, _out12high);
                    // 2
                    float32x4_t _out20 = vld1q_f32(outptr2 + 0);
                    _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
                    float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1);
                    // ext
                    float32x4_t _zero_out21 = vdupq_n_f32(0.f);
                    _zero_out21 = vextq_f32(_zero_out21, _out21, 3);
                    _out20 = vaddq_f32(_out20, _zero_out21);
                    //
                    float32x2_t _out20low = vget_low_f32(_out20);
                    float32x2_t _out20high = vget_high_f32(_out20);
                    _out20high = vmla_lane_f32(_out20high, vget_low_f32(_v), vget_high_f32(_k2), 0);
                    _out20 = vcombine_f32(_out20low, _out20high);
                    vst1q_f32(outptr2 + 0, _out20);
                    //
                    float32x2_t _out22high = vld1_f32(outptr2 + 4);
                    float32x2_t _out21_zero = vext_f32(vget_high_f32(_out21), vget_low_f32(_zero_out21), 1);
                    _out22high = vadd_f32(_out22high, _out21_zero);
                    _out22high = vmla_lane_f32(_out22high, vget_high_f32(_v), vget_high_f32(_k2), 0);
                    vst1_f32(outptr2 + 4, _out22high);
#else
                    // active path: for each output row, three overlapping
                    // load/mla/store passes implement out[x+t] += v[x]*k[t]
                    // for tap t = 0..2 (stores at +0, +1, +2 overlap, so the
                    // order of these statements matters)
                    //
                    float32x4_t _out00 = vld1q_f32(outptr0 + 0);
                    _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0);
                    vst1q_f32(outptr0 + 0, _out00);
                    float32x4_t _out01 = vld1q_f32(outptr0 + 1);
                    _out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1);
                    vst1q_f32(outptr0 + 1, _out01);
                    float32x4_t _out02 = vld1q_f32(outptr0 + 2);
                    _out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0);
                    vst1q_f32(outptr0 + 2, _out02);
                    //
                    float32x4_t _out10 = vld1q_f32(outptr1 + 0);
                    _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0);
                    vst1q_f32(outptr1 + 0, _out10);
                    float32x4_t _out11 = vld1q_f32(outptr1 + 1);
                    _out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1);
                    vst1q_f32(outptr1 + 1, _out11);
                    float32x4_t _out12 = vld1q_f32(outptr1 + 2);
                    _out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0);
                    vst1q_f32(outptr1 + 2, _out12);
                    //
                    float32x4_t _out20 = vld1q_f32(outptr2 + 0);
                    _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0);
                    vst1q_f32(outptr2 + 0, _out20);
                    float32x4_t _out21 = vld1q_f32(outptr2 + 1);
                    _out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1);
                    vst1q_f32(outptr2 + 1, _out21);
                    float32x4_t _out22 = vld1q_f32(outptr2 + 2);
                    _out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0);
                    vst1q_f32(outptr2 + 2, _out22);
#endif
                    r0 += 4;
                    outptr0 += 4;
                    outptr1 += 4;
                    outptr2 += 4;
                }
#endif // __ARM_NEON
                // scalar tail: scatter one input pixel into 3 pixels per row
                for (; j < w; j++)
                {
                    float val = r0[0];
                    outptr0[0] += val * k0[0];
                    outptr0[1] += val * k0[1];
                    outptr0[2] += val * k0[2];
                    outptr1[0] += val * k1[0];
                    outptr1[1] += val * k1[1];
                    outptr1[2] += val * k1[2];
                    outptr2[0] += val * k2[0];
                    outptr2[1] += val * k2[1];
                    outptr2[2] += val * k2[2];
                    r0++;
                    outptr0++;
                    outptr1++;
                    outptr2++;
                }
            }
        }
    }
}
// Deconvolution (transposed convolution) 3x3, stride 2, NEON-accelerated.
// Input pixel (i,j) is scattered into the 3x3 output window starting at
// (2i,2j); adjacent input pixels write output windows 2 apart, so taps 0
// and 2 of one pixel overlap tap 0/2 of its neighbors but never tap 1.
// NOTE(review): assumes the caller allocated top_blob for stride-2 3x3
// deconvolution (presumably outw == 2*w + 1) -- verify at call site.
static void deconv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outch = top_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);
        // initialize the whole output channel with its bias (0 if none)
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        for (int q=0; q<inch; q++)
        {
            const float* img0 = bottom_blob.channel(q);
            // 3x3 kernel for (out channel p, in channel q): 9 consecutive floats
            const float* kernel0 = kernel + p*inch*9 + q*9;
            const float* r0 = img0;
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;
#if __ARM_NEON
            // only lanes 0..2 of each _k* are used; lane 3 reads one float
            // past the 3 kernel values (NOTE(review): relies on slack in the
            // weight blob for the very last kernel -- confirm allocation)
            float32x4_t _k0 = vld1q_f32(k0);
            float32x4_t _k1 = vld1q_f32(k1);
            float32x4_t _k2 = vld1q_f32(k2);
#endif // __ARM_NEON
            for (int i = 0; i < h; i++)
            {
                // input row i starts at output row 2*i
                float* outptr = out.data + outw * i*2;
                float* outptr0 = outptr;
                float* outptr1 = outptr0 + outw;
                float* outptr2 = outptr1 + outw;
                int j = 0;
#if __ARM_NEON
                // vectorized: 4 input pixels -> 8 output columns per row.
                // vld2q/vst2q de-interleave the output into even columns
                // (val[0], taps 0 and 2) and odd columns (val[1], tap 1).
                for (; j+3 < w; j+=4)
                {
                    float32x4_t _v = vld1q_f32(r0);
                    // out row 0
                    float32x4_t _out00 = vmulq_lane_f32(_v, vget_low_f32(_k0), 0); // 0,2,4,6
                    float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1); // 1,3,5,7
                    float32x4_t _out02 = vmulq_lane_f32(_v, vget_high_f32(_k0), 0); // 2,4,6,8
                    float32x4x2_t _out0 = vld2q_f32(outptr0);
                    _out0.val[0] = vaddq_f32(_out0.val[0], _out00); // 0,2,4,6
                    _out0.val[1] = vaddq_f32(_out0.val[1], _out01); // 1,3,5,7
                    vst2q_f32(outptr0, _out0);
                    // tap 2 lands on even columns shifted by one pair; must
                    // reload because the previous store overlaps this span
                    _out0 = vld2q_f32(outptr0 + 2);
                    _out0.val[0] = vaddq_f32(_out0.val[0], _out02); // 2,4,6,8
                    vst2q_f32(outptr0 + 2, _out0);
                    // out row 1
                    float32x4_t _out10 = vmulq_lane_f32(_v, vget_low_f32(_k1), 0); // 0,2,4,6
                    float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1); // 1,3,5,7
                    float32x4_t _out12 = vmulq_lane_f32(_v, vget_high_f32(_k1), 0); // 2,4,6,8
                    float32x4x2_t _out1 = vld2q_f32(outptr1);
                    _out1.val[0] = vaddq_f32(_out1.val[0], _out10); // 0,2,4,6
                    _out1.val[1] = vaddq_f32(_out1.val[1], _out11); // 1,3,5,7
                    vst2q_f32(outptr1, _out1);
                    _out1 = vld2q_f32(outptr1 + 2);
                    _out1.val[0] = vaddq_f32(_out1.val[0], _out12); // 2,4,6,8
                    vst2q_f32(outptr1 + 2, _out1);
                    // out row 2
                    float32x4_t _out20 = vmulq_lane_f32(_v, vget_low_f32(_k2), 0); // 0,2,4,6
                    float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1); // 1,3,5,7
                    float32x4_t _out22 = vmulq_lane_f32(_v, vget_high_f32(_k2), 0); // 2,4,6,8
                    float32x4x2_t _out2 = vld2q_f32(outptr2);
                    _out2.val[0] = vaddq_f32(_out2.val[0], _out20); // 0,2,4,6
                    _out2.val[1] = vaddq_f32(_out2.val[1], _out21); // 1,3,5,7
                    vst2q_f32(outptr2, _out2);
                    _out2 = vld2q_f32(outptr2 + 2);
                    _out2.val[0] = vaddq_f32(_out2.val[0], _out22); // 2,4,6,8
                    vst2q_f32(outptr2 + 2, _out2);
                    r0 += 4;
                    outptr0 += 8;
                    outptr1 += 8;
                    outptr2 += 8;
                }
#endif // __ARM_NEON
                // scalar tail: one input pixel -> 3 output pixels per row,
                // output pointers advance by the stride (2)
                for (; j < w; j++)
                {
                    float val = r0[0];
                    outptr0[0] += val * k0[0];
                    outptr0[1] += val * k0[1];
                    outptr0[2] += val * k0[2];
                    outptr1[0] += val * k1[0];
                    outptr1[1] += val * k1[1];
                    outptr1[2] += val * k1[2];
                    outptr2[0] += val * k2[0];
                    outptr2[1] += val * k2[1];
                    outptr2[2] += val * k2[2];
                    r0++;
                    outptr0 += 2;
                    outptr1 += 2;
                    outptr2 += 2;
                }
            }
        }
    }
}
|
sp-brisbane.c | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
//---------------------------------------------------------------------
// program SP
//---------------------------------------------------------------------
#include <brisbane/brisbane.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include "header-brisbane.h"
#include "print_results.h"
/* common /global/ */
#pragma omp declare target
int grid_points[3], nx2, ny2, nz2;
#pragma omp end declare target
int timeron;
/* common /constants/ */
#pragma omp declare target
double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3,
dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4,
dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt,
ce[5][13], dxmax, dymax, dzmax, xxcon1, xxcon2,
xxcon3, xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1,
dx4tx1, dx5tx1, yycon1, yycon2, yycon3, yycon4,
yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1,
zzcon1, zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1,
dz2tz1, dz3tz1, dz4tz1, dz5tz1, dnxm1, dnym1,
dnzm1, c1c2, c1c5, c3c4, c1345, conz1, c1, c2,
c3, c4, c5, c4dssp, c5dssp, dtdssp, dttx1, bt,
dttx2, dtty1, dtty2, dttz1, dttz2, c2dttx1,
c2dtty1, c2dttz1, comz1, comz4, comz5, comz6,
c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;
/* common /fields/ */
double u[5][KMAX][JMAXP+1][IMAXP+1];
double us [KMAX][JMAXP+1][IMAXP+1];
double vs [KMAX][JMAXP+1][IMAXP+1];
double ws [KMAX][JMAXP+1][IMAXP+1];
double qs [KMAX][JMAXP+1][IMAXP+1];
double rho_i [KMAX][JMAXP+1][IMAXP+1];
double speed [KMAX][JMAXP+1][IMAXP+1];
double square [KMAX][JMAXP+1][IMAXP+1];
double rhs[5][KMAX][JMAXP+1][IMAXP+1];
double forcing[5][KMAX][JMAXP+1][IMAXP+1];
#pragma omp end declare target
brisbane_mem mem_u;
brisbane_mem mem_us;
brisbane_mem mem_vs;
brisbane_mem mem_ws;
brisbane_mem mem_qs;
brisbane_mem mem_rho_i;
brisbane_mem mem_speed;
brisbane_mem mem_square;
brisbane_mem mem_rhs;
brisbane_mem mem_forcing;
/* common /work_1d/ */
double cv [PROBLEM_SIZE];
double rhon[PROBLEM_SIZE];
double rhos[PROBLEM_SIZE];
double rhoq[PROBLEM_SIZE];
double cuf [PROBLEM_SIZE];
double q [PROBLEM_SIZE];
double ue [PROBLEM_SIZE][5];
double buf[PROBLEM_SIZE][5];
/* common /work_lhs/ */
double lhs [IMAXP+1][IMAXP+1][5];
double lhsp[IMAXP+1][IMAXP+1][5];
double lhsm[IMAXP+1][IMAXP+1][5];
int main(int argc, char *argv[])
{
brisbane_init(&argc, &argv, true);
int i, niter, step, n3;
double mflops, t, tmax, trecs[t_last+1];
int verified;
char Class;
char *t_names[t_last+1];
brisbane_mem_create(5 * KMAX * (JMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_u);
brisbane_mem_create(1 * KMAX * (JMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_us);
brisbane_mem_create(1 * KMAX * (JMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_vs);
brisbane_mem_create(1 * KMAX * (JMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_ws);
brisbane_mem_create(1 * KMAX * (JMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_qs);
brisbane_mem_create(1 * KMAX * (JMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_rho_i);
brisbane_mem_create(1 * KMAX * (JMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_speed);
brisbane_mem_create(1 * KMAX * (JMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_square);
brisbane_mem_create(5 * KMAX * (JMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_rhs);
brisbane_mem_create(5 * KMAX * (JMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_forcing);
#if 1
{
const rlim_t stack_size = 1024 * 1024 * 1024;
struct rlimit rl;
int result = getrlimit(RLIMIT_STACK, &rl);
printf("[%s:%d] current stack[%d] target_stack[%d]", __FILE__, __LINE__, rl.rlim_cur, stack_size);
if (rl.rlim_cur < stack_size) {
rl.rlim_cur = stack_size;
result = setrlimit(RLIMIT_STACK, &rl);
if (result != 0) printf("[%s:%d] error[%d]\n", __FILE__, __LINE__, result);
}
}
#endif
//---------------------------------------------------------------------
// Read input file (if it exists), else take
// defaults from parameters
//---------------------------------------------------------------------
FILE *fp;
if ((fp = fopen("timer.flag", "r")) != NULL) {
timeron = 1;
t_names[t_total] = "total";
t_names[t_rhsx] = "rhsx";
t_names[t_rhsy] = "rhsy";
t_names[t_rhsz] = "rhsz";
t_names[t_rhs] = "rhs";
t_names[t_xsolve] = "xsolve";
t_names[t_ysolve] = "ysolve";
t_names[t_zsolve] = "zsolve";
t_names[t_rdis1] = "redist1";
t_names[t_rdis2] = "redist2";
t_names[t_tzetar] = "tzetar";
t_names[t_ninvr] = "ninvr";
t_names[t_pinvr] = "pinvr";
t_names[t_txinvr] = "txinvr";
t_names[t_add] = "add";
fclose(fp);
} else {
timeron = 0;
}
printf("\n\n NAS Parallel Benchmarks (NPB3.3-OMP) - SP Benchmark\n\n");
if ((fp = fopen("inputsp.data", "r")) != NULL) {
int result;
printf(" Reading from input file inputsp.data\n");
result = fscanf(fp, "%d", &niter);
while (fgetc(fp) != '\n');
result = fscanf(fp, "%lf", &dt);
while (fgetc(fp) != '\n');
result = fscanf(fp, "%d%d%d", &grid_points[0], &grid_points[1], &grid_points[2]);
fclose(fp);
} else {
printf(" No input file inputsp.data. Using compiled defaults\n");
niter = NITER_DEFAULT;
dt = DT_DEFAULT;
grid_points[0] = PROBLEM_SIZE;
grid_points[1] = PROBLEM_SIZE;
grid_points[2] = PROBLEM_SIZE;
}
printf(" Size: %4dx%4dx%4d\n",
grid_points[0], grid_points[1], grid_points[2]);
printf(" Iterations: %4d dt: %10.6f\n", niter, dt);
printf("\n");
if ((grid_points[0] > IMAX) ||
(grid_points[1] > JMAX) ||
(grid_points[2] > KMAX) ) {
printf(" %d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]);
printf(" Problem size too big for compiled array sizes\n");
return 0;
}
nx2 = grid_points[0] - 2;
ny2 = grid_points[1] - 2;
nz2 = grid_points[2] - 2;
#pragma omp target update to(nx2,ny2,nz2,dt,grid_points)
set_constants();
{
exact_rhs();
initialize();
//---------------------------------------------------------------------
// do one time step to touch all code, and reinitialize
//---------------------------------------------------------------------
adi();
initialize();
for (step = 1; step <= niter; step++) {
if ((step % 20) == 0 || step == 1) {
printf(" Time step %4d\n", step);
}
adi();
}
#pragma omp target update from(u)
brisbane_task task0;
brisbane_task_create(&task0);
brisbane_task_d2h_full(task0, mem_u, u);
brisbane_task_submit(task0, brisbane_default, NULL, true);
verify(niter, &Class, &verified);
mflops = 0.0;
} /*end omp data*/
print_results("SP", Class, grid_points[0],
grid_points[1], grid_points[2], niter,
tmax, mflops, " floating point",
verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5,
CS6);
brisbane_finalize();
return 0;
}
|
loop.c | #include <stdio.h>
#include "assert.h"
#include <unistd.h>
// Compute the outer product c[i*N+j] = a[i] * b[j] on the offload device.
//
// Fix: the original stacked a bare "#pragma omp target map(...)" directly
// above "#pragma omp target teams loop", which nests one target region
// inside another (non-conforming) and detaches the map clauses from the
// construct that actually runs the loop, so c was never guaranteed to be
// mapped back.  A single combined directive carries both the clauses and
// the loop.
void vmul(int*a, int*b, int*c, int N){
#pragma omp target teams loop collapse(2) map(to: a[0:N],b[0:N]) map(from: c[0:N*N])
  for(int i=0;i<N;i++)
    for(int j=0;j<N;j++){
      c[i*N+j]=a[i]*b[j];
    }
}
// Validate vmul against a host-computed reference; prints Success/Fail and
// returns 0 on success, 1 on any mismatch.
//
// Fix: with N = 1000, c and validate are each ~4 MB; as automatic variables
// they overflow a default 8 MB stack.  They are now static (N becomes an
// enum constant so static arrays are legal).
int main(){
  enum { N = 1000 };
  static int a[N], b[N], c[N*N], validate[N*N];
  int flag=-1; // -1 marks success; otherwise index of the last mismatch
  for(int i=0;i<N;i++){
    a[i]=i+1;
    for(int j=0;j<N;j++){
      b[j]=j+2;
      validate[i*N+j]=a[i]*b[j];
    }
  }
  vmul(a,b,c,N);
  for(int i=0;i<N*N;i++) {
    if(c[i]!=validate[i]) {
      // print 1st bad index
      if( flag == -1 )
        printf("First fail: c[%d](%d) != validate[%d](%d)\n",i,c[i],i,validate[i]);
      flag = i;
    }
  }
  if( flag == -1 ){
    printf("Success\n");
    return 0;
  } else {
    printf("Last fail: c[%d](%d) != validate[%d](%d)\n",flag,c[flag],flag,validate[flag]);
    printf("Fail\n");
    return 1;
  }
}
|
GB_binop__pow_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_int32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_int32)
// C=scalar+B GB (_bind1st__pow_int32)
// C=scalar+B' GB (_bind1st_tran__pow_int32)
// C=A+scalar GB (_bind2nd__pow_int32)
// C=A'+scalar GB (_bind2nd_tran__pow_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = GB_pow_int32 (aij, bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow_int32 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_INT32 || GxB_NO_POW_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the pow operator (cij = GB_pow_int32 (aij, bij)),
// where all three matrices are dense; the kernel is the included template.
void GB (_Cdense_ewise3_noaccum__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the pow
// operator (cij = GB_pow_int32 (cij, bij)); kernel is the included template.
GrB_Info GB (_Cdense_accumB__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out (GB_DISABLE)
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the pow operator
// (cij = GB_pow_int32 (cij, b)); kernel is the included template.
//
// Fix: the original had a second "return (GrB_SUCCESS)" after the inner
// block, unreachable because the block already returns; removed.
GrB_Info GB (_Cdense_accumb__pow_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd wrapper for the pow operator on int32_t:
// C=A+B, C<M>=A+B, or C<!M>=A+B.  The template consumes the workspace
// declarations and, for eWiseUnion, the alpha/beta scalars unpacked here.
GrB_Info GB (_AaddB__pow_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    // alpha/beta are only read for eWiseUnion, so only unpacked then
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult wrapper (pow, int32) for the case where C is sparse/hyper:
// C=A.*B, C<M>=A.*B, or C<!M>=A.*B.  All logic lives in the template.
GrB_Info GB (_AemultB_08__pow_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult wrapper (pow, int32) for A sparse/hyper and B bitmap/full.
// pow is not commutative (GB_BINOP_FLIP is 1 for this operator), so the
// flipxy argument selects which template instantiation to use.
GrB_Info GB (_AemultB_02__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult wrapper (pow, int32) for M sparse/hyper with A and B
// bitmap/full (see section banner above); kernel is the included template.
GrB_Info GB (_AemultB_04__pow_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult wrapper (pow, int32) for the case where C is bitmap:
// C=A.*B, C<M>=A.*B, or C<!M>=A.*B.  All logic lives in the template.
GrB_Info GB (_AemultB_bitmap__pow_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = GB_pow_int32 (x, Bx [p]) for every entry present in B.
// Entries absent from the bitmap Bb are skipped; GBX handles iso access.
GrB_Info GB (_bind1st__pow_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Bx = (int32_t *) Bx_input ;
    const int32_t x = (*((int32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (GBB (Bb, p))
        {
            // the bound scalar x is the base, the matrix entry the exponent
            int32_t b = GBX (Bx, p, false) ;
            Cx [p] = GB_pow_int32 (x, b) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = pow (aij, y) to every entry of A, with the scalar y bound to
// the operator's second argument: Cx [p] = pow (Ax [p], y) wherever the
// bitmap Ab marks an entry present.  Cx and Ax may alias.
GrB_Info GB (_bind2nd__pow_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *out = (int32_t *) Cx_output ;
int32_t *avals = (int32_t *) Ax_input ;
const int32_t yval = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (GBB (Ab, p))
{
int32_t aij = GBX (avals, p, false) ;
out [p] = GB_pow_int32 (aij, yval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below: it writes
// pow (x, aij) into Cx [pC] while A is being transposed.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_int32 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply POW with the scalar bound to the
// first argument.  The actual loops live in GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__pow_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any generated code that follows
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below: it writes
// pow (aij, y) into Cx [pC] while A is being transposed.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_int32 (aij, y) ; \
}
// C = op (A', y): transpose A and apply POW with the scalar bound to the
// second argument.  A already has the operator's input type here, so no
// GB_ATYPE override is needed (unlike the bind1st case above).
GrB_Info GB (_bind2nd_tran__pow_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
#ifndef __RANDOM_FOREST_H__
#define __RANDOM_FOREST_H__
#include "../utils/helper.h"
#include "../utils/random.h"
using namespace RandomNumbers;
#include "../patterns/classification_rule.h"
namespace RandomForestRelated
{
// Per-feature loss-reduction credit accumulated by DecisionTree::train.
// NOTE(review): these are definitions (not declarations) in a header;
// including this file from more than one translation unit would violate
// the one-definition rule — confirm single-TU usage.
vector<double> featureImportance;
int RANDOM_FEATURES = 4; // feature groups sampled per split
int RANDOM_POSITIONS = 8; // candidate thresholds sampled per feature
// task-type tags consumed by calculateLoss
const int CLASSIFICATION = 0xa001;
const int REGRESSION = 0xa002;
const int SURVIVAL = 0xa003; // not implemented (calculateLoss exits)
int TASK_TYPE = CLASSIFICATION; // default
// Entropy (in bits) of a Bernoulli distribution with p1 positives out of
// `total` samples.  Degenerate distributions (all one class) have entropy 0.
double binaryEntropy(int p1, int total)
{
    if (p1 != 0 && p1 != total) {
        double p = p1 / (double)total;
        double q = 1 - p;
        return -p * log2(p) - q * log2(q);
    }
    return 0;
}
// Loss of the sample set `IDs` (indices into `labels`) under the current
// TASK_TYPE: Shannon entropy for classification, mean squared error for
// regression.  SURVIVAL is not implemented and aborts the process.
// FIX: IDs is now passed by const reference (callers unchanged) instead of
// by value, and an empty set returns 0 instead of dividing 0 by 0 in the
// regression branch.
double calculateLoss(const vector<int> &IDs, const vector<double> &labels)
{
    if (IDs.empty()) {
        return 0; // an empty node carries no loss (also avoids 0/0 below)
    }
    if (TASK_TYPE == CLASSIFICATION) {
        // Shannon entropy over the label histogram
        unordered_map<int, int> hist;
        FOR (id, IDs) {
            ++ hist[(int)labels[*id]];
        }
        double entropy = 0;
        FOR (iter, hist) {
            double p = iter->second / (double) IDs.size();
            entropy += -p * log(p);
        }
        return entropy;
    } else if (TASK_TYPE == REGRESSION) {
        // mean squared error around the node mean
        double avg = 0;
        FOR (id, IDs) {
            avg += labels[*id];
        }
        avg /= IDs.size();
        double squareError = 0;
        FOR (id, IDs) {
            squareError += sqr(avg - labels[*id]);
        }
        return squareError / IDs.size();
    } else if (TASK_TYPE == SURVIVAL) {
        cerr << "TODO survival" << endl;
        exit(-1);
    } else {
        myAssert(false, "Unknown Task Type!");
    }
    return 0;
}
// One node of a DecisionTree.  Kept trivially copyable on purpose:
// DecisionTree::dump/load write these structs to disk with raw fwrite/fread.
struct TreeNode {
    bool leaf;            // true once the node stopped splitting
    int level, feature;   // depth in the tree / feature index used to split
    double value, result; // split threshold / prediction stored at a leaf
    int left, right;      // child indices into DecisionTree::nodes (-1 = none)
    TreeNode()
        : leaf(false), level(-1), feature(-1),
          value(0), result(0), left(-1), right(-1) {
    }
};
class DecisionTree
{
public:
vector<TreeNode> nodes;
void dump(FILE* out) {
size_t size = nodes.size();
fwrite(&size, sizeof(size), 1, out);
if (size > 0) {
fwrite(&nodes[0], sizeof(nodes[0]), size, out);
}
}
void load(FILE* in) {
size_t size;
fread(&size, sizeof(size), 1, in);
nodes.resize(size);
if (size > 0) {
fread(&nodes[0], sizeof(nodes[0]), size, in);
}
}
DecisionTree() {}
void train(const vector< vector<double> > &features, const vector<double> &results, int minNodeSize, int maxLevel = 18, vector<string> featureNames = vector<string>()) {
int threadID = omp_get_thread_num();
if (features.size() == 0) {
return;
}
vector< vector<int> > featureGroups;
if (featureNames.size() != 0) {
unordered_map<string, int> name2id;
for (int i = 0; i < featureNames.size(); ++ i) {
string name = featureNames[i];
if (name.find("=") != -1) {
name = name.substr(0, name.find("="));
}
if (!name2id.count(name)) {
name2id[name] = featureGroups.size();
featureGroups.push_back(vector<int>());
}
featureGroups[name2id[name]].push_back(i);
}
} else {
for (int i = 0; i < features[0].size(); ++ i) {
featureGroups.push_back(vector<int>(1, i));
}
}
TreeNode root;
root.level = 0;
nodes.push_back(root);
// bootstrapping
vector<int> rootBag;
int samplesN = max((int)results.size(), 100);
for (int i = 0; i < samplesN; ++ i) {
rootBag.push_back(rng[threadID].next(results.size()));
}
vector<vector<int>> nodeBags;
nodeBags.push_back(rootBag);
for (int curNode = 0; curNode < (int)nodes.size(); ++ curNode) {
TreeNode &node = nodes[curNode];
vector<int> &bag = nodeBags[curNode];
myAssert(bag.size() > 0, "[ERROR] empty node in decision tree!");
myAssert(bag.size() >= minNodeSize, "[ERROR] bag is too small!");
bool equal = true;
double first = results[bag[0]];
for (int i = 1; i < (int)bag.size(); ++ i) {
if (sign(results[bag[i]] - first)) {
equal = false;
break;
}
}
if (equal || (int)bag.size() < minNodeSize * 2 || node.level >= maxLevel) {
// leaf
node.leaf = true;
for (int i = 0; i < (int)bag.size(); ++ i) {
node.result += results[bag[i]];
}
node.result /= bag.size();
continue;
}
double bagLoss = calculateLoss(bag, results);
int bestFeature = -1;
int bestLeft = 0, bestRight = 0;
double bestValue = 0;
double bestLoss = 1e100;
vector<int> leftBag, rightBag;
for (int _ = 0; _ < RANDOM_FEATURES; ++ _) {
int groupID = rng[threadID].next(featureGroups.size());
int featureID = featureGroups[groupID][rng[threadID].next(featureGroups[groupID].size())];
bool continuous = false;
if (featureGroups[groupID].size() == 1) {
// continuous variable
continuous = true;
} else {
// categorical variable
continuous = false;
}
for (int __ = 0; __ < RANDOM_POSITIONS; ++ __) {
double splitValue = 0.5; // for categorical variable
if (continuous) {
// continuous
int instanceID = bag[rng[threadID].next(bag.size())];
splitValue = features[instanceID][featureID];
} else {
// categorical
if (__) {
// get a new value
featureID = featureGroups[groupID][rng[threadID].next(featureGroups[groupID].size())];
}
}
vector<int> currentLeftBag, currentRightBag;
for (int i = 0; i < (int)bag.size(); ++ i) {
int id = bag[i];
if (features[id][featureID] < splitValue) {
currentLeftBag.push_back(id);
} else {
currentRightBag.push_back(id);
}
}
if (currentLeftBag.size() < minNodeSize || currentRightBag.size() < minNodeSize) {
continue;
}
double currentLoss = (calculateLoss(currentLeftBag, results) * currentLeftBag.size() + calculateLoss(currentRightBag, results) * currentRightBag.size()) / bag.size();
if (currentLoss < bestLoss) {
bestLoss = currentLoss;
bestValue = splitValue;
bestFeature = featureID;
leftBag = currentLeftBag;
rightBag = currentRightBag;
}
}
}
if (leftBag.size() < minNodeSize || rightBag.size() < minNodeSize) {
// leaf
node.leaf = true;
for (int i = 0; i < (int)bag.size(); ++ i) {
node.result += results[bag[i]];
}
node.result /= bag.size();
continue;
}
myAssert(leftBag.size() >= minNodeSize && rightBag.size() >= minNodeSize, "[ERROR] bag is too small");
featureImportance[bestFeature] += bagLoss - bestLoss;
double nextValue = -1e100;
for (int i = 0; i < (int)leftBag.size(); ++ i) {
int id = leftBag[i];
nextValue = max(nextValue, features[id][bestFeature]);
}
TreeNode left, right;
left.level = right.level = node.level + 1;
node.feature = bestFeature;
node.value = (bestValue + nextValue) / 2;
node.left = nodes.size();
node.right = nodes.size() + 1;
nodes.push_back(left);
nodes.push_back(right);
nodeBags.push_back(leftBag);
nodeBags.push_back(rightBag);
}
}
double estimate(vector<double> &features) {
TreeNode *current = &nodes[0];
while (!current->leaf) {
if (features[current->feature] < current->value) {
current = &nodes[current->left];
} else {
current = &nodes[current->right];
}
}
return current->result;
}
void traverse(int id, Rule ¤t, Rules &all, vector< vector<double> > &train, vector<double> &trainY, int MIN_SUP) {
if (current.satisfiedTrainings.size() < MIN_SUP) {
return;
}
if (id != 0) {
// not root
current.loss = calculateLoss(current.satisfiedTrainings, trainY);
all.push_back(current);
}
if (nodes[id].leaf) {
return;
}
vector<int> bag = current.satisfiedTrainings;
//split left & right
vector<int> leftBag, rightBag;
int index = nodes[id].feature;
double sep = nodes[id].value;
FOR (tid, bag) {
if (train[*tid][index] < sep) {
leftBag.push_back(*tid);
} else {
rightBag.push_back(*tid);
}
}
current.push_back(Condition(index, sep, true));
current.satisfiedTrainings = leftBag;
traverse(nodes[id].left, current, all, train, trainY, MIN_SUP);
current.pop_back();
current.push_back(Condition(index, sep, false));
current.satisfiedTrainings = rightBag;
traverse(nodes[id].right, current, all, train, trainY, MIN_SUP);
current.pop_back();
current.satisfiedTrainings = bag;
}
Rules getRules(vector< vector<double> > &train, vector<double> &trainY, int MIN_SUP) {
Rule current;
Rules all;
for (int i = 0; i < train.size(); ++ i) {
current.satisfiedTrainings.push_back(i);
}
traverse(0, current, all, train, trainY, MIN_SUP);
return all;
}
};
class RandomForest
{
vector<DecisionTree> trees;
vector< vector<double> > features;
vector<double> results;
public:
void dump(string filename) {
FILE* out = fopen(filename.c_str(), "wb");
size_t size = trees.size();
fwrite(&size, sizeof(size), 1, out);
for (size_t i = 0; i < trees.size(); ++ i) {
trees[i].dump(out);
}
fclose(out);
}
void load(string filename) {
FILE* in = fopen(filename.c_str(), "rb");
size_t size;
fread(&size, sizeof(size), 1, in);
trees.resize(size);
for (size_t i = 0; i < trees.size(); ++ i) {
trees[i].load(in);
}
fclose(in);
}
void clear() {
features.clear();
results.clear();
trees.clear();
}
void train(vector< vector<double> > &_features, vector<double> _results, int treesNo = 100, int minNodeSize = 100, int maxLevel = 100, vector<string> featureNames = vector<string>()) {
if (features.size() == 0) {
features = _features;
results = _results;
if (features.size() > 0) {
featureImportance.resize(features[0].size(), 0);
}
}
myAssert(features.size() == results.size(), "[ERROR] wrong training data!");
trees.resize(treesNo);
#pragma omp parallel for
for (int i = 0; i < treesNo; ++ i) {
trees[i].train(_features, _results, minNodeSize, maxLevel, featureNames);
}
}
Rules getRules(vector< vector<double> > &train, vector<double> &trainY, int MIN_SUP) {
Rules ret;
for (int i = 0; i < (int)trees.size(); ++ i) {
ret.extend(trees[i].getRules(train, trainY, MIN_SUP));
}
return ret;
}
double estimate(vector<double> &features) {
if (trees.size() == 0) {
return 0.0;
}
double sum = 0;
for (int i = 0; i < (int)trees.size(); ++ i) {
sum += trees[i].estimate(features);
}
return sum / trees.size();
}
};
};
#endif
|
#ifndef CONV_H
#define CONV_H
namespace TSnap {
/// Sequentially converts the table into a graph with links from nodes in \c SrcCol to those in \c DstCol.
/// Node ids are the raw int values (atInt), the string-map ids (atStr), or
/// ids assigned on first sight of each float value (atFlt).
/// NOTE(review): AggrPolicy is accepted for interface compatibility but is
/// unused here — a plain graph carries no attributes to aggregate.
template<class PGraph>
PGraph ToGraph(PTable Table, const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy)
{
    PGraph Graph = PGraph::TObj::New();
    const TAttrType NodeType = Table->GetColType(SrcCol);
    Assert(NodeType == Table->GetColType(DstCol));
    const TInt SrcColIdx = Table->GetColIdx(SrcCol);
    const TInt DstColIdx = Table->GetColIdx(DstCol);
    // make single pass over all rows in the table
    if (NodeType == atInt) {
        for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
            if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
            // add src and dst nodes to graph if they are not seen earlier
            TInt SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx];
            TInt DVal = (Table->IntCols)[DstColIdx][CurrRowIdx];
            // Using AddNodeUnchecked ensures that no error is thrown when the same node is seen twice
            Graph->AddNodeUnchecked(SVal);
            Graph->AddNodeUnchecked(DVal);
            Graph->AddEdgeUnchecked(SVal, DVal);
        }
    } else if (NodeType == atFlt) {
        // node values - i.e. the unique values of src/dst col
        THash<TFlt, TInt> FltNodeVals;
        for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
            if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
            // add src and dst nodes to graph if they are not seen earlier
            TInt SVal, DVal;
            TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx];
            SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal);
            // BUG FIX: the destination value must come from DstColIdx (it
            // previously read SrcColIdx, turning every float edge into a
            // self-loop on the source value)
            TFlt FDVal = (Table->FltCols)[DstColIdx][CurrRowIdx];
            DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal);
            Graph->AddEdge(SVal, DVal);
        }
    } else {
        // atStr: node ids are the table's string-map ids
        for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
            if ((Table->Next)[CurrRowIdx] == Table->Invalid) { continue; }
            // add src and dst nodes to graph if they are not seen earlier
            TInt SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
            TInt DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
            // Using AddNodeUnchecked ensures that no error is thrown when the same node is seen twice
            Graph->AddNodeUnchecked(SVal);
            Graph->AddNodeUnchecked(DVal);
            Graph->AddEdgeUnchecked(SVal, DVal);
        }
    }
    Graph->SortNodeAdjV();
    return Graph;
}
/// Converts the Table into a graph with edges from \c SrcCol to \c DstCol, and attribute vector
/// defined by the arguments.  Edge attributes are taken from the row that
/// created the edge; node attributes are collected per node and reduced
/// with AggrPolicy at the end.
template<class PGraph>
PGraph ToNetwork(PTable Table,
    const TStr& SrcCol, const TStr& DstCol,
    TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV,
    TAttrAggr AggrPolicy)
{
    PGraph Graph = PGraph::TObj::New();
    const TAttrType NodeType = Table->GetColType(SrcCol);
    Assert(NodeType == Table->GetColType(DstCol));
    const TInt SrcColIdx = Table->GetColIdx(SrcCol);
    const TInt DstColIdx = Table->GetColIdx(DstCol);
    // node values - i.e. the unique values of src/dst col
    THash<TFlt, TInt> FltNodeVals;
    // node attributes, collected per node id and aggregated after the pass
    THash<TInt, TStrIntVH> NodeIntAttrs;
    THash<TInt, TStrFltVH> NodeFltAttrs;
    THash<TInt, TStrStrVH> NodeStrAttrs;
    // make single pass over all rows in the table
    for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
        if ((Table->Next)[CurrRowIdx] == Table->Invalid) {
            continue;
        }
        // add src and dst nodes to graph if they are not seen earlier
        TInt SVal, DVal;
        if (NodeType == atFlt) {
            TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx];
            SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal);
            // BUG FIX: the destination value must come from DstColIdx (it
            // previously read SrcColIdx, turning every float edge into a
            // self-loop on the source value)
            TFlt FDVal = (Table->FltCols)[DstColIdx][CurrRowIdx];
            DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal);
        } else if (NodeType == atInt || NodeType == atStr) {
            if (NodeType == atInt) {
                SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx];
                DVal = (Table->IntCols)[DstColIdx][CurrRowIdx];
            } else {
                SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
                if (strlen(Table->GetContextKey(SVal)) == 0) { continue; } //illegal value
                DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
                if (strlen(Table->GetContextKey(DVal)) == 0) { continue; } //illegal value
            }
            if (!Graph->IsNode(SVal)) {Graph->AddNode(SVal); }
            if (!Graph->IsNode(DVal)) {Graph->AddNode(DVal); }
        }
        // add edge (keyed by the row index) and its attributes
        Graph->AddEdge(SVal, DVal, CurrRowIdx);
        for (TInt i = 0; i < EdgeAttrV.Len(); i++) {
            TStr ColName = EdgeAttrV[i];
            TAttrType T = Table->GetColType(ColName);
            TInt Index = Table->GetColIdx(ColName);
            switch (T) {
            case atInt:
                Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
                break;
            case atFlt:
                Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName);
                break;
            case atStr:
                Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName);
                break;
            default:
                break; // unknown column type: no edge attribute added
            }
        }
        // get src and dst node attributes into hashmaps
        if ((Table->SrcNodeAttrV).Len() > 0) {
            Table->AddNodeAttributes(SVal, Table->SrcNodeAttrV, CurrRowIdx, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs);
        }
        if ((Table->DstNodeAttrV).Len() > 0) {
            Table->AddNodeAttributes(DVal, Table->DstNodeAttrV, CurrRowIdx, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs);
        }
    }
    // aggregate node attributes and add to graph
    if ((Table->SrcNodeAttrV).Len() > 0 || (Table->DstNodeAttrV).Len() > 0) {
        for (TNEANet::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) {
            TInt NId = NodeI.GetId();
            if (NodeIntAttrs.IsKey(NId)) {
                TStrIntVH IntAttrVals = NodeIntAttrs.GetDat(NId);
                for (TStrIntVH::TIter it = IntAttrVals.BegI(); it < IntAttrVals.EndI(); it++) {
                    TInt AttrVal = Table->AggregateVector<TInt>(it.GetDat(), AggrPolicy);
                    Graph->AddIntAttrDatN(NId, AttrVal, it.GetKey());
                }
            }
            if (NodeFltAttrs.IsKey(NId)) {
                TStrFltVH FltAttrVals = NodeFltAttrs.GetDat(NId);
                for (TStrFltVH::TIter it = FltAttrVals.BegI(); it < FltAttrVals.EndI(); it++) {
                    TFlt AttrVal = Table->AggregateVector<TFlt>(it.GetDat(), AggrPolicy);
                    Graph->AddFltAttrDatN(NId, AttrVal, it.GetKey());
                }
            }
            if (NodeStrAttrs.IsKey(NId)) {
                TStrStrVH StrAttrVals = NodeStrAttrs.GetDat(NId);
                for (TStrStrVH::TIter it = StrAttrVals.BegI(); it < StrAttrVals.EndI(); it++) {
                    TStr AttrVal = Table->AggregateVector<TStr>(it.GetDat(), AggrPolicy);
                    Graph->AddStrAttrDatN(NId, AttrVal, it.GetKey());
                }
            }
        }
    }
    return Graph;
}
/// Calls ToNetwork with an empty attribute vector. Convenience wrapper.
template<class PGraph>
PGraph ToNetwork(PTable Table,
    const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy)
{
    TStrV NoAttrs;
    return ToNetwork<PGraph>(Table, SrcCol, DstCol, NoAttrs, AggrPolicy);
}
#ifdef GCC_ATOMIC
/// Performs table to graph conversion in parallel using the sort-first algorithm. This is the recommended method to use.
// Sort-first parallel conversion.  Phases:
//   copy:    the endpoint columns are duplicated into (SrcCol1,DstCol1) and
//            (SrcCol2,DstCol2);
//   sort:    the first pair is sorted by source id, the second by
//            destination id (two concurrent OpenMP tasks);
//   offsets: per-thread partition boundaries are advanced so no run of
//            equal ids is split across partitions;
//   count:   distinct ids per partition are counted;
//   collect: (id, first sorted position) pairs are gathered per array;
//   merge:   the two sorted id lists are merged into (id, srcPos, dstPos)
//            triples, one per node;
//   build:   each node's in/out adjacency is reserved, filled with
//            CopyUniqueFrom, and handed to AddNodeWithEdges.
template<class PGraphMP>
PGraphMP ToGraphMP(PTable Table, const TStr& SrcCol, const TStr& DstCol) {
// double start = omp_get_wtime();
const TInt SrcColIdx = Table->GetColIdx(SrcCol);
const TInt DstColIdx = Table->GetColIdx(DstCol);
const TAttrType NodeType = Table->GetColType(SrcCol);
Assert(NodeType == Table->GetColType(DstCol));
const TInt NumRows = Table->NumValidRows;
TIntV SrcCol1, DstCol1, SrcCol2, DstCol2;
#pragma omp parallel sections num_threads(4)
{
#pragma omp section
{ SrcCol1.Reserve(NumRows, NumRows); }
#pragma omp section
{ SrcCol2.Reserve(NumRows, NumRows); }
#pragma omp section
{ DstCol1.Reserve(NumRows, NumRows); }
#pragma omp section
{ DstCol2.Reserve(NumRows, NumRows); }
}
// double endResize = omp_get_wtime();
// printf("Resize time = %f\n", endResize-start);
TIntPrV Partitions;
Table->GetPartitionRanges(Partitions, omp_get_max_threads());
TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
// double endPartition = omp_get_wtime();
// printf("Partition time = %f\n", endPartition-endResize);
omp_set_num_threads(omp_get_max_threads());
// copy phase: each thread fills its table partition of the four arrays
// NOTE(review): only atInt and atStr are handled here; atFlt columns leave
// the arrays unfilled, unlike the sequential ToGraph — confirm callers.
if (NodeType == atInt) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < Partitions.Len(); i++) {
TRowIterator RowI(Partitions[i].GetVal1(), Table());
TRowIterator EndI(Partitions[i].GetVal2(), Table());
while (RowI < EndI) {
TInt RowId = RowI.GetRowIdx();
SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
SrcCol2[RowId] = RowI.GetIntAttr(SrcColIdx);
DstCol1[RowId] = RowI.GetIntAttr(DstColIdx);
DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
RowI++;
}
}
}
else if (NodeType == atStr) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < Partitions.Len(); i++) {
TRowIterator RowI(Partitions[i].GetVal1(), Table());
TRowIterator EndI(Partitions[i].GetVal2(), Table());
while (RowI < EndI) {
TInt RowId = RowI.GetRowIdx();
SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
SrcCol2[RowId] = RowI.GetStrMapById(SrcColIdx);
DstCol1[RowId] = RowI.GetStrMapById(DstColIdx);
DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
RowI++;
}
}
}
// sort phase: two independent key/value sorts run as untied OpenMP tasks
omp_set_num_threads(omp_get_max_threads());
#pragma omp parallel
{
#pragma omp single nowait
{
#pragma omp task untied shared(SrcCol1, DstCol1)
{ TTable::QSortKeyVal(SrcCol1, DstCol1, 0, NumRows-1); }
}
#pragma omp single nowait
{
#pragma omp task untied shared(SrcCol2, DstCol2)
{ TTable::QSortKeyVal(DstCol2, SrcCol2, 0, NumRows-1); }
}
#pragma omp taskwait
}
// TTable::PSRSKeyVal(SrcCol1, DstCol1, 0, NumRows-1);
// TTable::PSRSKeyVal(DstCol2, SrcCol2, 0, NumRows-1);
// TInt IsS = TTable::CheckSortedKeyVal(SrcCol1, DstCol1, 0, NumRows-1);
// TInt IsD = TTable::CheckSortedKeyVal(DstCol2, SrcCol2, 0, NumRows-1);
// printf("IsSorted = %d %d\n", IsS.Val, IsD.Val);
// double endSort = omp_get_wtime();
// printf("Sort time = %f\n", endSort-endCopy);
//return TNGraphMP::New(10, 100);
// offsets phase: advance each partition boundary past runs of equal ids
TInt NumThreads = omp_get_max_threads();
TInt PartSize = (NumRows/NumThreads);
TIntV SrcOffsets, DstOffsets;
SrcOffsets.Add(0);
for (TInt i = 1; i < NumThreads; i++) {
TInt CurrOffset = i * PartSize;
while (CurrOffset < (i+1) * PartSize &&
SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
CurrOffset++;
}
if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
}
SrcOffsets.Add(NumRows);
DstOffsets.Add(0);
for (TInt i = 1; i < NumThreads; i++) {
TInt CurrOffset = i * PartSize;
while (CurrOffset < (i+1) * PartSize &&
DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
CurrOffset++;
}
if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
}
DstOffsets.Add(NumRows);
TInt SrcPartCnt = SrcOffsets.Len()-1;
TInt DstPartCnt = DstOffsets.Len()-1;
// for (TInt i = 0; i < SrcOffsets.Len(); i++) {
//   printf("%d ", SrcOffsets[i].Val);
// }
// printf("\n");
// for (TInt i = 0; i < DstOffsets.Len(); i++) {
//   printf("%d ", DstOffsets[i].Val);
// }
// printf("\n");
// count phase: distinct ids in each partition of the two sorted arrays
TIntV SrcNodeCounts, DstNodeCounts;
SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);
#pragma omp parallel for schedule(dynamic)
for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
if (t < SrcPartCnt) {
TInt i = t;
if (SrcOffsets[i] != SrcOffsets[i+1]) {
SrcNodeCounts[i] = 1;
TInt CurrNode = SrcCol1[SrcOffsets[i]];
for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
if (j < SrcOffsets[i+1]) {
SrcNodeCounts[i]++;
CurrNode = SrcCol1[j];
}
}
}
} else {
TInt i = t - SrcPartCnt;
if (DstOffsets[i] != DstOffsets[i+1]) {
DstNodeCounts[i] = 1;
TInt CurrNode = DstCol2[DstOffsets[i]];
for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
if (j < DstOffsets[i+1]) {
DstNodeCounts[i]++;
CurrNode = DstCol2[j];
}
}
}
}
}
// for (TInt i = 0; i < SrcNodeCounts.Len(); i++) {
//   printf("%d ", SrcNodeCounts[i].Val);
// }
// printf("\n");
// for (TInt i = 0; i < DstNodeCounts.Len(); i++) {
//   printf("%d ", DstNodeCounts[i].Val);
// }
// printf("\n");
// prefix sums give each partition its slot range in the id lists
TInt TotalSrcNodes = 0;
TIntV SrcIdOffsets;
for (int i = 0; i < SrcPartCnt; i++) {
SrcIdOffsets.Add(TotalSrcNodes);
TotalSrcNodes += SrcNodeCounts[i];
}
TInt TotalDstNodes = 0;
TIntV DstIdOffsets;
for (int i = 0; i < DstPartCnt; i++) {
DstIdOffsets.Add(TotalDstNodes);
TotalDstNodes += DstNodeCounts[i];
}
// printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val);
// collect phase: (id, first sorted position) pairs, per sorted array
TIntPrV SrcNodeIds, DstNodeIds;
#pragma omp parallel sections
{
#pragma omp section
{ SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
#pragma omp section
{ DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
}
#pragma omp parallel for schedule(dynamic)
for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
if (t < SrcPartCnt) {
TInt i = t;
if (SrcOffsets[i] != SrcOffsets[i+1]) {
TInt CurrNode = SrcCol1[SrcOffsets[i]];
TInt ThreadOffset = SrcIdOffsets[i];
SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
TInt CurrCount = 1;
for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
if (j < SrcOffsets[i+1]) {
CurrNode = SrcCol1[j];
SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
CurrCount++;
}
}
}
} else {
TInt i = t - SrcPartCnt;
if (DstOffsets[i] != DstOffsets[i+1]) {
TInt CurrNode = DstCol2[DstOffsets[i]];
TInt ThreadOffset = DstIdOffsets[i];
DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
TInt CurrCount = 1;
for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
if (j < DstOffsets[i+1]) {
CurrNode = DstCol2[j];
DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
CurrCount++;
}
}
}
}
}
// double endNode = omp_get_wtime();
// printf("Node time = %f\n", endNode-endSort);
// merge phase: join the two sorted id lists into (id, srcPos, dstPos);
// -1 marks a node that appears only as a source or only as a destination
TIntTrV Nodes;
Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
// double endNodeResize = omp_get_wtime();
// printf("(NodeResize time = %f)\n", endNodeResize-endNode);
TInt i = 0, j = 0;
while (i < TotalSrcNodes && j < TotalDstNodes) {
if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
i++;
j++;
} else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
i++;
} else {
Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
j++;
}
}
for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
// double endMerge = omp_get_wtime();
// printf("Merge time = %f\n", endMerge-endNode);
TInt NumNodes = Nodes.Len();
// printf("NumNodes = %d\n", NumNodes.Val);
// build phase, step 1: reserve each node's in/out adjacency
// NOTE(review): hard-codes TNGraphMP::New in a template parameterized on
// PGraphMP — confirm PGraphMP is always PNGraphMP here.
PGraphMP Graph = TNGraphMP::New(NumNodes, NumRows);
// NOTE(review): the reservation pass is forced single-threaded here
// (NumThreads = 1) — presumably deliberate; confirm before changing.
NumThreads = 1;
int Delta = (NumNodes+NumThreads-1)/NumThreads;
TVec<TIntV> InVV(NumNodes);
TVec<TIntV> OutVV(NumNodes);
omp_set_num_threads(NumThreads);
#pragma omp parallel for schedule(static,Delta)
for (int m = 0; m < NumNodes; m++) {
//double startTr = omp_get_wtime();
//TIntV OutV, InV;
TInt n, i, j;
Nodes[m].GetVal(n, i, j);
if (i >= 0) {
TInt Offset = SrcNodeIds[i].GetVal2();
TInt Sz = DstCol1.Len()-Offset;
if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
//printf("OutV: %d %d %d\n", n.Val, Offset.Val, Sz.Val);
OutVV[m].Reserve(Sz);
}
if (j >= 0) {
TInt Offset = DstNodeIds[j].GetVal2();
TInt Sz = SrcCol2.Len()-Offset;
if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
//printf("OutV: %d %d %d\n", n.Val, Offset.Val, Sz.Val);
InVV[m].Reserve(Sz);
}
//double endTr = omp_get_wtime();
//printf("Thread=%d, i=%d, t=%f\n", omp_get_thread_num(), m, endTr-startTr);
}
// double endAlloc = omp_get_wtime();
// printf("Alloc time = %f\n", endAlloc-endMerge);
// build phase, step 2: fill adjacency (deduplicated) and add the nodes
NumThreads = omp_get_max_threads();
Delta = (NumNodes+NumThreads-1)/(10*NumThreads);
omp_set_num_threads(NumThreads);
#pragma omp parallel for schedule(dynamic)
for (int m = 0; m < NumNodes; m++) {
//double startTr = omp_get_wtime();
//TIntV OutV, InV;
TInt n, i, j;
Nodes[m].GetVal(n, i, j);
if (i >= 0) {
TInt Offset = SrcNodeIds[i].GetVal2();
TInt Sz = DstCol1.Len()-Offset;
if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
//printf("OutV: %d %d %d\n", n.Val, Offset.Val, Sz.Val);
OutVV[m].CopyUniqueFrom(DstCol1, Offset, Sz);
}
if (j >= 0) {
TInt Offset = DstNodeIds[j].GetVal2();
TInt Sz = SrcCol2.Len()-Offset;
if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
//printf("OutV: %d %d %d\n", n.Val, Offset.Val, Sz.Val);
InVV[m].CopyUniqueFrom(SrcCol2, Offset, Sz);
}
Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
//double endTr = omp_get_wtime();
//printf("Thread=%d, i=%d, t=%f\n", omp_get_thread_num(), m, endTr-startTr);
}
Graph->SetNodes(NumNodes);
// double endAdd = omp_get_wtime();
// printf("Add time = %f\n", endAdd-endAlloc);
return Graph;
}
/// Performs table to graph conversion in parallel. Uses the hash-first method, which is less optimal, use ToGraphMP instead.
#ifdef GCC_ATOMIC
// Hash-first parallel conversion: estimate the node count from hash-bucket
// occupancy, then build the node hash table and per-node degree counts in
// one parallel pass (doubling the estimate and retrying if it turns out
// too small), then assign and sort the edges in two further passes.
template<class PGraphMP>
PGraphMP ToGraphMP3(PTable Table, const TStr& SrcCol, const TStr& DstCol) {
PNGraphMP Graph;
int MaxThreads = omp_get_max_threads();
int Length, Threads, Delta, Nodes, Last;
uint64_t NumNodesEst;
TInt SrcColIdx, DstColIdx;
TIntV InVec, OutVec;
SrcColIdx = Table->GetColIdx(SrcCol);
DstColIdx = Table->GetColIdx(DstCol);
const TAttrType NodeType = Table->GetColType(SrcCol);
Assert(NodeType == Table->GetColType(DstCol));
/* Estimate number of nodes in the graph */
int NumRows = Table->Next.Len();
double Load = 10;
int sz = NumRows / Load;
int *buckets = (int *)malloc(sz * sizeof(int));
#pragma omp parallel for
for (int i = 0; i < sz; i++)
buckets[i] = 0;
// mark which buckets are hit by at least one destination id
if (NodeType == atInt) {
#pragma omp parallel for
for (int i = 0; i < NumRows; i++) {
int vert = Table->IntCols[DstColIdx][i];
buckets[vert % sz] = 1;
}
}
else if (NodeType == atStr ) {
#pragma omp parallel for
for (int i = 0; i < NumRows; i++) {
int vert = (Table->StrColMaps)[DstColIdx][i];
buckets[vert % sz] = 1;
}
}
int cnt = 0;
#pragma omp parallel for reduction(+:cnt)
for (int i = 0; i < sz; i++) {
if (buckets[i] == 0)
cnt += 1;
}
// occupancy-based (balls-into-bins) estimate of the distinct-id count.
// NOTE(review): cnt == 0 (every bucket occupied) makes the log argument
// infinite — presumably avoided by the Load factor; confirm.
NumNodesEst = sz * log ((double)sz / cnt);
free (buckets);
/* Until we correctly estimate the number of nodes */
while (1)
{
Graph = TNGraphMP::New(NumNodesEst, 100);
Length = Graph->Reserved();
Threads = MaxThreads/2;
Delta = (Length + Threads - 1) / Threads;
OutVec.Gen(Length);
InVec.Gen(Length);
/* build the node hash table, count the size of edge lists */
Last = NumRows;
Nodes = 0;
omp_set_num_threads(Threads);
#pragma omp parallel for schedule(static, Delta)
for (int CurrRowIdx = 0; CurrRowIdx < Last; CurrRowIdx++) {
// NOTE(review): Nodes is read here without synchronization while other
// threads increment it inside the critical sections below; the +1000
// slack appears to absorb the resulting staleness — confirm.
if ((uint64_t) Nodes + 1000 >= NumNodesEst) {
/* need bigger hash table */
continue;
}
TInt SVal, DVal;
if (NodeType == atInt) {
SVal = Table->IntCols[SrcColIdx][CurrRowIdx];
DVal = Table->IntCols[DstColIdx][CurrRowIdx];
}
else if (NodeType == atStr ) {
SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
}
int SrcIdx = abs((SVal.GetPrimHashCd()) % Length);
if (!Graph->AddOutEdge1(SrcIdx, SVal, DVal)) {
#pragma omp critical
{
Nodes++;
}
}
// per-bucket degree counters bumped with GCC atomic builtins
__sync_fetch_and_add(&OutVec[SrcIdx].Val, 1);
int DstIdx = abs((DVal.GetPrimHashCd()) % Length);
if (!Graph->AddInEdge1(DstIdx, SVal, DVal)) {
#pragma omp critical
{
Nodes++;
}
}
__sync_fetch_and_add(&InVec[DstIdx].Val, 1);
}
if ((uint64_t) Nodes + 1000 >= NumNodesEst) {
/* We need to double our num nodes estimate */
Graph.Clr();
InVec.Clr();
OutVec.Clr();
NumNodesEst *= 2;
}
else {
break;
}
}
Graph->SetNodes(Nodes);
// NOTE(review): Edges is accumulated but never used afterwards.
uint Edges = 0;
for (int i = 0; i < Length; i++) {
Edges += OutVec[i] + InVec[i];
}
// reserve per-node degree storage before the edge-assignment pass
for (int Idx = 0; Idx < Length; Idx++) {
if (OutVec[Idx] > 0 || InVec[Idx] > 0) {
Graph->ReserveNodeDegs(Idx, InVec[Idx], OutVec[Idx]);
}
}
/* assign edges */
Length = Graph->Reserved();
Threads = MaxThreads;
Delta = (Length + Threads - 1) / Threads;
omp_set_num_threads(Threads);
#pragma omp parallel for schedule(static,Delta)
for (int CurrRowIdx = 0; CurrRowIdx < Last; CurrRowIdx++) {
TInt SVal, DVal;
if (NodeType == atInt) {
SVal = Table->IntCols[SrcColIdx][CurrRowIdx];
DVal = Table->IntCols[DstColIdx][CurrRowIdx];
}
else if (NodeType == atStr) {
SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
}
Graph->AddOutEdge2(SVal, DVal);
Graph->AddInEdge2(SVal, DVal);
}
/* sort edges */
Length = Graph->Reserved();
Threads = MaxThreads*2;
Delta = (Length + Threads - 1) / Threads;
omp_set_num_threads(Threads);
#pragma omp parallel for schedule(dynamic)
for (int Idx = 0; Idx < Length; Idx++) {
if (OutVec[Idx] > 0 || InVec[Idx] > 0) {
Graph->SortEdges(Idx, InVec[Idx], OutVec[Idx]);
}
}
return Graph;
}
/// Does Table to Network conversion in parallel using the sort-first algorithm. This is the recommended method to use.
template<class PGraphMP>
inline PGraphMP ToNetworkMP(PTable Table,
  const TStr& SrcCol, const TStr& DstCol,
  TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV,
  TAttrAggr AggrPolicy) {
  // Sort-first parallel conversion: copy the source/destination columns,
  // sort each copy by node id, group rows by node to build in/out
  // neighborhoods, then add nodes and edges to the graph in parallel.
  // NOTE(review): SrcAttrV and DstAttrV are not read in this function; node
  // attributes are taken from Table->SrcNodeAttrV / Table->DstNodeAttrV --
  // confirm callers rely on that behavior.
  TStopwatch* Sw = TStopwatch::GetInstance();
  Sw->Start(TStopwatch::AllocateColumnCopies);
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TInt NumRows = Table->GetNumValidRows();
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
  THash<TInt, TStrIntVH> NodeIntAttrs;
  THash<TInt, TStrFltVH> NodeFltAttrs;
  THash<TInt, TStrStrVH> NodeStrAttrs;
  // Allocate the four column copies concurrently.
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol2.Reserve(NumRows, NumRows); }
  }
  Sw->Stop(TStopwatch::AllocateColumnCopies);
  Sw->Start(TStopwatch::CopyColumns);
  TIntPrV Partitions;
  Table->GetPartitionRanges(Partitions, omp_get_max_threads());
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  // Copy (node id, row id) pairs for both endpoints of every valid row.
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  Sw->Stop(TStopwatch::CopyColumns);
  Sw->Start(TStopwatch::Sort);
  // Sort both (key, row) column pairs by node id; the two sorts run as
  // independent OpenMP tasks (tasking is unavailable on GLib_WIN32).
  omp_set_num_threads(omp_get_max_threads());
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
      #ifndef GLib_WIN32
      #pragma omp task untied shared(SrcCol1, EdgeCol1)
      #endif
      { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); }
    }
    #pragma omp single nowait
    {
      #ifndef GLib_WIN32
      #pragma omp task untied shared(EdgeCol2, DstCol2)
      #endif
      { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); }
    }
    #ifndef GLib_WIN32
    #pragma omp taskwait
    #endif
  }
  Sw->Stop(TStopwatch::Sort);
  Sw->Start(TStopwatch::Group);
  TInt NumThreads = omp_get_max_threads();
  TInt PartSize = (NumRows/NumThreads);
  // Find the offset of all partitions, each of which contains a list of rows.
  // Nodes from same sources or destinations are ensured to be kept within same partition.
  TIntV SrcOffsets, DstOffsets;
  SrcOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
        SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
      // ensure that rows from the same sources are grouped together
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
  }
  SrcOffsets.Add(NumRows);
  DstOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
        DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
      // ensure that rows to the same destinations are grouped together
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
  }
  DstOffsets.Add(NumRows);
  TInt SrcPartCnt = SrcOffsets.Len()-1; // number of partitions
  TInt DstPartCnt = DstOffsets.Len()-1; // number of partitions
  // count the number of source nodes and destination nodes in each partition
  TIntV SrcNodeCounts, DstNodeCounts;
  SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
  DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        SrcNodeCounts[i] = 1;
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            SrcNodeCounts[i]++;
            CurrNode = SrcCol1[j];
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        DstNodeCounts[i] = 1;
        TInt CurrNode = DstCol2[DstOffsets[i]];
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            DstNodeCounts[i]++;
            CurrNode = DstCol2[j];
          }
        }
      }
    }
  }
  // Prefix-sum the per-partition node counts into global id offsets.
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < SrcPartCnt; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < DstPartCnt; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // Find the starting offset of each node (in both src and dst)
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            CurrNode = SrcCol1[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        TInt CurrNode = DstCol2[DstOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            CurrNode = DstCol2[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  Sw->Stop(TStopwatch::Group);
  Sw->Start(TStopwatch::MergeNeighborhoods);
  // Find the combined neighborhood (both out-neighbors and in-neighbors) of each node
  // by merging the two sorted (node, offset) lists; -1 marks a missing side.
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++;
      j++;
    } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    } else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  Sw->Stop(TStopwatch::MergeNeighborhoods);
  Sw->Start(TStopwatch::AddNeighborhoods);
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // Build each node's unique in/out edge lists from the sorted row ranges.
  #pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = EdgeCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].Reserve(Sz);
      OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].Reserve(Sz);
      InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  Sw->Stop(TStopwatch::AddNeighborhoods);
  Sw->Start(TStopwatch::AddEdges);
  // Add every row as an edge (row id == edge id) and collect attributes.
  // NOTE(review): NodeIntAttrs/NodeFltAttrs/NodeStrAttrs are shared hashes
  // mutated inside the parallel loop without synchronization -- verify
  // AddNodeAttributes is safe under concurrent callers.
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
        for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) {
          TStr ColName = EdgeAttrV[ea_i];
          TAttrType T = Table->GetColType(ColName);
          TInt Index = Table->GetColIdx(ColName);
          switch (T) {
            case atInt:
              Graph->AddIntAttrDatE(RowId, Table->IntCols[Index][RowId], ColName);
              break;
            case atFlt:
              Graph->AddFltAttrDatE(RowId, Table->FltCols[Index][RowId], ColName);
              break;
            case atStr:
              Graph->AddStrAttrDatE(RowId, Table->GetStrVal(Index, RowId), ColName);
              break;
          }
        }
        if ((Table->SrcNodeAttrV).Len() > 0) {
          Table->AddNodeAttributes(SrcId, Table->SrcNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs);
        }
        if ((Table->DstNodeAttrV).Len() > 0) {
          // FIX: destination-node attributes must be keyed by DstId (was SrcId).
          Table->AddNodeAttributes(DstId, Table->DstNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs);
        }
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
        for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) {
          TStr ColName = EdgeAttrV[ea_i];
          TAttrType T = Table->GetColType(ColName);
          TInt Index = Table->GetColIdx(ColName);
          switch (T) {
            case atInt:
              Graph->AddIntAttrDatE(RowId, Table->IntCols[Index][RowId], ColName);
              break;
            case atFlt:
              Graph->AddFltAttrDatE(RowId, Table->FltCols[Index][RowId], ColName);
              break;
            case atStr:
              Graph->AddStrAttrDatE(RowId, Table->GetStrVal(Index, RowId), ColName);
              break;
          }
        }
        if ((Table->SrcNodeAttrV).Len() > 0) {
          Table->AddNodeAttributes(SrcId, Table->SrcNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs);
        }
        if ((Table->DstNodeAttrV).Len() > 0) {
          // FIX: destination-node attributes must be keyed by DstId (was SrcId).
          Table->AddNodeAttributes(DstId, Table->DstNodeAttrV, RowId, NodeIntAttrs, NodeFltAttrs, NodeStrAttrs);
        }
      }
    }
  }
  // aggregate node attributes and add to graph
  if ((Table->SrcNodeAttrV).Len() > 0 || (Table->DstNodeAttrV).Len() > 0) {
    for (typename PGraphMP::TObj::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) {
      TInt NId = NodeI.GetId();
      if (NodeIntAttrs.IsKey(NId)) {
        TStrIntVH IntAttrVals = NodeIntAttrs.GetDat(NId);
        for (TStrIntVH::TIter it = IntAttrVals.BegI(); it < IntAttrVals.EndI(); it++) {
          TInt AttrVal = Table->AggregateVector<TInt>(it.GetDat(), AggrPolicy);
          Graph->AddIntAttrDatN(NId, AttrVal, it.GetKey());
        }
      }
      if (NodeFltAttrs.IsKey(NId)) {
        TStrFltVH FltAttrVals = NodeFltAttrs.GetDat(NId);
        for (TStrFltVH::TIter it = FltAttrVals.BegI(); it < FltAttrVals.EndI(); it++) {
          TFlt AttrVal = Table->AggregateVector<TFlt>(it.GetDat(), AggrPolicy);
          Graph->AddFltAttrDatN(NId, AttrVal, it.GetKey());
        }
      }
      if (NodeStrAttrs.IsKey(NId)) {
        TStrStrVH StrAttrVals = NodeStrAttrs.GetDat(NId);
        for (TStrStrVH::TIter it = StrAttrVals.BegI(); it < StrAttrVals.EndI(); it++) {
          TStr AttrVal = Table->AggregateVector<TStr>(it.GetDat(), AggrPolicy);
          Graph->AddStrAttrDatN(NId, AttrVal, it.GetKey());
        }
      }
    }
  }
  Graph->SetEdges(NumRows);
  Sw->Stop(TStopwatch::AddEdges);
  return Graph;
}
/// Calls ToNetworkMP with empty attribute vector. Convenience wrapper.
template<class PGraphMP>
PGraphMP ToNetworkMP(PTable Table,
  const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) {
  // Delegate to the attribute-aware overload with an empty attribute list.
  TStrV EmptyAttrV;
  return ToNetworkMP<PGraphMP>(Table, SrcCol, DstCol, EmptyAttrV, AggrPolicy);
}
/// Implements table to network conversion in parallel. Not the recommended algorithm; use ToNetworkMP instead.
// Sort-merge parallel conversion: each of a fixed number of worker threads
// locally sorts a slice of the src/dst columns; the slices are then
// redistributed to "collector" buckets by node-id range, re-sorted, grouped
// into neighborhoods, and finally added to the graph.
// NOTE(review): SrcAttrV, DstAttrV, EdgeAttrV and AggrPolicy are accepted but
// never read here -- this variant ignores attributes; confirm that is intended.
template<class PGraphMP>
inline PGraphMP ToNetworkMP2(PTable Table,
  const TStr& SrcCol, const TStr& DstCol,
  TStrV& SrcAttrV, TStrV& DstAttrV, TStrV& EdgeAttrV,
  TAttrAggr AggrPolicy) {
  TStopwatch* Sw = TStopwatch::GetInstance();
  Sw->Start(TStopwatch::AllocateColumnCopies);
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TInt NumRows = Table->NumValidRows;
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
  // Allocate the four working column copies concurrently.
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol2.Reserve(NumRows, NumRows); }
  }
  Sw->Stop(TStopwatch::AllocateColumnCopies);
  Sw->Start(TStopwatch::CopyColumns);
  TIntPrV Partitions;
  // int NThreads = omp_get_max_threads();
  // Hard-coded worker count; sizes the Parts/ExtremePoints/offset arrays below.
  const int NThreads = 40;
  Table->GetPartitionRanges(Partitions, NThreads);
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  // double endPartition = omp_get_wtime();
  // printf("Partition time = %f\n", endPartition-endResize);
  // Copy (node id, row id) pairs for both endpoints of every valid row.
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  // printf("NumRows = %d\n", NumRows.Val);
  // printf("NThreads = %d\n", NThreads);
  // for (int i = 0; i < Partitions.Len(); i++) {
  //   printf("Partition %d %d->%d\n", i, Partitions[i].GetVal1().Val, Partitions[i].GetVal2().Val);
  // }
  // Parts[i]..Parts[i+1]-1 is the row slice sorted locally by worker i.
  int Parts[NThreads+1];
  for (int i = 0; i < NThreads; i++) {
    Parts[i] = NumRows.Val / NThreads * i;
  }
  Parts[NThreads] = NumRows;
  // for (int i = 0; i < NThreads+1; i++) {
  //   printf("Parts[%d] = %d\n", i, Parts[i]);
  // }
  Sw->Stop(TStopwatch::CopyColumns);
  Sw->Start(TStopwatch::Sort);
  // ExtremePoints rows: [0]=src min, [1]=dst min, [2]=src max, [3]=dst max
  // per worker slice (valid after the local sorts below).
  TInt ExtremePoints[4][NThreads];
  omp_set_num_threads(omp_get_max_threads());
  #pragma omp parallel
  {
    #pragma omp for schedule(static) nowait
    for (int i = 0; i < NThreads; i++) {
      TInt StartPos = Parts[i];
      TInt EndPos = Parts[i+1]-1;
      // TODO: Handle empty partition
      TTable::QSortKeyVal(SrcCol1, EdgeCol1, StartPos, EndPos);
      ExtremePoints[0][i] = SrcCol1[StartPos];
      ExtremePoints[2][i] = SrcCol1[EndPos];
    }
    #pragma omp for schedule(static) nowait
    for (int i = 0; i < NThreads; i++) {
      TInt StartPos = Parts[i];
      TInt EndPos = Parts[i+1]-1;
      // TODO: Handle empty partition
      TTable::QSortKeyVal(DstCol2, EdgeCol2, StartPos, EndPos);
      ExtremePoints[1][i] = DstCol2[StartPos];
      ExtremePoints[3][i] = DstCol2[EndPos];
    }
  }
  // for (int i = 0; i < NThreads; i++) {
  //   printf("ExtremePoints[%d] = %d-%d -> %d-%d\n", i, ExtremePoints[0][i].Val, ExtremePoints[1][i].Val, ExtremePoints[2][i].Val, ExtremePoints[3][i].Val);
  // }
  // find min points
  TInt MinId(INT_MAX);
  for (int j = 0; j < 2; j++) {
    for (int i = 0; i < NThreads; i++) {
      if (MinId > ExtremePoints[j][i]) { MinId = ExtremePoints[j][i]; }
    }
  }
  TInt MaxId(-1);
  for (int j = 2; j < 4; j++) {
    for (int i = 0; i < NThreads; i++) {
      if (MaxId < ExtremePoints[j][i]) { MaxId = ExtremePoints[j][i]; }
    }
  }
  // printf("MinId = %d\n", MinId.Val);
  // printf("MaxId = %d\n", MaxId.Val);
  Sw->Stop(TStopwatch::Sort);
  Sw->Start(TStopwatch::Group);
  // const int NumCollectors = omp_get_max_threads();
  // Node-id space [MinId, MaxId] is split into NumCollectors equal ranges;
  // collector k gathers all rows whose id falls in range k.
  const int NumCollectors = 20;
  int Range = MaxId.Val - MinId.Val;
  TIntV IdRanges(NumCollectors+1);
  for (int j = 0; j < NumCollectors; j++) {
    IdRanges[j] = MinId + Range/NumCollectors*j;
  }
  IdRanges[NumCollectors] = MaxId+1;
  // for (int i = 0; i < NumCollectors+1; i++) {
  //   printf("IdRanges[%d] = %d\n", i, IdRanges[i].Val);
  // }
  // SrcOffsets[i][k] = first index in worker i's sorted slice whose src id
  // belongs to collector k's range.
  int SrcOffsets[NThreads][NumCollectors+1];
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < NThreads; i++) {
    int CollectorId = 0;
    for (int j = Parts[i]; j < Parts[i+1]; j++) {
      while (SrcCol1[j] >= IdRanges[CollectorId]) {
        SrcOffsets[i][CollectorId++] = j;
      }
    }
    while (CollectorId <= NumCollectors) {
      SrcOffsets[i][CollectorId++] = Parts[i+1];
    }
  }
  int DstOffsets[NThreads][NumCollectors+1];
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < NThreads; i++) {
    int CollectorId = 0;
    for (int j = Parts[i]; j < Parts[i+1]; j++) {
      while (DstCol2[j] >= IdRanges[CollectorId]) {
        DstOffsets[i][CollectorId++] = j;
      }
    }
    while (CollectorId <= NumCollectors) {
      DstOffsets[i][CollectorId++] = Parts[i+1];
    }
  }
  // for (int i = 0; i < NThreads; i++) {
  //   for (int j = 0; j < NumCollectors+1; j++) {
  //     printf("SrcOffsets[%d][%d] = %d\n", i, j, SrcOffsets[i][j]);
  //   }
  // }
  // for (int i = 0; i < NThreads; i++) {
  //   for (int j = 0; j < NumCollectors+1; j++) {
  //     printf("DstOffsets[%d][%d] = %d\n", i, j, DstOffsets[i][j]);
  //   }
  // }
  // Prefix-sum the per-worker range sizes into collector output offsets.
  TIntV SrcCollectorOffsets(NumCollectors+1);
  SrcCollectorOffsets[0] = 0;
  for (int k = 0; k < NumCollectors; k++) {
    int SumOffset = 0;
    for (int i = 0; i < NThreads; i++) {
      SumOffset += SrcOffsets[i][k+1] - SrcOffsets[i][k];
    }
    SrcCollectorOffsets[k+1] = SrcCollectorOffsets[k] + SumOffset;
  }
  TIntV DstCollectorOffsets(NumCollectors+1);
  DstCollectorOffsets[0] = 0;
  for (int k = 0; k < NumCollectors; k++) {
    int SumOffset = 0;
    for (int i = 0; i < NThreads; i++) {
      SumOffset += DstOffsets[i][k+1] - DstOffsets[i][k];
    }
    DstCollectorOffsets[k+1] = DstCollectorOffsets[k] + SumOffset;
  }
  // for (int i = 0; i < NumCollectors+1; i++) {
  //   printf("SrcCollectorOffsets[%d] = %d\n", i, SrcCollectorOffsets[i].Val);
  // }
  // for (int i = 0; i < NumCollectors+1; i++) {
  //   printf("DstCollectorOffsets[%d] = %d\n", i, DstCollectorOffsets[i].Val);
  // }
  TIntV SrcCol3, EdgeCol3, EdgeCol4, DstCol4;
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol3.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol3.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol4.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol4.Reserve(NumRows, NumRows); }
  }
  TIntV SrcNodeCounts(NumCollectors), DstNodeCounts(NumCollectors);
  // Each collector gathers its id range from every worker slice, sorts it
  // globally, and counts the distinct node ids it contains.
  #pragma omp parallel for schedule(static)
  for (int k = 0; k < NumCollectors; k++) {
    int ind = SrcCollectorOffsets[k];
    for (int i = 0; i < NThreads; i++) {
      for (int j = SrcOffsets[i][k]; j < SrcOffsets[i][k+1]; j++) {
        SrcCol3[ind] = SrcCol1[j];
        EdgeCol3[ind] = EdgeCol1[j];
        ind++;
      }
    }
    TTable::QSortKeyVal(SrcCol3, EdgeCol3, SrcCollectorOffsets[k], SrcCollectorOffsets[k+1]-1);
    int SrcCount = 0;
    if (SrcCollectorOffsets[k+1] > SrcCollectorOffsets[k]) {
      SrcCount = 1;
      for (int j = SrcCollectorOffsets[k]+1; j < SrcCollectorOffsets[k+1]; j++) {
        if (SrcCol3[j] != SrcCol3[j-1]) { SrcCount++; }
      }
    }
    SrcNodeCounts[k] = SrcCount;
    ind = DstCollectorOffsets[k];
    for (int i = 0; i < NThreads; i++) {
      for (int j = DstOffsets[i][k]; j < DstOffsets[i][k+1]; j++) {
        DstCol4[ind] = DstCol2[j];
        EdgeCol4[ind] = EdgeCol2[j];
        ind++;
      }
    }
    TTable::QSortKeyVal(DstCol4, EdgeCol4, DstCollectorOffsets[k], DstCollectorOffsets[k+1]-1);
    int DstCount = 0;
    if (DstCollectorOffsets[k+1] > DstCollectorOffsets[k]) {
      DstCount = 1;
      for (int j = DstCollectorOffsets[k]+1; j < DstCollectorOffsets[k+1]; j++) {
        if (DstCol4[j] != DstCol4[j-1]) { DstCount++; }
      }
    }
    DstNodeCounts[k] = DstCount;
  }
  // Prefix-sum distinct-node counts into global node-id offsets.
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < NumCollectors; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  // printf("Sorted = %d - %d\n", SrcCol3.IsSorted(), DstCol4.IsSorted());
  // for (int i = 0; i < NumRows-1; i++) {
  //   if (SrcCol3[i] > SrcCol3[i+1]) { printf("i=%d: %d %d\n", i, SrcCol3[i].Val, SrcCol3[i+1].Val); }
  // }
  // for (int i = 0; i < NumRows-1; i++) {
  //   if (DstCol4[i] > DstCol4[i+1]) { printf("i=%d: %d %d\n", i, DstCol4[i].Val, DstCol4[i+1].Val); }
  // }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < NumCollectors; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // Find the starting offset of each node (in both src and dst)
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < 2*NumCollectors; t++) {
    if (t < NumCollectors) {
      TInt i = t;
      if (SrcCollectorOffsets[i] < SrcCollectorOffsets[i+1]) {
        TInt CurrNode = SrcCol3[SrcCollectorOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcCollectorOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcCollectorOffsets[i]+1; j < SrcCollectorOffsets[i+1]; j++) {
          while (j < SrcCollectorOffsets[i+1] && SrcCol3[j] == CurrNode) { j++; }
          if (j < SrcCollectorOffsets[i+1]) {
            CurrNode = SrcCol3[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - NumCollectors;
      if (DstCollectorOffsets[i] < DstCollectorOffsets[i+1]) {
        TInt CurrNode = DstCol4[DstCollectorOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstCollectorOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstCollectorOffsets[i]+1; j < DstCollectorOffsets[i+1]; j++) {
          while (j < DstCollectorOffsets[i+1] && DstCol4[j] == CurrNode) { j++; }
          if (j < DstCollectorOffsets[i+1]) {
            CurrNode = DstCol4[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  Sw->Stop(TStopwatch::Group);
  Sw->Start(TStopwatch::MergeNeighborhoods);
  // Find the combined neighborhood (both out-neighbors and in-neighbors) of each node
  // by merging the two sorted (node, offset) lists; -1 marks a missing side.
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++;
      j++;
    } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    } else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  Sw->Stop(TStopwatch::MergeNeighborhoods);
  Sw->Start(TStopwatch::AddNeighborhoods);
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  // NumThreads = omp_get_max_threads();
  // int Delta = (NumNodes+NumThreads-1)/NumThreads;
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // omp_set_num_threads(NumThreads);
  // Build each node's unique in/out edge lists from the sorted row ranges.
  #pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    //double startTr = omp_get_wtime();
    //TIntV OutV, InV;
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      TInt Sz = EdgeCol3.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].Reserve(Sz);
      OutVV[m].CopyUniqueFrom(EdgeCol3, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol4.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].Reserve(Sz);
      InVV[m].CopyUniqueFrom(EdgeCol4, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  Sw->Stop(TStopwatch::AddNeighborhoods);
  Sw->Start(TStopwatch::AddEdges);
  // Add every row as an edge (row id == edge id).
  omp_set_num_threads(omp_get_max_threads());
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  Graph->SetEdges(NumRows);
  Sw->Stop(TStopwatch::AddEdges);
  // double endAdd = omp_get_wtime();
  // printf("Add time = %f\n", endAdd-endAlloc);
  return Graph;
}
/// Calls ToNetworkMP2 with an empty attribute vector. Convenience wrapper.
template<class PGraphMP>
PGraphMP ToNetworkMP2(PTable Table,
  const TStr& SrcCol, const TStr& DstCol, TAttrAggr AggrPolicy) {
  // Forward to the full overload with empty src/dst/edge attribute lists.
  TStrV NoAttrV;
  return ToNetworkMP2<PGraphMP>(Table, SrcCol, DstCol, NoAttrV, NoAttrV, NoAttrV, AggrPolicy);
}
#endif // GCC_ATOMIC
/// Loads a mode, with name Name, into the PMMNet from the TTable. NCol specifies the node id column and NodeAttrV the node attributes.
int LoadModeNetToNet(PMMNet Graph, const TStr& Name, PTable Table, const TStr& NCol,
TStrV& NodeAttrV);
/// Loads the nodes specified in column NCol from the TTable with the attributes specified in NodeAttrV.
int LoadMode(TModeNet& Graph, PTable Table, const TStr& NCol,
TStrV& NodeAttrV);
/// Loads a crossnet from Mode1 to Mode2, with name CrossName, from the provided TTable. EdgeAttrV specifies edge attributes.
int LoadCrossNetToNet(PMMNet Graph, const TStr& Mode1, const TStr& Mode2, const TStr& CrossName,
PTable Table, const TStr& SrcCol, const TStr& DstCol, TStrV& EdgeAttrV);
/// Loads the edges from the TTable and EdgeAttrV specifies columns containing edge attributes.
int LoadCrossNet(TCrossNet& Graph, PTable Table, const TStr& SrcCol, const TStr& DstCol,
TStrV& EdgeAttrV);
/// Converts table to a network sequentially. Use if network has only edge attributes.
template<class PGraph>
PGraph ToNetwork(PTable Table,
  const TStr& SrcCol, const TStr& DstCol,
  TStrV& EdgeAttrV,
  TAttrAggr AggrPolicy) {
  // Sequential conversion: one pass over the valid rows, adding each row as
  // an edge (row index == edge id) together with the edge attributes listed
  // in EdgeAttrV. Node ids come from SrcCol/DstCol (int, string, or float).
  // NOTE(review): AggrPolicy is not read here -- no node attributes are
  // aggregated in this variant.
  PGraph Graph = PGraph::TObj::New();
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  // node values - i.e. the unique values of src/dst col
  // (float node ids are remapped to ints through this hash)
  THash<TFlt, TInt> FltNodeVals;
  // make single pass over all rows in the table
  for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
    if ((Table->Next)[CurrRowIdx] == Table->Invalid) {
      continue;  // skip deleted/invalid rows
    }
    // add src and dst nodes to graph if they are not seen earlier
    TInt SVal, DVal;
    if (NodeType == atFlt) {
      TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx];
      SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal);
      // FIX: destination value must come from DstColIdx (was SrcColIdx,
      // which made every float edge a self-loop on the source node).
      TFlt FDVal = (Table->FltCols)[DstColIdx][CurrRowIdx];
      DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal);
    }
    else if (NodeType == atInt || NodeType == atStr) {
      if (NodeType == atInt) {
        SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx];
        DVal = (Table->IntCols)[DstColIdx][CurrRowIdx];
      }
      else {
        SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
        DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
      }
      if (!Graph->IsNode(SVal)) { Graph->AddNode(SVal); }
      if (!Graph->IsNode(DVal)) { Graph->AddNode(DVal); }
    }
    // add edge and edge attributes
    Graph->AddEdge(SVal, DVal, CurrRowIdx);
    // Aggregate edge attributes and add to graph
    for (TInt i = 0; i < EdgeAttrV.Len(); i++) {
      TStr ColName = EdgeAttrV[i];
      TAttrType T = Table->GetColType(ColName);
      TInt Index = Table->GetColIdx(ColName);
      switch (T) {
        case atInt:
          Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
          break;
        case atFlt:
          Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName);
          break;
        case atStr:
          Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName);
          break;
      }
    }
  }
  return Graph;
}
/// Converts table to network in parallel. Use if network has only edge attributes.
template<class PGraphMP>
inline PGraphMP ToNetworkMP(PTable Table,
const TStr& SrcCol, const TStr& DstCol,
TStrV& EdgeAttrV,
TAttrAggr AggrPolicy) {
TStopwatch* Sw = TStopwatch::GetInstance();
Sw->Start(TStopwatch::AllocateColumnCopies);
const TInt SrcColIdx = Table->GetColIdx(SrcCol);
const TInt DstColIdx = Table->GetColIdx(DstCol);
const TInt NumRows = Table->GetNumValidRows();
const TAttrType NodeType = Table->GetColType(SrcCol);
Assert(NodeType == Table->GetColType(DstCol));
TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
THash<TInt, TStrIntVH> NodeIntAttrs;
THash<TInt, TStrFltVH> NodeFltAttrs;
THash<TInt, TStrStrVH> NodeStrAttrs;
#pragma omp parallel sections num_threads(4)
{
#pragma omp section
{ SrcCol1.Reserve(NumRows, NumRows); }
#pragma omp section
{ EdgeCol1.Reserve(NumRows, NumRows); }
#pragma omp section
{ DstCol2.Reserve(NumRows, NumRows); }
#pragma omp section
{ EdgeCol2.Reserve(NumRows, NumRows); }
}
Sw->Stop(TStopwatch::AllocateColumnCopies);
Sw->Start(TStopwatch::CopyColumns);
TIntPrV Partitions;
Table->GetPartitionRanges(Partitions, omp_get_max_threads());
TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
// double endPartition = omp_get_wtime();
// printf("Partition time = %f\n", endPartition-endResize);
omp_set_num_threads(omp_get_max_threads());
if (NodeType == atInt) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < Partitions.Len(); i++) {
TRowIterator RowI(Partitions[i].GetVal1(), Table());
TRowIterator EndI(Partitions[i].GetVal2(), Table());
while (RowI < EndI) {
TInt RowId = RowI.GetRowIdx();
SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
EdgeCol1[RowId] = RowId;
DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
EdgeCol2[RowId] = RowId;
RowI++;
}
}
}
else if (NodeType == atStr) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < Partitions.Len(); i++) {
TRowIterator RowI(Partitions[i].GetVal1(), Table());
TRowIterator EndI(Partitions[i].GetVal2(), Table());
while (RowI < EndI) {
TInt RowId = RowI.GetRowIdx();
SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
EdgeCol1[RowId] = RowId;
DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
EdgeCol2[RowId] = RowId;
RowI++;
}
}
}
Sw->Stop(TStopwatch::CopyColumns);
Sw->Start(TStopwatch::Sort);
omp_set_num_threads(omp_get_max_threads());
#pragma omp parallel
{
#pragma omp single nowait
{
#ifndef GLib_WIN32
#pragma omp task untied shared(SrcCol1, EdgeCol1)
#endif
{ TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); }
}
#pragma omp single nowait
{
#ifndef GLib_WIN32
#pragma omp task untied shared(EdgeCol2, DstCol2)
#endif
{ TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); }
}
#ifndef GLib_WIN32
#pragma omp taskwait
#endif
}
Sw->Stop(TStopwatch::Sort);
Sw->Start(TStopwatch::Group);
TInt NumThreads = omp_get_max_threads();
TInt PartSize = (NumRows/NumThreads);
// Find the offset of all partitions, each of which contains a list of rows.
// Nodes from same sources or destinations are ensured to be kept within same partition.
TIntV SrcOffsets, DstOffsets;
SrcOffsets.Add(0);
for (TInt i = 1; i < NumThreads; i++) {
TInt CurrOffset = i * PartSize;
while (CurrOffset < (i+1) * PartSize &&
SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
// ensure that rows from the same sources are grouped together
CurrOffset++;
}
if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
}
SrcOffsets.Add(NumRows);
DstOffsets.Add(0);
for (TInt i = 1; i < NumThreads; i++) {
TInt CurrOffset = i * PartSize;
while (CurrOffset < (i+1) * PartSize &&
DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
// ensure that rows to the same destinations are grouped together
CurrOffset++;
}
if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
}
DstOffsets.Add(NumRows);
TInt SrcPartCnt = SrcOffsets.Len()-1; // number of partitions
TInt DstPartCnt = DstOffsets.Len()-1; // number of partitions
// count the number of source nodes and destination nodes in each partition
TIntV SrcNodeCounts, DstNodeCounts;
SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);
#pragma omp parallel for schedule(dynamic)
for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
if (t < SrcPartCnt) {
TInt i = t;
if (SrcOffsets[i] != SrcOffsets[i+1]) {
SrcNodeCounts[i] = 1;
TInt CurrNode = SrcCol1[SrcOffsets[i]];
for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
if (j < SrcOffsets[i+1]) {
SrcNodeCounts[i]++;
CurrNode = SrcCol1[j];
}
}
}
} else {
TInt i = t - SrcPartCnt;
if (DstOffsets[i] != DstOffsets[i+1]) {
DstNodeCounts[i] = 1;
TInt CurrNode = DstCol2[DstOffsets[i]];
for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
if (j < DstOffsets[i+1]) {
DstNodeCounts[i]++;
CurrNode = DstCol2[j];
}
}
}
}
}
TInt TotalSrcNodes = 0;
TIntV SrcIdOffsets;
for (int i = 0; i < SrcPartCnt; i++) {
SrcIdOffsets.Add(TotalSrcNodes);
TotalSrcNodes += SrcNodeCounts[i];
}
TInt TotalDstNodes = 0;
TIntV DstIdOffsets;
for (int i = 0; i < DstPartCnt; i++) {
DstIdOffsets.Add(TotalDstNodes);
TotalDstNodes += DstNodeCounts[i];
}
// printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val);
// find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id
TIntPrV SrcNodeIds, DstNodeIds;
#pragma omp parallel sections
{
#pragma omp section
{ SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
#pragma omp section
{ DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
}
// Find the starting offset of each node (in both src and dst)
#pragma omp parallel for schedule(dynamic)
for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
if (t < SrcPartCnt) {
TInt i = t;
if (SrcOffsets[i] != SrcOffsets[i+1]) {
TInt CurrNode = SrcCol1[SrcOffsets[i]];
TInt ThreadOffset = SrcIdOffsets[i];
SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
TInt CurrCount = 1;
for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
if (j < SrcOffsets[i+1]) {
CurrNode = SrcCol1[j];
SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
CurrCount++;
}
}
}
} else {
TInt i = t - SrcPartCnt;
if (DstOffsets[i] != DstOffsets[i+1]) {
TInt CurrNode = DstCol2[DstOffsets[i]];
TInt ThreadOffset = DstIdOffsets[i];
DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
TInt CurrCount = 1;
for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
if (j < DstOffsets[i+1]) {
CurrNode = DstCol2[j];
DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
CurrCount++;
}
}
}
}
}
Sw->Stop(TStopwatch::Group);
Sw->Start(TStopwatch::MergeNeighborhoods);
// Find the combined neighborhood (both out-neighbors and in-neighbors) of each node
TIntTrV Nodes;
Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
TInt i = 0, j = 0;
while (i < TotalSrcNodes && j < TotalDstNodes) {
if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
i++;
j++;
} else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
i++;
} else {
Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
j++;
}
}
for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
Sw->Stop(TStopwatch::MergeNeighborhoods);
Sw->Start(TStopwatch::AddNeighborhoods);
TInt NumNodes = Nodes.Len();
PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
// NumThreads = omp_get_max_threads();
// int Delta = (NumNodes+NumThreads-1)/NumThreads;
TVec<TIntV> InVV(NumNodes);
TVec<TIntV> OutVV(NumNodes);
// omp_set_num_threads(NumThreads);
#pragma omp parallel for schedule(static,100)
for (int m = 0; m < NumNodes; m++) {
//double startTr = omp_get_wtime();
//TIntV OutV, InV;
TInt n, i, j;
Nodes[m].GetVal(n, i, j);
if (i >= 0) {
TInt Offset = SrcNodeIds[i].GetVal2();
TInt Sz = EdgeCol1.Len()-Offset;
if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
OutVV[m].Reserve(Sz);
OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz);
}
if (j >= 0) {
TInt Offset = DstNodeIds[j].GetVal2();
TInt Sz = EdgeCol2.Len()-Offset;
if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
InVV[m].Reserve(Sz);
InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz);
}
Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
}
Graph->SetNodes(NumNodes);
Sw->Stop(TStopwatch::AddNeighborhoods);
Sw->Start(TStopwatch::AddEdges);
omp_set_num_threads(omp_get_max_threads());
if (NodeType == atInt) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < Partitions.Len(); i++) {
TRowIterator RowI(Partitions[i].GetVal1(), Table());
TRowIterator EndI(Partitions[i].GetVal2(), Table());
while (RowI < EndI) {
TInt RowId = RowI.GetRowIdx(); // EdgeId
TInt SrcId = RowI.GetIntAttr(SrcColIdx);
TInt DstId = RowI.GetIntAttr(DstColIdx);
Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
RowI++;
}
}
}
else if (NodeType == atStr) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < Partitions.Len(); i++) {
TRowIterator RowI(Partitions[i].GetVal1(), Table());
TRowIterator EndI(Partitions[i].GetVal2(), Table());
while (RowI < EndI) {
TInt RowId = RowI.GetRowIdx(); // EdgeId
TInt SrcId = RowI.GetStrMapById(SrcColIdx);
TInt DstId = RowI.GetStrMapById(DstColIdx);
Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
RowI++;
}
}
}
Graph->SetEdges(NumRows);
Graph->SetMxEId(NumRows);
Sw->Stop(TStopwatch::AddEdges);
// make single pass over all rows in the table to add attributes
for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
if ((Table->Next)[CurrRowIdx] == Table->Invalid) {
continue;
}
for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) {
TStr ColName = EdgeAttrV[ea_i];
TAttrType T = Table->GetColType(ColName);
TInt Index = Table->GetColIdx(ColName);
switch (T) {
case atInt:
Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
break;
case atFlt:
Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName);
break;
case atStr:
Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName);
break;
}
}
}
// double endAdd = omp_get_wtime();
// printf("Add time = %f\n", endAdd-endAlloc);
return Graph;
}
/// Converts table to network sequentially. Takes edges from \c Table and nodes
/// explicitly from \c NodeCol in \c NodeTable, with attribute vectors passed as
/// columns in corresponding tables.
/// Edge ids are the table row indices; node ids are the (mapped) column values.
template<class PGraph>
PGraph ToNetwork(PTable Table,
  const TStr& SrcCol, const TStr& DstCol,
  TStrV& EdgeAttrV, PTable NodeTable, const TStr& NodeCol, TStrV& NodeAttrV,
  TAttrAggr AggrPolicy) {
  PGraph Graph = PGraph::TObj::New();
  // Source and destination columns must hold the same node type.
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TAttrType NodeTypeN = NodeTable->GetColType(NodeCol);
  const TInt NodeColIdx = NodeTable->GetColIdx(NodeCol);
  // Maps float node values to assigned integer node ids (floats cannot be
  // used as node ids directly).
  THash<TFlt, TInt> FltNodeVals;
  // Single pass over all valid rows of the edge table: add endpoints as nodes
  // (if unseen), add the edge, and attach the requested edge attributes.
  for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
    if ((Table->Next)[CurrRowIdx] == Table->Invalid) {
      continue;  // skip deleted rows
    }
    TInt SVal, DVal;
    if (NodeType == atFlt) {
      TFlt FSVal = (Table->FltCols)[SrcColIdx][CurrRowIdx];
      SVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FSVal);
      // BUG FIX: destination value must come from DstColIdx. The original
      // read SrcColIdx here, which turned every float-typed edge into a
      // self-loop.
      TFlt FDVal = (Table->FltCols)[DstColIdx][CurrRowIdx];
      DVal = Table->CheckAndAddFltNode(Graph, FltNodeVals, FDVal);
    }
    else if (NodeType == atInt || NodeType == atStr) {
      if (NodeType == atInt) {
        SVal = (Table->IntCols)[SrcColIdx][CurrRowIdx];
        DVal = (Table->IntCols)[DstColIdx][CurrRowIdx];
      }
      else {
        // String columns store integer ids into the string pool.
        SVal = (Table->StrColMaps)[SrcColIdx][CurrRowIdx];
        DVal = (Table->StrColMaps)[DstColIdx][CurrRowIdx];
      }
      if (!Graph->IsNode(SVal)) {Graph->AddNode(SVal); }
      if (!Graph->IsNode(DVal)) {Graph->AddNode(DVal); }
    }
    // Add edge; edge id is the row index.
    Graph->AddEdge(SVal, DVal, CurrRowIdx);
    // Attach edge attributes from the requested columns.
    for (TInt i = 0; i < EdgeAttrV.Len(); i++) {
      TStr ColName = EdgeAttrV[i];
      TAttrType T = Table->GetColType(ColName);
      TInt Index = Table->GetColIdx(ColName);
      switch (T) {
      case atInt:
        Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
        break;
      case atFlt:
        Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName);
        break;
      case atStr:
        Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName);
        break;
      }
    }
  }
  // Add node attributes from NodeTable, keyed by the node id in NodeCol.
  if (NodeAttrV.Len() > 0) {
    for (int CurrRowIdx = 0; CurrRowIdx < (NodeTable->Next).Len(); CurrRowIdx++) {
      if ((NodeTable->Next)[CurrRowIdx] == NodeTable->Invalid) {
        continue;  // skip deleted rows
      }
      TInt NId;
      if (NodeTypeN == atInt) {
        NId = (NodeTable->IntCols)[NodeColIdx][CurrRowIdx];
      }
      else if (NodeTypeN == atStr){
        NId = (NodeTable->StrColMaps)[NodeColIdx][CurrRowIdx];
      }
      for (TInt i = 0; i < NodeAttrV.Len(); i++) {
        TStr ColName = NodeAttrV[i];
        TAttrType T = NodeTable->GetColType(ColName);
        TInt Index = NodeTable->GetColIdx(ColName);
        switch (T) {
        case atInt:
          Graph->AddIntAttrDatN(NId, NodeTable->IntCols[Index][CurrRowIdx], ColName);
          break;
        case atFlt:
          Graph->AddFltAttrDatN(NId, NodeTable->FltCols[Index][CurrRowIdx], ColName);
          break;
        case atStr:
          Graph->AddStrAttrDatN(NId, NodeTable->GetStrVal(Index, CurrRowIdx), ColName);
          break;
        }
      }
    }
  }
  return Graph;
}
/// Converts table to network in parallel. Takes edges from \c Table and nodes explicitly from \c NodeCol in \c NodeTable, with attribute vectors passed as columns in corresponding tables.
/// Pipeline: copy src/dst columns -> sort both copies by node id -> group rows
/// by node -> merge in/out neighborhoods -> bulk-insert nodes and edges.
/// Edge ids are table row indices. Attribute passes at the end are sequential.
template<class PGraphMP>
inline PGraphMP ToNetworkMP(PTable Table,
  const TStr& SrcCol, const TStr& DstCol,
  TStrV& EdgeAttrV, PTable NodeTable, const TStr& NodeCol, TStrV& NodeAttrV,
  TAttrAggr AggrPolicy) {
  TStopwatch* Sw = TStopwatch::GetInstance();
  Sw->Start(TStopwatch::AllocateColumnCopies);
  const TInt SrcColIdx = Table->GetColIdx(SrcCol);
  const TInt DstColIdx = Table->GetColIdx(DstCol);
  const TInt NumRows = Table->GetNumValidRows();
  // Source and destination columns must hold the same node type.
  const TAttrType NodeType = Table->GetColType(SrcCol);
  Assert(NodeType == Table->GetColType(DstCol));
  // Working copies: (SrcCol1, EdgeCol1) and (DstCol2, EdgeCol2) are sorted
  // independently as key/value pairs below.
  TIntV SrcCol1, EdgeCol1, EdgeCol2, DstCol2;
  const TAttrType NodeTypeN = NodeTable->GetColType(NodeCol);
  const TInt NodeColIdx = NodeTable->GetColIdx(NodeCol);
  // NOTE(review): these three hashes are declared but not used in this body.
  THash<TInt, TStrIntVH> NodeIntAttrs;
  THash<TInt, TStrFltVH> NodeFltAttrs;
  THash<TInt, TStrStrVH> NodeStrAttrs;
  // Allocate the four column copies concurrently, one per section.
  #pragma omp parallel sections num_threads(4)
  {
    #pragma omp section
    { SrcCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol1.Reserve(NumRows, NumRows); }
    #pragma omp section
    { DstCol2.Reserve(NumRows, NumRows); }
    #pragma omp section
    { EdgeCol2.Reserve(NumRows, NumRows); }
  }
  Sw->Stop(TStopwatch::AllocateColumnCopies);
  Sw->Start(TStopwatch::CopyColumns);
  TIntPrV Partitions;
  Table->GetPartitionRanges(Partitions, omp_get_max_threads());
  TInt PartitionSize = Partitions[0].GetVal2()-Partitions[0].GetVal1()+1;
  // double endPartition = omp_get_wtime();
  // printf("Partition time = %f\n", endPartition-endResize);
  omp_set_num_threads(omp_get_max_threads());
  // Copy node ids and row ids into the working arrays, one table partition
  // per thread. Writes are indexed by row id, so partitions do not collide.
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetIntAttr(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetIntAttr(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    // String node ids are represented by their string-pool integer ids.
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx();
        SrcCol1[RowId] = RowI.GetStrMapById(SrcColIdx);
        EdgeCol1[RowId] = RowId;
        DstCol2[RowId] = RowI.GetStrMapById(DstColIdx);
        EdgeCol2[RowId] = RowId;
        RowI++;
      }
    }
  }
  Sw->Stop(TStopwatch::CopyColumns);
  Sw->Start(TStopwatch::Sort);
  omp_set_num_threads(omp_get_max_threads());
  // Sort the two key/value pairs concurrently as untied tasks (tasks are
  // disabled on Windows builds, where the sorts run from the single regions).
  #pragma omp parallel
  {
    #pragma omp single nowait
    {
    #ifndef GLib_WIN32
    #pragma omp task untied shared(SrcCol1, EdgeCol1)
    #endif
      { TTable::QSortKeyVal(SrcCol1, EdgeCol1, 0, NumRows-1); }
    }
    #pragma omp single nowait
    {
    #ifndef GLib_WIN32
    #pragma omp task untied shared(EdgeCol2, DstCol2)
    #endif
      { TTable::QSortKeyVal(DstCol2, EdgeCol2, 0, NumRows-1); }
    }
    #ifndef GLib_WIN32
    #pragma omp taskwait
    #endif
  }
  Sw->Stop(TStopwatch::Sort);
  Sw->Start(TStopwatch::Group);
  TInt NumThreads = omp_get_max_threads();
  TInt PartSize = (NumRows/NumThreads);
  // Find the offset of all partitions, each of which contains a list of rows.
  // Nodes from same sources or destinations are ensured to be kept within same partition.
  TIntV SrcOffsets, DstOffsets;
  SrcOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
      SrcCol1[CurrOffset-1] == SrcCol1[CurrOffset]) {
      // ensure that rows from the same sources are grouped together
      CurrOffset++;
    }
    // If a node's run spans the whole next chunk, the boundary is dropped.
    if (CurrOffset < (i+1) * PartSize) { SrcOffsets.Add(CurrOffset); }
  }
  SrcOffsets.Add(NumRows);
  DstOffsets.Add(0);
  for (TInt i = 1; i < NumThreads; i++) {
    TInt CurrOffset = i * PartSize;
    while (CurrOffset < (i+1) * PartSize &&
      DstCol2[CurrOffset-1] == DstCol2[CurrOffset]) {
      // ensure that rows to the same destinations are grouped together
      CurrOffset++;
    }
    if (CurrOffset < (i+1) * PartSize) { DstOffsets.Add(CurrOffset); }
  }
  DstOffsets.Add(NumRows);
  TInt SrcPartCnt = SrcOffsets.Len()-1; // number of partitions
  TInt DstPartCnt = DstOffsets.Len()-1; // number of partitions
  // count the number of source nodes and destination nodes in each partition
  TIntV SrcNodeCounts, DstNodeCounts;
  SrcNodeCounts.Reserve(SrcPartCnt, SrcPartCnt);
  DstNodeCounts.Reserve(DstPartCnt, DstPartCnt);
  // One iteration per partition; t < SrcPartCnt handles source partitions,
  // the rest handle destination partitions.
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        SrcNodeCounts[i] = 1;
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          // Skip over the run of rows belonging to CurrNode.
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            SrcNodeCounts[i]++;
            CurrNode = SrcCol1[j];
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        DstNodeCounts[i] = 1;
        TInt CurrNode = DstCol2[DstOffsets[i]];
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            DstNodeCounts[i]++;
            CurrNode = DstCol2[j];
          }
        }
      }
    }
  }
  // Prefix sums: starting index of each partition's nodes in the id vectors.
  TInt TotalSrcNodes = 0;
  TIntV SrcIdOffsets;
  for (int i = 0; i < SrcPartCnt; i++) {
    SrcIdOffsets.Add(TotalSrcNodes);
    TotalSrcNodes += SrcNodeCounts[i];
  }
  TInt TotalDstNodes = 0;
  TIntV DstIdOffsets;
  for (int i = 0; i < DstPartCnt; i++) {
    DstIdOffsets.Add(TotalDstNodes);
    TotalDstNodes += DstNodeCounts[i];
  }
  // printf("Total Src = %d, Total Dst = %d\n", TotalSrcNodes.Val, TotalDstNodes.Val);
  // find vector of (node_id, start_offset) where start_offset is the index of the first row with node_id
  TIntPrV SrcNodeIds, DstNodeIds;
  #pragma omp parallel sections
  {
    #pragma omp section
    { SrcNodeIds.Reserve(TotalSrcNodes, TotalSrcNodes); }
    #pragma omp section
    { DstNodeIds.Reserve(TotalDstNodes, TotalDstNodes); }
  }
  // Find the starting offset of each node (in both src and dst)
  #pragma omp parallel for schedule(dynamic)
  for (int t = 0; t < SrcPartCnt+DstPartCnt; t++) {
    if (t < SrcPartCnt) {
      TInt i = t;
      if (SrcOffsets[i] != SrcOffsets[i+1]) {
        TInt CurrNode = SrcCol1[SrcOffsets[i]];
        TInt ThreadOffset = SrcIdOffsets[i];
        SrcNodeIds[ThreadOffset] = TIntPr(CurrNode, SrcOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = SrcOffsets[i]+1; j < SrcOffsets[i+1]; j++) {
          while (j < SrcOffsets[i+1] && SrcCol1[j] == CurrNode) { j++; }
          if (j < SrcOffsets[i+1]) {
            CurrNode = SrcCol1[j];
            SrcNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    } else {
      TInt i = t - SrcPartCnt;
      if (DstOffsets[i] != DstOffsets[i+1]) {
        TInt CurrNode = DstCol2[DstOffsets[i]];
        TInt ThreadOffset = DstIdOffsets[i];
        DstNodeIds[ThreadOffset] = TIntPr(CurrNode, DstOffsets[i]);
        TInt CurrCount = 1;
        for (TInt j = DstOffsets[i]+1; j < DstOffsets[i+1]; j++) {
          while (j < DstOffsets[i+1] && DstCol2[j] == CurrNode) { j++; }
          if (j < DstOffsets[i+1]) {
            CurrNode = DstCol2[j];
            DstNodeIds[ThreadOffset+CurrCount] = TIntPr(CurrNode, j);
            CurrCount++;
          }
        }
      }
    }
  }
  Sw->Stop(TStopwatch::Group);
  Sw->Start(TStopwatch::MergeNeighborhoods);
  // Find the combined neighborhood (both out-neighbors and in-neighbors) of each node
  // Merge the two sorted id lists into (node_id, src_index, dst_index)
  // triples; -1 marks "not present on that side".
  TIntTrV Nodes;
  Nodes.Reserve(TotalSrcNodes+TotalDstNodes);
  TInt i = 0, j = 0;
  while (i < TotalSrcNodes && j < TotalDstNodes) {
    if (SrcNodeIds[i].Val1 == DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, j));
      i++;
      j++;
    } else if (SrcNodeIds[i].Val1 < DstNodeIds[j].Val1) {
      Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1));
      i++;
    } else {
      Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j));
      j++;
    }
  }
  for (; i < TotalSrcNodes; i++) { Nodes.Add(TIntTr(SrcNodeIds[i].Val1, i, -1)); }
  for (; j < TotalDstNodes; j++) { Nodes.Add(TIntTr(DstNodeIds[j].Val1, -1, j)); }
  Sw->Stop(TStopwatch::MergeNeighborhoods);
  Sw->Start(TStopwatch::AddNeighborhoods);
  TInt NumNodes = Nodes.Len();
  PGraphMP Graph = PGraphMP::TObj::New(NumNodes, NumRows);
  // NumThreads = omp_get_max_threads();
  // int Delta = (NumNodes+NumThreads-1)/NumThreads;
  TVec<TIntV> InVV(NumNodes);
  TVec<TIntV> OutVV(NumNodes);
  // omp_set_num_threads(NumThreads);
  // Build each node's in/out edge-id lists from the sorted row ranges and
  // insert the node together with its edges.
  #pragma omp parallel for schedule(static,100)
  for (int m = 0; m < NumNodes; m++) {
    //double startTr = omp_get_wtime();
    //TIntV OutV, InV;
    TInt n, i, j;
    Nodes[m].GetVal(n, i, j);
    if (i >= 0) {
      TInt Offset = SrcNodeIds[i].GetVal2();
      // Run length: up to the next node's start offset (or end of array).
      TInt Sz = EdgeCol1.Len()-Offset;
      if (i < SrcNodeIds.Len()-1) { Sz = SrcNodeIds[i+1].GetVal2()-Offset; }
      OutVV[m].Reserve(Sz);
      OutVV[m].CopyUniqueFrom(EdgeCol1, Offset, Sz);
    }
    if (j >= 0) {
      TInt Offset = DstNodeIds[j].GetVal2();
      TInt Sz = EdgeCol2.Len()-Offset;
      if (j < DstNodeIds.Len()-1) { Sz = DstNodeIds[j+1].GetVal2()-Offset; }
      InVV[m].Reserve(Sz);
      InVV[m].CopyUniqueFrom(EdgeCol2, Offset, Sz);
    }
    Graph->AddNodeWithEdges(n, InVV[m], OutVV[m]);
  }
  Graph->SetNodes(NumNodes);
  Sw->Stop(TStopwatch::AddNeighborhoods);
  Sw->Start(TStopwatch::AddEdges);
  omp_set_num_threads(omp_get_max_threads());
  // Second pass over the table: fill in edge records. Unchecked insertion is
  // safe because AddNodeWithEdges pre-sized each node's adjacency above.
  if (NodeType == atInt) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetIntAttr(SrcColIdx);
        TInt DstId = RowI.GetIntAttr(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  else if (NodeType == atStr) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < Partitions.Len(); i++) {
      TRowIterator RowI(Partitions[i].GetVal1(), Table());
      TRowIterator EndI(Partitions[i].GetVal2(), Table());
      while (RowI < EndI) {
        TInt RowId = RowI.GetRowIdx(); // EdgeId
        TInt SrcId = RowI.GetStrMapById(SrcColIdx);
        TInt DstId = RowI.GetStrMapById(DstColIdx);
        Graph->AddEdgeUnchecked(RowId, SrcId, DstId);
        RowI++;
      }
    }
  }
  Graph->SetEdges(NumRows);
  Graph->SetMxEId(NumRows);
  Sw->Stop(TStopwatch::AddEdges);
  // make single pass over all rows in the table to add attributes
  for (int CurrRowIdx = 0; CurrRowIdx < (Table->Next).Len(); CurrRowIdx++) {
    if ((Table->Next)[CurrRowIdx] == Table->Invalid) {
      continue; // skip deleted rows
    }
    for (TInt ea_i = 0; ea_i < EdgeAttrV.Len(); ea_i++) {
      TStr ColName = EdgeAttrV[ea_i];
      TAttrType T = Table->GetColType(ColName);
      TInt Index = Table->GetColIdx(ColName);
      switch (T) {
      case atInt:
        Graph->AddIntAttrDatE(CurrRowIdx, Table->IntCols[Index][CurrRowIdx], ColName);
        break;
      case atFlt:
        Graph->AddFltAttrDatE(CurrRowIdx, Table->FltCols[Index][CurrRowIdx], ColName);
        break;
      case atStr:
        Graph->AddStrAttrDatE(CurrRowIdx, Table->GetStrVal(Index, CurrRowIdx), ColName);
        break;
      }
    }
  }
  //Add node attribtes
  if (NodeAttrV.Len() > 0) {
    for (int CurrRowIdx = 0; CurrRowIdx < (NodeTable->Next).Len(); CurrRowIdx++) {
      if ((NodeTable->Next)[CurrRowIdx] == NodeTable->Invalid) {
        continue; // skip deleted rows
      }
      TInt NId;
      if (NodeTypeN == atInt) {
        NId = (NodeTable->IntCols)[NodeColIdx][CurrRowIdx];
      }
      else if (NodeTypeN == atStr){
        NId = (NodeTable->StrColMaps)[NodeColIdx][CurrRowIdx];
      }
      for (TInt i = 0; i < NodeAttrV.Len(); i++) {
        TStr ColName = NodeAttrV[i];
        TAttrType T = NodeTable->GetColType(ColName);
        TInt Index = NodeTable->GetColIdx(ColName);
        switch (T) {
        case atInt:
          Graph->AddIntAttrDatN(NId, NodeTable->IntCols[Index][CurrRowIdx], ColName);
          break;
        case atFlt:
          Graph->AddFltAttrDatN(NId, NodeTable->FltCols[Index][CurrRowIdx], ColName);
          break;
        case atStr:
          Graph->AddStrAttrDatN(NId, NodeTable->GetStrVal(Index, CurrRowIdx), ColName);
          break;
        }
      }
    }
  }
  // double endAdd = omp_get_wtime();
  // printf("Add time = %f\n", endAdd-endAlloc);
  return Graph;
}
#endif // GCC_ATOMIC
}; // TSnap namespace
// TODO tidy up GCC_ATOMIC directives
#endif // CONV_H
|
elec.c | /*-
* Copyright (c) 2012-2017 Ilya Kaliman
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "balance.h"
#include "elec.h"
#include "private.h"
static double
get_screen_damping(double r_ij, double pi, double pj)
{
if (pj == HUGE_VAL) { /* j is nucleus */
return 1.0 - exp(-pi * r_ij);
}
else if (fabs(pi - pj) < 1.0e-5) {
return 1.0 - (1.0 + 0.5 * pi * r_ij) * exp(-pi * r_ij);
}
else {
return 1.0 - exp(-pi * r_ij) * pj * pj / (pj * pj - pi * pi) -
exp(-pj * r_ij) * pi * pi / (pi * pi - pj * pj);
}
}
static double
get_screen_damping_grad(double r_ij, double pi, double pj)
{
if (pj == HUGE_VAL) { /* j is nucleus */
return 1.0 - exp(-r_ij * pi) * (1.0 + pi * r_ij);
}
else if (fabs(pi - pj) < 1.0e-5) {
return 1.0 - exp(-r_ij * pi) * (1.0 + pi * r_ij +
0.5 * pi * pi * r_ij * r_ij);
}
else {
return 1.0 - exp(-r_ij * pi) * (1.0 + pi * r_ij) *
pj * pj / (pj * pj - pi * pi) -
exp(-r_ij * pj) * (1.0 + pj * r_ij) *
pi * pi / (pi * pi - pj * pj);
}
}
/*
 * Electrostatic energy between one nucleus of fragment fr_i and one
 * multipole expansion point of fragment fr_j, shifted by the periodic
 * cell offset in swf. Screening damping (when enabled) applies only to
 * the charge-monopole term; the nucleus side is flagged with HUGE_VAL.
 */
static double
atom_mult_energy(struct efp *efp, struct frag *fr_i, struct frag *fr_j,
		 size_t atom_i_idx, size_t pt_j_idx, const struct swf *swf)
{
	const struct efp_atom *atom = fr_i->atoms + atom_i_idx;
	const struct multipole_pt *mpt = fr_j->multipole_pts + pt_j_idx;

	/* separation vector from the atom to the multipole point */
	vec_t dr = {
		mpt->x - atom->x - swf->cell.x,
		mpt->y - atom->y - swf->cell.y,
		mpt->z - atom->z - swf->cell.z
	};

	double damp = 1.0;

	if (efp->opts.elec_damp == EFP_ELEC_DAMP_SCREEN)
		damp = get_screen_damping(vec_len(&dr),
		    fr_j->screen_params[pt_j_idx], HUGE_VAL);

	/* nuclear charge against monopole, dipole, quadrupole, octupole */
	return damp * efp_charge_charge_energy(atom->znuc,
		mpt->monopole, &dr) +
	    efp_charge_dipole_energy(atom->znuc, &mpt->dipole, &dr) +
	    efp_charge_quadrupole_energy(atom->znuc, mpt->quadrupole, &dr) +
	    efp_charge_octupole_energy(atom->znuc, mpt->octupole, &dr);
}
/*
 * Accumulate forces, torques and stress from the interaction of one
 * nucleus of fragment fr_i with one multipole point of fragment fr_j.
 * Per-term contributions are summed into (force, torque_i, torque_j),
 * scaled by the switching function, then applied with opposite signs to
 * the two fragments' gradients.
 */
static void
atom_mult_grad(struct efp *efp, size_t fr_i_idx, size_t fr_j_idx,
	       size_t atom_i_idx, size_t pt_j_idx, const struct swf *swf)
{
	const struct frag *fr_i = efp->frags + fr_i_idx;
	const struct frag *fr_j = efp->frags + fr_j_idx;
	const struct efp_atom *at_i = fr_i->atoms + atom_i_idx;
	const struct multipole_pt *pt_j = fr_j->multipole_pts + pt_j_idx;

	/* separation vector shifted by the periodic cell offset */
	vec_t dr = {
		pt_j->x - at_i->x - swf->cell.x,
		pt_j->y - at_i->y - swf->cell.y,
		pt_j->z - at_i->z - swf->cell.z
	};

	/* per-term scratch and running totals */
	vec_t force_, torque_i_, torque_j_;
	vec_t force = vec_zero, torque_i = vec_zero, torque_j = vec_zero;

	/* charge - charge */
	efp_charge_charge_grad(at_i->znuc, pt_j->monopole, &dr,
	    &force_, &torque_i_, &torque_j_);
	/* screening damps only the charge-charge force (nucleus: HUGE_VAL) */
	if (efp->opts.elec_damp == EFP_ELEC_DAMP_SCREEN) {
		double r = vec_len(&dr);
		double sp = fr_j->screen_params[pt_j_idx];
		double gdamp = get_screen_damping_grad(r, sp, HUGE_VAL);

		force_.x *= gdamp;
		force_.y *= gdamp;
		force_.z *= gdamp;
	}
	add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);

	/* charge - dipole */
	efp_charge_dipole_grad(at_i->znuc, &pt_j->dipole, &dr,
	    &force_, &torque_i_, &torque_j_);
	add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);

	/* charge - quadrupole */
	efp_charge_quadrupole_grad(at_i->znuc, pt_j->quadrupole, &dr,
	    &force_, &torque_i_, &torque_j_);
	/* NOTE(review): torque on j flips sign only for the quadrupole term;
	   presumably this matches the helper's sign convention -- confirm
	   against efp_charge_quadrupole_grad before changing. */
	vec_negate(&torque_j_);
	add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);

	/* charge - octupole */
	efp_charge_octupole_grad(at_i->znuc, pt_j->octupole, &dr,
	    &force_, &torque_i_, &torque_j_);
	add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);

	/* scale totals by the switching function */
	vec_scale(&force, swf->swf);
	vec_scale(&torque_i, swf->swf);
	vec_scale(&torque_j, swf->swf);

	/* equal and opposite forces on the two fragments, plus stress */
	efp_add_force(efp->grad + fr_i_idx, CVEC(fr_i->x), CVEC(at_i->x),
	    &force, &torque_i);
	efp_sub_force(efp->grad + fr_j_idx, CVEC(fr_j->x), CVEC(pt_j->x),
	    &force, &torque_j);
	efp_add_stress(&swf->dr, &force, &efp->stress);
}
/*
 * Electrostatic energy between two multipole expansion points, one on
 * each fragment, up to quadrupole-quadrupole order. The separation dr
 * points from pt_i to pt_j; terms with the roles swapped enter with a
 * minus sign when they are odd under dr -> -dr (dipole and octupole on
 * site i), so the physics-determined signs below must not be reordered.
 */
static double
mult_mult_energy(struct efp *efp, size_t fr_i_idx, size_t fr_j_idx,
		 size_t pt_i_idx, size_t pt_j_idx, const struct swf *swf)
{
	struct frag *fr_i = efp->frags + fr_i_idx;
	struct frag *fr_j = efp->frags + fr_j_idx;
	struct multipole_pt *pt_i = fr_i->multipole_pts + pt_i_idx;
	struct multipole_pt *pt_j = fr_j->multipole_pts + pt_j_idx;

	/* separation vector shifted by the periodic cell offset */
	vec_t dr = {
		pt_j->x - pt_i->x - swf->cell.x,
		pt_j->y - pt_i->y - swf->cell.y,
		pt_j->z - pt_i->z - swf->cell.z
	};

	double energy = 0.0, ccdamp = 1.0;

	/* screening damps only the monopole-monopole term */
	if (efp->opts.elec_damp == EFP_ELEC_DAMP_SCREEN) {
		double r = vec_len(&dr);
		double screen_i = fr_i->screen_params[pt_i_idx];
		double screen_j = fr_j->screen_params[pt_j_idx];

		ccdamp = get_screen_damping(r, screen_i, screen_j);
	}

	/* monopole - monopole */
	energy += ccdamp * efp_charge_charge_energy(pt_i->monopole,
	    pt_j->monopole, &dr);
	/* monopole - dipole */
	energy += efp_charge_dipole_energy(pt_i->monopole, &pt_j->dipole, &dr);
	/* dipole - monopole */
	energy -= efp_charge_dipole_energy(pt_j->monopole, &pt_i->dipole, &dr);
	/* monopole - quadrupole */
	energy += efp_charge_quadrupole_energy(pt_i->monopole,
	    pt_j->quadrupole, &dr);
	/* quadrupole - monopole */
	energy += efp_charge_quadrupole_energy(pt_j->monopole,
	    pt_i->quadrupole, &dr);
	/* monopole - octupole */
	energy += efp_charge_octupole_energy(pt_i->monopole,
	    pt_j->octupole, &dr);
	/* octupole - monopole */
	energy -= efp_charge_octupole_energy(pt_j->monopole,
	    pt_i->octupole, &dr);
	/* dipole - dipole */
	energy += efp_dipole_dipole_energy(&pt_i->dipole, &pt_j->dipole, &dr);
	/* dipole - quadrupole */
	energy += efp_dipole_quadrupole_energy(&pt_i->dipole,
	    pt_j->quadrupole, &dr);
	/* quadrupole - dipole */
	energy -= efp_dipole_quadrupole_energy(&pt_j->dipole,
	    pt_i->quadrupole, &dr);
	/* quadrupole - quadrupole */
	energy += efp_quadrupole_quadrupole_energy(pt_i->quadrupole,
	    pt_j->quadrupole, &dr);

	return energy;
}
/*
 * Accumulate the force, per-fragment torques and stress contribution
 * from the full multipole-multipole interaction (charge through
 * quadrupole-quadrupole) between point pt_i_idx of fragment fr_i_idx
 * and point pt_j_idx of fragment fr_j_idx, scaled by the pairwise
 * switching function swf->swf. The result is added to fragment i's
 * gradient and subtracted from fragment j's.
 *
 * NOTE(review): the vec_negate() calls after some *_grad invocations
 * appear to restore a consistent i->j sign convention when a helper is
 * called with its arguments swapped -- confirm against the individual
 * efp_*_grad definitions.
 */
static void
mult_mult_grad(struct efp *efp, size_t fr_i_idx, size_t fr_j_idx,
size_t pt_i_idx, size_t pt_j_idx, const struct swf *swf)
{
struct frag *fr_i = efp->frags + fr_i_idx;
struct frag *fr_j = efp->frags + fr_j_idx;
struct multipole_pt *pt_i = fr_i->multipole_pts + pt_i_idx;
struct multipole_pt *pt_j = fr_j->multipole_pts + pt_j_idx;
/* separation vector i->j, including the periodic cell shift */
vec_t dr = {
pt_j->x - pt_i->x - swf->cell.x,
pt_j->y - pt_i->y - swf->cell.y,
pt_j->z - pt_i->z - swf->cell.z
};
vec_t force_, torque_i_, torque_j_;
vec_t force = vec_zero, torque_i = vec_zero, torque_j = vec_zero;
/* monopole - monopole */
efp_charge_charge_grad(pt_i->monopole, pt_j->monopole, &dr,
&force_, &torque_i_, &torque_j_);
/* charge-charge screening damps only the monopole-monopole force */
if (efp->opts.elec_damp == EFP_ELEC_DAMP_SCREEN) {
double r = vec_len(&dr);
double screen_i = fr_i->screen_params[pt_i_idx];
double screen_j = fr_j->screen_params[pt_j_idx];
double gdamp = get_screen_damping_grad(r, screen_i, screen_j);
force_.x *= gdamp;
force_.y *= gdamp;
force_.z *= gdamp;
}
add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);
/* monopole - dipole */
efp_charge_dipole_grad(pt_i->monopole, &pt_j->dipole, &dr,
&force_, &torque_i_, &torque_j_);
add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);
/* dipole - monopole: helper called with swapped roles, so the torque
 * outputs are swapped and the force is negated afterwards */
efp_charge_dipole_grad(pt_j->monopole, &pt_i->dipole, &dr,
&force_, &torque_j_, &torque_i_);
vec_negate(&force_);
add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);
/* monopole - quadrupole */
efp_charge_quadrupole_grad(pt_i->monopole, pt_j->quadrupole, &dr,
&force_, &torque_i_, &torque_j_);
vec_negate(&torque_j_);
add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);
/* quadrupole - monopole */
efp_charge_quadrupole_grad(pt_j->monopole, pt_i->quadrupole, &dr,
&force_, &torque_j_, &torque_i_);
add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);
/* monopole - octupole */
efp_charge_octupole_grad(pt_i->monopole, pt_j->octupole, &dr,
&force_, &torque_i_, &torque_j_);
add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);
/* octupole - monopole */
efp_charge_octupole_grad(pt_j->monopole, pt_i->octupole, &dr,
&force_, &torque_j_, &torque_i_);
vec_negate(&force_);
add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);
/* dipole - dipole */
efp_dipole_dipole_grad(&pt_i->dipole, &pt_j->dipole, &dr,
&force_, &torque_i_, &torque_j_);
vec_negate(&torque_j_);
add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);
/* dipole - quadrupole */
efp_dipole_quadrupole_grad(&pt_i->dipole, pt_j->quadrupole, &dr,
&force_, &torque_i_, &torque_j_);
add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);
/* quadrupole - dipole */
efp_dipole_quadrupole_grad(&pt_j->dipole, pt_i->quadrupole, &dr,
&force_, &torque_j_, &torque_i_);
vec_negate(&force_);
add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);
/* quadrupole - quadrupole */
efp_quadrupole_quadrupole_grad(pt_i->quadrupole, pt_j->quadrupole,
&dr, &force_, &torque_i_, &torque_j_);
vec_negate(&torque_j_);
add_3(&force, &force_, &torque_i, &torque_i_, &torque_j, &torque_j_);
/* scale the accumulated totals by the switching function */
vec_scale(&force, swf->swf);
vec_scale(&torque_i, swf->swf);
vec_scale(&torque_j, swf->swf);
efp_add_force(efp->grad + fr_i_idx, CVEC(fr_i->x), CVEC(pt_i->x),
&force, &torque_i);
efp_sub_force(efp->grad + fr_j_idx, CVEC(fr_j->x), CVEC(pt_j->x),
&force, &torque_j);
efp_add_stress(&swf->dr, &force, &efp->stress);
}
/*
 * Total electrostatic interaction energy between fragments fr_i_idx
 * and fr_j_idx: nucleus-nucleus, nucleus-multipole (both orderings)
 * and multipole-multipole terms. When efp->do_gradient is set, the
 * corresponding forces, torques and stress contributions are
 * accumulated as well. The returned energy is scaled by the pairwise
 * switching function swf.swf.
 */
double
efp_frag_frag_elec(struct efp *efp, size_t fr_i_idx, size_t fr_j_idx)
{
struct frag *fr_i = efp->frags + fr_i_idx;
struct frag *fr_j = efp->frags + fr_j_idx;
struct swf swf = efp_make_swf(efp, fr_i, fr_j);
double energy = 0.0;
/* nuclei - nuclei */
for (size_t ii = 0; ii < fr_i->n_atoms; ii++) {
for (size_t jj = 0; jj < fr_j->n_atoms; jj++) {
struct efp_atom *at_i = fr_i->atoms + ii;
struct efp_atom *at_j = fr_j->atoms + jj;
/* i->j separation including the periodic cell shift */
vec_t dr = {
at_j->x - at_i->x - swf.cell.x,
at_j->y - at_i->y - swf.cell.y,
at_j->z - at_i->z - swf.cell.z
};
energy += efp_charge_charge_energy(at_i->znuc,
at_j->znuc, &dr);
if (efp->do_gradient) {
vec_t force, add_i, add_j;
efp_charge_charge_grad(at_i->znuc, at_j->znuc,
&dr, &force, &add_i, &add_j);
vec_scale(&force, swf.swf);
/* point charges: force only, no torque */
efp_add_force(efp->grad + fr_i_idx,
CVEC(fr_i->x), CVEC(at_i->x), &force, NULL);
efp_sub_force(efp->grad + fr_j_idx,
CVEC(fr_j->x), CVEC(at_j->x), &force, NULL);
efp_add_stress(&swf.dr, &force, &efp->stress);
}
}
}
/* nuclei - mult points */
for (size_t ii = 0; ii < fr_i->n_atoms; ii++) {
for (size_t jj = 0; jj < fr_j->n_multipole_pts; jj++) {
energy += atom_mult_energy(efp, fr_i, fr_j,
ii, jj, &swf);
if (efp->do_gradient) {
atom_mult_grad(efp, fr_i_idx, fr_j_idx,
ii, jj, &swf);
}
}
}
/* mult points - nuclei: reuse atom_mult_* with the fragments swapped;
 * the cell shift and switching derivatives are negated so the
 * geometry stays consistent with the reversed ordering */
for (size_t jj = 0; jj < fr_j->n_atoms; jj++) {
for (size_t ii = 0; ii < fr_i->n_multipole_pts; ii++) {
struct swf swf2 = swf;
vec_negate(&swf2.cell);
vec_negate(&swf2.dr);
vec_negate(&swf2.dswf);
energy += atom_mult_energy(efp, fr_j, fr_i,
jj, ii, &swf2);
if (efp->do_gradient) {
atom_mult_grad(efp, fr_j_idx, fr_i_idx,
jj, ii, &swf2);
}
}
}
/* mult points - mult points */
for (size_t ii = 0; ii < fr_i->n_multipole_pts; ii++) {
for (size_t jj = 0; jj < fr_j->n_multipole_pts; jj++) {
energy += mult_mult_energy(efp, fr_i_idx, fr_j_idx,
ii, jj, &swf);
if (efp->do_gradient) {
mult_mult_grad(efp, fr_i_idx, fr_j_idx,
ii, jj, &swf);
}
}
}
/* gradient of the switching function itself, weighted by the
 * unswitched pair energy */
vec_t force = {
swf.dswf.x * energy,
swf.dswf.y * energy,
swf.dswf.z * energy
};
six_atomic_add_xyz(efp->grad + fr_i_idx, &force);
six_atomic_sub_xyz(efp->grad + fr_j_idx, &force);
efp_add_stress(&swf.dr, &force, &efp->stress);
return energy * swf.swf;
}
/* Rotate a packed quadrupole tensor: unpack into a full 3x3 buffer,
 * apply the rank-2 tensor rotation, and repack into the same packed
 * storage order. */
static void
rotate_quadrupole(const mat_t *rotmat, const double *in, double *out)
{
double unpacked[9], rotated[9];
for (size_t i = 0; i < 9; i++)
unpacked[i] = in[quad_idx(i / 3, i % 3)];
efp_rotate_t2(rotmat, unpacked, rotated);
for (size_t i = 0; i < 9; i++)
out[quad_idx(i / 3, i % 3)] = rotated[i];
}
/* Rotate a packed octupole tensor: unpack into a full 3x3x3 buffer,
 * apply the rank-3 tensor rotation, and repack into the same packed
 * storage order. */
static void
rotate_octupole(const mat_t *rotmat, const double *in, double *out)
{
double unpacked[27], rotated[27];
for (size_t i = 0; i < 27; i++)
unpacked[i] = in[oct_idx(i / 9, (i / 3) % 3, i % 3)];
efp_rotate_t3(rotmat, unpacked, rotated);
for (size_t i = 0; i < 27; i++)
out[oct_idx(i / 9, (i / 3) % 3, i % 3)] = rotated[i];
}
/*
 * Refresh a fragment's multipole expansion points after the fragment
 * has moved: translate/rotate each point from the library frame into
 * the current fragment frame and convert the traced quadrupole and
 * octupole tensors into Buckingham (traceless) form.
 */
void
efp_update_elec(struct frag *frag)
{
for (size_t i = 0; i < frag->n_multipole_pts; i++) {
const struct multipole_pt *in = frag->lib->multipole_pts + i;
struct multipole_pt *out = frag->multipole_pts + i;
/* move point position */
efp_move_pt(CVEC(frag->x), &frag->rotmat,
CVEC(in->x), VEC(out->x));
/* rotate dipole */
out->dipole = mat_vec(&frag->rotmat, &in->dipole);
/* rotate quadrupole */
rotate_quadrupole(&frag->rotmat,
in->quadrupole, out->quadrupole);
/* correction for Buckingham quadrupoles -- the trace qtr must be
 * computed before the diagonal elements are overwritten below;
 * assumes packed entries 0..2 are the diagonal (confirm against
 * quad_idx) */
double *quad = out->quadrupole;
double qtr = quad[quad_idx(0, 0)] +
quad[quad_idx(1, 1)] +
quad[quad_idx(2, 2)];
quad[0] = 1.5 * quad[0] - 0.5 * qtr;
quad[1] = 1.5 * quad[1] - 0.5 * qtr;
quad[2] = 1.5 * quad[2] - 0.5 * qtr;
quad[3] = 1.5 * quad[3];
quad[4] = 1.5 * quad[4];
quad[5] = 1.5 * quad[5];
/* rotate octupole */
rotate_octupole(&frag->rotmat, in->octupole, out->octupole);
/* correction for Buckingham octupoles -- the three partial traces
 * otrx/otry/otrz are likewise computed before any packed entry is
 * overwritten */
double *oct = out->octupole;
double otrx = oct[oct_idx(0, 0, 0)] +
oct[oct_idx(0, 1, 1)] +
oct[oct_idx(0, 2, 2)];
double otry = oct[oct_idx(0, 0, 1)] +
oct[oct_idx(1, 1, 1)] +
oct[oct_idx(1, 2, 2)];
double otrz = oct[oct_idx(0, 0, 2)] +
oct[oct_idx(1, 1, 2)] +
oct[oct_idx(2, 2, 2)];
oct[0] = 2.5 * oct[0] - 1.5 * otrx;
oct[1] = 2.5 * oct[1] - 1.5 * otry;
oct[2] = 2.5 * oct[2] - 1.5 * otrz;
oct[3] = 2.5 * oct[3] - 0.5 * otry;
oct[4] = 2.5 * oct[4] - 0.5 * otrz;
oct[5] = 2.5 * oct[5] - 0.5 * otrx;
oct[6] = 2.5 * oct[6] - 0.5 * otrz;
oct[7] = 2.5 * oct[7] - 0.5 * otrx;
oct[8] = 2.5 * oct[8] - 0.5 * otry;
oct[9] = 2.5 * oct[9];
}
}
/* Electrostatic energy between one fragment and all ab initio point
 * charges: nuclear charges first, then the fragment's full multipole
 * expansion (monopole, dipole, quadrupole, octupole) at each point.
 * Loop/summation order matches the original to keep floating-point
 * results bit-identical. */
static double
compute_ai_elec_frag(struct efp *efp, size_t frag_idx)
{
struct frag *frag = efp->frags + frag_idx;
double energy = 0.0;
/* fragment nuclei - point charges */
for (size_t a = 0; a < frag->n_atoms; a++) {
struct efp_atom *atom = frag->atoms + a;
for (size_t c = 0; c < efp->n_ptc; c++) {
vec_t dr = vec_sub(CVEC(atom->x), efp->ptc_xyz + c);
energy += efp_charge_charge_energy(atom->znuc,
efp->ptc[c], &dr);
}
}
/* fragment multipole points - point charges */
for (size_t m = 0; m < frag->n_multipole_pts; m++) {
struct multipole_pt *pt = frag->multipole_pts + m;
for (size_t c = 0; c < efp->n_ptc; c++) {
vec_t dr = vec_sub(CVEC(pt->x), efp->ptc_xyz + c);
/* charge - monopole */
energy += efp_charge_charge_energy(efp->ptc[c],
pt->monopole, &dr);
/* charge - dipole */
energy += efp_charge_dipole_energy(efp->ptc[c],
&pt->dipole, &dr);
/* charge - quadrupole */
energy += efp_charge_quadrupole_energy(efp->ptc[c],
pt->quadrupole, &dr);
/* charge - octupole */
energy += efp_charge_octupole_energy(efp->ptc[c],
pt->octupole, &dr);
}
}
return energy;
}
/*
 * Gradient counterpart of compute_ai_elec_frag(): for every ab initio
 * point charge interacting with fragment frag_idx, accumulate the
 * force on the point charge (efp->ptc_grad) and the matching
 * force/torque on the fragment (efp->grad).
 */
static void
compute_ai_elec_frag_grad(struct efp *efp, size_t frag_idx)
{
struct frag *fr_j = efp->frags + frag_idx;
vec_t force, add_i, add_j, force_, add_i_, add_j_;
for (size_t i = 0; i < efp->n_ptc; i++) {
/* ab initio atom - fragment atoms */
for (size_t k = 0; k < fr_j->n_atoms; k++) {
struct efp_atom *at_j = fr_j->atoms + k;
vec_t dr = vec_sub(CVEC(at_j->x), efp->ptc_xyz + i);
efp_charge_charge_grad(efp->ptc[i], at_j->znuc, &dr,
&force, &add_i, &add_j);
/* NOTE(review): add_i is never consumed here -- a point
 * charge carries no torque, so only the force is used;
 * confirm this is intended */
vec_atomic_add(efp->ptc_grad + i, &force);
efp_sub_force(efp->grad + frag_idx, CVEC(fr_j->x),
CVEC(at_j->x), &force, &add_j);
}
/* ab initio atom - fragment multipoles */
for (size_t k = 0; k < fr_j->n_multipole_pts; k++) {
struct multipole_pt *pt_j = fr_j->multipole_pts + k;
vec_t dr = vec_sub(CVEC(pt_j->x), efp->ptc_xyz + i);
/* accumulate all multipole orders before applying */
force = vec_zero;
add_i = vec_zero;
add_j = vec_zero;
/* monopole */
efp_charge_charge_grad(efp->ptc[i], pt_j->monopole, &dr,
&force_, &add_i_, &add_j_);
add_3(&force, &force_, &add_i, &add_i_,
&add_j, &add_j_);
/* dipole */
efp_charge_dipole_grad(efp->ptc[i], &pt_j->dipole, &dr,
&force_, &add_i_, &add_j_);
add_3(&force, &force_, &add_i, &add_i_,
&add_j, &add_j_);
/* quadrupole -- torque on the multipole side is negated to
 * match the sign convention of the other terms */
efp_charge_quadrupole_grad(efp->ptc[i],
pt_j->quadrupole, &dr, &force_, &add_i_, &add_j_);
vec_negate(&add_j_);
add_3(&force, &force_, &add_i, &add_i_,
&add_j, &add_j_);
/* octupole */
efp_charge_octupole_grad(efp->ptc[i], pt_j->octupole,
&dr, &force_, &add_i_, &add_j_);
add_3(&force, &force_, &add_i, &add_i_,
&add_j, &add_j_);
vec_atomic_add(efp->ptc_grad + i, &force);
efp_sub_force(efp->grad + frag_idx, CVEC(fr_j->x),
CVEC(pt_j->x), &force, &add_j);
}
}
}
/* Work-range callback: accumulate the fragment/point-charge
 * electrostatic energy (and gradients when requested) for fragments
 * in [from, to), then fold the partial sum into the global total. */
static void
compute_ai_elec_range(struct efp *efp, size_t from, size_t to, void *data)
{
double e_total = 0.0;
(void)data;
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic) reduction(+:e_total)
#endif
for (size_t idx = from; idx < to; idx++) {
e_total += compute_ai_elec_frag(efp, idx);
if (efp->do_gradient)
compute_ai_elec_frag_grad(efp, idx);
}
efp->energy.electrostatic_point_charges += e_total;
}
/* Driver for the EFP / ab initio electrostatics term. Does nothing
 * unless EFP_TERM_AI_ELEC is enabled; otherwise distributes the work
 * across ranks and reduces the resulting energy. Always succeeds. */
enum efp_result
efp_compute_ai_elec(struct efp *efp)
{
if (efp->opts.terms & EFP_TERM_AI_ELEC) {
efp_balance_work(efp, compute_ai_elec_range, NULL);
efp_allreduce(&efp->energy.electrostatic_point_charges, 1);
}
return EFP_RESULT_SUCCESS;
}
|
DFA on 8th Round Encryption.c | #include <stdio.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <omp.h>
#include <inttypes.h>
#include <time.h>
#include "AES.h"
/* Capacity of each per-column candidate list below. */
enum { KEYSET_CAP = 1000 };

/* Append one 4-byte key candidate to a hypothesis list. Enforces the
 * list capacity (the original code wrote past the 1000-entry arrays
 * when a list overflowed) and aborts on allocation failure. */
static void store_candidate(uint8_t **set, int *count,
unsigned k1, unsigned k2, unsigned k3, unsigned k4) {
if (*count >= KEYSET_CAP) {
fprintf(stderr, "DFA_round8Phase1: candidate list full, dropping entry\n");
return;
}
uint8_t *entry = (uint8_t*) malloc(4*sizeof(uint8_t));
if (entry == NULL) {
fprintf(stderr, "DFA_round8Phase1: out of memory\n");
exit(EXIT_FAILURE);
}
entry[0] = (uint8_t)k1;
entry[1] = (uint8_t)k2;
entry[2] = (uint8_t)k3;
entry[3] = (uint8_t)k4;
set[*count] = entry;
(*count)++;
}

/*
 * DFA phase 1 for a fault injected before the round-8 MixColumns.
 * For every 32-bit guess of four round-10 key bytes, the XOR
 * differences between the correct (y) and faulty (y_fault) ciphertext
 * bytes after one inverse SubBytes must follow the MixColumns fault
 * propagation pattern (f, f, 2f, 3f in a column-dependent byte order).
 * Each surviving guess is stored as a 4-byte candidate in one of four
 * lists:
 *   set 0: k1,k8,k11,k14    set 1: k2,k5,k12,k15
 *   set 2: k3,k6,k9,k16     set 3: k4,k7,k10,k13
 * The list sizes are returned through SizeKeySet0..3. The caller owns
 * the returned array and every entry in it.
 */
uint8_t ***DFA_round8Phase1(state_t* y, state_t* y_fault, int* SizeKeySet0, int* SizeKeySet1, int* SizeKeySet2, int* SizeKeySet3) {
uint8_t ***KeyHypothesisSet = (uint8_t ***)malloc(4*sizeof(uint8_t **));
if (KeyHypothesisSet == NULL) {
fprintf(stderr, "DFA_round8Phase1: out of memory\n");
exit(EXIT_FAILURE);
}
for (int set = 0; set < 4; set++) {
KeyHypothesisSet[set] = (uint8_t **)malloc(KEYSET_CAP*sizeof(uint8_t *));
if (KeyHypothesisSet[set] == NULL) {
fprintf(stderr, "DFA_round8Phase1: out of memory\n");
exit(EXIT_FAILURE);
}
}
int counter0 = 0;
int counter1 = 0;
int counter2 = 0;
int counter3 = 0;
/* exhaustive scan of all 2^32 guesses of one column's key bytes */
for (unsigned long long key = 0; key <= 4294967295u; key++) {
unsigned int key_1 = (key& 0XFF) ;
unsigned int key_2 = ((key& 0XFF00)>>8);
unsigned int key_3 = ((key& 0XFF0000)>>16);
unsigned int key_4 = ((key& 0XFF000000)>>24);
//Column 1
uint8_t f1 = InvSBox((*y)[3][1]^key_4)^InvSBox((*y_fault)[3][1]^key_4); // 14
uint8_t eq_1 = InvSBox((*y)[2][2]^key_3)^InvSBox((*y_fault)[2][2]^key_3); //11
uint8_t eq_2 = InvSBox((*y)[0][0]^key_1)^InvSBox((*y_fault)[0][0]^key_1); //1
uint8_t eq_3 = InvSBox((*y)[1][3]^key_2)^InvSBox((*y_fault)[1][3]^key_2); //8
uint8_t twof1 = xtimes(f1);
uint8_t threef1 = xtimes(f1)^f1;
if (((f1 == eq_1 && twof1 == eq_2) && threef1 == eq_3)) {
printf("test1: %llu \n", key);
store_candidate(KeyHypothesisSet[0], &counter0, key_1, key_2, key_3, key_4);
}
//Column 2
uint8_t f2 = InvSBox((*y)[1][0]^key_2)^InvSBox((*y_fault)[1][0]^key_2); //5
eq_1 = InvSBox((*y)[0][1]^key_1)^InvSBox((*y_fault)[0][1]^key_1); //2
eq_2 = InvSBox((*y)[2][3]^key_3)^InvSBox((*y_fault)[2][3]^key_3); //12
eq_3 = InvSBox((*y)[3][2]^key_4)^InvSBox((*y_fault)[3][2]^key_4); //15
uint8_t twof2 = xtimes(f2);
uint8_t threef2 = xtimes(f2)^f2;
if (((f2 == eq_1 && twof2 == eq_2) && threef2 == eq_3)) {
printf("test2: %llu \n", key);
store_candidate(KeyHypothesisSet[1], &counter1, key_1, key_2, key_3, key_4);
}
//Column 3
uint8_t f3 = InvSBox((*y)[2][0]^key_3)^InvSBox((*y_fault)[2][0]^key_3); //9
eq_1 = InvSBox((*y)[3][3]^key_4)^InvSBox((*y_fault)[3][3]^key_4); //16
eq_2 = InvSBox((*y)[0][2]^key_1)^InvSBox((*y_fault)[0][2]^key_1); //3
eq_3 = InvSBox((*y)[1][1]^key_2)^InvSBox((*y_fault)[1][1]^key_2); //6
uint8_t twof3 = xtimes(f3);
uint8_t threef3 = xtimes(f3)^f3;
if (((f3 == eq_1 && twof3 == eq_2) && threef3 == eq_3)) {
printf("test3: %llu \n", key);
store_candidate(KeyHypothesisSet[2], &counter2, key_1, key_2, key_3, key_4);
}
//Column 4
uint8_t f4 = InvSBox((*y)[0][3]^key_1)^InvSBox((*y_fault)[0][3]^key_1); //4
eq_1 = InvSBox((*y)[1][2]^key_2)^InvSBox((*y_fault)[1][2]^key_2); //7
eq_2 = InvSBox((*y)[2][1]^key_3)^InvSBox((*y_fault)[2][1]^key_3); //10
eq_3 = InvSBox((*y)[3][0]^key_4)^InvSBox((*y_fault)[3][0]^key_4); //13
uint8_t twof4 = xtimes(f4);
uint8_t threef4 = xtimes(f4)^f4;
if (((f4 == eq_1 && twof4 == eq_2) && threef4 == eq_3)) {
printf("test4: %llu \n", key);
store_candidate(KeyHypothesisSet[3], &counter3, key_1, key_2, key_3, key_4);
}
}
*SizeKeySet0 = counter0;
*SizeKeySet1 = counter1;
*SizeKeySet2 = counter2;
*SizeKeySet3 = counter3;
printf("Size of KeyHypothesisSet0: %d \n", counter0);
printf("Size of KeyHypothesisSet1: %d \n", counter1);
printf("Size of KeyHypothesisSet2: %d \n", counter2);
printf("Size of KeyHypothesisSet3: %d \n", counter3);
return KeyHypothesisSet;
}
/* GF(2^8) multiply by 9 = 8 ^ 1: three doublings XOR the original. */
uint8_t times9(uint8_t x) {
uint8_t x8 = xtimes(xtimes(xtimes(x)));
return x8 ^ x;
}
/* GF(2^8) multiply by 11 = 8 ^ 2 ^ 1. */
uint8_t times11(uint8_t x) {
uint8_t x2 = xtimes(x);
uint8_t x8 = xtimes(xtimes(x2));
return x8 ^ x2 ^ x;
}
/* GF(2^8) multiply by 13 = 8 ^ 4 ^ 1. */
uint8_t times13(uint8_t x) {
uint8_t x4 = xtimes(xtimes(x));
uint8_t x8 = xtimes(x4);
return x8 ^ x4 ^ x;
}
/* GF(2^8) multiply by 14 = 8 ^ 4 ^ 2. */
uint8_t times14(uint8_t x) {
uint8_t x2 = xtimes(x);
uint8_t x4 = xtimes(x2);
return xtimes(x4) ^ x4 ^ x2;
}
/*
 * Phase 2.2 worker: for every combination of a column-1 candidate
 * (k1,k8,k11,k14) from Key1Ptr, a column-2 candidate (k2,k5,k12,k15)
 * from Key2Ptr, and a 14-byte partial key (k3..k16) from PartKey whose
 * shared bytes agree with both candidates, evaluates the remaining
 * fault-difference equations eq1/eq2/eq4 (inverse round built from the
 * round-10 key bytes and the AES key-schedule relations back to round
 * 9) and keeps every full 16-byte round-10 key satisfying
 * xtimes(eq2) == eq1 and xtimes(eq2)^eq2 == eq4.
 *
 * Returns a heap-allocated list of 16-byte keys; *SaveSize receives
 * the count; index only labels diagnostic output. The caller owns the
 * returned list and its rows.
 *
 * NOTE(review): the 5000000-entry result array is not bounds-checked;
 * assumed large enough for all surviving candidates -- confirm.
 */
uint8_t **equation1and4(state_t* y,state_t* y_fault, uint8_t** Key1Ptr, int* Key1Size, uint8_t** Key2Ptr, int* Key2Size, uint8_t **PartKey, int* PartKeySize, int* SaveSize, int index) {
uint8_t ** Key = (uint8_t **)malloc(5000000*sizeof(uint8_t *));
int KeySize = 0;
int k_1,k_2,part;
for(k_1 = 0; k_1 < *Key1Size; k_1++) {
for(k_2 = 0; k_2 < *Key2Size; k_2++) {
for(part = 0; part < *PartKeySize; part++) {
/* unpack the 16 key-byte hypothesis for this combination */
uint8_t k1 = Key1Ptr[k_1][0];
uint8_t k2 = Key2Ptr[k_2][0];
uint8_t k3 = PartKey[part][0];
uint8_t k4 = PartKey[part][1];
uint8_t k5 = PartKey[part][2];
uint8_t k6 = PartKey[part][3];
uint8_t k7 = PartKey[part][4];
uint8_t k8 = PartKey[part][5];
uint8_t k9 = PartKey[part][6];
uint8_t k10 = PartKey[part][7];
uint8_t k11 = PartKey[part][8];
uint8_t k12 = PartKey[part][9];
uint8_t k13 = PartKey[part][10];
uint8_t k14 = PartKey[part][11];
uint8_t k15 = PartKey[part][12];
uint8_t k16 = PartKey[part][13];
/*printf(" k1: %"SCNd32 " k2: %"SCNd32 " k3: %"SCNd32 " k4: %"SCNd32 " \n", k1, k2, k3, k4);
printf(" k5: %"SCNd32 " k6: %"SCNd32 " k7: %"SCNd32 " k8: %"SCNd32 " \n", k5, k6, k7, k8);
printf(" k9: %"SCNd32 " k10: %"SCNd32 " k11: %"SCNd32 " k12: %"SCNd32 " \n", k9, k10, k11, k12);
printf(" k13: %"SCNd32 " k14: %"SCNd32 " k15: %"SCNd32 " k16: %"SCNd32 " \n", k13, k14, k15, k16);*/
/* skip combinations whose overlapping bytes disagree */
if (Key1Ptr[k_1][1] == k8 && Key1Ptr[k_1][2] == k11 && Key1Ptr[k_1][3] == k14 && Key2Ptr[k_2][1] == k5 && Key2Ptr[k_2][2] == k12 && Key2Ptr[k_2][3] == k15) {
/* eq1: inverse MixColumns row of the fault difference; the
 * XOR terms in parentheses reconstruct the round-9 key bytes
 * from the round-10 bytes via the key schedule (Rcon(10)
 * enters only the first word) */
uint8_t eq1 = InvSBox(times14(InvSBox((*y)[0][0] ^ k1) ^ (k1 ^ SBox(k14 ^ k10) ^ Rcon(10)))
^ times11(InvSBox((*y)[3][1] ^ k14) ^ (k2 ^ SBox(k15 ^ k11)))
^ times13(InvSBox((*y)[2][2] ^ k11) ^ (k3 ^ SBox(k16 ^ k12)))
^ times9(InvSBox((*y)[1][3] ^ k8) ^ (k4 ^ SBox(k13 ^ k9))))
^ InvSBox(times14(InvSBox((*y_fault)[0][0] ^ k1) ^ (k1 ^ SBox(k14 ^ k10) ^ Rcon(10)))
^ times11(InvSBox((*y_fault)[3][1] ^ k14) ^ (k2 ^ SBox(k15 ^ k11)))
^ times13(InvSBox((*y_fault)[2][2] ^ k11) ^ (k3 ^ SBox(k16 ^ k12)))
^ times9(InvSBox((*y_fault)[1][3] ^ k8) ^ (k4 ^ SBox(k13 ^ k9))));
uint8_t eq2 = InvSBox(times9(InvSBox((*y)[3][0] ^ k13) ^ (k13 ^ k9))
^ times14(InvSBox((*y)[2][1] ^ k10) ^ (k14 ^ k10))
^ times11(InvSBox((*y)[1][2] ^ k7) ^ (k15 ^ k11))
^ times13(InvSBox((*y)[0][3] ^ k4) ^ (k16 ^ k12)))
^ InvSBox(times9(InvSBox((*y_fault)[3][0] ^ k13) ^ (k13 ^ k9))
^ times14(InvSBox((*y_fault)[2][1] ^ k10) ^ (k14 ^ k10))
^ times11(InvSBox((*y_fault)[1][2] ^ k7) ^ (k15 ^ k11))
^ times13(InvSBox((*y_fault)[0][3] ^ k4) ^ (k16 ^ k12)));
uint8_t eq4 = InvSBox(times11(InvSBox((*y)[1][0] ^ k5) ^ (k5 ^ k1))
^ times13(InvSBox((*y)[0][1] ^ k2) ^ (k6 ^ k2))
^ times9(InvSBox((*y)[3][2] ^ k15) ^ (k7 ^ k3))
^ times14(InvSBox((*y)[2][3] ^ k12) ^ (k8 ^ k4)))
^ InvSBox(times11(InvSBox((*y_fault)[1][0] ^ k5) ^ (k5 ^ k1))
^ times13(InvSBox((*y_fault)[0][1] ^ k2) ^ (k6 ^ k2))
^ times9(InvSBox((*y_fault)[3][2] ^ k15) ^ (k7 ^ k3))
^ times14(InvSBox((*y_fault)[2][3] ^ k12) ^ (k8 ^ k4)));
/* fault pattern check: eq1 = 2*eq2 and eq4 = 3*eq2 */
if ((xtimes(eq2) == eq1) && ((xtimes(eq2) ^ eq2) == eq4)) {
printf("index: %d k_1: %d k_2: %d part: %d KeySize: %d \n", index, k_1, k_2, part, KeySize);
Key[KeySize] = (uint8_t*)malloc(16 * sizeof(uint8_t));
Key[KeySize][0] = k1;
Key[KeySize][1] = k2;
Key[KeySize][2] = k3;
Key[KeySize][3] = k4;
Key[KeySize][4] = k5;
Key[KeySize][5] = k6;
Key[KeySize][6] = k7;
Key[KeySize][7] = k8;
Key[KeySize][8] = k9;
Key[KeySize][9] = k10;
Key[KeySize][10] = k11;
Key[KeySize][11] = k12;
Key[KeySize][12] = k13;
Key[KeySize][13] = k14;
Key[KeySize][14] = k15;
Key[KeySize][15] = k16;
KeySize++;
}
}
}
}
}
*SaveSize = KeySize;
printf("SaveSize inside: %d index: %d \n", *SaveSize, index);
return Key;
}
/*
 * Phase 2.1 of the round-8 DFA: splits the column-1 and column-2
 * candidate sets into "first occurrence of a partial subkey" lists
 * (Key11/Key21) and "repeat occurrence" lists (Key12/Key22), then
 * brute-forces all cross products of the partial subkeys against the
 * column-3/column-4 candidates, keeping every 14-byte partial key
 * (k3..k16) that satisfies eq2 == eq3. Output sizes are returned via
 * the int* out-parameters; the caller owns the returned PartKey list.
 *
 * NOTE(review): Key11/Key12/Key21/Key22 are caller-allocated arrays
 * that this function fills -- their capacities are assumed sufficient.
 */
uint8_t** DFA_round8Phase2_1(state_t* y, state_t* y_fault, uint8_t*** KeyHypothesisSet, int* SizeKeySet0, int* SizeKeySet1, int* SizeKeySet2, int* SizeKeySet3,
int* PartSize, uint8_t** Key11, int* Size11Ptr, uint8_t** Key12, int* Size12Ptr, uint8_t** Key21, int* Size21Ptr, uint8_t** Key22, int* Size22Ptr) {
// Spliting the keys set;
uint8_t** PartialKeySet1 = (uint8_t**)malloc(256 * sizeof(uint8_t*));
uint8_t** PartialKeySet2 = (uint8_t**)malloc(256 * sizeof(uint8_t*));
int i, j, k, l;
int counter11 = 0;
int counter12 = 0;
int counter21 = 0;
int counter22 = 0;
int PartialKeySize1 = 0;
int PartialKeySize2 = 0;
bool duplicate1;
bool duplicate2;
/* split column-1 candidates on their (k8,k11,k14) partial subkey */
for (i = 0; i < *SizeKeySet0; i++) {
duplicate1 = false;
for (j = 0; j < PartialKeySize1; j++) {
if (((PartialKeySet1[j][0] == KeyHypothesisSet[0][i][1] && PartialKeySet1[j][1] == KeyHypothesisSet[0][i][2])
&& PartialKeySet1[j][2] == KeyHypothesisSet[0][i][3])) {//If k8,k11,k14 are already in PartialKeySet1, the candidate goes to Key12.
Key12[counter12] = (uint8_t*)malloc(4 * sizeof(uint8_t));
Key12[counter12][0] = KeyHypothesisSet[0][i][0];//1
Key12[counter12][1] = KeyHypothesisSet[0][i][1];//8
Key12[counter12][2] = KeyHypothesisSet[0][i][2];//11
Key12[counter12][3] = KeyHypothesisSet[0][i][3];//14
counter12++;
duplicate1 = true;
}
}
if (duplicate1 == false) {//k8,k11,k14 are not inside PartialKeySet1 (new partial subkey)
Key11[counter11] = (uint8_t*)malloc(4 * sizeof(uint8_t));
Key11[counter11][0] = KeyHypothesisSet[0][i][0];//1
Key11[counter11][1] = KeyHypothesisSet[0][i][1];//8
Key11[counter11][2] = KeyHypothesisSet[0][i][2];//11
Key11[counter11][3] = KeyHypothesisSet[0][i][3];//14
counter11++;
bool PartDuplicate1 = false;
for (l = 0; l < PartialKeySize1; l++) {// Make sure no duplicates in PartialKeySet1
if (PartialKeySet1[l][0] == KeyHypothesisSet[0][i][1] && PartialKeySet1[l][1] == KeyHypothesisSet[0][i][2] && PartialKeySet1[l][2] == KeyHypothesisSet[0][i][3]) {
printf("There is duplicates in PartialKeySize1");
PartDuplicate1 = true;
}
}
if (PartDuplicate1 == false) {
PartialKeySet1[PartialKeySize1] = (uint8_t*)malloc(3 * sizeof(uint8_t));
PartialKeySet1[PartialKeySize1][0] = KeyHypothesisSet[0][i][1];//8
PartialKeySet1[PartialKeySize1][1] = KeyHypothesisSet[0][i][2];//11
PartialKeySet1[PartialKeySize1][2] = KeyHypothesisSet[0][i][3];//14
PartialKeySize1++;
}
}
}
/* same split for column-2 candidates on (k5,k12,k15) */
for (i = 0; i < *SizeKeySet1; i++) {
duplicate2 = false;
for (j = 0; j < PartialKeySize2; j++) {
if (((PartialKeySet2[j][0] == KeyHypothesisSet[1][i][1] && PartialKeySet2[j][1] == KeyHypothesisSet[1][i][2]) &&
PartialKeySet2[j][2] == KeyHypothesisSet[1][i][3])) {//If k5,k12,k15 are already in PartialKeySet2, the candidate goes to Key22.
Key22[counter22] = (uint8_t*)malloc(4 * sizeof(uint8_t));
Key22[counter22][0] = KeyHypothesisSet[1][i][0];//2
Key22[counter22][1] = KeyHypothesisSet[1][i][1];//5
Key22[counter22][2] = KeyHypothesisSet[1][i][2];//12
Key22[counter22][3] = KeyHypothesisSet[1][i][3];//15
counter22++;
duplicate2 = true;
}
}
if (duplicate2 == false) {
Key21[counter21] = (uint8_t*)malloc(4 * sizeof(uint8_t));
Key21[counter21][0] = KeyHypothesisSet[1][i][0];//2
Key21[counter21][1] = KeyHypothesisSet[1][i][1];//5
Key21[counter21][2] = KeyHypothesisSet[1][i][2];//12
Key21[counter21][3] = KeyHypothesisSet[1][i][3];//15
counter21++;
bool PartDuplicate2 = false;
for (j = 0; j < PartialKeySize2; j++) {// Make sure no duplicates in PartialKeySet2
//printf("flagpart2 j: %d i: %d\n", j, i);
if (PartialKeySet2[j][0] == KeyHypothesisSet[1][i][1] && PartialKeySet2[j][1] == KeyHypothesisSet[1][i][2] && PartialKeySet2[j][2] == KeyHypothesisSet[1][i][3]) {
printf("There is duplicates in PartialKeySize2");
PartDuplicate2 = true;
}
}
if (PartDuplicate2 == false) {
//printf("Help3 flagpart2==false\n");
PartialKeySet2[PartialKeySize2] = (uint8_t*)malloc(3 * sizeof(uint8_t));
PartialKeySet2[PartialKeySize2][0] = KeyHypothesisSet[1][i][1];//5
PartialKeySet2[PartialKeySize2][1] = KeyHypothesisSet[1][i][2];//12
PartialKeySet2[PartialKeySize2][2] = KeyHypothesisSet[1][i][3];//15
PartialKeySize2++;
}
}
}
printf("PartialKeySize1: %d PartialKeySize2: %d \n", PartialKeySize1, PartialKeySize2);
printf("Done with splitting \n");
//Test if it satisfy equation 2 and 3
int counter = 0;
/* 2^25-entry table; NOTE(review): no bounds check against counter */
uint8_t** PartKey = (uint8_t**)malloc(33554432u * sizeof(uint8_t*)); //k_3,k_4,k_5,k_6,k_7,k_8,k_9, k_10,k_11,k_12,k_13,k_14,k_15,k_16
int column1, column2, column3, column4;
for (column1 = 0; column1 < PartialKeySize1; column1++) {
for (column2 = 0; column2 < PartialKeySize2; column2++) {
for (column3 = 0; column3 < *SizeKeySet2; column3++) {
for (column4 = 0; column4 < *SizeKeySet3; column4++) {
uint8_t k8 = PartialKeySet1[column1][0];//8
uint8_t k11 = PartialKeySet1[column1][1];//11
uint8_t k14 = PartialKeySet1[column1][2];//14
uint8_t k5 = PartialKeySet2[column2][0];//5
uint8_t k12 = PartialKeySet2[column2][1];//12
uint8_t k15 = PartialKeySet2[column2][2];//15
uint8_t k3 = KeyHypothesisSet[2][column3][0];//3
uint8_t k6 = KeyHypothesisSet[2][column3][1];//6
uint8_t k9 = KeyHypothesisSet[2][column3][2];//9
uint8_t k16 = KeyHypothesisSet[2][column3][3];//16
uint8_t k4 = KeyHypothesisSet[3][column4][0];//4
uint8_t k7 = KeyHypothesisSet[3][column4][1];//7
uint8_t k10 = KeyHypothesisSet[3][column4][2];//10
uint8_t k13 = KeyHypothesisSet[3][column4][3];//13
//printf(" k3: %"SCNd32 " k4: %"SCNd32 " \n", k3, k4);
//printf(" k5: %"SCNd32 " k6: %"SCNd32 " k7: %"SCNd32 " k8: %"SCNd32 " \n", k5, k6, k7, k8);
//printf(" k9: %"SCNd32 " k10: %"SCNd32 " k11: %"SCNd32 " k12: %"SCNd32 " \n", k9, k10, k11, k12);
//printf(" k13: %"SCNd32 " k14: %"SCNd32 " k15: %"SCNd32 " k16: %"SCNd32 " \n", k13, k14, k15, k16);
/* fault-difference equations for rows 2 and 3; the XOR terms
 * in parentheses are round-9 key bytes reconstructed from the
 * round-10 bytes via the key schedule */
uint8_t eq2 = InvSBox(times9(InvSBox((*y)[3][0] ^ k13) ^ (k13 ^ k9))
^ times14(InvSBox((*y)[2][1] ^ k10) ^ (k14 ^ k10))
^ times11(InvSBox((*y)[1][2] ^ k7) ^ (k15 ^ k11))
^ times13(InvSBox((*y)[0][3] ^ k4) ^ (k16 ^ k12)))
^ InvSBox(times9(InvSBox((*y_fault)[3][0] ^ k13) ^ (k13 ^ k9))
^ times14(InvSBox((*y_fault)[2][1] ^ k10) ^ (k14 ^ k10))
^ times11(InvSBox((*y_fault)[1][2] ^ k7) ^ (k15 ^ k11))
^ times13(InvSBox((*y_fault)[0][3] ^ k4) ^ (k16 ^ k12)));
uint8_t eq3 = InvSBox(times13(InvSBox((*y)[2][0] ^ k9) ^ (k9 ^ k5))
^ times9(InvSBox((*y)[1][1] ^ k6) ^ (k10 ^ k6))
^ times14(InvSBox((*y)[0][2] ^ k3) ^ (k11 ^ k7))
^ times11(InvSBox((*y)[3][3] ^ k16) ^ (k12 ^ k8)))
^ InvSBox(times13(InvSBox((*y_fault)[2][0] ^ k9) ^ (k9 ^ k5))
^ times9(InvSBox((*y_fault)[1][1] ^ k6) ^ (k10 ^ k6))
^ times14(InvSBox((*y_fault)[0][2] ^ k3) ^ (k11 ^ k7))
^ times11(InvSBox((*y_fault)[3][3] ^ k16) ^ (k12 ^ k8)));
if (eq2 == eq3) {
printf("column1: %d column2: %d column3: %d column4: %d \n", column1, column2, column3, column4);
PartKey[counter] = (uint8_t*)malloc(14 * sizeof(uint8_t));
PartKey[counter][0] = k3;//3
PartKey[counter][1] = k4;//4
PartKey[counter][2] = k5;//5
PartKey[counter][3] = k6;//6
PartKey[counter][4] = k7;//7
PartKey[counter][5] = k8;//8
PartKey[counter][6] = k9;//9
PartKey[counter][7] = k10;//10
PartKey[counter][8] = k11;//11
PartKey[counter][9] = k12;//12
PartKey[counter][10] = k13;//13
PartKey[counter][11] = k14;//14
PartKey[counter][12] = k15;//15
PartKey[counter][13] = k16;//16
counter++;
}
}
}
}
}
printf("Phase2.1 Intensive Part ends \n");
/// Free PartialKey1 and PartialKey2 and KeyHypothesis
for (k = 0; k < PartialKeySize1; k++) {
free(PartialKeySet1[k]);
}
for (k = 0; k < PartialKeySize2; k++) {
free(PartialKeySet2[k]);
}
free(PartialKeySet1);
free(PartialKeySet2);
//Allocate all the size
*PartSize = counter;
*Size11Ptr = counter11;
*Size12Ptr = counter12;
*Size21Ptr = counter21;
*Size22Ptr = counter22;
return PartKey;
}
/* Return true when two 16-byte round keys are identical. */
static bool keys_equal(const uint8_t *a, const uint8_t *b) {
int i;
for (i = 0; i < 16; i++) {
if (a[i] != b[i])
return false;
}
return true;
}

/* Append every key of src (may be NULL) to dst, skipping duplicates.
 * Duplicate rows and the src container itself are freed here, fixing
 * the leaks of the original copy-pasted consolidation loops. label is
 * used only for the diagnostic message. */
static void merge_results(uint8_t **dst, int *dst_count, uint8_t **src, int src_count, int label) {
int i, j;
if (src == NULL)
return;
for (i = 0; i < src_count; i++) {
bool duplicate = false;
for (j = 0; j < *dst_count; j++) {
if (keys_equal(dst[j], src[i])) {
printf("There is duplicates in Result%d", label);
duplicate = true;
break;
}
}
if (duplicate) {
free(src[i]);
} else {
dst[*dst_count] = src[i];
(*dst_count)++;
}
}
free(src);
}

/*
 * Phase 2.2 driver: evaluates the remaining fault equations for the
 * four (Key1x, Key2y) list pairings in parallel via equation1and4()
 * and merges the four result sets into one duplicate-free list of
 * full 16-byte round-10 key candidates (*FinalSize entries). Frees
 * PartKey and all intermediate results; the caller owns the returned
 * list and its rows.
 */
uint8_t** DFA_round8Phase2_2(state_t* y, state_t* y_fault, uint8_t** PartKey, int* PartSize,
uint8_t** Key11, int* Size11, uint8_t** Key12, int* Size12, uint8_t** Key21, int* Size21, uint8_t** Key22, int* Size22,int* FinalSize) {
/* NULL instead of the original throwaway 50000-entry allocations,
 * which were immediately overwritten (and leaked) in the parallel
 * region below */
uint8_t **Results1 = NULL;
uint8_t **Results2 = NULL;
uint8_t **Results3 = NULL;
uint8_t **Results4 = NULL;
int SaveSize1 = 0;
int SaveSize2 = 0;
int SaveSize3 = 0;
int SaveSize4 = 0;
/* one thread per (Key1x, Key2y) pairing */
omp_set_num_threads(4);
#pragma omp parallel
{ printf("Help12 num of thread: %d \n", omp_get_num_threads());
if (omp_get_thread_num() == 0) {
Results1 = equation1and4(y, y_fault, Key11, Size11, Key21, Size21, PartKey, PartSize, &SaveSize1, 0);
}
if (omp_get_thread_num() == 1) {
Results2 = equation1and4(y, y_fault, Key12, Size12, Key21, Size21, PartKey, PartSize, &SaveSize2, 1);
}
if (omp_get_thread_num() == 2) {
Results3 = equation1and4(y, y_fault, Key11, Size11, Key22, Size22, PartKey, PartSize, &SaveSize3, 2);
}
if (omp_get_thread_num() == 3) {
Results4 = equation1and4(y, y_fault, Key12, Size12, Key22, Size22, PartKey, PartSize, &SaveSize4, 3);
}
}
//Free PartKey
int i;
for (i = 0; i < *PartSize; i++) {
free(PartKey[i]);
}
free(PartKey);
printf("SaveSize1 outside: %d \n", SaveSize1);
printf("SaveSize2 outside: %d \n", SaveSize2);
printf("SaveSize3 outside: %d \n", SaveSize3);
printf("SaveSize4 outside: %d \n", SaveSize4);
//Consolidate into one Final Key Set
uint8_t **FinalKey = (uint8_t **)malloc(5000000 *sizeof(uint8_t *));
if (FinalKey == NULL) {
fprintf(stderr, "DFA_round8Phase2_2: out of memory\n");
exit(EXIT_FAILURE);
}
int FinalCounter = 0;
merge_results(FinalKey, &FinalCounter, Results1, SaveSize1, 1);
printf("FinalCounter1: %d \n", FinalCounter);
merge_results(FinalKey, &FinalCounter, Results2, SaveSize2, 2);
printf("FinalCounter2: %d \n", FinalCounter);
merge_results(FinalKey, &FinalCounter, Results3, SaveSize3, 3);
printf("FinalCounter3: %d \n", FinalCounter);
merge_results(FinalKey, &FinalCounter, Results4, SaveSize4, 4);
*FinalSize = FinalCounter;
printf("FinalSize Inside: %d \n", FinalCounter);
return FinalKey;
}
/*
 * Serialize the four per-column key-candidate sets to fptr: first the
 * four set sizes (one decimal per line), then each set's 4-byte
 * candidates, one byte value per line. The output format is identical
 * to the original (decimal integers), but uses the correct printf
 * macro PRIu8 for uint8_t instead of the scanf macro SCNd32.
 */
void WriteFileKeyHypothesis(uint8_t*** KeyHypothesis, int* SizeKeySet0, int* SizeKeySet1, int* SizeKeySet2, int* SizeKeySet3, FILE* fptr) {
const int sizes[4] = { *SizeKeySet0, *SizeKeySet1, *SizeKeySet2, *SizeKeySet3 };
for (int s = 0; s < 4; s++)
fprintf(fptr, "%d\n", sizes[s]);
for (int s = 0; s < 4; s++) {
for (int i = 0; i < sizes[s]; i++) {
for (int b = 0; b < 4; b++)
fprintf(fptr, "%"PRIu8"\n", KeyHypothesis[s][i][b]);
}
}
}
void ReadFileKeyHypothesis(uint8_t*** KeyHypothesisSet, int* SizeKeySet0,int *SizeKeySet1, int *SizeKeySet2, int *SizeKeySet3 ,FILE* fptr) {
    /* Read back the four key-hypothesis sets written by WriteFileKeyHypothesis:
     * four set sizes, then 4 decimal key bytes per hypothesis.
     * Each hypothesis row is malloc'd here; the caller frees the rows.
     * On malformed input or allocation failure the affected set's size is
     * clamped to the number of fully-read rows, so callers never index junk.
     * fscanf replaces the non-portable fscanf_s: with no string conversions
     * ("%d" / "%hhu") the two are specified to behave identically. */
    if (fscanf(fptr, "%d", SizeKeySet0) != 1) { *SizeKeySet0 = 0; }
    printf("Size0: %d \n", *SizeKeySet0);
    if (fscanf(fptr, "%d", SizeKeySet1) != 1) { *SizeKeySet1 = 0; }
    printf("Size1: %d \n", *SizeKeySet1);
    if (fscanf(fptr, "%d", SizeKeySet2) != 1) { *SizeKeySet2 = 0; }
    printf("Size2: %d \n", *SizeKeySet2);
    if (fscanf(fptr, "%d", SizeKeySet3) != 1) { *SizeKeySet3 = 0; }
    printf("Size3: %d \n", *SizeKeySet3);
    printf("Time to start reading. \n");
    int* sizes[4] = { SizeKeySet0, SizeKeySet1, SizeKeySet2, SizeKeySet3 };
    for (int s = 0; s < 4; s++) {
        for (int i = 0; i < *sizes[s]; i++) {
            /* NOTE(review): the original allocated 512 bytes per 4-byte row;
             * kept as-is in case later phases rely on the slack - TODO confirm. */
            KeyHypothesisSet[s][i] = (uint8_t*)malloc(512 * sizeof(uint8_t));
            if (KeyHypothesisSet[s][i] == NULL) {
                *sizes[s] = i;   /* out of memory: keep only complete rows */
                return;
            }
            for (int b = 0; b < 4; b++) {
                if (fscanf(fptr, "%" SCNu8, &KeyHypothesisSet[s][i][b]) != 1) {
                    free(KeyHypothesisSet[s][i]);
                    *sizes[s] = i;   /* malformed input: drop partial row */
                    return;
                }
            }
        }
    }
}
void WriteFilePartKey(uint8_t** PartKey, int* PartSize, FILE* fptr) {
    /* Serialize the 14-byte partial-key candidates: the count first, then
     * 14 decimal byte values per candidate, one per line.
     * Format must stay in sync with ReadFilePartKey. */
    int size = *PartSize;
    fprintf(fptr, "%d\n", size);
    for (int i = 0; i < size; i++) {
        for (int b = 0; b < 14; b++) {
            /* PRIu8 replaces the misused scan macro SCNd32; output unchanged. */
            fprintf(fptr, "%" PRIu8 "\n", PartKey[i][b]);
        }
    }
}
void ReadFilePartKey(uint8_t** PartKey, int* PartSize, FILE* fptr) {
    /* Read back *PartSize 14-byte partial keys (WriteFilePartKey format).
     * Note: PartSize is read by the caller beforehand so PartKey can be sized.
     * Rows are malloc'd here; the caller frees them. On malformed input or
     * allocation failure, *PartSize is clamped to the rows fully read.
     * fscanf replaces the non-portable fscanf_s (identical for "%hhu"). */
    printf("Time to start reading. \n");
    for (int i = 0; i < *PartSize; i++) {
        PartKey[i] = (uint8_t*)malloc(14 * sizeof(uint8_t));
        if (PartKey[i] == NULL) {
            *PartSize = i;
            return;
        }
        for (int b = 0; b < 14; b++) {
            if (fscanf(fptr, "%" SCNu8, &PartKey[i][b]) != 1) {
                free(PartKey[i]);
                *PartSize = i;   /* drop the partially-read row */
                return;
            }
        }
    }
}
void WriteFileFinalKey(uint8_t** FinalKey, int* FinalSize, FILE* fptr) {
    /* Serialize the 16-byte final-key candidates: the count first, then
     * 16 decimal byte values per candidate, one per line.
     * Format must stay in sync with ReadFileFinalKey. */
    int size = *FinalSize;
    fprintf(fptr, "%d\n", size);
    for (int i = 0; i < size; i++) {
        for (int b = 0; b < 16; b++) {
            /* PRIu8 replaces the misused scan macro SCNd32; output unchanged. */
            fprintf(fptr, "%" PRIu8 "\n", FinalKey[i][b]);
        }
    }
}
void ReadFileFinalKey(uint8_t** FinalKey, int* FinalSize, FILE* fptr) {
    /* Read back *FinalSize 16-byte final-key candidates (WriteFileFinalKey
     * format). FinalSize is read by the caller so FinalKey can be sized.
     * BUG FIX: the original allocated only 14 bytes per row but stored
     * indices 0..15, overflowing each heap buffer by 2 bytes.
     * Rows are malloc'd here; the caller frees them. On malformed input or
     * allocation failure, *FinalSize is clamped to the rows fully read.
     * fscanf replaces the non-portable fscanf_s (identical for "%hhu"). */
    printf("Time to start reading. \n");
    for (int i = 0; i < *FinalSize; i++) {
        FinalKey[i] = (uint8_t*)malloc(16 * sizeof(uint8_t));
        if (FinalKey[i] == NULL) {
            *FinalSize = i;
            return;
        }
        for (int b = 0; b < 16; b++) {
            if (fscanf(fptr, "%" SCNu8, &FinalKey[i][b]) != 1) {
                free(FinalKey[i]);
                *FinalSize = i;   /* drop the partially-read row */
                return;
            }
        }
    }
}
void WriteFileKeyArray(uint8_t** Key, int* SizePtr, FILE* fptr) {
    /* Serialize *SizePtr 4-byte key candidates (Key11/Key12/Key21/Key22):
     * the count first, then 4 decimal byte values per candidate, one per
     * line. Format must stay in sync with ReadFileKeyArray. */
    fprintf(fptr, "%d\n", *SizePtr);
    for (int i = 0; i < *SizePtr; i++) {
        for (int b = 0; b < 4; b++) {
            /* PRIu8 replaces the misused scan macro SCNd32; output unchanged. */
            fprintf(fptr, "%" PRIu8 "\n", Key[i][b]);
        }
    }
}
void ReadFileKeyArray(uint8_t** Key, int* SizePtr, FILE* fptr) {
    /* Read back *SizePtr 4-byte key candidates (WriteFileKeyArray format).
     * Note: the size is read by the caller beforehand so Key can be sized.
     * Rows are malloc'd here; the caller frees them. On malformed input or
     * allocation failure, *SizePtr is clamped to the rows fully read.
     * fscanf replaces the non-portable fscanf_s (identical for "%hhu"). */
    printf("Time to start reading. \n");
    for (int i = 0; i < *SizePtr; i++) {
        Key[i] = (uint8_t*)malloc(4 * sizeof(uint8_t));
        if (Key[i] == NULL) {
            *SizePtr = i;
            return;
        }
        for (int b = 0; b < 4; b++) {
            if (fscanf(fptr, "%" SCNu8, &Key[i][b]) != 1) {
                free(Key[i]);
                *SizePtr = i;   /* drop the partially-read row */
                return;
            }
        }
    }
}
int main() {
state_t MasterKey = {{0x68, 0x98, 0x10, 0xd4},{0xd5, 0x30, 0x5b, 0xa5},{0x20, 0x8c, 0xbc, 0xd3},{0xab, 0x3c, 0x83, 0x53}};
state_t* MasKeyPtr = &MasterKey;
state_t matrix1 = { {0x68, 0x98, 0x16, 0xd4},{0xd5, 0x30, 0x36, 0xa5},{0x00, 0x8c, 0xbc, 0xd3},{0xbb, 0x3c, 0x83, 0x53} };
state_t matrix2 = { {0x68, 0x98, 0x16, 0xd4},{0xd5, 0x30, 0x36, 0xa5},{0x00, 0x8c, 0xbc, 0xd3},{0xbb, 0x3c, 0x83, 0x53} };
state_t* ptrMatrix1 = &matrix1;
state_t* ptrMatrix2 = &matrix2;
printf("Original Text1: \n");
PrintMatrix(ptrMatrix1);
state_t* ciphertext;
ciphertext = AESEncryption(ptrMatrix1, MasKeyPtr);
printf("Encrypted Text: \n");
PrintMatrix(ciphertext);
state_t* ciphertextFaulty;
ciphertextFaulty = AESEncryptionFaultyROUND8(ptrMatrix2, MasKeyPtr);
printf("Faulty Encrypted Text: \n");
PrintMatrix(ciphertextFaulty);
int* SizeKeySet0, *SizeKeySet1, *SizeKeySet2, *SizeKeySet3;
int Size0 = 0;
int Size1 = 0;
int Size2 = 0;
int Size3 = 0;
SizeKeySet0 = &Size0;
SizeKeySet1 = &Size1;
SizeKeySet2 = &Size2;
SizeKeySet3 = &Size3;
FILE* fptr_time;
errno_t err_time;
err_time = fopen_s(&fptr_time, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Time.txt", "w+");
/************************************************ PHASE 1 **********************************************************************/
printf("Going into DFAPhase1: \n");
clock_t begin_1 = clock();
uint8_t ***KeyHypothesisSet = DFA_round8Phase1(ciphertext, ciphertextFaulty, SizeKeySet0, SizeKeySet1, SizeKeySet2, SizeKeySet3);
clock_t end_1 = clock();
double time_spent_1 = (double)(end_1 - begin_1) / CLOCKS_PER_SEC;
printf("time in sec: %f \n", time_spent_1);
printf("Going out of DFAPhase1: \n");
printf("Size0 Outside: %d \n", Size0);
printf("Size1 Outside: %d \n", Size1);
printf("Size2 Outside: %d \n", Size2);
printf("Size3 Outside: %d \n", Size3);
fprintf(fptr_time, "Time spent 1: %lf \n", time_spent_1);
FILE* fptr;
errno_t err;
err = fopen_s(&fptr, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Phase1KeyHypothesis.txt", "w");
if (err == 0) {
printf("The file'Phase1KeyHypothesis.txt was opened\n");
}
else {
printf("The file 'Phase1KeyHypothesis.txt' was not opened\n");
}
WriteFileKeyHypothesis(KeyHypothesisSet, SizeKeySet0, SizeKeySet1, SizeKeySet2, SizeKeySet3, fptr);
fclose(fptr);
/************************************************ PHASE 2.1 **********************************************************************/
/************************************************ Initialise KeyHypothesisSet **********************************************************************/
//uint8_t*** KeyHypothesisSet = (uint8_t***)malloc(4 * sizeof(uint8_t**));
//KeyHypothesisSet[0] = (uint8_t**)malloc(1000 * sizeof(uint8_t*)); // For key k_1,k_8,k_11,k_14
//KeyHypothesisSet[1] = (uint8_t**)malloc(1000 * sizeof(uint8_t*)); // For key k_2,k_5,k_12,k_15
//KeyHypothesisSet[2] = (uint8_t**)malloc(1000 * sizeof(uint8_t*)); // For key k_3,k_6,k_9,k_16
//KeyHypothesisSet[3] = (uint8_t**)malloc(1000 * sizeof(uint8_t*)); // For key k_4,k_7,k_10,k_13
///************************************************* Read file for KeyHypothesisSet **********************************************/
//FILE* fptr;
//errno_t err;
//err = fopen_s(&fptr, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Phase1KeyHypothesis.txt", "r");
//if (err == 0) {
// printf("The file'Phase1KeyHypothesis.txt was opened\n");
//} else {
// printf("The file 'Phase1KeyHypothesis.txt' was not opened\n");
//}
//ReadFileKeyHypothesis(KeyHypothesisSet, SizeKeySet0, SizeKeySet1, SizeKeySet2, SizeKeySet3, fptr);
//fclose(fptr);
///************************************************ Apply PHASE 2.1 **********************************************************************/
printf("Going into DFAPhase2_1: \n");
uint8_t** Key11 = (uint8_t**)malloc(256 * sizeof(uint8_t*));;//Put outside
uint8_t** Key12 = (uint8_t**)malloc(256 * sizeof(uint8_t*));;
uint8_t** Key21 = (uint8_t**)malloc(256 * sizeof(uint8_t*));;
uint8_t** Key22 = (uint8_t**)malloc(256 * sizeof(uint8_t*));;
int* Size11Ptr, * Size12Ptr, * Size21Ptr, * Size22Ptr;
int Size11 = 0;
int Size12 = 0;
int Size21 = 0;
int Size22 = 0;
Size11Ptr = &Size11;
Size12Ptr = &Size12;
Size21Ptr = &Size21;
Size22Ptr = &Size22;
int* PartSize;
int Part = 0;
PartSize = &Part;
clock_t begin_2 = clock();
uint8_t **PartKey = DFA_round8Phase2_1(ciphertext, ciphertextFaulty, KeyHypothesisSet, SizeKeySet0, SizeKeySet1, SizeKeySet2, SizeKeySet3, PartSize,
Key11, Size11Ptr, Key12, Size12Ptr, Key21, Size21Ptr, Key22, Size22Ptr);
clock_t end_2 = clock();
double time_spent_2 = (double)(end_2 - begin_2) / CLOCKS_PER_SEC;
printf("time in sec: %f", time_spent_2);
fprintf(fptr_time, "Time spent 2: %f \n", time_spent_2);
printf(" PartSize: %d \n", *PartSize);
printf("Going out of DFAPhase2_1: \n");
//Freeing KeyHypothesisSet
int i;
for (i = 0; i < *SizeKeySet0; i++) {
free(KeyHypothesisSet[0][i]);
}
for (i = 0; i < *SizeKeySet1; i++) {
free(KeyHypothesisSet[1][i]);
}
for (i = 0; i < *SizeKeySet2; i++) {
free(KeyHypothesisSet[2][i]);
}
for (i = 0; i < *SizeKeySet3; i++) {
free(KeyHypothesisSet[3][i]);
}
free(KeyHypothesisSet[0]);
free(KeyHypothesisSet[1]);
free(KeyHypothesisSet[2]);
free(KeyHypothesisSet[3]);
free(KeyHypothesisSet);
printf("Size11Ptr: %d Size12Ptr: %d Size21Ptr: %d Size22Ptr: %d \n", *Size11Ptr, *Size12Ptr, *Size21Ptr, *Size22Ptr);
///************************************************* Write file for PartKey **********************************************/
FILE* fptr2;
errno_t err2;
err2 = fopen_s(&fptr2, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Phase2PartKey.txt", "w");
if (err2 == 0) {
printf("The file'Phase2PartKey.txt was opened\n");
} else {
printf("The file 'Phase2PartKey.txt' was not opened\n");
}
WriteFilePartKey(PartKey, PartSize, fptr2);
fclose(fptr2);
printf("The file 'Phase2PartKey.txt' closed\n");
/************************************************* Write file for Key11, Key12, Key21 and Key22 **********************************************/
FILE* fptr2_11;
errno_t err2_11;
err2_11 = fopen_s(&fptr2_11, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Key11.txt", "w");
if (err2_11 == 0) {
printf("The file'Key11.txt' was opened\n");
} else {
printf("The file 'Key11.txt' was not opened\n");
}
WriteFileKeyArray(Key11, Size11Ptr, fptr2_11);
fclose(fptr2_11);
printf("The file 'Key11.txt' closed\n");
FILE* fptr2_12;
errno_t err2_12;
err2_12 = fopen_s(&fptr2_12, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Key12.txt", "w");
if (err2_12 == 0) {
printf("The file'Key12.txt' was opened\n");
}
else {
printf("The file 'Key12.txt' was not opened\n");
}
WriteFileKeyArray(Key12, Size12Ptr, fptr2_12);
fclose(fptr2_12);
printf("The file 'Key12.txt' closed\n");
FILE* fptr2_21;
errno_t err2_21;
err2_21 = fopen_s(&fptr2_21, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Key21.txt", "w");
if (err2_21 == 0) {
printf("The file'Key21.txt' was opened\n");
}
else {
printf("The file 'Key21.txt' was not opened\n");
}
WriteFileKeyArray(Key21, Size21Ptr, fptr2_21);
fclose(fptr2_21);
printf("The file 'Key21.txt' closed\n");
FILE* fptr2_22;
errno_t err2_22;
err2_22 = fopen_s(&fptr2_22, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Key22.txt", "w");
if (err2_22 == 0) {
printf("The file'Key22.txt' was opened\n");
}
else {
printf("The file 'Key22.txt' was not opened\n");
}
WriteFileKeyArray(Key22, Size22Ptr, fptr2_22);
fclose(fptr2_22);
printf("The file 'Key22.txt' closed\n");
/************************************************ PHASE 2.2 **********************************************************************/
/************************************************* Read file for PartKey **********************************************/
//FILE* fptr3;
//errno_t err3;
//err3 = fopen_s(&fptr3, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Phase2PartKey.txt", "r");
//if (err3 == 0) {
// printf("The file'Phase2PartKey.txt was opened\n");
//}
//else {
// printf("The file 'Phase2PartKey.txt' was not opened\n");
//}
//int* PartSize;
//int Part = 0;
//PartSize = &Part;
//fscanf_s(fptr3, "%d", PartSize);
//printf("PartSize: %d \n", *PartSize);
//uint8_t** PartKey= (uint8_t**)malloc((*PartSize) * sizeof(uint8_t*));
//ReadFilePartKey(PartKey,PartSize,fptr3);
//fclose(fptr3);
///************************************************* Read file for Key11**********************************************/
//FILE* fptr3_11;
//errno_t err3_11;
//err3_11 = fopen_s(&fptr3_11, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Key11.txt", "r");
//if (err3_11 == 0) {
// printf("The file'Key11.txt.txt was opened\n");
//}
//else {
// printf("The file 'Key11.txt.txt' was not opened\n");
//}
//int* Size11Ptr;
//int Size11 = 0;
//Size11Ptr = &Size11;
//fscanf_s(fptr3_11, "%d", Size11Ptr);
//printf("Size11: %d \n", *Size11Ptr);
//
//uint8_t** Key11 = (uint8_t**)malloc(Size11 * sizeof(uint8_t*));
//ReadFileKeyArray(Key11, Size11Ptr, fptr3_11);
//fclose(fptr3_11);
///************************************************* Read file for Key12**********************************************/
//FILE* fptr3_12;
//errno_t err3_12;
//err3_12 = fopen_s(&fptr3_12, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Key12.txt", "r");
//if (err3_12 == 0) {
// printf("The file'Key12.txt.txt was opened\n");
//}
//else {
// printf("The file 'Key12.txt.txt' was not opened\n");
//}
//int* Size12Ptr;
//int Size12 = 0;
//Size12Ptr = &Size12;
//fscanf_s(fptr3_12, "%d", Size12Ptr);
//printf("Size12: %d \n", *Size12Ptr);
//uint8_t** Key12 = (uint8_t**)malloc(Size12 * sizeof(uint8_t*));
//ReadFileKeyArray(Key12, Size12Ptr, fptr3_12);
//fclose(fptr3_12);
///************************************************* Read file for Key21**********************************************/
//FILE* fptr3_21;
//errno_t err3_21;
//err3_21 = fopen_s(&fptr3_21, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Key21.txt", "r");
//if (err3_21 == 0) {
// printf("The file'Key21.txt.txt was opened\n");
//}
//else {
// printf("The file 'Key21.txt.txt' was not opened\n");
//}
//int* Size21Ptr;
//int Size21 = 0;
//Size21Ptr = &Size21;
//fscanf_s(fptr3_21, "%d", Size21Ptr);
//printf("Size21: %d \n", *Size21Ptr);
//uint8_t** Key21 = (uint8_t**)malloc(Size21 * sizeof(uint8_t*));
//ReadFileKeyArray(Key21, Size21Ptr, fptr3_21);
//fclose(fptr3_21);
///************************************************* Read file for Key22**********************************************/
//FILE* fptr3_22;
//errno_t err3_22;
//err3_22 = fopen_s(&fptr3_22, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Key22.txt", "r");
//if (err3_22 == 0) {
// printf("The file'Key22.txt.txt was opened\n");
//}
//else {
// printf("The file 'Key22.txt.txt' was not opened\n");
//}
//int* Size22Ptr;
//int Size22 = 0;
//Size22Ptr = &Size22;
//fscanf_s(fptr3_22, "%d", Size22Ptr);
//printf("Size22: %d \n", *Size22Ptr);
//uint8_t** Key22 = (uint8_t**)malloc(Size22 * sizeof(uint8_t*));
//ReadFileKeyArray(Key22, Size22Ptr, fptr3_22);
//fclose(fptr3_22);
///************************************************ Applying Phase 2.2 **********************************************************************/
printf("Going in of DFAPhase2_2: \n");
int* FinalSize;
int Final = 0;
FinalSize = &Final;
clock_t begin_3 = clock();
uint8_t **FinalKeyHypothesis = DFA_round8Phase2_2(ciphertext, ciphertextFaulty, PartKey, PartSize,
Key11, Size11Ptr, Key12, Size12Ptr, Key21, Size21Ptr, Key22, Size22Ptr, FinalSize);
clock_t end_3 = clock();
double time_spent_3 = (double)(end_3 - begin_3) / CLOCKS_PER_SEC;
printf("time in sec: %f \n", time_spent_3);// ~4359 sec
//fprintf(fptr_time, "Time spent 3: %f \n", time_spent_3);
printf("Going out of DFAPhase2_2: \n");
///************************************************* Write file for FinalKey **********************************************/
FILE* fptr4;
errno_t err4;
err4 = fopen_s(&fptr4, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test2\\Phase2FinalKey.txt", "w");
if (err4 == 0) {
printf("The file'Phase2FinalKey.txt was opened\n");
}
else {
printf("The file 'Phase2FinalKey.txt' was not opened\n");
}
WriteFileFinalKey(FinalKeyHypothesis, FinalSize, fptr4);
fclose(fptr4);
free(Key11);
free(Key12);
free(Key21);
free(Key22);
/************************************************TESTING **********************************************************************/
//Check if the actual key in Round 10 is inside.
//Key Schedule
uint8_t** W;
W = CreateKeys(MasKeyPtr);
state_t key = {{0x00, 0x00, 0x00, 0x00},{0x00, 0x00, 0x00, 0x00},{0x00, 0x00, 0x00, 0x00},{0x00, 0x00, 0x00, 0x00}};
state_t* keyptr = &key;
RoundKey(W, 10 , keyptr);
//PrintMatrix(ptrMatrix1);
/************************************************TESTING PHASE 1 **********************************************************************/
//uint8_t*** KeyHypothesisSet = (uint8_t***)malloc(4 * sizeof(uint8_t**));
//KeyHypothesisSet[0] = (uint8_t**)malloc(512 * sizeof(uint8_t*)); // For key k_1,k_8,k_11,k_14
//KeyHypothesisSet[1] = (uint8_t**)malloc(512 * sizeof(uint8_t*)); // For key k_2,k_5,k_12,k_15
//KeyHypothesisSet[2] = (uint8_t**)malloc(512 * sizeof(uint8_t*)); // For key k_3,k_6,k_9,k_16
//KeyHypothesisSet[3] = (uint8_t**)malloc(512 * sizeof(uint8_t*)); // For key k_4,k_7,k_10,k_13
///************************************************* Read file for KeyHypothesisSet **********************************************/
//FILE* fptr;
//errno_t err;
//err = fopen_s(&fptr, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Phase1KeyHypothesis.txt", "r");
//if (err == 0) {
// printf("The file'Phase1KeyHypothesis.txt was opened\n");
//} else {
// printf("The file 'Phase1KeyHypothesis.txt' was not opened\n");
//}
//ReadFileKeyHypothesis(KeyHypothesisSet, SizeKeySet0, SizeKeySet1, SizeKeySet2, SizeKeySet3, fptr);
//fclose(fptr);
/////************************************************* Check Phase 1 **********************************************/
//int j;
//printf("SizeKeySet0: %d \n", *SizeKeySet0);
//printf("SizeKeySet1: %d \n", *SizeKeySet1);
//printf("SizeKeySet2: %d \n", *SizeKeySet2);
//printf("SizeKeySet3: %d \n", *SizeKeySet3);
//printf("going in to test phase 1 \n");
//for (j = 0; j < *SizeKeySet0; j++) {
// if ((((KeyHypothesisSet[0][j][0] == (*keyptr)[0][0] && KeyHypothesisSet[0][j][1] == (*keyptr)[1][3])
// && KeyHypothesisSet[0][j][2] == (*keyptr)[2][2])
// && KeyHypothesisSet[0][j][3] == (*keyptr)[3][1])) {
// printf("Hurray 1 \n");
// }
//}
//for (j = 0; j < *SizeKeySet1; j++) {
// if ((((KeyHypothesisSet[1][j][0] == (*keyptr)[0][1] && KeyHypothesisSet[1][j][1] == (*keyptr)[1][0])
// && KeyHypothesisSet[1][j][2] == (*keyptr)[2][3])
// && KeyHypothesisSet[1][j][3] == (*keyptr)[3][2])) {
// printf("Hurray 2 \n");
// }
//}
//for (j = 0; j < *SizeKeySet2; j++) {
// if ((((KeyHypothesisSet[2][j][0] == (*keyptr)[0][2] && KeyHypothesisSet[2][j][1] == (*keyptr)[1][1])
// && KeyHypothesisSet[2][j][2] == (*keyptr)[2][0])
// && KeyHypothesisSet[2][j][3] == (*keyptr)[3][3])) {
// printf("Hurray 3 \n");
// }
//}
//for (j = 0; j < *SizeKeySet3; j++) {
// if ((((KeyHypothesisSet[3][j][0] == (*keyptr)[0][3] && KeyHypothesisSet[3][j][1] == (*keyptr)[1][2])
// && KeyHypothesisSet[3][j][2] == (*keyptr)[2][1])
// && KeyHypothesisSet[3][j][3] == (*keyptr)[3][0])) {
// printf("Hurray 4 \n");
// }
//}
/********************************** Misc. Stuff when checking phase1*****************************/
//state_t* y, *y_fault;
//y = ciphertext;
//y_fault = ciphertextFaulty;
//uint8_t f1 = InvSBox((*y)[3][1] ^ (*keyptr)[3][1]) ^ InvSBox((*y_fault)[3][1] ^ (*keyptr)[3][1]); // 14
//uint8_t eq_1 = InvSBox((*y)[2][2] ^ (*keyptr)[2][2]) ^ InvSBox((*y_fault)[2][2] ^ (*keyptr)[2][2]); //11
//uint8_t eq_2 = InvSBox((*y)[0][0] ^ (*keyptr)[0][0]) ^ InvSBox((*y_fault)[0][0] ^ (*keyptr)[0][0]); //1
//uint8_t eq_3 = InvSBox((*y)[1][3] ^ (*keyptr)[1][3]) ^ InvSBox((*y_fault)[1][3] ^ (*keyptr)[1][3]); //8
//uint8_t twof1 = xtimes(f1);
//uint8_t threef1 = xtimes(f1) ^ f1;
//printf("f1: %"SCNd32 " eq_1: %"SCNd32 " eq_2: %"SCNd32 " eq_3: %"SCNd32 "\n", f1, eq_1, eq_2, eq_3);
//printf("twof1: %"SCNd32 " threef1: %"SCNd32 "\n", twof1, threef1);
//uint8_t f2 = InvSBox((*y)[1][0] ^ (*keyptr)[1][0]) ^ InvSBox((*y_fault)[1][0] ^ (*keyptr)[1][0]); //5
//eq_1 = InvSBox((*y)[0][1] ^ (*keyptr)[0][1]) ^ InvSBox((*y_fault)[0][1] ^ (*keyptr)[0][1]); //2
//eq_2 = InvSBox((*y)[2][3] ^ (*keyptr)[2][3]) ^ InvSBox((*y_fault)[2][3] ^ (*keyptr)[2][3]); //12
//eq_3 = InvSBox((*y)[3][2] ^ (*keyptr)[3][2]) ^ InvSBox((*y_fault)[3][2] ^ (*keyptr)[3][2]); //15
//uint8_t twof2 = xtimes(f2);
//uint8_t threef2 = xtimes(f2) ^ f2;
//printf("f2: %"SCNd32 " eq_1: %"SCNd32 " eq_2: %"SCNd32 " eq_3: %"SCNd32 "\n", f2, eq_1, eq_2, eq_3);
//printf("twof2: %"SCNd32 " threef2: %"SCNd32 "\n", twof2, threef2);
//uint8_t f3 = InvSBox((*y)[2][0] ^ (*keyptr)[2][0]) ^ InvSBox((*y_fault)[2][0] ^ (*keyptr)[2][0]); //9
//eq_1 = InvSBox((*y)[3][3] ^ (*keyptr)[3][3]) ^ InvSBox((*y_fault)[3][3] ^ (*keyptr)[3][3]); //16
//eq_2 = InvSBox((*y)[0][2] ^ (*keyptr)[0][2]) ^ InvSBox((*y_fault)[0][2] ^ (*keyptr)[0][2]); //3
//eq_3 = InvSBox((*y)[1][1] ^ (*keyptr)[1][1]) ^ InvSBox((*y_fault)[1][1] ^ (*keyptr)[1][1]); //6
//uint8_t twof3 = xtimes(f3);
//uint8_t threef3 = xtimes(f3) ^ f3;
//printf("f3: %"SCNd32 " eq_1: %"SCNd32 " eq_2: %"SCNd32 " eq_3: %"SCNd32 "\n", f3, eq_1, eq_2, eq_3);
//printf("twof3: %"SCNd32 " threef3: %"SCNd32 "\n", twof3, threef3);
//uint8_t f4 = InvSBox((*y)[0][3] ^ (*keyptr)[0][3]) ^ InvSBox((*y_fault)[0][3] ^ (*keyptr)[0][3]); //4
//eq_1 = InvSBox((*y)[1][2] ^ (*keyptr)[1][2]) ^ InvSBox((*y_fault)[1][2] ^ (*keyptr)[1][2]); //7
//eq_2 = InvSBox((*y)[2][1] ^ (*keyptr)[2][1]) ^ InvSBox((*y_fault)[2][1] ^ (*keyptr)[2][1]); //10
//eq_3 = InvSBox((*y)[3][0] ^ (*keyptr)[3][0]) ^ InvSBox((*y_fault)[3][0] ^ (*keyptr)[3][0]); //13
//uint8_t twof4 = xtimes(f4);
//uint8_t threef4 = xtimes(f4) ^ f4;
//printf("f4: %"SCNd32 " eq_1: %"SCNd32 " eq_2: %"SCNd32 " eq_3: %"SCNd32 "\n", f4, eq_1, eq_2, eq_3);
//printf("twof4: %"SCNd32 " threef4: %"SCNd32 "\n", twof4, threef4);
/************************************************TESTING PHASE 2.1 **********************************************************************/
/*FILE* fptr3;
errno_t err3;
err3 = fopen_s(&fptr3, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test1\\Phase2PartKey.txt", "r");
if (err3 == 0) {
printf("The file'Phase2PartKey.txt' was opened\n");
}
else {
printf("The file 'Phase2PartKey.txt' was not opened\n");
}
int* PartSize;
int Part = 0;
PartSize = &Part;
fscanf_s(fptr3, "%d", PartSize);
printf("Size0: %d \n", *PartSize);
uint8_t** PartKey= (uint8_t**)malloc((*PartSize) * sizeof(uint8_t*));
///************************************************* Read file for PartKey **********************************************/
//ReadFilePartKey(PartKey,PartSize,fptr3);
//fclose(fptr3);
//printf("The file 'Phase2PartKey.txt' closed\n");*/
///************************************************* Check Phase2.1 **********************************************/
//int j;
//for (j = 0; j < *PartSize; j++) {
// //printf("0: %"SCNd32" 1: %"SCNd32" 2: %"SCNd32" 3: %"SCNd32" 4: %"SCNd32" 5: %"SCNd32" 6: %"SCNd32" 7: %"SCNd32" 8: %"SCNd32" 9: %"SCNd32" 10: %"SCNd32" 11: %"SCNd32" 12: %"SCNd32" 13: %"SCNd32"\n",PartKey[j][0], PartKey[j][1], PartKey[j][2], PartKey[j][3], PartKey[j][4], PartKey[j][5], PartKey[j][6], PartKey[j][7], PartKey[j][8], PartKey[j][9], PartKey[j][10], PartKey[j][11], PartKey[j][12], PartKey[j][13]);
// //printf("Part %d 0: %"SCNd32 "\n",j, PartKey[j][0]);
// if (PartKey[j][0] == (*keyptr)[0][2] && PartKey[j][1] == (*keyptr)[0][3] &&
// PartKey[j][2] == (*keyptr)[1][0] && PartKey[j][3] == (*keyptr)[1][1] &&
// PartKey[j][4] == (*keyptr)[1][2] && PartKey[j][5] == (*keyptr)[1][3] &&
// PartKey[j][6] == (*keyptr)[2][0] && PartKey[j][7] == (*keyptr)[2][1] &&
// PartKey[j][8] == (*keyptr)[2][2] && PartKey[j][9] == (*keyptr)[2][3] &&
// PartKey[j][10] == (*keyptr)[3][0] && PartKey[j][11] == (*keyptr)[3][1] &&
// PartKey[j][12] == (*keyptr)[3][2] && PartKey[j][13] == (*keyptr)[3][3]) {
// printf("Hurray for PartKey\n");
// }
//}
//printf("out \n");
//Check for duplicates
//for (j = 0; j < *PartSize; j++) {
// for (i = 0; i < j; i++) {
// if (PartKey[j][0] == PartKey[i][0] && PartKey[j][1] == PartKey[i][1] && PartKey[j][2] == PartKey[i][2] && PartKey[j][3] == PartKey[i][3] &&
// PartKey[j][4] == PartKey[i][4] && PartKey[j][5] == PartKey[i][5] && PartKey[j][6] == PartKey[i][6] && PartKey[j][7] == PartKey[i][7] &&
// PartKey[j][8] == PartKey[i][8] && PartKey[j][9] == PartKey[i][9] && PartKey[j][10] == PartKey[i][10] && PartKey[j][11] == PartKey[i][11] &&
// PartKey[j][12] == PartKey[i][12] && PartKey[j][13] == PartKey[i][13]) {
// printf("duplicates\n");
// }
// }
//}
//printf("out2\n");
/********************************** Misc. Stuff when checking phase2.1*****************************/
//state_t* y, *y_fault;
//y = ciphertext;
//y_fault = ciphertextFaulty;
//uint8_t f = InvSBox( times9( InvSBox((*y)[3][0] ^ (*keyptr)[3][0]) ^ (*keyptr)[3][0] ^ (*keyptr)[2][0]) ^
// times14( InvSBox( (*y)[2][1] ^ (*keyptr)[2][1]) ^ ((*keyptr)[2][1] ^ (*keyptr)[3][1])) ^
// times11( InvSBox((*y)[1][2] ^ (*keyptr)[1][2]) ^ ((*keyptr)[3][2] ^ (*keyptr)[2][2])) ^
// times13( InvSBox((*y)[0][3] ^ (*keyptr)[0][3]) ^ ((*keyptr)[3][3] ^ (*keyptr)[2][3]) )) ^
// InvSBox ( times9( InvSBox((*y_fault)[3][0] ^ (*keyptr)[3][0]) ^ (*keyptr)[3][0] ^ (*keyptr)[2][0]) ^
// times14( InvSBox( (*y_fault)[2][1] ^ (*keyptr)[2][1]) ^ ((*keyptr)[2][1] ^ (*keyptr)[3][1])) ^
// times11( InvSBox( (*y_fault)[1][2] ^ (*keyptr)[1][2]) ^ ((*keyptr)[3][2] ^ (*keyptr)[2][2])) ^
// times13( InvSBox( (*y_fault)[0][3] ^ (*keyptr)[0][3]) ^ ((*keyptr)[3][3] ^ (*keyptr)[2][3])) );
//uint8_t q = InvSBox(times13(InvSBox((*y)[2][0] ^ (*keyptr)[2][0]) ^ (*keyptr)[2][0] ^ (*keyptr)[1][0]) ^
// times9(InvSBox((*y)[1][1] ^ (*keyptr)[1][1]) ^ ((*keyptr)[2][1] ^ (*keyptr)[1][1])) ^
// times14(InvSBox((*y)[0][2] ^ (*keyptr)[0][2]) ^ ((*keyptr)[2][2] ^ (*keyptr)[1][2])) ^
// times11(InvSBox((*y)[3][3] ^ (*keyptr)[3][3]) ^ ((*keyptr)[2][3] ^ (*keyptr)[1][3]))) ^
// InvSBox(times13(InvSBox((*y_fault)[2][0] ^ (*keyptr)[2][0]) ^ (*keyptr)[2][0] ^ (*keyptr)[1][0]) ^
// times9(InvSBox((*y_fault)[1][1] ^ (*keyptr)[1][1]) ^ ((*keyptr)[2][1] ^ (*keyptr)[1][1])) ^
// times14(InvSBox((*y_fault)[0][2] ^ (*keyptr)[0][2]) ^ ((*keyptr)[2][2] ^ (*keyptr)[1][2])) ^
// times11(InvSBox((*y_fault)[3][3] ^ (*keyptr)[3][3]) ^ ((*keyptr)[2][3] ^ (*keyptr)[1][3])));
//printf("f: %"SCNd32 " q: %"SCNd32 "\n", f,q);
/************************************************TESTING PHASE 2.2 **********************************************************************/
/*FILE* fptr5;
errno_t err5;
err5 = fopen_s(&fptr5, "C:\\Users\\tempacc\\Documents\\NTU\\Test File\\Test1\\Phase2FinalKey.txt", "r");
if (err5 == 0) {
printf("The file'Phase2FinalKey.txt was opened\n");
}
else {
printf("The file 'Phase2FinalKey.txt' was not opened\n");
}
int* FinalSize;
int Final = 0;
FinalSize = &Final;
fscanf_s(fptr5, "%d", FinalSize);
printf("FinalSize: %d \n", *FinalSize);
uint8_t** FinalKeyHypothesis = (uint8_t**)malloc((*FinalSize) * sizeof(uint8_t*));
ReadFileFinalKey(FinalKeyHypothesis, FinalSize, fptr5);
fclose(fptr5);
printf("going in to test phase 2 \n");
printf("FinalSize outside: %d \n", *FinalSize);
int i,j;
for(j = 0; j < *FinalSize; j++) {
if ((FinalKeyHypothesis[j][0] == (*keyptr)[0][0] && FinalKeyHypothesis[j][1] == (*keyptr)[0][1]) &&
FinalKeyHypothesis[j][2] == (*keyptr)[0][2] && FinalKeyHypothesis[j][3] == (*keyptr)[0][3] &&
FinalKeyHypothesis[j][4] == (*keyptr)[1][0] && FinalKeyHypothesis[j][5] == (*keyptr)[1][1] &&
FinalKeyHypothesis[j][6] == (*keyptr)[1][2] && FinalKeyHypothesis[j][7] == (*keyptr)[1][3] &&
FinalKeyHypothesis[j][8] == (*keyptr)[2][0] && FinalKeyHypothesis[j][9] == (*keyptr)[2][1] &&
FinalKeyHypothesis[j][10] == (*keyptr)[2][2] && FinalKeyHypothesis[j][11] == (*keyptr)[2][3] &&
FinalKeyHypothesis[j][12] == (*keyptr)[3][0] && FinalKeyHypothesis[j][13] == (*keyptr)[3][1] &&
FinalKeyHypothesis[j][14] == (*keyptr)[3][2] && FinalKeyHypothesis[j][15] == (*keyptr)[3][3] ) {
printf("hugehurray! \n");
}
}
printf("out \n");*/
////Check for duplicates
//for (j = 0; j < *FinalSize; j++) {
// for (i = 0; i < j; i++) {
// if (FinalKeyHypothesis[j][0] == FinalKeyHypothesis[i][0] && FinalKeyHypothesis[j][1] == FinalKeyHypothesis[i][1] && FinalKeyHypothesis[j][2] == FinalKeyHypothesis[i][2] && FinalKeyHypothesis[j][3] == FinalKeyHypothesis[i][3] &&
// FinalKeyHypothesis[j][4] == FinalKeyHypothesis[i][4] && FinalKeyHypothesis[j][5] == FinalKeyHypothesis[i][5] && FinalKeyHypothesis[j][6] == FinalKeyHypothesis[i][6] && FinalKeyHypothesis[j][7] == FinalKeyHypothesis[i][7] &&
// FinalKeyHypothesis[j][8] == FinalKeyHypothesis[i][8] && FinalKeyHypothesis[j][9] == FinalKeyHypothesis[i][9] && FinalKeyHypothesis[j][10] == FinalKeyHypothesis[i][10] && FinalKeyHypothesis[j][11] == FinalKeyHypothesis[i][11] &&
// FinalKeyHypothesis[j][12] == FinalKeyHypothesis[i][12] && FinalKeyHypothesis[j][13] == FinalKeyHypothesis[i][13] && FinalKeyHypothesis[j][14] == FinalKeyHypothesis[i][14] && FinalKeyHypothesis[j][15] == FinalKeyHypothesis[i][15]) {
// printf("duplicates\n");
// }
// }
//}
//printf("out2\n");
//
/********************************** Misc. Stuff when checking phase2.2*****************************/
//state_t* y, *y_fault;
//y = ciphertext;
//y_fault = ciphertextFaulty;
////equation 1
//uint8_t q_1 = InvSBox(
// times14( InvSBox((*y)[0][0] ^ (*keyptr)[0][0]) ^ ((*keyptr)[0][0] ^ SBox((*keyptr)[3][1] ^ (*keyptr)[2][1]) ^ Rcon(10) ) ) ^
// times11( InvSBox((*y)[3][1] ^ (*keyptr)[3][1] ) ^ (*keyptr)[0][1] ^ SBox((*keyptr)[3][2] ^ (*keyptr)[2][2]) ) ^
// times13( InvSBox( (*y)[2][2] ^ (*keyptr)[2][2] ) ^ (*keyptr)[0][2] ^ SBox( (*keyptr)[3][3] ^ (*keyptr)[2][3] ) ) ^
// times9(InvSBox( (*y)[1][3] ^ (*keyptr)[1][3] )^ (*keyptr)[0][3] ^ SBox((*keyptr)[3][0] ^ (*keyptr)[2][0]) ) ) ^
// InvSBox(times14(InvSBox( (*y_fault)[0][0] ^ (*keyptr)[0][0] ) ^((*keyptr)[0][0] ^ SBox( (*keyptr)[3][1] ^ (*keyptr)[2][1]) ^ Rcon(10))) ^
// times11(InvSBox( (*y_fault)[3][1] ^ (*keyptr)[3][1]) ^ (*keyptr)[0][1] ^ SBox( (*keyptr)[3][2] ^ (*keyptr)[2][2])) ^
// times13(InvSBox( (*y_fault)[2][2] ^ (*keyptr)[2][2]) ^ (*keyptr)[0][2] ^ SBox( (*keyptr)[3][3] ^ (*keyptr)[2][3])) ^
// times9(InvSBox( (*y_fault)[1][3] ^ (*keyptr)[1][3]) ^ (*keyptr)[0][3] ^ SBox((*keyptr)[3][0] ^ (*keyptr)[2][0])) );
////equation 4
//uint8_t q_2 = InvSBox(times11(InvSBox((*y)[1][0] ^ (*keyptr)[1][0]) ^ ((*keyptr)[1][0] ^ (*keyptr)[0][0])) ^
// times13(InvSBox((*y)[0][1] ^ (*keyptr)[0][1]) ^ ((*keyptr)[1][1] ^ (*keyptr)[0][1])) ^
// times9(InvSBox((*y)[3][2] ^ (*keyptr)[3][2]) ^ ((*keyptr)[1][2]^ (*keyptr)[0][2])) ^
// times14(InvSBox((*y)[2][3] ^ (*keyptr)[2][3]) ^ ((*keyptr)[1][3] ^ (*keyptr)[0][3]))) ^
// InvSBox(times11(InvSBox((*y_fault)[1][0] ^ (*keyptr)[1][0]) ^ ((*keyptr)[1][0] ^ (*keyptr)[0][0])) ^
// times13(InvSBox((*y_fault)[0][1] ^ (*keyptr)[0][1]) ^ ((*keyptr)[1][1] ^ (*keyptr)[0][1])) ^
// times9(InvSBox((*y_fault)[3][2] ^ (*keyptr)[3][2]) ^ ((*keyptr)[1][2] ^ (*keyptr)[0][2])) ^
// times14(InvSBox((*y_fault)[2][3] ^ (*keyptr)[2][3]) ^ ((*keyptr)[1][3] ^ (*keyptr)[0][3])));
//uint8_t f = InvSBox( times9( InvSBox((*y)[3][0] ^ (*keyptr)[3][0]) ^ (*keyptr)[3][0] ^ (*keyptr)[2][0]) ^
// times14( InvSBox( (*y)[2][1] ^ (*keyptr)[2][1]) ^ ((*keyptr)[2][1] ^ (*keyptr)[3][1])) ^
// times11( InvSBox((*y)[1][2] ^ (*keyptr)[1][2]) ^ ((*keyptr)[3][2] ^ (*keyptr)[2][2])) ^
// times13( InvSBox((*y)[0][3] ^ (*keyptr)[0][3]) ^ ((*keyptr)[3][3] ^ (*keyptr)[2][3]) )) ^
// InvSBox ( times9( InvSBox((*y_fault)[3][0] ^ (*keyptr)[3][0]) ^ (*keyptr)[3][0] ^ (*keyptr)[2][0]) ^
// times14( InvSBox( (*y_fault)[2][1] ^ (*keyptr)[2][1]) ^ ((*keyptr)[2][1] ^ (*keyptr)[3][1])) ^
// times11( InvSBox( (*y_fault)[1][2] ^ (*keyptr)[1][2]) ^ ((*keyptr)[3][2] ^ (*keyptr)[2][2])) ^
// times13( InvSBox( (*y_fault)[0][3] ^ (*keyptr)[0][3]) ^ ((*keyptr)[3][3] ^ (*keyptr)[2][3])) );
//uint8_t q = InvSBox(times13(InvSBox((*y)[2][0] ^ (*keyptr)[2][0]) ^ (*keyptr)[2][0] ^ (*keyptr)[1][0]) ^
// times9(InvSBox((*y)[1][1] ^ (*keyptr)[1][1]) ^ ((*keyptr)[2][1] ^ (*keyptr)[1][1])) ^
// times14(InvSBox((*y)[0][2] ^ (*keyptr)[0][2]) ^ ((*keyptr)[2][2] ^ (*keyptr)[1][2])) ^
// times11(InvSBox((*y)[3][3] ^ (*keyptr)[3][3]) ^ ((*keyptr)[2][3] ^ (*keyptr)[1][3]))) ^
// InvSBox(times13(InvSBox((*y_fault)[2][0] ^ (*keyptr)[2][0]) ^ (*keyptr)[2][0] ^ (*keyptr)[1][0]) ^
// times9(InvSBox((*y_fault)[1][1] ^ (*keyptr)[1][1]) ^ ((*keyptr)[2][1] ^ (*keyptr)[1][1])) ^
// times14(InvSBox((*y_fault)[0][2] ^ (*keyptr)[0][2]) ^ ((*keyptr)[2][2] ^ (*keyptr)[1][2])) ^
// times11(InvSBox((*y_fault)[3][3] ^ (*keyptr)[3][3]) ^ ((*keyptr)[2][3] ^ (*keyptr)[1][3])));
//printf("f: %"SCNd32 " q: %"SCNd32 "\n", f, q);
//printf("2f: %"SCNd32 " 3f: %"SCNd32 "\n", xtimes(q), xtimes(q)^q);
//printf("q_1: %"SCNd32 " q_2: %"SCNd32 "\n", q_1, q_2);
//printf("2q_2: %"SCNd32 " 3q_1: %"SCNd32 "\n", xtimes(q_2), xtimes(q_1) ^ q_1);
fclose(fptr_time);
return 0;
}
|
django_fmt_plug.c | /* Django 1.4 patch for JtR. Hacked together during May of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Input Format => user:$django$*type*django-hash
*
* Where,
*
 * type => 1, for Django 1.4 pbkdf2_sha256 hashes and
*
* django-hash => Second column of "SELECT username, password FROM auth_user"
*
* July, 2012, the oSSL PKCS5_PBKDF2_HMAC function was replaced with a much faster
* function pbkdf2() designed by JimF. Originally this function was designed for
* the mscash2 (DCC2). The same pbkdf2 function, is used, and simply required small
* changes to use SHA256.
*
* This new code is 3x to 4x FASTER than the original oSSL code. Even though it is
 * only using oSSL functions. A lot of the high level stuff in oSSL sux for speed.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_django;
#elif FMT_REGISTERS_H
john_register_one(&fmt_django);
#else
// uncomment this header to use the slower PKCS5_PBKDF2_HMAC function.
// Note, PKCS5_PBKDF2_HMAC is ONLY available in oSSL 1.00 + (1.0c I think to be exact)
//#include <openssl/evp.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "base64_convert.h"
#include "pbkdf2_hmac_sha256.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4 // tuned on core i7
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Django"
#define FORMAT_NAME ""
#define FORMAT_TAG "$django$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT " (x10000)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define HASH_LENGTH 44
#define BINARY_SIZE 32
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
/* Self-test vectors: encoded Django 1.4 hashes paired with their known plaintexts. */
static struct fmt_tests django_tests[] = {
	{"$django$*1*pbkdf2_sha256$10000$qPmFbibfAY06$x/geVEkdZSlJMqvIYJ7G6i5l/6KJ0UpvLUU6cfj83VM=", "openwall"},
	{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd7$2nTDwPhSsDKOwpKiV04teVtf+a14Rs7na/lIB3KnHkM=", "123"},
	{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd1$bkdQo9RoatRomupPFP+XEo+Guuirq4mi+R1cFcV0U3M=", "openwall"},
	{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd6$Uq33DAHOFHUED+32IIqCqm+ITU1mhsGOJ7YwFf6h+6k=", "password"},
	{"$django$*1*pbkdf2_sha256$10000$34L3roCQ6ZfN$R21tJK1sIDfmj9BfBocefFfuGVwE3pXcLEhChNjc+pU=", "0123456789012345678901234567890123456789012345678901234567890123"},
	{"$django$*1*pbkdf2_sha256$10000$7qPqyUDw8kZV$pFmVRjlHvayoWEy8ZWXkHgfmgImUKLmkmruclpYVAxM=", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"},
	{NULL}
};
/* Per-candidate plaintext buffers and computed digests; sized/allocated in init(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static struct custom_salt {
	int type;               /* hash type from the tag; valid() enforces 1 (pbkdf2_sha256) */
	int iterations;         /* PBKDF2 iteration count parsed from the hash */
	unsigned char salt[32]; /* NUL-terminated ASCII salt (valid() bounds its length) */
} *cur_salt;
/*
 * One-time format setup: scale the key-slot counts for OpenMP and allocate
 * the key/digest buffers (released in done()).
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	/* min slots scale by raw thread count; max additionally by OMP_SCALE */
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* zero-initialized, word-aligned allocations */
	saved_key = mem_calloc_align(sizeof(*saved_key),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_align(sizeof(*crypt_out), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Tear-down counterpart of init(): release the per-run buffers. */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
/*
 * Validate one ciphertext line of the form
 *   $django$*type*pbkdf2_sha256$iterations$salt$base64-hash
 * Returns 1 when well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	size_t hash_len;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	if (!ctcopy)	/* treat OOM as "not valid" rather than crashing */
		return 0;
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* type */
		goto err;
	/* type must be 1 */
	if (!isdec(p))
		goto err;
	if (atoi(p) != 1)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* algorithm */
		goto err;
	if (strcmp(p, "pbkdf2_sha256") != 0)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* iterations */
		goto err;
	if (!isdec(p)) // FIXME: what about iterations == 0?
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* salt */
		goto err;
	if (strlen(p) > sizeof(cur_salt->salt)-1)	/* must fit incl. NUL */
		goto err;
	if ((p = strtokm(NULL, "")) == NULL)	/* hash */
		goto err;
	/* hash must be valid MIME base64 (trailing '=' ok) and bounded */
	hash_len = strlen(p);
	if (hash_len-1 != base64_valid_length(p,e_b64_mime,flg_Base64_MIME_TRAIL_EQ, 0) ||
	    hash_len-1 > HASH_LENGTH-1)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse type, iteration count and salt out of a validated ciphertext.
 * Returns a pointer to a static custom_salt (copied by the caller).
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char copy[120];
	char *cur = copy;
	char *tok;

	memset(&cs, 0, sizeof(cs));
	/* strtokm() writes into its argument, so parse a bounded local copy */
	strncpy(copy, ciphertext, 119);
	copy[119] = 0;
	cur += FORMAT_TAG_LEN;	/* step past the "$django$*" tag */
	tok = strtokm(cur, "*");
	cs.type = atoi(tok);
	strtokm(NULL, "$");	/* algorithm name, already checked by valid() */
	tok = strtokm(NULL, "$");
	cs.iterations = atoi(tok);
	tok = strtokm(NULL, "$");
	strcpy((char*)cs.salt, tok);	/* fits: valid() bounds the salt length */
	return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{ static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
p = strrchr(ciphertext, '$') + 1;
base64_convert(p, e_b64_mime, strlen(p), (char*)out, e_b64_raw, sizeof(buf.c), flg_Base64_DONOT_NULL_TERMINATE, 0);
return out;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/* Select the salt used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/*
 * Compute PBKDF2-HMAC-SHA256 of every queued key under the current salt,
 * storing 32-byte digests in crypt_out[]. Returns the candidate count.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	/* NOTE: without _OPENMP the block below executes once with index == 0 */
	{
#ifdef SIMD_COEF_32
		/* gather one SIMD group's key pointers/lengths, hash them together */
		int lens[MAX_KEYS_PER_CRYPT], i;
		unsigned char *pin[MAX_KEYS_PER_CRYPT];
		union {
			uint32_t *pout[MAX_KEYS_PER_CRYPT];
			unsigned char *poutc;
		} x;
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			x.pout[i] = crypt_out[i+index];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), 32, 0);
#else
		/* scalar path: one key at a time */
		pbkdf2_sha256((unsigned char *)saved_key[index], strlen(saved_key[index]),
			cur_salt->salt, strlen((char*)cur_salt->salt),
			cur_salt->iterations, (unsigned char*)crypt_out[index], 32, 0);
#endif
	}
	return count;
}
/* Quick scan: does any computed digest match the first ARCH_SIZE bytes? */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (memcmp(binary, crypt_out[i], ARCH_SIZE) == 0)
			return 1;
	}
	return 0;
}
/* Full-width comparison for a candidate that passed cmp_all(). */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* cmp_one() already compares the full binary; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate password (NUL-terminated, truncated to fit) at slot index. */
static void django_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored candidate password for slot index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Tunable-cost hook: report the PBKDF2 iteration count of a salt. */
static unsigned int iteration_count(void *salt)
{
	return (unsigned int)((struct custom_salt *)salt)->iterations;
}
/* Format descriptor registered with the JtR core (via FMT_REGISTERS_H above). */
struct fmt_main fmt_django = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			/* tunable cost label, value supplied by iteration_count() */
			"iteration count",
		},
		{ FORMAT_TAG },
		django_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		django_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
/* get_hash_0..6 accessors expand from the shared template header */
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
|
distribute_parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// Uninitialized-use warning must still fire inside the combined construct.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp distribute parallel for simd
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// The combined directive is rejected at file scope (outside any function).
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd foo
// The clause-less form is accepted, but the construct must be followed by a for loop.
void test_no_clause() {
  int i;
#pragma omp distribute parallel for simd
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp distribute parallel for simd' must be a for loop}}
#pragma omp distribute parallel for simd
  ++i;
}
// Branches into/out of the OpenMP region and returns from it are diagnosed;
// jumps that stay inside the region (L2 within the loop) are fine.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown clause tokens only warn; the directive itself is still applied.
void test_invalid_clause() {
  int i;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}
// Stray punctuation after the directive (or after valid clauses) warns but is ignored.
void test_non_identifiers() {
  int i, x;

#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd;
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd firstprivate(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd private(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
// Exhaustive malformed/valid argument matrix for the 'safelen' clause.
void test_safelen() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd safelen
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd safelen()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// Exhaustive malformed/valid argument matrix for the 'simdlen' clause.
void test_simdlen() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd simdlen
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd simdlen()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd simdlen 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, 8)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd simdlen(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd simdlen(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// simdlen must not exceed safelen, regardless of clause order.
void test_safelen_simdlen() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute parallel for simd simdlen(6) safelen(5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute parallel for simd safelen(5) simdlen(6)
  for (i = 0; i < 16; ++i)
    ;
}
// Malformed/valid 'collapse' arguments, required loop-nest depth, and
// the ban on nesting OpenMP constructs inside a simd region.
void test_collapse() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(2)
  for (i = 0; i < 16; ++i)
    for (int j = 0; j < 16; ++j)
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp distribute parallel for simd reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}
// Malformed 'linear' clause arguments and undeclared list items.
void test_linear() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd linear(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd linear(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd linear(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd linear(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
void test_aligned() {
// Diagnostics for the 'aligned' clause on '#pragma omp distribute parallel
// for simd': malformed argument lists, non-variable arguments, variables
// that are neither pointer nor array, and duplicate aligned clauses.
// (The 'expected-*' comments are clang -verify directives; their relative
// @+N offsets must not be disturbed.)
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd aligned(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(z)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(x :)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x : 1, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp distribute parallel for simd aligned(x) aligned(z, x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y, z) aligned(y, z)
for (i = 0; i < 16; ++i)
;
}
void test_private() {
// Diagnostics for the 'private' clause: malformed lists and non-variable
// arguments, plus well-formed positive cases at the end.
// (clang -verify directive comments; relative @+N offsets must stay intact.)
int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd private(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd private(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd private()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
// Diagnostics for the 'lastprivate' clause: same malformed-list matrix as
// test_private(), followed by valid uses.
// (clang -verify directive comments; relative @+N offsets must stay intact.)
int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
// Diagnostics for the 'firstprivate' clause, including the semantic rule
// that a lastprivate variable cannot also be firstprivate on this
// combined construct.
// (clang -verify directive comments; relative @+N offsets must stay intact.)
int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
// expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 2 {{lastprivate variable cannot be firstprivate}} expected-note@+3 2 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 3 {{defined as lastprivate}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
// Loop-variable type checks: the canonical loop variable of a worksharing
// simd loop must be of integer or pointer type, so float/double induction
// variables are rejected.
// (clang -verify directive comments; relative @+N offsets must stay intact.)
float a[100], b[100], c[100];
#pragma omp target
#pragma omp teams
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute parallel for simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute parallel for simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
void test_nontemporal() {
// Diagnostics for the 'nontemporal' clause: rejected entirely under
// OpenMP 4.5 (omp45-error), parsed with the usual malformed-list and
// duplicate-variable checks under OpenMP 5.0 (omp50-*).
// (clang -verify directive comments; relative @+N offsets must stay intact.)
int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd nontemporal(
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd nontemporal(,
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd nontemporal(, )
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd nontemporal()
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd nontemporal(int)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd nontemporal(0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd nontemporal(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd nontemporal(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd nontemporal(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd nontemporal(x :)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp distribute parallel for simd nontemporal(x :, )
for (i = 0; i < 16; ++i)
;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp distribute parallel for simd nontemporal(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd private(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd nontemporal(x) private(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp distribute parallel for simd nontemporal(x, y : 0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd nontemporal(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd lastprivate(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
}
|
opencl_sxc_fmt_plug.c | /*
* Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format.
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_sxc;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_sxc);
#else
#include <string.h>
#include <stdint.h>
#include <openssl/blowfish.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "sha.h"
#include "aes.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "options.h"
#include "common.h"
#include "formats.h"
#include "common-opencl.h"
#define FORMAT_LABEL "sxc-opencl"
#define FORMAT_NAME "StarOffice .sxc"
#define FORMAT_TAG "$sxc$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL Blowfish"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(sxc_cpu_salt)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN 4
// Host->GPU input record, one per candidate: the SHA-1 of the password,
// which is what the StarOffice key derivation feeds into PBKDF2.
typedef struct {
uint32_t length;
uint8_t v[20]; // hash of password
} sxc_password;
// GPU->host output record: the derived key (16 bytes, stored as words).
typedef struct {
uint32_t v[16/4];
} sxc_hash;
// Salt/settings record uploaded to the PBKDF2 kernel (layout must match
// the OpenCL kernel's expectations).
typedef struct {
uint32_t iterations;
uint32_t outlen;
uint32_t skip_bytes;
uint8_t length;
uint8_t salt[64];
} sxc_salt;
// Per-candidate plaintext keys and final SHA-1 checksums (CPU side).
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[32 / sizeof(uint32_t)];
// Full parsed ciphertext record kept on the CPU; this (not sxc_salt) is
// what get_salt() returns and what SALT_SIZE is based on.
typedef struct {
int cipher_type;
int checksum_type;
int iterations;
int key_size;
int iv_length;
int salt_length;
int original_length;
int length;
unsigned char iv[16];
unsigned char salt[32];
unsigned char content[1024];
} sxc_cpu_salt;
static sxc_cpu_salt *cur_salt;
static struct fmt_tests sxc_tests[] = {
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a793
7b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdc
cb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b
553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"},
{NULL}
};
// OpenCL plumbing shared by the callbacks below: host buffers, device
// memory objects, and the sizes used for transfers.
static cl_int cl_error;
static sxc_password *inbuffer;
static sxc_hash *outbuffer;
static sxc_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
static size_t insize, outsize, settingsize;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl_autotune.h"
#include "memdbg.h"
// Labels printed by the autotuner for the three profiled phases below
// (host->device transfer, kernel, device->host transfer).
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
// Autotune hook: largest local work-group size usable with crypt_kernel.
static size_t get_task_max_work_group_size()
{
return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
// Allocate host buffers and device memory for 'gws' candidates and bind
// the three kernel arguments (input hashes, output keys, salt settings).
// Called by the autotuner; release_clobj() is the matching teardown.
static void create_clobj(size_t gws, struct fmt_main *self)
{
insize = sizeof(sxc_password) * gws;
outsize = sizeof(sxc_hash) * gws;
settingsize = sizeof(sxc_salt);
// inbuffer is zeroed so short passwords don't leak stale bytes to the GPU.
inbuffer = mem_calloc(1, insize);
outbuffer = mem_alloc(outsize);
saved_key = mem_calloc(gws, sizeof(*saved_key));
crypt_out = mem_calloc(gws, sizeof(*crypt_out));
/// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_setting =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem setting");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
&mem_setting), "Error while setting mem_salt kernel argument");
}
// Release everything create_clobj() acquired.  crypt_out doubles as the
// "allocated" flag; MEM_FREE is assumed to NULL its argument so a second
// call is a no-op — TODO confirm against the project's memory.h.
static void release_clobj(void)
{
if (crypt_out) {
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
MEM_FREE(saved_key);
MEM_FREE(crypt_out);
}
}
// Format teardown: undo reset()'s kernel/program creation exactly once
// (guarded by the autotuned counter).
static void done(void)
{
if (autotuned) {
release_clobj();
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
autotuned--;
}
}
// Format init: remember our fmt_main and prepare the chosen OpenCL device.
// Heavy setup (program build, autotune) is deferred to reset().
static void init(struct fmt_main *_self)
{
self = _self;
opencl_prepare_dev(gpu_id);
}
// First-use setup: build the unsplit PBKDF2-HMAC-SHA1 kernel with buffer
// sizes baked in as -D defines, then run the shared autotuner to pick
// global/local work sizes.  Only runs once (autotuned guard).
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
snprintf(build_opts, sizeof(build_opts),
"-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
(int)sizeof(inbuffer->v),
(int)sizeof(currentsalt.salt),
(int)sizeof(outbuffer->v));
opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
gpu_id, build_opts);
crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 0, NULL, warn, 1,
self, create_clobj, release_clobj,
sizeof(sxc_password), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 1, 0, 1000);
}
}
/* Validate a "$sxc$*..." ciphertext line field by field.
 * Returns 1 iff every '*'-separated field is present, hex where required,
 * and within the bounds of the fixed-size arrays in sxc_cpu_salt.
 * The field order here must stay in sync with get_salt()/get_binary(). */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy;
char *keeptr;
char *p;
int res, extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
/* strtokm mutates its input, so work on a copy. */
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */
goto err;
res = atoi(p);
if (res != 0 && res != 1)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */
goto err;
res = atoi(p);
if (res != 0 && res != 1)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
goto err;
res = atoi(p);
if (res <= 0)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* key size */
goto err;
res = atoi(p);
if (res != 16 && res != 32)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */
goto err;
if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra)
goto err;
/* NOTE(review): ishex() appears redundant after the hexlenl()/extra
 * check above (here and below) — confirm against misc.c semantics. */
if (!ishex(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv length */
goto err;
res = atoi(p);
if (res <= 0 || res > 16)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv */
goto err;
if (hexlenl(p, &extra) != res * 2 || extra)
goto err;
if (!ishex(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt length */
goto err;
res = atoi(p);
if (res <= 0 || res > 32)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt */
goto err;
if (hexlenl(p, &extra) != res * 2 || extra)
goto err;
if (!ishex(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* original length */
goto err;
res = atoi(p);
if (res <= 0 || res > 1024) /* 1024 because of "unsigned char output[1024];" in crypt_all */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* length */
goto err;
res = atoi(p);
if (res <= 0 || res > 1024)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* content */
goto err;
if (hexlenl(p, &extra) != res * 2 || extra)
goto err;
if (strtokm(NULL, "*") != NULL) /* the end */
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static sxc_cpu_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += 6; /* skip over "$sxc$*" */
p = strtokm(ctcopy, "*");
cs.cipher_type = atoi(p);
p = strtokm(NULL, "*");
cs.checksum_type = atoi(p);
p = strtokm(NULL, "*");
cs.iterations = atoi(p);
p = strtokm(NULL, "*");
cs.key_size = atoi(p);
strtokm(NULL, "*");
/* skip checksum field */
p = strtokm(NULL, "*");
cs.iv_length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.iv_length; i++)
cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.salt_length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.salt_length; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.original_length = atoi(p);
p = strtokm(NULL, "*");
cs.length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.length; i++)
cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
/* Extract the 20-byte checksum (field 5 of the ciphertext) as the binary
 * hash to compare against.  Returns a pointer to a static, ARCH_WORD
 * aligned buffer; the caller copies BINARY_SIZE bytes. */
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN; /* skip over "$sxc$*" */
/* Skip the first four fields (cipher/checksum types, iterations, key size). */
strtokm(ctcopy, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
p = strtokm(NULL, "*");
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
MEM_FREE(keeptr);
return out;
}
/* Install the current salt: keep the full CPU record in cur_salt and
 * upload the PBKDF2-relevant subset (salt bytes, iteration count, output
 * length) to the GPU settings buffer. */
static void set_salt(void *salt)
{
cur_salt = (sxc_cpu_salt*)salt;
memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length);
currentsalt.length = cur_salt->salt_length;
currentsalt.iterations = cur_salt->iterations;
currentsalt.outlen = cur_salt->key_size;
currentsalt.skip_bytes = 0;
// Non-blocking write; crypt_all()'s queue operations order after it.
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
"Copy salt to gpu");
}
#undef set_key
/* Store one candidate password (NUL-terminated, truncated to
 * PLAINTEXT_LENGTH by strnzcpy). */
static void set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored candidate password for 'index'. */
static char *get_key(int index)
{
return saved_key[index];
}
/* Compute hashes for all 'count' candidates:
 *   1. CPU (OpenMP): SHA-1 each password into the kernel input buffer.
 *   2. GPU: PBKDF2-HMAC-SHA1 derives the Blowfish key.
 *   3. CPU (OpenMP): Blowfish-CFB64 decrypt the stored content and SHA-1
 *      the plaintext into crypt_out for comparison.
 * Returns count. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
size_t *lws = local_work_size ? &local_work_size : NULL;
global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
unsigned char hash[20];
SHA_CTX ctx;
SHA1_Init(&ctx);
SHA1_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index]));
SHA1_Final((unsigned char *)hash, &ctx);
memcpy(inbuffer[index].v, hash, 20);
inbuffer[index].length = 20;
}
/// Copy data to gpu
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
/// Run kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
NULL, &global_work_size, lws, 0, NULL,
multi_profilingEvent[1]), "Run kernel");
/// Read the result back
// Blocking read (CL_TRUE) also flushes the two queued operations above.
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");
// The autotuner only times the GPU path; skip the CPU post-processing.
if (ocl_autotune_running)
return count;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
BF_KEY bf_key;
SHA_CTX ctx;
int bf_ivec_pos;
unsigned char ivec[8];
unsigned char output[1024];
bf_ivec_pos = 0;
// Blowfish CFB64 uses an 8-byte IV regardless of stored iv_length.
memcpy(ivec, cur_salt->iv, 8);
BF_set_key(&bf_key, cur_salt->key_size, (const unsigned char*)outbuffer[index].v);
BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0);
SHA1_Init(&ctx);
SHA1_Update(&ctx, output, cur_salt->original_length);
SHA1_Final((unsigned char*)crypt_out[index], &ctx);
}
return count;
}
/* Return nonzero if any computed hash in the batch matches the first
 * ARCH_SIZE bytes of the reference binary (fast prefilter; cmp_one
 * does the full-width compare). */
static int cmp_all(void *binary, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        if (memcmp(binary, crypt_out[i], ARCH_SIZE) == 0)
            return 1;
    }
    return 0;
}
/* Full BINARY_SIZE comparison for one candidate flagged by cmp_all(). */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* No additional verification beyond cmp_one(); always reports a match. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Cost metric: report the PBKDF2 iteration count stored in the salt. */
static unsigned int iteration_count(void *salt)
{
    const sxc_salt *s = salt;
    return (unsigned int) s->iterations;
}
/* John the Ripper format descriptor: first brace group is the format's
 * static parameters, second group wires in the callbacks defined above
 * (fmt_default_* entries are presumably library-provided defaults --
 * TODO confirm against formats.h). */
struct fmt_main fmt_opencl_sxc = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{
/* tunable-cost name, reported by iteration_count() below */
"iteration count",
},
{ FORMAT_TAG },
sxc_tests
}, {
init,
done,
reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
triplet.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* These codes were originally parts of spglib, but only developed */
/* and used for phono3py. Therefore these were moved from spglib to */
/* phono3py. This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stddef.h>
#include <mathfunc.h>
#include <triplet_h/triplet.h>
#include <triplet_h/triplet_iw.h>
#include <triplet_h/triplet_kpoint.h>
static size_t get_triplets_reciprocal_mesh_at_q(size_t *map_triplets,
size_t *map_q,
int (*grid_address)[3],
const int grid_point,
const int mesh[3],
const int is_time_reversal,
const int num_rot,
TPLCONST int (*rotations)[3][3],
const int swappable);
/* Public entry point: collect the BZ triplets containing grid_point.
 * Thin forwarder to the implementation in triplet_kpoint.c; returns the
 * number of triplets written. */
size_t tpl_get_BZ_triplets_at_q(size_t (*triplets)[3],
                                const size_t grid_point,
                                TPLCONST int (*bz_grid_address)[3],
                                const size_t *bz_map,
                                const size_t *map_triplets,
                                const size_t num_map_triplets,
                                const int mesh[3])
{
  size_t num_triplets;

  num_triplets = tpk_get_BZ_triplets_at_q(triplets, grid_point,
                                          bz_grid_address, bz_map,
                                          map_triplets, num_map_triplets,
                                          mesh);
  return num_triplets;
}
/* Public entry point: build the irreducible-triplet mapping at grid_point.
 * Forwards to the static helper below.
 * NOTE(review): grid_point is size_t here but the helper takes int, so
 * values above INT_MAX would be truncated -- presumably mesh sizes never
 * get that large; confirm with callers. */
size_t tpl_get_triplets_reciprocal_mesh_at_q(size_t *map_triplets,
                                             size_t *map_q,
                                             int (*grid_address)[3],
                                             const size_t grid_point,
                                             const int mesh[3],
                                             const int is_time_reversal,
                                             const int num_rot,
                                             TPLCONST int (*rotations)[3][3],
                                             const int swappable)
{
  size_t num_ir;

  num_ir = get_triplets_reciprocal_mesh_at_q(map_triplets, map_q,
                                             grid_address, grid_point,
                                             mesh, is_time_reversal,
                                             num_rot, rotations, swappable);
  return num_ir;
}
/* Tetrahedron-method integration weights for all triplets.
 * Builds sign-adjusted relative grid addresses for the triplet type,
 * then computes per-triplet weights into iw / iw_zero (both laid out as
 * num_triplets x num_band0 x num_band1 x num_band2).
 * Parallelizes over triplets when openmp_per_triplets is nonzero;
 * openmp_per_bands is passed through to the per-triplet worker. */
void tpl_get_integration_weight(double *iw,
char *iw_zero,
const double *frequency_points,
const size_t num_band0,
TPLCONST int relative_grid_address[24][4][3],
const int mesh[3],
TPLCONST size_t (*triplets)[3],
const size_t num_triplets,
TPLCONST int (*bz_grid_address)[3],
const size_t *bz_map,
const double *frequencies1,
const size_t num_band1,
const double *frequencies2,
const size_t num_band2,
const size_t tp_type,
const int openmp_per_triplets,
const int openmp_per_bands)
{
size_t i, num_band_prod;
int tp_relative_grid_address[2][24][4][3];
tpl_set_relative_grid_address(tp_relative_grid_address,
relative_grid_address,
tp_type);
/* stride between consecutive triplets in iw / iw_zero */
num_band_prod = num_band0 * num_band1 * num_band2;
#pragma omp parallel for if (openmp_per_triplets)
for (i = 0; i < num_triplets; i++) {
tpi_get_integration_weight(iw + i * num_band_prod,
iw_zero + i * num_band_prod,
frequency_points, /* f0 */
num_band0,
tp_relative_grid_address,
mesh,
triplets[i],
num_triplets,
bz_grid_address,
bz_map,
frequencies1, /* f1 */
num_band1,
frequencies2, /* f2 */
num_band2,
tp_type,
openmp_per_bands);
}
}
/* Smearing-method integration weights (Gaussian of width sigma) for all
 * triplets; counterpart of tpl_get_integration_weight above.
 * sigma_cutoff scales sigma into an absolute cutoff passed to the worker.
 * Always parallelizes over triplets. */
void tpl_get_integration_weight_with_sigma(double *iw,
char *iw_zero,
const double sigma,
const double sigma_cutoff,
const double *frequency_points,
const size_t num_band0,
TPLCONST size_t (*triplets)[3],
const size_t num_triplets,
const double *frequencies,
const size_t num_band,
const size_t tp_type)
{
size_t i, num_band_prod, const_adrs_shift;
double cutoff;
cutoff = sigma * sigma_cutoff;
/* per-triplet stride in iw / iw_zero */
num_band_prod = num_band0 * num_band * num_band;
/* offset separating the two halves of the output buffer */
const_adrs_shift = num_triplets * num_band0 * num_band * num_band;
#pragma omp parallel for
for (i = 0; i < num_triplets; i++) {
tpi_get_integration_weight_with_sigma(
iw + i * num_band_prod,
iw_zero + i * num_band_prod,
sigma,
cutoff,
frequency_points,
num_band0,
triplets[i],
const_adrs_shift,
frequencies,
num_band,
tp_type,
0);
}
}
/* Return 1 when the triplet's three grid addresses sum to zero on every
 * Cartesian component (a Normal process, no reciprocal-lattice shift),
 * and 0 otherwise (an Umklapp process).  grid_address is row-major,
 * 3 ints per grid point. */
int tpl_is_N(const size_t triplet[3], const int *grid_address)
{
  int axis, member;

  for (axis = 0; axis < 3; axis++) {
    int component_sum = 0;
    for (member = 0; member < 3; member++) { /* 1st, 2nd, 3rd triplet */
      component_sum += grid_address[triplet[member] * 3 + axis];
    }
    if (component_sum != 0) {
      return 0;
    }
  }
  return 1;
}
/* Fill tp_relative_grid_address with two copies of the tetrahedron
 * relative addresses: copy 0 as-is, copy 1 possibly sign-flipped.
 * For tp_type 2 and 3 (q1+q2+q3=G) stepping q2 by +1 must be balanced
 * by stepping q3 by -1 to keep G fixed, so the second copy is negated;
 * for tp_type 4 (q+k_i-k_f=G) both copies keep the original sign. */
void tpl_set_relative_grid_address(
  int tp_relative_grid_address[2][24][4][3],
  TPLCONST int relative_grid_address[24][4][3],
  const size_t tp_type)
{
  size_t j, k, l;
  int second_sign;

  second_sign = ((tp_type == 2) || (tp_type == 3)) ? -1 : 1;

  for (j = 0; j < 24; j++) {
    for (k = 0; k < 4; k++) {
      for (l = 0; l < 3; l++) {
        tp_relative_grid_address[0][j][k][l] =
          relative_grid_address[j][k][l];
        tp_relative_grid_address[1][j][k][l] =
          relative_grid_address[j][k][l] * second_sign;
      }
    }
  }
}
/* Copy the rotation matrices into a MatINT container, delegate the
 * irreducible-triplet search to triplet_kpoint.c, and free the
 * container.  Returns the number of irreducible triplets. */
static size_t get_triplets_reciprocal_mesh_at_q(size_t *map_triplets,
size_t *map_q,
int (*grid_address)[3],
const int grid_point,
const int mesh[3],
const int is_time_reversal,
const int num_rot,
TPLCONST int (*rotations)[3][3],
const int swappable)
{
MatINT *rot_real;
int i;
size_t num_ir;
rot_real = mat_alloc_MatINT(num_rot);
for (i = 0; i < num_rot; i++) {
mat_copy_matrix_i3(rot_real->mat[i], rotations[i]);
}
num_ir = tpk_get_ir_triplets_at_q(map_triplets,
map_q,
grid_address,
grid_point,
mesh,
is_time_reversal,
rot_real,
swappable);
mat_free_MatINT(rot_real);
return num_ir;
}
|
omp_zherk_batch.c | /**
* @file omp_zherk_batch.c
*
* @brief BBLAS zherk_batch double _Complex routine.
*
* BBLAS is a software package provided by Univ. of Manchester,
* Univ. of Tennessee.
*
* @version 1.0.0
* @author Samuel D. Relton
* @author Pedro V. Lara
* @author Mawussi Zounon
* @date 2016-02-20
*
**/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* Code generation
* @precisions normal z -> c
**/
#endif
#include<cblas.h>
#include "bblas_omp.h"
#include "bblas.h"
#include <omp.h>
#define COMPLEX
/**
Purpose
-------
<b>zherk_batch</b> is an OpenMP version of zherk_batch.
It performs the matrix-matrix operations
arrayC[i] = alpha[i]*arrayA[i]*arrayA[i]**H + beta[i]*arrayC[i], or
arrayC[i] = alpha[i]*arrayA[i]**H *arrayA[i] + beta[i]*arrayC[i],
where alpha[i] and beta[i] are real scalars, each arrayC[i] is an
N[i] by N[i] Hermitian matrix, and each arrayA[i] is an N[i] by K[i] matrix in the
first case and a K[i] by N[i] matrix in the second case.
Fixed and Variable Batch Operations
-----------------------------------
Two types of batch operation are supported depending upon the value of batch_opts.
When <tt>batch_opts = BBLAS_VARIABLE</tt>
- all parameters that are arrays must have length at least batch_count.
- all parameters that are arrays must have all values set.
When <tt>batch_opts = BBLAS_FIXED</tt>
- all parameters that are arrays (except for arrayA, arrayC, and info)
must have length at least one.
- all parameters that are arrays (except for arrayA, arrayC, and info)
need only to have their first value set.
This means that for a <tt>BBLAS_FIXED</tt> batch,
the values of uplo[0], trans[0], N[0], K[0],
alpha[0], beta[0], lda[0], and ldc[0] are used for all computations.
Parameters
----------
@param[in]
uplo Array of <tt>enum BBLAS_UPLO</tt>.
On entry, uplo[i] specifies whether the upper or
lower triangular part of the matrix arrayC[i]
is to be referenced as follows:
- = 'BblasUpper' Only the upper triangular part of
arrayC[i] is to be referenced.
- = 'BblasLower' Only the lower triangular part of
arrayC[i] is to be referenced.
@param[in]
trans Array of <tt>enum BBLAS_TRANS</tt>.
On entry, trans[i] specifies the operation to be
performed as follows:
- = 'BblasNoTrans' arrayC[i] = alpha[i]*arrayA[i]*arrayA[i]**H + beta[i]*arrayC[i].
- = 'BblasConjTrans' arrayC[i] = alpha[i]*arrayA[i]**H *arrayA[i] + beta[i]*arrayC[i].
@param[in]
N Array of <tt>int</tt>.
Each element N[i] specifies the number of rows and columns of the matrix
arrayC[i]. N[i] must be greater than zero.
@param[in]
K Array of <tt>int</tt>.
On entry with trans[i] = 'BblasNoTrans', K[i] specifies the
number of columns of the matrix arrayA[i],
and upon entry with trans[i] = 'BblasConjTrans',
K[i] specifies the number of rows of the matrix arrayA[i].
K[i] must be greater than zero.
@param[in]
alpha Array of <tt>double</tt> (real scalars, as required by HERK).
@param[in]
arrayA Array of pointers.
Each element arrayA[i] is a pointer to a COMPLEX_16 matrix of
dimension lda[i] by Ka[i],
where Ka[i] = K[i] when transA[i] = BblasNoTrans and is N[i] otherwise.
Before entry with transA[i] = BblasNoTrans, the leading N[i] by K[i]
part of the arrayA[i] must contain the elements of arrayA[i], otherwise
the leading K[i] by N[i] part of the arrayA[i] must contain the
elements of arrayA[i].
@param[in]
lda Array of <tt>int</tt>.
On entry, lda[i] specifies the first dimension of arrayA[i] as declared
in the calling (sub) program. When transA[i] = BblasNoTrans then
lda[i] must be at least max( 1, N[i] ), otherwise lda[i] must be at
least max( 1, K[i] ).
@param[in]
beta Array of <tt>double</tt> (real scalars, as required by HERK).
When beta[i] is set to zero arrayC[i] need not be set on input.
@param[in,out]
arrayC Array of pointers.
Each elements arrayC[i] is a pointer to a COMPLEX_16 matrix of
dimension ldc[i] by N[i].
Before entry with uplo[i] = 'BblasUpper', the leading
N[i] by N[i] upper triangular part of the arrayC[i] must con-
tain the upper triangular part of the hermitian
matrix and the strictly lower triangular part of arrayC[i]
is not referenced. On exit, the upper triangular
part of the arrayC[i] is overwritten by the upper tri-
angular part of the updated matrix.
Before entry with uplo[i] = 'BblasLower', the leading N[i] by N[i] lower
triangular part of the arrayC[i] must contain the lower
triangular part of the hermitian matrix and the
strictly upper triangular part of arrayC[i] is not refer-
enced. On exit, the lower triangular part of the
arrayC[i] is overwritten by the lower triangular part
of the updated matrix.
Note that the imaginary parts of the diagonal elements need not be set,
they are assumed to be zero,
and on exit they are set to zero.
@param[in]
ldc Array of <tt>int</tt>.
On entry, ldc[i] specifies the first dimension of arrayC[i] as declared
in the calling (sub) program. Each element ldc must be at least max( 1, N[i] ).
@param[in]
batch_count <tt>int</tt>
The number of matrices to operate on.
@param[in]
batch_opts <tt>enum BBLAS_OPTS</tt>
One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
batch operation required.
@param[out]
info Array of <tt>int</tt>.
Each element info[i] is the error return code of the ith zherk in the batch,
these need not be set on entry.
The error codes can be found in bblas_macros.h.
**/
/* OpenMP batched ZHERK driver.  Validates arguments (once for a FIXED
 * batch, per-matrix for a VARIABLE batch), then dispatches each update
 * to cblas_zherk in a parallel loop.  Per-matrix status goes in info[].
 * See the Doxygen block above for the full parameter contract. */
void omp_zherk_batch(
    const enum BBLAS_UPLO *uplo, const enum BBLAS_TRANS *trans,
    const int *N, const int *K, const double *alpha,
    const BBLAS_Complex64_t **arrayA, const int *lda,
    const double *beta, BBLAS_Complex64_t **arrayC,
    const int *ldc, const int batch_count, enum BBLAS_OPTS batch_opts, int *info)
{
    /* Local variables */
    int first_index = 0;
    int batch_iter;
    int LDA;
    char func_name[15] = "zherk_batch";

    /* Check input arguments.  A negative batch_count is reported but the
     * function then falls through; the loops below simply do no work. */
    if (batch_count < 0)
    {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
    }
    if (batch_opts == BBLAS_FIXED)
    {
        /* FIXED batch: only element [0] of each parameter array is used. */
        if ((uplo[first_index] != BblasUpper) &&
            (uplo[first_index] != BblasLower))
        {
            xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_UPLO;
            }
            return;
        }
        if ((trans[first_index] != BblasNoTrans) &&
            (trans[first_index] != BblasTrans) &&
            (trans[first_index] != BblasConjTrans))
        {
            xerbla_batch(func_name, BBLAS_ERR_TRANS, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_TRANS;
            }
            return;
        }
        if (N[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_N, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_N;
            }
            return;
        }
        if (K[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_K, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_K;
            }
            return;
        }
        /* Required leading dimension depends on the operation side. */
        if (trans[first_index] == BblasNoTrans)
        {
            LDA = N[first_index];
        } else
        {
            LDA = K[first_index];
        }
        if (lda[first_index] < max(1, LDA)){
            xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                /* BUG FIX: was info[first_index], which repeatedly wrote
                 * element 0 and left the rest of info[] unset; fill the
                 * whole array like every other error path here. */
                info[batch_iter] = BBLAS_ERR_LDA;
            }
            return;
        }
        if (ldc[first_index] < max(1, N[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDC;
            }
            return;
        }
        /* Particular case: the update is a no-op for every matrix. */
        if (N[first_index] == 0 ||
            ((K[first_index] == 0 || alpha[first_index] == (double)0.0) &&
             (beta[first_index] == (double)1.0)))
        {
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_SUCCESS;
            }
            return;
        }
#pragma omp parallel for private(batch_iter)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Call to cblas_zherk: shared scalars, per-matrix data. */
            cblas_zherk(
                BblasColMajor,
                uplo[first_index],
                trans[first_index],
                N[first_index],
                K[first_index],
                alpha[first_index],
                arrayA[batch_iter],
                lda[first_index],
                beta[first_index],
                arrayC[batch_iter],
                ldc[first_index]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        } /* END FIXED SIZE FOR LOOP */
    }else if (batch_opts == BBLAS_VARIABLE)
    {
        /* VARIABLE batch: validate and dispatch each matrix on its own;
         * a bad matrix is skipped (continue), not fatal for the batch. */
#pragma omp parallel for private(batch_iter, LDA)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Check input arguments */
            if ((uplo[batch_iter] != BblasUpper) &&
                (uplo[batch_iter] != BblasLower))
            {
                xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
                info[batch_iter] = BBLAS_ERR_UPLO;
                continue;
            }
            if ((trans[batch_iter] != BblasNoTrans) &&
                (trans[batch_iter] != BblasTrans) &&
                (trans[batch_iter] != BblasConjTrans))
            {
                xerbla_batch(func_name, BBLAS_ERR_TRANS, batch_iter);
                info[batch_iter] = BBLAS_ERR_TRANS;
                continue;
            }
            if (N[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
                info[batch_iter] = BBLAS_ERR_N;
                continue;
            }
            if (K[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_K, batch_iter);
                info[batch_iter] = BBLAS_ERR_K;
                continue;
            }
            if (trans[batch_iter] == BblasNoTrans){
                LDA = N[batch_iter];
            }
            else
            {
                LDA = K[batch_iter];
            }
            if (lda[batch_iter] < max(1, LDA)){
                xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDA;
                continue;
            }
            if (ldc[batch_iter] < max(1, N[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDC;
                continue;
            }
            /* Particular case: no-op update for this matrix. */
            if (N[batch_iter] == 0 ||
                ((K[batch_iter] == 0 || alpha[batch_iter] == (double)0.0) &&
                 (beta[batch_iter] == (double)1.0)))
            {
                info[batch_iter] = BBLAS_SUCCESS;
                continue;
            }
            cblas_zherk(
                BblasColMajor,
                uplo[batch_iter],
                trans[batch_iter],
                N[batch_iter],
                K[batch_iter],
                alpha[batch_iter],
                arrayA[batch_iter],
                lda[batch_iter],
                beta[batch_iter],
                arrayC[batch_iter],
                ldc[batch_iter]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        }
    }else
    {
        /* Unknown batch_opts value. */
        xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
    }
}
#undef COMPLEX
|
HelloOpenMP_fix4.c | #include <stdio.h>
#include <omp.h>
/* Spawn a parallel region; the master thread alone prints the greeting
 * along with the team size and its own thread id. */
int main(int argc, char *argv[])
{
    #pragma omp parallel
    {
        int team_size = omp_get_num_threads();
        int my_id = omp_get_thread_num();

        #pragma omp master
        {
            printf("Goodbye slow serial world and Hello OpenMP!\n");
            printf("  I have %d thread(s) and my thread id is %d\n",team_size,my_id);
        }
    }
}
|
axpy_float_avx2.c | //axpy.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N_RUNS 20
#define N 102400000
// Read the wall clock in seconds (sub-second resolution).
// IMPROVEMENT: the original used ftime()/<sys/timeb.h>, which was removed
// from POSIX.1-2008; C11 timespec_get (declared in <time.h>, already
// included above) provides the same epoch-seconds contract.
double read_timer() {
    struct timespec ts;
    if (timespec_get(&ts, TIME_UTC) != TIME_UTC) {
        return 0.0;  // clock unavailable; callers only use differences
    }
    return (double) ts.tv_sec + (double) ts.tv_nsec / 1.0e9;
}
// Fill both length-N vectors with pseudo-random floats in [0, 10].
void init(float *X, float *Y) {
    int i;
    for (i = 0; i < N; i++) {
        X[i] = (float) rand() / (float) (RAND_MAX / 10.0);
        Y[i] = (float) rand() / (float) (RAND_MAX / 10.0);
    }
}
//Our sum function- what it does is pretty straight-forward.
// Computes Y := a*X + Y over the N global elements.
// simdlen(8): request 8-wide float lanes (one 256-bit AVX2 vector).
void axpy(float *X, float *Y, float a) {
#pragma omp simd simdlen(8)
for (int i = 0; i<N; i++) {
Y[i] += a * X[i];
}
}
// Debug functions
// Scalar reference implementation of Y := a*X + Y, used for checking.
void axpy_serial(float *X, float *Y, float a) {
    const float *stop = X + N;
    while (X < stop) {
        *Y += a * (*X);
        ++X;
        ++Y;
    }
}
// Print the first eight entries of a vector, bracketed, two decimals each.
void print_vector(float *vector) {
    int k;
    printf("[");
    for (k = 0; k < 8; k++)
        printf("%.2f ", vector[k]);
    puts("]");
}
// Report how far two length-N vectors differ.
// BUG FIX: the original summed *signed* differences, so errors of opposite
// sign could cancel and a wrong result could still report ~0.  Accumulate
// absolute differences instead (branch form avoids needing <math.h>).
float check(float *A, float *B){
    float difference = 0;
    for (int i = 0; i < N; i++) {
        float d = A[i] - B[i];
        difference += (d < 0.0f) ? -d : d;
    }
    return difference;
}
// Benchmark driver: times the SIMD axpy against the serial version over
// N_RUNS repetitions and prints runtime, GFLOPS, and a correctness check.
int main(int argc, char **argv) {
    //Set everything up
    float *X = malloc(sizeof(float)*N);
    float *Y = malloc(sizeof(float)*N);
    float *Y_serial = malloc(sizeof(float)*N);
    float a = 3.14;
    // BUG FIX: the original never checked ~1.2 GB of allocations.
    if (X == NULL || Y == NULL || Y_serial == NULL) {
        fprintf(stderr, "allocation failed\n");
        free(X);
        free(Y);
        free(Y_serial);
        return 1;
    }
    srand(time(NULL));
    init(X, Y);
    for (int i = 0; i<N; i++) Y_serial[i] = Y[i];
    print_vector(Y);
    print_vector(X);
    printf("%.2f\n", a);
    puts("=\n");
    //warming up
    axpy(X, Y, a);
    axpy_serial(X, Y_serial, a);
    init(X, Y);
    for (int i = 0; i<N; i++) Y_serial[i] = Y[i];
    double t = 0;
    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        axpy(X, Y, a);
    t += (read_timer() - start);
    double t_serial = 0;
    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++)
        axpy_serial(X, Y_serial, a);
    t_serial += (read_timer() - start_serial);
    print_vector(Y);
    puts("---------------------------------");
    print_vector(Y_serial);
    // BUG FIX: axpy does 2 flops per element (one mul + one add), so total
    // work is 2*N*N_RUNS flops; the original formula multiplied by N twice
    // more, inflating GFLOPS by a factor of N^2.
    double gflops = (2.0 * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = (2.0 * N * N_RUNS) / (1.0e9 * t_serial);
    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("AXPY (SIMD):\t\t%4f\t%4f\n", t/N_RUNS, gflops);
    printf("AXPY (Serial):\t\t%4f\t%4f\n", t_serial/N_RUNS, gflops_serial);
    printf("Correctness check: %f\n", check(Y,Y_serial));
    free(X);
    free(Y);
    free(Y_serial);
    return 0;
}
|
test_zpotrf_nopack.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "test.h"
#include "flops.h"
#include "plasma.h"
#include <plasma_core_blas.h>
#include "core_lapack.h"
#include <assert.h>
#include <math.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#define COMPLEX
#define A(i_, j_) A[(i_) + (size_t)lda*(j_)]
/***************************************************************************//**
*
* @brief Tests ZPOTRF.
*
* @param[in,out] param - array of parameters
* @param[in] run - whether to run test
*
* Sets flags in param indicating which parameters are used.
* If run is true, also runs test and stores output parameters.
******************************************************************************/
/***************************************************************************//**
 *
 * @brief Tests ZPOTRF (non-packed tile layout variant).
 *
 * @param[in,out] param - array of parameters
 * @param[in]     run - whether to run test
 *
 * Sets flags in param indicating which parameters are used.
 * If run is true, also runs test and stores output parameters.
 *
 * BUG FIX: the inlined plasma_zpotrf argument checks had been pasted in
 * with their original "return <error code>;" statements, which is invalid
 * in this void function and also leaked A/Aref.  Those exits now free the
 * buffers and return without a value.
 ******************************************************************************/
void test_zpotrf_nopack(param_value_t param[], bool run)
{
    //================================================================
    // Mark which parameters are used.
    //================================================================
    param[PARAM_UPLO   ].used = true;
    param[PARAM_DIM    ].used = PARAM_USE_N;
    param[PARAM_PADA   ].used = true;
    param[PARAM_NB     ].used = true;
    param[PARAM_ZEROCOL].used = true;
    if (! run)
        return;

    //================================================================
    // Set parameters.
    //================================================================
    plasma_enum_t uplo = plasma_uplo_const(param[PARAM_UPLO].c);
    int n = param[PARAM_DIM].dim.n;
    int lda = imax(1, n + param[PARAM_PADA].i);
    int test = param[PARAM_TEST].c == 'y';
    double tol = param[PARAM_TOL].d * LAPACKE_dlamch('E');

    //================================================================
    // Set tuning parameters.
    //================================================================
    plasma_set(PlasmaTuning, PlasmaDisabled);
    plasma_set(PlasmaNb, param[PARAM_NB].i);

    //================================================================
    // Allocate and initialize arrays.
    //================================================================
    plasma_complex64_t *A =
        (plasma_complex64_t*)malloc((size_t)lda*n*sizeof(plasma_complex64_t));
    assert(A != NULL);

    int seed[] = {0, 0, 0, 1};
    lapack_int retval;
    retval = LAPACKE_zlarnv(1, seed, (size_t)lda*n, A);
    assert(retval == 0);

    //================================================================
    // Make the A matrix symmetric/Hermitian positive definite.
    // It increases diagonal by n, and makes it real.
    // It sets Aji = conj( Aij ) for j < i, that is, copy lower
    // triangle to upper triangle.
    //================================================================
    for (int i = 0; i < n; i++) {
        A(i, i) = creal(A(i, i)) + n;
        for (int j = 0; j < i; j++) {
            A(j, i) = conj(A(i, j));
        }
    }
    int zerocol = param[PARAM_ZEROCOL].i;
    if (zerocol >= 0 && zerocol < n)
        memset(&A[zerocol*lda], 0, n*sizeof(plasma_complex64_t));

    plasma_complex64_t *Aref = NULL;
    if (test) {
        Aref = (plasma_complex64_t*)malloc(
            (size_t)lda*n*sizeof(plasma_complex64_t));
        assert(Aref != NULL);
        memcpy(Aref, A, (size_t)lda*n*sizeof(plasma_complex64_t));
    }

    //================================================================
    // Run and time PLASMA.
    //================================================================
    //int plainfo = plasma_zpotrf(uplo, n, A, lda);
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        free(A);
        free(Aref);     // free(NULL) is a no-op
        return;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        free(A);
        free(Aref);
        return;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        free(A);
        free(Aref);
        return;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        free(A);
        free(Aref);
        return;
    }
    // quick return for an empty problem
    if (imax(n, 0) == 0) {
        free(A);
        free(Aref);
        return;
    }
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_potrf(plasma, PlasmaComplexDouble, n);
    // Set tiling parameters.
    int nb = plasma->nb;
    // Create tile matrix.
    plasma_desc_t AA;
    int retval1;
    retval1 = plasma_desc_triangular_create(PlasmaComplexDouble, uplo, nb, nb,
                                            n, n, 0, 0, n, n, &AA);
    if (retval1 != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        free(A);
        free(Aref);
        return;
    }
    // Initialize sequence.
    plasma_sequence_t sequence;
    retval1 = plasma_sequence_init(&sequence);
    // Initialize request.
    plasma_request_t request;
    retval1 = plasma_request_init(&request);
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_ztr2desc(A, lda, AA, &sequence, &request);
    }
    plasma_time_t start = omp_get_wtime();
    #pragma omp parallel
    #pragma omp master
    {
        // Call the tile async function.
        plasma_omp_zpotrf(uplo, AA, &sequence, &request);
    }
    plasma_time_t stop = omp_get_wtime();
    plasma_time_t time = stop-start;
    param[PARAM_TIME].d = time;
    param[PARAM_GFLOPS].d = flops_zpotrf(n) / time / 1e9;
    #pragma omp parallel
    #pragma omp master
    {
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2tr(AA, A, lda, &sequence, &request);
    }
    // implicit synchronization
    // Free matrix A in tile layout.
    plasma_desc_destroy(&AA);
    int plainfo = sequence.status;
    //================================================================
    // Test results by comparing to a reference implementation.
    //================================================================
    if (test) {
        int lapinfo = LAPACKE_zpotrf(LAPACK_COL_MAJOR,
                                     lapack_const(uplo), n,
                                     Aref, lda);
        if (lapinfo == 0) {
            plasma_complex64_t zmone = -1.0;
            cblas_zaxpy((size_t)lda*n, CBLAS_SADDR(zmone), Aref, 1, A, 1);
            double work[1];
            double Anorm = LAPACKE_zlanhe_work(
                LAPACK_COL_MAJOR, 'F', lapack_const(uplo), n, Aref, lda, work);
            double error = LAPACKE_zlange_work(
                LAPACK_COL_MAJOR, 'F', n, n, A, lda, work);
            if (Anorm != 0)
                error /= Anorm;
            param[PARAM_ERROR].d = error;
            param[PARAM_SUCCESS].i = error < tol;
        }
        else {
            // Factorization failed: success iff PLASMA reported the same info.
            if (plainfo == lapinfo) {
                param[PARAM_ERROR].d = 0.0;
                param[PARAM_SUCCESS].i = 1;
            }
            else {
                param[PARAM_ERROR].d = INFINITY;
                param[PARAM_SUCCESS].i = 0;
            }
        }
    }
    //================================================================
    // Free arrays.
    //================================================================
    free(A);
    if (test)
        free(Aref);
}
|
firstprivate2.c | #include <stdio.h>
#include "assert.h"
#include <unistd.h>
#include <omp.h>
#define N 2
#define NUM_THREADS 3
/* OpenMP offload test: each host thread launches a target region with a
 * firstprivate copy of its thread id, writes into the mapped arrays, and
 * increments the (private) copy; the host then verifies the original
 * value is unchanged and that every array slot was written. */
int main(){
    int array1[NUM_THREADS];
    int array2[N*NUM_THREADS];
    int errors = 0;
    for (int i = 0; i < NUM_THREADS; i++){
        array1[i] = -1;
    }
    for (int i = 0; i < N * NUM_THREADS; i++){
        array2[i] = -1;
    }
    #pragma omp target data map(from:array1, array2)
    /* CONSISTENCY FIX: use NUM_THREADS instead of the literal 3 -- the
     * arrays above are sized by the macro, so a hard-coded team size
     * would silently overflow them if NUM_THREADS were ever changed. */
    #pragma omp parallel num_threads(NUM_THREADS)
    {
        int p_val = omp_get_thread_num();
        fprintf(stderr,"Thread num: %d P_VAL: %d\n", omp_get_thread_num(),p_val);
        #pragma omp target firstprivate(p_val)
        {
            array1[p_val] = p_val + 100;
            for(int x = 0; x < N; x++)
                array2[p_val * N + x] = 200;
            p_val++;   /* mutates only the device's firstprivate copy */
        }
        if(p_val != omp_get_thread_num()){
            printf("Unwanted Behavior: P_VAL Changed to: %d. Should be %d.\n", p_val, omp_get_thread_num());
            errors = 1;
        }
    }
    //Print Arrays
    for(int i = 0; i < NUM_THREADS; i++){
        fprintf(stderr, "Array1[%d]: %d\n", i, array1[i]);
        if(array1[i] != 100 + i){
            printf("Array1 has invalid value %d at index %d.\n", array1[i], i);
            errors = 1;
        }
    }
    fprintf(stderr, "\n");
    for(int i = 0; i < N*NUM_THREADS; i++){
        fprintf(stderr, "Array2[%d]: %d\n", i, array2[i]);
        if(array2[i] != 200){
            printf("Array2 has invalid value %d at %d.\n", array2[i], i);
            errors = 1;
        }
    }
    if(errors){
        printf("Fail!\n");
        return 1;
    }
    printf("Success!\n");
    return 0;
}
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
  /// Returns the AST node bound to \c ID.
  ///
  /// Returns NULL if there was no node bound to \c ID or if there is a node but
  /// it cannot be converted to the specified type.
  template <typename T>
  const T *getNodeAs(StringRef ID) const {
    return MyBoundNodes.getNodeAs<T>(ID);
  }
  /// Type of mapping from binding identifiers to bound nodes. This type
  /// is an associative container with a key type of \c std::string and a value
  /// type of \c clang::DynTypedNode
  using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;
  /// Retrieve mapping from binding identifiers to bound nodes.
  const IDToNodeMap &getMap() const {
    return MyBoundNodes.getMap();
  }
private:
  // Only the match-finder machinery may construct BoundNodes instances.
  friend class internal::BoundNodesTreeBuilder;
  /// Create BoundNodes from a pre-filled map of bindings.
  BoundNodes(internal::BoundNodesMap &MyBoundNodes)
      : MyBoundNodes(MyBoundNodes) {}
  // Backing storage for all (id -> node) bindings exposed by this object.
  internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() { return internal::TrueMatcher(); }
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile()))
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Map the node's begin location out of any macro expansion, then ask the
  // SourceManager whether that spelling lives in the main file.
  auto &SM = Finder->getASTContext().getSourceManager();
  auto ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  return SM.isInMainFile(ExpansionLoc);
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader()))
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Resolve the node's begin location through macro expansions first; an
  // invalid location can never be inside a system header.
  auto &SM = Finder->getASTContext().getSourceManager();
  auto Loc = SM.getExpansionLoc(Node.getBeginLoc());
  return Loc.isValid() && SM.isInSystemHeader(Loc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
                          std::string, RegExp) {
  // Find the file containing the (macro-expanded) begin location of the node,
  // then test its name against the given regular expression.
  auto &SM = Finder->getASTContext().getSourceManager();
  auto Loc = SM.getExpansionLoc(Node.getBeginLoc());
  if (Loc.isInvalid()) {
    return false;
  }
  auto Entry = SM.getFileEntryForID(SM.getFileID(Loc));
  if (!Entry) {
    return false;
  }
  // NOTE(review): the regex is recompiled on every candidate node; correct,
  // but potentially slow for hot matchers.
  llvm::Regex RE(RegExp);
  return RE.match(Entry->getName());
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the statement are expanded from different
/// appearances of the macro.
///
/// FIXME: Change to be a polymorphic matcher that works on any syntactic
/// node. There's nothing `Stmt`-specific about it.
AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) {
  // Match only when both the begin and the end location of the statement
  // expand from the very same instance of the named macro.
  auto &Ctx = Finder->getASTContext();
  if (llvm::Optional<SourceLocation> Begin =
          internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Ctx))
    if (llvm::Optional<SourceLocation> End =
            internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Ctx))
      return *Begin == *End;
  return false;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches public C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPublic())
/// matches 'int a;'
// True iff the declaration's access specifier is 'public'.
AST_MATCHER(Decl, isPublic) { return AS_public == Node.getAccess(); }
/// Matches protected C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isProtected())
/// matches 'int b;'
// True iff the declaration's access specifier is 'protected'.
AST_MATCHER(Decl, isProtected) { return AS_protected == Node.getAccess(); }
/// Matches private C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPrivate())
/// matches 'int c;'
// True iff the declaration's access specifier is 'private'.
AST_MATCHER(Decl, isPrivate) { return AS_private == Node.getAccess(); }
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
// Forward directly to FieldDecl's own bit-field query.
AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); }
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Non-bit-field members never match; evaluating the width requires the
  // ASTContext, so guard first.
  if (!Node.isBitField())
    return false;
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Fields without an in-class initializer never match.
  if (const Expr *Init = Node.getInClassInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
// Delegates to FunctionDecl's own notion of being the program entry point.
AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); }
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  // Run the inner matcher on the template this specialization was stamped
  // out from, if one is recorded.
  if (const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate())
    return InnerMatcher.matches(*Specialized, Finder, Builder);
  return false;
}
/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
// Matches compiler-synthesized declarations (Decl::isImplicit).
AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Collect the node's template arguments and succeed on the first one the
  // inner matcher accepts.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, Args.begin(), Args.end(), Finder,
                             Builder);
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
/// void foo()
/// {
/// int i = 3.0;
/// }
/// \endcode
/// The matcher
/// \code
/// traverse(TK_IgnoreImplicitCastsAndParentheses,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
                              const internal::Matcher<T> &InnerMatcher) {
  // Wrap the inner matcher so that TK governs every nested match, then
  // restrict the wrapper back to the inner matcher's node kind.
  auto *Wrapped = new internal::TraversalMatcher<T>(TK, InnerMatcher);
  return internal::DynTypedMatcher::constructRestrictedWrapper(
             Wrapped, InnerMatcher.getID().first)
      .template unconditionalConvertTo<T>();
}
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
  // Same wrapping as the Matcher<T> overload, but the result stays bindable.
  auto Restricted = internal::DynTypedMatcher::constructRestrictedWrapper(
      new internal::TraversalMatcher<T>(TK, InnerMatcher),
      InnerMatcher.getID().first);
  return internal::BindableMatcher<T>(
      Restricted.template unconditionalConvertTo<T>());
}
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
         const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
  // Defer wrapping: store TK alongside the variadic matcher.
  using Variadic = internal::VariadicOperatorMatcher<T...>;
  return internal::TraversalWrapper<Variadic>(TK, InnerMatcher);
}
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
          typename T, typename ToTypes>
internal::TraversalWrapper<
    internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
                               ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
  // Defer wrapping: store TK alongside the adapting matcher.
  using Adaptor =
      internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
                                                   ToTypes>;
  return internal::TraversalWrapper<Adaptor>(TK, InnerMatcher);
}
template <template <typename T, typename P1> class MatcherT, typename P1,
          typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1<
                               MatcherT, P1, ReturnTypesF> &InnerMatcher) {
  // Defer wrapping: store TK alongside the one-parameter polymorphic matcher.
  using Poly =
      internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>;
  return internal::TraversalWrapper<Poly>(TK, InnerMatcher);
}
template <template <typename T, typename P1, typename P2> class MatcherT,
          typename P1, typename P2, typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2<
                               MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) {
  // Defer wrapping: store TK alongside the two-parameter polymorphic matcher.
  using Poly =
      internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>;
  return internal::TraversalWrapper<Poly>(TK, InnerMatcher);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) {
  // Strip all implicit AST nodes before handing the expression on.
  const Expr *Stripped = Node.IgnoreImplicit();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) {
  // Strip implicit casts only; parentheses and explicit casts survive.
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and all casts (implicit and explicit alike).
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and implicit casts; explicit casts are kept.
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  // QualType overload: drop sugar-level parens from the type first.
  QualType Stripped = Node.IgnoreParens();
  return InnerMatcher.matches(Stripped, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  // Expr overload: match against the parenthesis-stripped expression.
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
// Delegates to Expr's instantiation-dependence flag.
AST_MATCHER(Expr, isInstantiationDependent) {
  return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }
/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Out-of-range indices never match; otherwise test the N'th argument.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return N < Args.size() && InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  // Compare the argument-list length against the requested count.
  return N == internal::getTemplateSpecializationArgs(Node).size();
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type-kind arguments carry a QualType to match against.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template-kind arguments carry a TemplateName to match against.
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  // Guard on the argument kind before dereferencing the declaration.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Guard on the argument kind before dereferencing the expression.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
// Matches arguments whose kind is TemplateArgument::Integral.
AST_MATCHER(TemplateArgument, isIntegral) {
  return TemplateArgument::Integral == Node.getKind();
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only integral-kind arguments carry an integral type to match against.
  return Node.getKind() == TemplateArgument::Integral &&
         InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  // Guard on the kind first; then compare the base-10 rendering of the
  // arbitrary-precision value to the expected string.
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  return Value == Node.getAsIntegral().toString(10);
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// enum E {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
///   class X { X(int); };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
///   }
/// \endcode
AST_MATCHER(CallExpr, usesADL) {
  // True iff this call's callee was found via argument-dependent lookup.
  return Node.usesADL();
}
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if expression have it).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // An init list only has a syntactic form when it is the semantic form of
  // another list; run the inner matcher on that form if it is present.
  if (const Expr *Form = Node.getSyntacticForm())
    return InnerMatcher.matches(*Form, Finder, Builder);
  return false;
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but does NOT match (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// doesn't match the noexcept specifier in the declarations a, b, c or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The increment clause of a for-loop may be absent (e.g. 'for (;;)');
  // only delegate to the inner matcher when it exists.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The init clause of a for-loop may be absent; only delegate to the
  // inner matcher when it exists.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  // Match against the declared loop variable of the range-based for, when
  // the AST provides one.
  if (const VarDecl *LoopVar = Node.getLoopVariable())
    return InnerMatcher.matches(*LoopVar, Finder, Builder);
  return false;
}
/// Matches the range initialization statement of a for loop.
///
/// Example:
/// forStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  // Match against the expression being iterated over, when the AST
  // provides one.
  if (const Expr *RangeInit = Node.getRangeInit())
    return InnerMatcher.matches(*RangeInit, Finder, Builder);
  return false;
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
///   goto *ptr;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches fixed point literals
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
fixedPointLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference another expressions and can be met
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
///   staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
/// matches '{ [2].y = 1.0, [0].x = 1.0 }',
/// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // size() is the number of designators in this initializer expression
  // (see the matcher documentation above).
  return Node.size() == N;
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information which may or may not be present about a
/// main matching node is desired.
///
/// For example, in:
/// \code
/// class Foo {
/// int bar;
/// }
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(
/// optionally(has(
/// fieldDecl(hasName("bar")).bind("var")
/// ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Match the inner matcher directly against the argument's type.
  return InnerMatcher.matches(Node.getTypeOfArgument(), Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
/// int x;
/// int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
///
/// If the matcher is used from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  // Direct comparison against the trait kind (e.g. UETT_SizeOf, UETT_AlignOf).
  return Node.getKind() == Kind;
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::BindableMatcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Accept both standard alignof and GNU __alignof (preferred alignment).
  const auto AlignOfKind =
      anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf));
  return stmt(unaryExprOrTypeTraitExpr(allOf(AlignOfKind, InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict unaryExprOrTypeTraitExpr to the sizeof kind only.
  const auto SizeOfKind = ofKind(UETT_SizeOf);
  return stmt(unaryExprOrTypeTraitExpr(allOf(SizeOfKind, InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  // HasNameMatcher takes a list of names; wrap the single name in a
  // one-element vector.
  std::vector<std::string> Names = {std::string(Name)};
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher(Names));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  // Prefix with "::" so anchors like "::X" can match fully qualified names.
  const std::string FullName = "::" + Node.getQualifiedNameAsString();
  return llvm::Regex(RegExp).match(FullName);
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
/// class A { int operator*(); };
/// const A &operator<<(const A &a, const A &b);
/// A a;
/// a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  // The underlying matcher accepts a list of operator names (see
  // hasAnyOverloadedOperatorName below); wrap the single name in a
  // one-element initializer list.
  return internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(
      {std::string(Name)});
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// hasAnyOverloadedOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
internal::PolymorphicMatcherWithParam1<
internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>,
StringRef, internal::hasAnyOverloadedOperatorNameFunc>
hasAnyOverloadedOperatorName;
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
/// @interface NSObject @end
/// @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // C++ record: walk the (possibly indirect) inheritance chain.
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(Record, Base, Builder,
                                      /*Directly=*/false);
  // Otherwise this polymorphic node must be an Objective-C interface.
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match anything; bail out early.
  if (BaseName.empty())
    return false;
  const auto Inner = isDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // "Same or derived": the node itself is allowed to match Base directly.
  const auto SameOrDerived = anyOf(Base, isDerivedFrom(Base));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(SameOrDerived)
        .matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(SameOrDerived)
      .matches(*cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match anything; bail out early.
  if (BaseName.empty())
    return false;
  const auto Inner = isSameOrDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // C++ record: only immediate base classes are considered.
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(Record, Base, Builder,
                                      /*Directly=*/true);
  // Otherwise this polymorphic node must be an Objective-C interface.
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match anything; bail out early.
  if (BaseName.empty())
    return false;
  const auto Inner = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Stops at the first method in declaration order that satisfies
  // InnerMatcher.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  // Forwards to CXXRecordDecl::isLambda(): true for the implicit closure
  // class generated for a lambda expression.
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // Self-or-descendant: eachOf produces a result both for the node itself
  // and for every matching descendant.
  return eachOf(Matcher, forEachDescendant(Matcher));
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // Forwards InnerMatcher to HasDeclarationMatcher; the node kinds this is
  // usable on are listed in internal::HasDeclarationSupportedTypes.
  return internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // Match on the underlying declaration when one exists; otherwise fail.
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // Fetch the implicit object argument first and null-check it BEFORE
  // stripping parens/implicit casts: the original code called
  // IgnoreParenImpCasts() on the raw pointer and only checked the result,
  // dereferencing a potentially-null pointer.
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode->IgnoreParenImpCasts(), Finder,
                               Builder));
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
/// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Match the inner matcher directly against the receiver's QualType.
  return InnerMatcher.matches(Node.getReceiverType(), Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  // Forwards to ObjCMethodDecl::isClassMethod() ("+" methods).
  return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  // Forwards to ObjCMethodDecl::isInstanceMethod() ("-" methods).
  return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  // Forwards to ObjCMessageExpr::isClassMessage().
  return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  // Forwards to ObjCMessageExpr::isInstanceMessage().
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // Class messages have no instance receiver; those never match here.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (Receiver == nullptr)
    return false;
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // Use operator== instead of std::string::compare(...) == 0 — identical
  // semantics, clearer intent.
  return BaseName == Node.getSelector().getAsString();
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
StringRef,
internal::hasAnySelectorFunc>
hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) {
  assert(!RegExp.empty());
  // Match the regular expression against the textual form of the selector.
  return llvm::Regex(RegExp).match(Node.getSelector().getAsString());
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  // A null selector usually indicates a malformed AST (see doc above).
  return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  // Forwards to Selector::isUnarySelector() (selector with no ':' arguments).
  return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  // Forwards to Selector::isKeywordSelector() (selector taking arguments).
  return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  // Compare the selector's argument count against N.
  return Node.getSelector().getNumArgs() == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  // Match on the callee expression (e.g. `this->x`, `f`) when present.
  const Expr *Callee = Node.getCallee();
  return Callee != nullptr && InnerMatcher.matches(*Callee, Finder, Builder);
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  // Delegates to hasDeclaration, which for a CallExpr resolves to the
  // declaration of the callee (see the hasDeclaration doc above).
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // Nodes with no underlying type (null QualType) never match.
  const QualType NodeType = internal::getUnderlyingType(Node);
  return !NodeType.isNull() && InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // Resolve the node's type first, then match the declaration behind it.
  const QualType NodeType = internal::getUnderlyingType(Node);
  if (NodeType.isNull())
    return false;
  return qualType(hasDeclaration(InnerMatcher))
      .matches(NodeType, Finder, Builder);
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  // Implicit declarations (e.g. implicit destructors) carry no type source
  // info; those can never match.
  const TypeSourceInfo *TSI = Node.getTypeSourceInfo();
  if (TSI == nullptr)
    return false;
  return Inner.matches(TSI->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
///   class Y { public: void x(); };
///   void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
///   matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  // Compares against the type's printed form; the string must match exactly,
  // including qualifiers and pointer spelling.
  return Name == Node.getAsString();
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
///   (matcher = cxxMemberCallExpr(on(hasType(pointsTo
///      cxxRecordDecl(hasName("Y")))))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) {
  // Guard against null QualTypes and non-pointer types before descending
  // into the pointee.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
// Delegates to the QualType overload by wrapping the Decl matcher in
// hasDeclaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
///   class A {};
///   using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  // Strips sugar (typedefs, using-aliases, etc.) and qualifiers before
  // running the inner matcher.
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
///     (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
///   class X {
///     void a(X b) {
///       X &x = b;
///       const X &y = b;
///     }
///   };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Only non-null reference types can match; the referenced type is
  // exposed through getPointeeType().
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
///   typedef int &int_ref;
///   int a;
///   int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Null QualTypes never match; otherwise test the canonical (fully
  // desugared) type.
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
// Delegates to the QualType overload by wrapping the Decl matcher in
// hasDeclaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // The implicit object argument may be absent; in that case nothing matches.
  if (const Expr *ObjectArg = Node.getImplicitObjectArgument())
    return InnerMatcher.matches(*ObjectArg, Finder, Builder);
  return false;
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   class X : public Y { void g(); };
///   void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("X")))))
///   matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accepts either a direct type match or a pointer-to-matching-type, so
  // `obj.m()` and `ptr->m()` are handled uniformly.
  return onImplicitObjectArgument(
             anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
// Same logic as the QualType overload; the Decl matcher is resolved through
// the hasType(Decl) and pointsTo(Decl) overloads.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  return onImplicitObjectArgument(
             anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
///     (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
///   bool x;
///   if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) {
  // Run the inner matcher on the referenced declaration, if any.
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
///   namespace a { void f() {} }
///   using a::f;
///   void g() {
///     f();     // Matches this ..
///     a::f();  // .. but not this.
///   }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
///   matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references found through a using-shadow declaration can match.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
///   template <typename T> void foo(T);
///   template <typename T> void bar(T);
///   template <typename T> void baz(T t) {
///     foo(t);
///     bar(t);
///   }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
///     functionTemplateDecl(hasName("foo"))))
///   matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  // Succeeds on the first overload candidate that matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
                                    Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
///   int a, b;
///   int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
///   matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  // Multi-declaration statements never match, regardless of the inner
  // matcher.
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
///   bool y() { return true; }
///   bool x = y();
/// \endcode
AST_MATCHER_P(VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) {
  // getAnyInitializer also finds an initializer attached to a different
  // redeclaration of this variable.
  if (const Expr *Init = Node.getAnyInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  // True only for variables declared 'static' inside a function body.
  return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  // Automatic (stack) storage only; excludes static locals and globals.
  return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  // Complement of hasLocalStorage: globals and static locals both match.
  return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  // SD_Automatic corresponds to ordinary stack variables.
  return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
///   matches the variable declarations y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  // SD_Static covers namespace-scope variables plus static locals.
  return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  // SD_Thread corresponds to thread_local / __thread variables.
  return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
///   try {
///   } catch (int x) {
///   }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
///   void f(int x, int y);
///   f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                          CXXConstructExpr,
                                                          ObjCMessageExpr),
                          unsigned, N) {
  // Note: defaulted arguments are counted even when not spelled at the
  // call site.
  return Node.getNumArgs() == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
///     (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
///   void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr,
                                                           ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices simply fail to match.
  if (N >= Node.getNumArgs())
    return false;
  // Parentheses and implicit casts around the argument are ignored.
  return InnerMatcher.matches(*Node.getArg(N)->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
///     (matcher = initListExpr(hasInit(0, expr())))
/// \code
///   int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices simply fail to match.
  return N < Node.getNumInits() &&
         InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
///   int a, b;
///   int c;
///   int d = 2, e;
/// \endcode
/// declCountIs(2)
///   matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // DeclStmt exposes only iterators, so the count is computed by distance.
  return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
///   int a, b = 0;
///   int c;
///   int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
///       0, varDecl(hasInitializer(anything()))))
///   matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
///   matches 'int a, b = 0' as well as 'int d = 2, e;'
///   but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Reject indices past the end before advancing the iterator.
  DeclStmt::const_decl_iterator It = Node.decl_begin();
  const unsigned NumDecls = std::distance(It, Node.decl_end());
  if (N >= NumDecls)
    return false;
  std::advance(It, N);
  return InnerMatcher.matches(**It, Finder, Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
///   try {
///     // ...
///   } catch (int) {
///     // ...
///   } catch (...) {
///     // ...
///   }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // catch(...) is the only handler form without an exception declaration.
  return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
///   hasAnyConstructorInitializer(anything())
/// )))
///   record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Succeeds on the first member/base initializer that matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                    Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
///     forField(hasName("foo_"))))))
///   matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // Base-class initializers have no member field and never match.
  if (const FieldDecl *Member = Node.getAnyMember())
    return InnerMatcher.matches(*Member, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
///   struct Foo {
///     Foo() : foo_(1) { }
///     int foo_;
///   };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
///     withInitializer(integerLiteral(equals(1)))))))
///   matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  // Initializers without an init expression never match.
  if (const Expr *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
///   struct Foo {
///     Foo() { }
///     Foo(int) : foo_("A") { }
///     string foo_;
///   };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
///   will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
///   struct B {};
///   struct D : B {
///     int I;
///     D(int i) : I(i) {}
///   };
///   struct E : B {
///     E() : B() {}
///   };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
///   will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
///   struct B {};
///   struct D : B {
///     int I;
///     D(int i) : I(i) {}
///   };
///   struct E : B {
///     E() : B() {}
///   };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
///   will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
///   void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
///   matches x(1, y, 42)
/// with hasAnyArgument(...)
///   matching y
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
///   void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
///   matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Try each argument in order; bindings made by the inner matcher are only
  // committed to Builder when a match is found.
  for (const Expr *Arg : Node.arguments()) {
    BoundNodesTreeBuilder Candidate(*Builder);
    if (!InnerMatcher.matches(*Arg, Finder, &Candidate))
      continue;
    *Builder = std::move(Candidate);
    return true;
  }
  return false;
}
/// Matches any capture of a lambda expression.
///
/// Given
/// \code
///   void foo() {
///     int x;
///     auto f = [x](){};
///   }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
///   matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
                       InnerMatcher, 0) {
  // Only variable captures are considered; 'this' and VLA captures are
  // skipped. Bindings are committed only on a successful match.
  for (const LambdaCapture &LC : Node.captures()) {
    if (!LC.capturesVariable())
      continue;
    BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(*LC.getCapturedVar(), Finder, &Candidate)) {
      *Builder = std::move(Candidate);
      return true;
    }
  }
  return false;
}
/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
///   struct foo {
///     void bar() {
///       auto f = [this](){};
///     }
///   }
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
///   matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
                       internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
  // The inner matcher is intentionally ignored: any 'this' capture matches.
  return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) {
    return LC.capturesThis();
  });
}
/// Matches a constructor call expression which uses list initialization,
/// e.g. `T t{...}`.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
///   struct point { double x; double y; };
///   point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
///   class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
///   matches f(int x) {}
/// with hasParameter(...)
///   matching int x
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  // Out-of-range parameter indices simply fail to match.
  if (N >= Node.parameters().size())
    return false;
  return InnerMatcher.matches(*Node.parameters()[N], Finder, Builder);
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
/// ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Collects the bindings of every matching (argument, parameter) pair;
  // all of them are published to Builder at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  // ParamIndex tracks the callee parameter position, which lags ArgIndex by
  // one for member operator calls because of the skipped object argument.
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // The parameter is located through the callee: either the constructor
      // declaration or the called function declaration.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
///   class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
///   matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
///   matching int y
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
///   b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  // Succeeds on the first parameter that matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
                                    Node.param_end(), Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
///   void f(int i) {}
///   void g(int i, int j) {}
///   void h(int i, int j);
///   void j(int i);
///   void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
///   matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
///   matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
///   matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  // Variadic ellipsis is not counted as a parameter.
  return Node.getNumParams() == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
///   void nope();
///   [[noreturn]] void a();
///   __attribute__((noreturn)) void b();
///   struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
///   matches all of those except
/// \code
///   void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); }
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
///   class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
///   matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getReturnType(), Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
///   extern "C" void f() {}
///   extern "C" { void g() {} }
///   void h() {}
///   extern "C" int x = 1;
///   extern "C" int y = 2;
///   int z = 3;
/// \endcode
/// functionDecl(isExternC())
///   matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
///   matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  // Matches both the 'extern "C" decl' and 'extern "C" { ... }' forms.
  return Node.isExternC();
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
///   static void f() {}
///   static int i = 0;
///   extern int j;
///   int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
///   matches the function declaration f.
/// varDecl(isStaticStorageClass())
///   matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // Checks the written storage class, not the effective storage duration.
  return Node.getStorageClass() == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
///   void Func();
///   void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
///   matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  return Node.isDeleted();
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
///   class A { ~A(); };
///   class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
///   matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  return Node.isDefaulted();
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() noexcept(true);
///   void i() noexcept(false);
///   void j() throw();
///   void k() throw(int);
///   void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
///   match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Functions without a prototype have no exception specification at all.
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  return FnTy != nullptr && FnTy->hasDynamicExceptionSpec();
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() throw();
///   void i() throw(int);
///   void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
///   match the declarations of g and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;
  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;
  return FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
///   constexpr int foo = 42;
///   constexpr int bar();
///   void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
///   matches the declaration of foo.
/// functionDecl(isConstexpr())
///   matches the declaration of bar.
/// ifStmt(isConstexpr())
///   matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  // All three node classes expose an isConstexpr() accessor.
  return Node.isConstexpr();
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
///  void foo() {
///    if (int i = foobar(); i > 0) {}
///    switch (int i = foobar(); i) {}
///    for (auto& a = get_range(); auto& x : a) {}
///  }
///  void bar() {
///    if (foobar() > 0) {}
///    switch (foobar()) {}
///    for (auto& x : get_range()) {}
///  }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
///   matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
///   matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
///   matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // Statements without an init statement never match.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
///   if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // The condition can be null (e.g. 'for (;;)'), in which case there is
  // no match.
  const Expr *const Condition = Node.getCond();
  return (Condition != nullptr &&
          InnerMatcher.matches(*Condition, Finder, Builder));
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
///   (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  if (const Stmt *ThenBranch = Node.getThen())
    return InnerMatcher.matches(*ThenBranch, Finder, Builder);
  return false;
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
///   (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
///   if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  // An if statement without an else branch never matches.
  if (const Stmt *ElseBranch = Node.getElse())
    return InnerMatcher.matches(*ElseBranch, Finder, Builder);
  return false;
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
///   class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
///     has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
///     has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
///   matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
///     forEachDescendant(varDecl().bind("d")),
///     forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSpecifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  // Implemented by dropping every binding set where ID is bound to a
  // different node; the match succeeds if any binding set survives.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
///   if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
///   matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Null when the if condition is a plain expression rather than a
  // declaration.
  const DeclStmt* const DeclarationStatement =
      Node.getConditionVariableDeclStmt();
  return DeclarationStatement != nullptr &&
         InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasIndex(integerLiteral()))
///   matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *IndexExpr = Node.getIdx();
  return IndexExpr != nullptr &&
         InnerMatcher.matches(*IndexExpr, Finder, Builder);
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
///   int i[5];
///   void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
///   matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *BaseExpr = Node.getBase();
  return BaseExpr != nullptr &&
         InnerMatcher.matches(*BaseExpr, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
///   for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
///   matches 'for (;;) {}'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher dispatches on the concrete node type to fetch the body;
  // a node without a body (e.g. a function declaration) yields null.
  if (const Stmt *Body = internal::GetBodyMatcher<NodeType>::get(Node))
    return InnerMatcher.matches(*Body, Finder, Builder);
  return false;
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
///   { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
///   matches '{ {}; 1+2; }'
/// with compoundStmt()
///   matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // For a StmtExpr, CompoundStmtMatcher extracts the child CompoundStmt.
  const CompoundStmt *Compound = CompoundStmtMatcher<NodeType>::get(Node);
  if (!Compound)
    return false;
  return matchesFirstInPointerRange(InnerMatcher, Compound->body_begin(),
                                    Compound->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
///   { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
///   matches '{}'
///   but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  // CompoundStmt::size() is the number of direct child statements.
  return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
///   f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
///   matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
///   match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
///   match 3.14
/// integerLiteral(equals(42))
///   matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
///               hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
///            Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher,
    ValueT>(Value);
}

// Fixed-parameter-type overloads of equals() for bool, unsigned and double.
// NOTE(review): the trailing integer (0/1/2) appears to be the macro's
// overload-disambiguation id -- confirm against the
// AST_POLYMORPHIC_MATCHER_P_OVERLOAD macro definition.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   bool, Value, 0) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   unsigned, Value, 1) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}

// The double overload additionally supports FloatingLiteral.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   FloatingLiteral,
                                                                   IntegerLiteral),
                                   double, Value, 2) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
///   !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compare the node's spelled operator (e.g. "||", "!") with Name.
  return Node.getOpcodeStr(Node.getOpcode()) == Name;
}
/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
///    hasAnyOperatorName("+", "-")
///  Is equivalent to
///    anyOf(hasOperatorName("+"), hasOperatorName("-"))
// Variadic entry point: collects the StringRef arguments into a
// std::vector<std::string> and forwards to HasAnyOperatorNameMatcher.
extern const internal::VariadicFunction<
    internal::PolymorphicMatcherWithParam1<
        internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>,
    StringRef, internal::hasAnyOperatorNameFunc>
    hasAnyOperatorName;
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
///   if (a == b)
///     a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
///            (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
///   struct S { S& operator=(const S&); };
///   void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  // Delegates to the node's own isAssignmentOp() classification.
  return Node.isAssignmentOp();
}

/// Matches comparison operators.
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
///   if (a == b)
///     a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
///            (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
///   struct S { bool operator<(const S& other); };
///   void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isComparisonOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  // Delegates to the node's own isComparisonOp() classification.
  return Node.isComparisonOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // A missing operand (malformed AST) never matches.
  if (const Expr *LHS = Node.getLHS())
    return InnerMatcher.matches(*LHS, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // A missing operand (malformed AST) never matches.
  if (const Expr *RHS = Node.getRHS())
    return InnerMatcher.matches(*RHS, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
    const internal::Matcher<Expr> &InnerMatcher) {
  // Composed from the primitive hasLHS/hasRHS matchers.
  return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
}

/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1),
///                                              integerLiteral(equals(2)))
/// \code
///   1 + 2 // Match
///   2 + 1 // Match
///   1 + 1 // No match
///   2 + 2 // No match
/// \endcode
inline internal::Matcher<BinaryOperator>
hasOperands(const internal::Matcher<Expr> &Matcher1,
            const internal::Matcher<Expr> &Matcher2) {
  // Order-insensitive: tries both (LHS=Matcher1, RHS=Matcher2) and the
  // swapped assignment.
  return anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
               allOf(hasLHS(Matcher2), hasRHS(Matcher1)));
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///                                   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
              internal::Matcher<Expr>, InnerMatcher) {
  // A missing sub-expression (malformed AST) never matches.
  if (const Expr *SubExpr = Node.getSubExpr())
    return InnerMatcher.matches(*SubExpr, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
///   class URL { URL(string); };
///   URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
///   int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // GetSourceExpressionMatcher dispatches on the concrete node type to
  // fetch the source expression; null means no match.
  const Expr *Source =
      internal::GetSourceExpressionMatcher<NodeType>::get(Node);
  if (!Source)
    return false;
  return InnerMatcher.matches(*Source, Finder, Builder);
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
///   int *p = 0;
/// \endcode
///
/// If the matcher is use from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  // Direct comparison against the node's recorded cast kind.
  return Node.getCastKind() == Kind;
}

/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Uses the type as written in the source, not the semantic result type.
  const QualType NodeType = Node.getTypeAsWritten();
  return InnerMatcher.matches(NodeType, Finder, Builder);
}

/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Implicit casts have no written type; match against the result type.
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches TagDecl object that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
  // Delegates to TagDecl::isStruct().
  return Node.isStruct();
}

/// Matches TagDecl object that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
  // Delegates to TagDecl::isUnion().
  return Node.isUnion();
}

/// Matches TagDecl object that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
  // Delegates to TagDecl::isClass().
  return Node.isClass();
}

/// Matches TagDecl object that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
  // Delegates to TagDecl::isEnum().
  return Node.isEnum();
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
///   condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // A missing branch (malformed AST) never matches.
  if (const Expr *TrueBranch = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueBranch, Finder, Builder);
  return false;
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
///   condition ? a : b
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // A missing branch (malformed AST) never matches.
  if (const Expr *FalseBranch = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseBranch, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
///   class A {};
///   class B;  // Doesn't match, as it has no body.
///   int va;
///   extern int vb;  // Doesn't match, as it doesn't define the variable.
///   void fa() {}
///   void fb();  // Doesn't match, as it has no body.
///   @interface X
///   - (void)ma; // Doesn't match, interface is declaration.
///   @end
///   @implementation X
///   - (void)ma {}
///   @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
///   Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  // All supported node types expose isThisDeclarationADefinition().
  return Node.isThisDeclarationADefinition();
}

/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
///   void f(...);
///   void g(int);
///   template <typename... Ts> void h(Ts...);
///   void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  // Delegates to FunctionDecl::isVariadic().
  return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
///     (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
///         ofClass(hasName("A"))))))
/// \code
///   class A {
///    public:
///     A();
///   };
///   A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  // The method's parent record is matched against the inner matcher;
  // a null parent never matches.
  if (const CXXRecordDecl *OwningClass = Node.getParent())
    return InnerMatcher.matches(*OwningClass, Finder, Builder);
  return false;
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
///   class A { virtual void f(); };
///   class B : public A { void f(); };
///   class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
///   that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
///   class A1 { virtual void f(); };
///   class A2 { virtual void f(); };
///   class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
///   once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  // Accumulate one binding set per overridden method the inner matcher
  // accepts, so each accepted method yields a separate match result.
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    // Run the inner matcher on a copy of the current bindings so a failed
    // attempt cannot pollute them.
    BoundNodesTreeBuilder OverriddenBuilder(*Builder);
    const bool OverriddenMatched =
        InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
    if (OverriddenMatched) {
      Matched = true;
      Result.addMatch(OverriddenBuilder);
    }
  }
  // Replace the caller's bindings with the accumulated per-method matches.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches if the given method declaration is virtual.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches A::x
AST_MATCHER(CXXMethodDecl, isVirtual) {
  // True for methods that are virtual for any reason, per
  // CXXMethodDecl::isVirtual().
  return Node.isVirtual();
}

/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     void x();
///   };
/// \endcode
///   matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  // Requires the 'virtual' keyword to be spelled on this declaration.
  return Node.isVirtualAsWritten();
}

/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
///   class A final {};
///
///   struct B {
///     virtual void f();
///   };
///
///   struct C : B {
///     void f() final;
///   };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  // 'final' is represented as a FinalAttr on the declaration; 'template'
  // is needed because NodeType is a dependent type here.
  return Node.template hasAttr<FinalAttr>();
}

/// Matches if the given method declaration is pure.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x() = 0;
///   };
/// \endcode
///   matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
  // Delegates to CXXMethodDecl::isPure() (pure virtual, '= 0').
  return Node.isPure();
}

/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
///   void foo() const;
///   void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
  // Delegates to CXXMethodDecl::isConst().
  return Node.isConst();
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
///   A &operator=(const A &);
///   A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
  // Delegates to CXXMethodDecl::isCopyAssignmentOperator().
  return Node.isCopyAssignmentOperator();
}

/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
///   A &operator=(const A &);
///   A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  // Delegates to CXXMethodDecl::isMoveAssignmentOperator().
  return Node.isMoveAssignmentOperator();
}

/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // Matches either an actual override (non-empty overridden-methods set)
  // or a declaration carrying the 'override' attribute.
  return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}

/// Matches method declarations that are user-provided.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &) = default; // #2
///     S(S &&) = delete; // #3
///   };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  // Delegates to CXXMethodDecl::isUserProvided().
  return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     template <class T> void f() { this->f<T>(); f<T>(); }
///     int a;
///     static int b;
///   };
///   template <class T>
///   class Z {
///     void x() { this->m; }
///   };
/// \endcode
/// memberExpr(isArrow())
///   matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
///   matches this->m
/// unresolvedMemberExpr(isArrow())
///   matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  // All three supported node types expose isArrow().
  return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
  // QualType's operator-> reaches the underlying Type.
  return Node->isIntegerType();
}

/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
  // Delegates to Type::isUnsignedIntegerType().
  return Node->isUnsignedIntegerType();
}

/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
  // Delegates to Type::isSignedIntegerType().
  return Node->isSignedIntegerType();
}

/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
///   void a(char);
///   void b(wchar_t);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
  // Delegates to Type::isAnyCharacterType().
  return Node->isAnyCharacterType();
}

/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
///   int *i = nullptr;
///
///   @interface Foo
///   @end
///   Foo *f;
///
///   int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
///   matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  // Delegates to Type::isAnyPointerType().
  return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
///   void a(int);
///   void b(int const);
///   void c(const int);
///   void d(const int*);
///   void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
///   matches "void b(int const)", "void c(const int)" and
///   "void e(int const) {}". It does not match d as there
///   is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  // Note: this is QualType::isConstQualified(), i.e. top-level const only.
  return Node.isConstQualified();
}

/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
///   void a(int);
///   void b(int volatile);
///   void c(volatile int);
///   void d(volatile int*);
///   void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
///   matches "void b(int volatile)", "void c(volatile int)" and
///   "void e(int volatile) {}". It does not match d as there
///   is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  // Note: this is QualType::isVolatileQualified(), top-level volatile only.
  return Node.isVolatileQualified();
}

/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
///   typedef const int const_int;
///   const_int i;
///   int *const j;
///   int *volatile k;
///   int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  // Delegates to QualType::hasLocalQualifiers().
  return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
///   struct { int first, second; } first, second;
///   int i(second.first);
///   int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
///   matches second.first
///   but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  // Matches against the referenced member's declaration.
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // Implicit member accesses (via implicit 'this') on these two node kinds
  // have no base expression to inspect, so they never match.
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Succeeds on the first shadow declaration the inner matcher accepts.
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}

/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
///   namespace X { int a; void b(); }
///   using X::a;
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
///   matches \code using X::b \endcode
///   but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  // Matches against the declaration the using-declaration refers to.
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
///   template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   matches the template instantiation of X<A>.
///
/// But given
/// \code
///   template <typename T>  class X {}; class A {};
///   template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Fetch the specialization kind once and accept any of the three
  // instantiation kinds (implicit, explicit definition, explicit
  // declaration); explicit specializations are deliberately excluded.
  const TemplateSpecializationKind TSK = Node.getTemplateSpecializationKind();
  return TSK == TSK_ImplicitInstantiation ||
         TSK == TSK_ExplicitInstantiationDefinition ||
         TSK == TSK_ExplicitInstantiationDeclaration;
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { T i; }
///   A(0);
///   A(0U);
/// \endcode
/// functionDecl(isInstantiated())
///   matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A declaration qualifies if it, or any of its ancestors, is an
  // instantiated class or function template.
  auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                    functionDecl(isTemplateInstantiation())));
  return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}

/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
///   int j;
///   template<typename T> void A(T t) { T i; j += 42;}
///   A(0);
///   A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
///   matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
///   will NOT match j += 42; as it's shared between the template definition and
///   instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  // Unlike isInstantiated(), this requires an instantiated *ancestor*:
  // the statement itself is never the instantiation.
  return stmt(
      hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                             functionDecl(isTemplateInstantiation())))));
}

/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { }
///   template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
///   matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // The complement of isTemplateInstantiation(): only explicit
  // specializations (template<> ...) are accepted here.
  return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
                                internal::Matcher<QualType>, InnerMatcher, 0) {
  // Adapts a QualType matcher so it can run on TypeLoc nodes.
  return internal::BindableMatcher<TypeLoc>(
      new internal::TypeLocTypeMatcher(InnerMatcher));
}

/// Matches type \c bool.
///
/// Given
/// \code
///  struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
///   matches "bool func();"
AST_MATCHER(Type, booleanType) {
  // Delegates to Type::isBooleanType().
  return Node.isBooleanType();
}

/// Matches type \c void.
///
/// Given
/// \code
///  struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
///   matches "void func();"
AST_MATCHER(Type, voidType) {
  // Delegates to Type::isVoidType().
  return Node.isVoidType();
}
// Convenience alias: a variadic matcher that dyn_casts a Type down to the
// given concrete Type subclass.
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;

/// Matches builtin Types.
///
/// Given
/// \code
///   struct A {};
///   A a;
///   int b;
///   float c;
///   bool d;
/// \endcode
/// builtinType()
///   matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;

/// Matches all kinds of arrays.
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[4];
///   void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
///   matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;

/// Matches C99 complex types.
///
/// Given
/// \code
///   _Complex float f;
/// \endcode
/// complexType()
///   matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;

/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
///   int i;
///   float f;
/// \endcode
/// realFloatingPointType()
///   matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
  // Delegates to Type::isRealFloatingType().
  return Node.isRealFloatingType();
}

/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
///   struct A {};
///   A a[7];
///   int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
///   matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
                                                                  ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
///   void() {
///     int a[2];
///     int b[] = { 2, 3 };
///     int c[b[0]];
///   }
/// \endcode
/// constantArrayType()
///   matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;

/// Matches nodes that have the specified size.
///
/// Given
/// \code
///   int a[42];
///   int b[2 * 21];
///   int c[41], d[43];
///   char *s = "abcd";
///   wchar_t *ws = L"abcd";
///   char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
///   matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
///   matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
                                                          StringLiteral),
                          unsigned, N) {
  // HasSizeMatcher dispatches on the concrete node type (array element
  // count vs. string length).
  return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}

/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
///   template<typename T, int Size>
///   class array {
///     T data[Size];
///   };
/// \endcode
/// dependentSizedArrayType()
///   matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[42];
///   void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
///   matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;

/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
///   void f() {
///     int a[] = { 2, 3 }
///     int b[42];
///     int c[a[0]];
///   }
/// \endcode
/// variableArrayType()
///   matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;

/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
///   void f(int b) {
///     int a[b];
///   }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
///   varDecl(hasName("b")))))))
///   matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  // Matches against the array's size expression.
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
///   _Atomic(int) i;
/// \endcode
/// atomicType()
///   matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;

/// Matches atomic types with a specific value type.
///
/// Given
/// \code
///   _Atomic(int) i;
///   _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
///  matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));

/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
///   auto n = 4;
///   int v[] = { 2, 3 }
///   for (auto i : v) { }
/// \endcode
/// autoType()
///   matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;

/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
///   short i = 1;
///   int j = 42;
///   decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
///   matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;

/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
///   auto a = 1;
///   auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
///   matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
///   struct A { int i; };
///   int A::*ptr = &A::i;
/// \endcode
/// memberPointerType()
///   matches "int A::*ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
/// template <typename T>
/// class C { public: C(T); };
///
/// C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // Only run the inner matcher when a qualifier is actually present; an
  // elaborated type written without one never matches.
  if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
    return InnerMatcher.matches(*Qualifier, Finder, Builder);
  return false;
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Forward directly to the underlying named type (the "D" in "N::M::D").
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  // Match against the type the original (array/function) type decays to.
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  const DeclContext *DC = Node.getDeclContext();
  if (!DC) return false;
  // A DeclContext is always also a Decl; reinterpret it as one so the inner
  // Decl matcher can be applied to it.
  return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Adapt a NestedNameSpecifier matcher so it can be applied to the
  // corresponding source-location node (NestedNameSpecifierLoc).
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only specifiers that name a type (not namespaces etc.) can match.
  if (!Node.getAsType())
    return false;
  // Wrap the bare Type in an unqualified QualType for the inner matcher.
  return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // Guard against an invalid loc and non-type specifiers before matching the
  // contained TypeLoc.
  // NOTE(review): getTypeLoc() may still be null for some specifiers even when
  // getAsType() is non-null -- TODO confirm against newer Clang sources.
  return Node && Node.getNestedNameSpecifier()->getAsType() &&
         InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
///   struct A { struct B { struct C {}; }; };
///   A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A"))))
///   matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // A top-level specifier has no prefix and therefore never matches.
  const NestedNameSpecifier *NextNode = Node.getPrefix();
  if (!NextNode)
    return false;
  return InnerMatcher.matches(*NextNode, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // Match only when a prefix loc exists and the inner matcher accepts it; a
  // top-level specifier loc has no prefix and never matches.
  const NestedNameSpecifierLoc Prefix = Node.getPrefix();
  return Prefix && InnerMatcher.matches(Prefix, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Non-namespace specifiers yield a null NamespaceDecl and never match.
  const NamespaceDecl *Namespace = Node.getAsNamespace();
  return Namespace && InnerMatcher.matches(*Namespace, Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
  // Pointer comparison suffices: each Decl is a unique object in the AST.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
  // Pointer comparison suffices: each Stmt is a unique object in the AST.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
  // Pointer comparison suffices for uniqued Type nodes as well.
  return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  BoundNodesTreeBuilder Result;
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  bool Matched = false;
  // Walk the intrusive singly-linked list of case/default labels hanging off
  // the switch, collecting bindings for each label the inner matcher accepts.
  for (const SwitchCase *SC = Node.getSwitchCaseList(); SC;
       SC = SC->getNextSwitchCase()) {
    BoundNodesTreeBuilder CaseBuilder(*Builder);
    bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder);
    if (CaseMatched) {
      Matched = true;
      Result.addMatch(CaseBuilder);
    }
  }
  // Replace the caller's bindings with the accumulated per-case matches so
  // the matcher produces one result per matching label.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  // Collect bindings for every initializer the inner matcher accepts so the
  // matcher produces one match per initializer.
  for (const auto *I : Node.inits()) {
    BoundNodesTreeBuilder InitBuilder(*Builder);
    if (InnerMatcher.matches(*I, Finder, &InitBuilder)) {
      Matched = true;
      Result.addMatch(InitBuilder);
    }
  }
  // Publish the accumulated per-initializer bindings to the caller.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
  // Delegates classification entirely to CXXConstructorDecl.
  return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
  // Delegates classification entirely to CXXConstructorDecl.
  return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
  // Delegates classification entirely to CXXConstructorDecl.
  return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
  // Delegates classification entirely to CXXConstructorDecl.
  return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
///   explicit(false) S(bool); // # 7
///   explicit(true) S(char); // # 8
///   explicit(b) S(S); // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
                                        CXXConstructorDecl, CXXConversionDecl,
                                        CXXDeductionGuideDecl)) {
  // Only matches when the explicit specifier resolves to true (plain
  // 'explicit' or 'explicit(true)'); see the examples above for dependent
  // and explicit(false) cases.
  return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
///   explicit(false) S(bool); // # 7
///   explicit(true) S(char); // # 8
///   explicit(b) S(S); // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node);
  // Only 'explicit(expr)' forms carry an expression; a plain 'explicit' (or
  // no specifier at all) yields a null expression and never matches.
  if (!ES.getExpr())
    return false;
  return InnerMatcher.matches(*ES.getExpr(), Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // This is required because the spelling of the function used to determine
  // whether inline is specified or not differs between the polymorphic types.
  if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
    return FD->isInlineSpecified();
  else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
    return NSD->isInline();
  // The polymorphic macro guarantees Node is one of the supported types.
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
  // Delegates classification entirely to NamespaceDecl.
  return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
// Delegates to Decl::isInStdNamespace(); per the example above, declarations
// inside an inline namespace within 'std' (e.g. std::__1) also count.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A non-null RHS means this is a GNU "case lo ... hi:" range, which this
  // matcher deliberately does not match.
  if (Node.getRHS())
    return false;
  // For an ordinary case label, the LHS is the (single) case constant.
  return InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // Scan the declaration's attribute list for an attribute of the requested
  // kind; succeed on the first hit.
  return llvm::any_of(Node.attrs(), [AttrKind](const Attr *Attribute) {
    return Attribute->getKind() == AttrKind;
  });
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare 'return;' carries no value expression and therefore never matches.
  const auto *RetValue = Node.getRetValue();
  return RetValue != nullptr &&
         InnerMatcher.matches(*RetValue, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
  // NPC_ValueDependentIsNull: in templates, value-dependent expressions are
  // treated as potential null pointer constants.
  return Node.isNullPointerConstant(Finder->getASTContext(),
                                    Expr::NPC_ValueDependentIsNull);
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Walk up the parent chain(s) of the statement until a FunctionDecl or
  // LambdaExpr is reached; getParents() can return several parents, hence
  // the explicit worklist.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while(!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A statement inside a lambda body belongs to the lambda's call
      // operator, not to the enclosing function (see doc example above).
      if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
                              Finder, Builder)) {
        return true;
      }
    } else {
      // Neither a function nor a lambda: keep climbing toward the root.
      for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  // Delegates to NamedDecl; formal linkage ignores anonymous-namespace
  // uniqueness (see second example above).
  return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  // Deprecated in favor of hasInitializer(); kept for compatibility.
  return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  // Delegates to CXXNewExpr ('new T[n]' forms).
  return Node.isArray();
}
/// Matches placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
               internal::Matcher<Expr>, InnerMatcher) {
  // Bounds-check first; Index is 0-based into the placement-argument list
  // (so Index 1 selects "16" in 'new (Storage, 16) MyClass()').
  return Node.getNumPlacementArgs() > Index &&
         InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches any placement new expression arguments.
///
/// Given:
/// \code
///   MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
///   matches the expression 'new (Storage) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
              InnerMatcher) {
  // Succeeds if at least one placement argument satisfies the inner matcher.
  return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) {
    return InnerMatcher.matches(*Arg, Finder, Builder);
  });
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // NOTE(review): getArraySize() returns an optional that is dereferenced
  // unconditionally after the isArray() check; this assumes every array
  // new-expression here carries a size expression -- TODO confirm.
  return Node.isArray() && *Node.getArraySize() &&
         InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  // Delegates to CXXRecordDecl; forward declarations do not match.
  return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  // Delegates to EnumDecl ('enum class' / 'enum struct' forms).
  return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Only prototyped functions (FunctionProtoType) record a trailing return
  // type; anything else never matches.
  if (const auto *F = Node.getType()->getAs<FunctionProtoType>())
    return F->hasTrailingReturn();
  return false;
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;
  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();
  // For an elidable constructor call, unwrap the MaterializeTemporaryExpr
  // around its first argument and match the underlying expression instead.
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
                                    Builder);
      }
    }
  }
  // Not the elidable-constructor pattern: match the node as-is.
  return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  // Delegates to the AST node itself: standalone directives (e.g.
  // `omp taskyield`) are those that cannot carry a structured block.
  return Node.isStandaloneDirective();
}
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  // Standalone directives (e.g. `omp taskyield`) carry no structured block,
  // so they can never satisfy the inner matcher; short-circuit before
  // dereferencing getStructuredBlock().
  return !Node.isStandaloneDirective() &&
         InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  // Succeed if any clause attached to this directive satisfies the inner
  // matcher; the first match wins.
  ArrayRef<OMPClause *> AllClauses = Node.clauses();
  return matchesFirstInPointerRange(InnerMatcher, AllClauses.begin(),
                                    AllClauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  // True only for `default(none)`.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  // True only for `default(shared)`.
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))``
/// matches ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, the ``OpenMPClauseKind``
/// parameter should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default")``.
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // Consult the OpenMP directive/clause compatibility table for the OpenMP
  // version this translation unit is compiled with.
  unsigned OpenMPVersion = Finder->getASTContext().getLangOpts().OpenMP;
  return llvm::omp::isAllowedClauseForDirective(Node.getDirectiveKind(), CKind,
                                                OpenMPVersion);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
raytrace.c |
#include "raytrace.h"
// File-scope sparse ray/pixel intersection structure, intended to be filled
// by get_intersections_and_lengths() and consumed by coverage().
// NOTE(review): the identically-named locals declared inside
// get_intersections_and_lengths() currently shadow these globals, so they
// are never assigned there — verify before relying on them.
int *numpix;    // per-ray count of intersected pixels (ray-major)
int *pixstart;  // per-ray offset into indices[]/lengths[] (ray-major)
int *indices;   // flattened pixel index of each intersection (ray-major)
float *lengths; // intersection length of each (ray, pixel) pair (ray-major)
int *numrays;   // per-pixel count of crossing rays (pixel-major)
int *raystart;  // per-pixel offset into rays[]/clens[] (pixel-major)
int *rays;      // ray index of each (pixel, ray) pair (pixel-major)
float *clens;   // intersection lengths, pixel-major transpose of lengths[]
void
coverage(
const float ozmin, const float oxmin, const float oymin,
const float zsize, const float xsize, const float ysize,
const int oz, const int ox, const int oy, const int ot,
const float *theta,
const float *h,
const float *v,
const float *line_weights,
const int dsize,
float *coverage_map)
{
get_intersections_and_lengths(
dsize, theta, h,
ox, oxmin, xsize,
oy, oymin, ysize);
back_project(ox*oy, numrays, raystart, rays, lengths,
coverage_map, line_weights);
}
void get_intersections_and_lengths(const int numprob, const float *theta, const float *rho,
const int nx, const float oxmin, const float xsize,
const int ny, const float oymin, const float ysize){
//PREPROCESSING FIRST PASS
int *numpix = malloc(sizeof *numpix * numprob);
int *pixstart = malloc(sizeof *pixstart * numprob);
float domain[4] = {oxmin, oxmin+xsize, oymin, oymin+ysize};
float res = xsize / nx;
#pragma omp parallel for
for(int k = 0; k < numprob; k++){ //FOR EACH RAY
numpix[k] = 0;
findnumpix(theta[k], rho[k], &numpix[k], &domain[0], res, nx, ny);
}
pixstart[0] = 0;
for(int k = 1; k < numprob; k++)pixstart[k] = pixstart[k-1]+numpix[k-1];
int totpix = pixstart[numprob-1]+numpix[numprob-1];
printf("totpx %d\n",totpix);
//PREPROCESSING SECOND PASS
int *indices = malloc(sizeof *indices * totpix);
float *lengths = malloc(sizeof *lengths * totpix);
#pragma omp parallel for
for(int k = 0; k < numprob; k++){ //FOR EACH RAY
numpix[k] = 0;
placepixels(theta[k], rho[k], &indices[pixstart[k]],
&lengths[pixstart[k]],
&numpix[k], &domain[0], res, nx, ny);
}
//TRANSPOSE DATA STRUCTURES FIRST PASS
int numunk = nx*ny;
int *numrays = malloc(sizeof *numrays * numunk);
int *raystart = malloc(sizeof *raystart * numunk);
#pragma omp parallel for
for(int n = 0; n < numunk; n++)numrays[n] = 0;
for(int k = 0; k < numprob; k++)
for(int m = 0; m < numpix[k]; m++)
numrays[indices[pixstart[k]+m]]++;
raystart[0] = 0;
for(int n = 1; n < numunk; n++)raystart[n] = raystart[n-1]+numrays[n-1];
int totrays = raystart[numunk-1]+numrays[numunk-1];
//TRANSPOSE DATA STRUCTURES SECOND PASS
int *rays = malloc(sizeof *rays * totrays);
float *clens = malloc(sizeof *clens * totrays);
#pragma omp parallel for
for(int n = 0; n < numunk; n++)numrays[n] = 0;
for(int k = 0; k < numprob; k++)
for(int m = 0; m < numpix[k]; m++){
int n = indices[pixstart[k]+m];
rays[raystart[n]+numrays[n]] = k;
clens[raystart[n]+numrays[n]] = lengths[pixstart[k]+m];
numrays[n]++;
}
}
void forward_project(
    const int numprob,
    const int * const numpix, const int * const pixstart,
    const int * const indices, const float * const lengths,
    const float * const object, float * const sinogram){
    /* Project the object onto the sinogram: for each ray, accumulate the
     * object values of every pixel it crosses, weighted by the intersection
     * length.  Rays are independent, so the loop parallelizes trivially. */
    #pragma omp parallel for
    for(int ray = 0; ray < numprob; ray++){
        const int base = pixstart[ray];
        float acc = sinogram[ray];
        for(int j = 0; j < numpix[ray]; j++)
            acc += object[indices[base+j]]*lengths[base+j];
        sinogram[ray] = acc;
    }
}
void back_project(
    const int numunk,
    const int * const numrays, const int * const raystart,
    const int * const rays, const float * const clens,
    float * const object, const float * const sinogram){
    /* Adjoint of forward_project: for each pixel, accumulate the sinogram
     * values of every ray crossing it, weighted by intersection length.
     * The pixel-major (transposed) structure means each pixel is written by
     * exactly one iteration, so no atomics are needed. */
    #pragma omp parallel for
    for(int pix = 0; pix < numunk; pix++){
        const int base = raystart[pix];
        float acc = object[pix];
        for(int j = 0; j < numrays[pix]; j++)
            acc += sinogram[rays[base+j]]*clens[base+j];
        object[pix] = acc;
    }
}
//domain is the [left, right, bottom, top] boundaries of the region
//res is the width of a pixel assumed to be cubic
// Counts into *numpix how many pixels of the nx-by-ny grid the ray
// (theta, rho) crosses.  Twin of placepixels(), which repeats the identical
// traversal while also recording pixel indices and intersection lengths;
// any change here must be mirrored there.
void findnumpix(float theta, float rho, int *numpix, float *domain, float res,
                int nx, int ny){
    // int numproc;
    // int myid;
    // MPI_Comm_size(MPI_COMM_WORLD,&numproc);
    // MPI_Comm_rank(MPI_COMM_WORLD,&myid);
    float raylength = 1e6; // long finite segment standing in for an infinite ray
    //RAY'S VECTOR REPRESENTAION
    // Segment (x,y) + u*(dx,dy), u in [0,1], of length `raylength`, centered
    // on the point at signed distance rho from the origin along angle theta.
    float x = rho*cosf(theta)+0.5*raylength*sinf(theta);
    float y = rho*sinf(theta)-0.5*raylength*cosf(theta);
    float dx = -raylength*sinf(theta);
    float dy = +raylength*cosf(theta);
    //TOP LEVEL
    // Check if this ray passes on my rank's output domain region
    // (Liang-Barsky style parametric clipping against the domain box).
    float p[4] = {-dx,dx,-dy,dy};
    float q[4] = {x-domain[0],domain[1]-x,y-domain[2],domain[3]-y};
    float u1 = -1*INFINITY; // entry parameter
    float u2 = INFINITY;    // exit parameter
    bool pass = true;
    int inid = 0; // side of entry: 0=left, 1=right, 2=bottom, 3=top
    for(int k = 0; k < 4; k++)
        if(p[k] == 0){
            // Ray parallel to this boundary: reject if outside of it.
            if(q[k] < 0){
                pass = false;
                break;
            }
        }else{
            float t = q[k]/p[k];
            if(p[k] < 0 && u1 < t){
                u1 = t;
                inid = k;
            }
            else if(p[k] > 0 && u2 > t)
                u2 = t;
        }
    if(u1 > u2 || u1 > 1 || u1 < 0) pass = false;
    //IF RAY COLLIDES WITH DOMAIN
    if(pass){
        //FIND THE INITIAL PIXEL
        // Locate the pixel where the ray enters, based on the entry side.
        int init = 0;
        int initx = 0;
        int inity = 0;
        if(inid == 0){ /// left
            initx = 0;
            inity = (int)((y+u1*dy-domain[2])/res);
        }
        if(inid == 1){ /// right
            initx = nx-1;
            inity = (int)((y+u1*dy-domain[2])/res);
        }
        if(inid == 2){ /// bottom
            initx = (int)((x+u1*dx-domain[0])/res);
            inity = 0;
        }
        if(inid == 3){ /// top
            initx = (int)((x+u1*dx-domain[0])/res);
            inity = ny - 1;
        }
        // (px, py) is the center of the current pixel (initx, inity).
        float px = domain[0] + initx*res+res/2;
        float py = domain[2] + inity*res+res/2;
        //TRACE RAY WHILE IT IS IN THE DOMAIN
        while(px > domain[0] && px < domain[1] && py < domain[3] && py > domain[2]){
            int exid = 0;
            // Clip the ray against the current pixel's four walls.
            q[0] = x-(px-res/2);
            q[1] = (px+res/2)-x;
            q[2] = y-(py-res/2);
            q[3] = (py+res/2)-y;
            u1 = -1*INFINITY;
            u2 = INFINITY;
            for(int k = 0; k < 4; k++){
                float t = q[k]/p[k];
                if(p[k] < 0 && u1 < t)
                    u1 = t;
                else if(p[k] > 0 && u2 > t){
                    u2 = t;
                    exid = k;
                }
            } /// u2-u1 = ray length in first/current pixel (px,py)
            /// exid shows which side this ray exits
            //ADD CONTRIBUTION FROM CURRENT PIXEL
            //int z = unkmap[inity*len+initx];
            //int z = encode(initx,inity);
            //int z = inity*nx+initx;
            *numpix = *numpix + 1;
            //FIND NEXT PIXEL
            // Step into the neighboring pixel across the exit side.
            if(exid == 0){
                initx = initx-1;
                px = px - res;
            }
            if(exid == 1){
                initx = initx+1;
                px = px + res;
            }
            if(exid == 2){
                inity = inity-1;
                py = py - res;
            }
            if(exid == 3){
                inity = inity+1;
                py = py + res;
            }
        }/// Done with the tracing for this ray/measurement */
    } /// Does this ray pass or not
}
// Records, for the ray (theta, rho), the flattened index and intersection
// length of every pixel it crosses: indices[i]/weights[i] for i in
// [0, *numpix).  *numpix must enter as 0 and acts as the write cursor.
// Twin of findnumpix(), which performs the identical traversal to size the
// output arrays; any change here must be mirrored there.
void placepixels(float theta, float rho, int *indices, float *weights,
                 int *numpix, float *domain, float res,
                 int nx, int ny){
    // int numproc;
    // int myid;
    // MPI_Comm_size(MPI_COMM_WORLD,&numproc);
    // MPI_Comm_rank(MPI_COMM_WORLD,&myid);
    float raylength = 1e6; // long finite segment standing in for an infinite ray
    //RAY'S VECTOR REPRESENTAION
    // Segment (x,y) + u*(dx,dy), u in [0,1], of length `raylength`, centered
    // on the point at signed distance rho from the origin along angle theta.
    float x = rho*cosf(theta)+0.5*raylength*sinf(theta);
    float y = rho*sinf(theta)-0.5*raylength*cosf(theta);
    float dx = -raylength*sinf(theta);
    float dy = +raylength*cosf(theta);
    //TOP LEVEL
    // Check if this ray passes on my rank's output domain region
    // (Liang-Barsky style parametric clipping against the domain box).
    float p[4] = {-dx,dx,-dy,dy};
    float q[4] = {x-domain[0],domain[1]-x,y-domain[2],domain[3]-y};
    float u1 = -1*INFINITY; // entry parameter
    float u2 = INFINITY;    // exit parameter
    bool pass = true;
    int inid = 0; // side of entry: 0=left, 1=right, 2=bottom, 3=top
    for(int k = 0; k < 4; k++)
        if(p[k] == 0){
            // Ray parallel to this boundary: reject if outside of it.
            if(q[k] < 0){
                pass = false;
                break;
            }
        }else{
            float t = q[k]/p[k];
            if(p[k] < 0 && u1 < t){
                u1 = t;
                inid = k;
            }
            else if(p[k] > 0 && u2 > t)
                u2 = t;
        }
    if(u1 > u2 || u1 > 1 || u1 < 0) pass = false;
    //IF RAY COLLIDES WITH DOMAIN
    if(pass){
        //FIND THE INITIAL PIXEL
        // Locate the pixel where the ray enters, based on the entry side.
        int init = 0;
        int initx = 0;
        int inity = 0;
        if(inid == 0){ /// left
            initx = 0;
            inity = (int)((y+u1*dy-domain[2])/res);
        }
        if(inid == 1){ /// right
            initx = nx-1;
            inity = (int)((y+u1*dy-domain[2])/res);
        }
        if(inid == 2){ /// bottom
            initx = (int)((x+u1*dx-domain[0])/res);
            inity = 0;
        }
        if(inid == 3){ /// top
            initx = (int)((x+u1*dx-domain[0])/res);
            inity = ny - 1;
        }
        // (px, py) is the center of the current pixel (initx, inity).
        float px = domain[0] + initx*res+res/2;
        float py = domain[2] + inity*res+res/2;
        //TRACE RAY WHILE IT IS IN THE DOMAIN
        while(px > domain[0] && px < domain[1] && py < domain[3] && py > domain[2]){
            int exid = 0;
            // Clip the ray against the current pixel's four walls.
            q[0] = x-(px-res/2);
            q[1] = (px+res/2)-x;
            q[2] = y-(py-res/2);
            q[3] = (py+res/2)-y;
            u1 = -1*INFINITY;
            u2 = INFINITY;
            for(int k = 0; k < 4; k++){
                float t = q[k]/p[k];
                if(p[k] < 0 && u1 < t)
                    u1 = t;
                else if(p[k] > 0 && u2 > t){
                    u2 = t;
                    exid = k;
                }
            } /// u2-u1 = ray length in first/current pixel (px,py)
            /// exid shows which side this ray exits
            //ADD CONTRIBUTION FROM CURRENT PIXEL
            //int z = unkmap[inity*len+initx];
            //int z = encode(initx,inity);
            // Row-major flattened pixel index; (u2-u1) is the parametric
            // span inside this pixel, scaled back to world units.
            int z = inity*nx+initx;
            indices[*numpix] = z;
            weights[*numpix] = (u2-u1)*raylength;
            *numpix = *numpix + 1;
            //FIND NEXT PIXEL
            // Step into the neighboring pixel across the exit side.
            if(exid == 0){
                initx = initx-1;
                px = px - res;
            }
            if(exid == 1){
                initx = initx+1;
                px = px + res;
            }
            if(exid == 2){
                inity = inity-1;
                py = py - res;
            }
            if(exid == 3){
                inity = inity+1;
                py = py + res;
            }
        }/// Done with the tracing for this ray/measurement */
    } /// Does this ray pass or not
}
|
detector.c | #include "darknet.h"
#include <unistd.h> ////0
#include <dirent.h> ////0
#include <stdlib.h> ////0
#include <sys/stat.h> ////0
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
network **nets = calloc(ngpus, sizeof(network));
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
pthread_t load_thread = load_data(args);
double time;
int count = 0;
//while(i*imgs < N*120){
while(get_current_batch(net) < net->max_batches){
if(l.random && count++%10 == 0){
printf("Resizing\n");
int dim = (rand() % 10 + 10) * 32;
if (get_current_batch(net)+200 > net->max_batches) dim = 608;
//int dim = (rand() % 4 + 16) * 32;
printf("%d\n", dim);
args.w = dim;
args.h = dim;
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for(i = 0; i < ngpus; ++i){
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
time=what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
time=what_time_is_it_now();
float loss = 0;
#ifdef GPU
if(ngpus == 1){
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
#else
loss = train_network(net, train);
#endif
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
if(i%100==0){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
}
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
}
static int get_coco_image_id(char *filename)
{
    /* COCO files are named like ".../COCO_val2014_000000000139.jpg"; the
     * numeric id is the text after the last '_' (or, failing that, after
     * the last '/').  atoi() stops parsing at the ".jpg" suffix. */
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if(c) p = c;
    /* Robustness fix: if the name contains neither separator, parse the
     * whole string instead of dereferencing NULL+1 (undefined behavior). */
    if(!p) return atoi(filename);
    return atoi(p+1);
}
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    /* Emit one COCO-format JSON record per (detection, class) pair with a
     * nonzero score: boxes are converted from center/size form to
     * [x, y, width, height] and clamped to the w-by-h image. */
    int image_id = get_coco_image_id(image_path);
    for(int i = 0; i < num_boxes; ++i){
        box b = dets[i].bbox;
        float x0 = b.x - b.w/2.;
        float x1 = b.x + b.w/2.;
        float y0 = b.y - b.h/2.;
        float y1 = b.y + b.h/2.;
        // Clamp to the image borders.
        if (x0 < 0) x0 = 0;
        if (y0 < 0) y0 = 0;
        if (x1 > w) x1 = w;
        if (y1 > h) y1 = h;
        for(int j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], x0, y0, x1 - x0, y1 - y0, dets[i].prob[j]);
        }
    }
}
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    /* VOC-style output: for every class j with a nonzero score, append
     * "<id> <score> <xmin> <ymin> <xmax> <ymax>" to that class's file.
     * Coordinates are shifted to 1-based and clamped to the image. */
    for(int i = 0; i < total; ++i){
        box b = dets[i].bbox;
        float x0 = b.x - b.w/2. + 1;
        float x1 = b.x + b.w/2. + 1;
        float y0 = b.y - b.h/2. + 1;
        float y1 = b.y + b.h/2. + 1;
        if (x0 < 1) x0 = 1;
        if (y0 < 1) y0 = 1;
        if (x1 > w) x1 = w;
        if (y1 > h) y1 = h;
        for(int j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j],
                    x0, y0, x1, y1);
        }
    }
}
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    /* ImageNet-detection output: one line per nonzero score, as
     * "<image id> <1-based class> <score> <xmin> <ymin> <xmax> <ymax>",
     * with the box clamped to the w-by-h image. */
    for(int i = 0; i < total; ++i){
        box b = dets[i].bbox;
        float x0 = b.x - b.w/2.;
        float x1 = b.x + b.w/2.;
        float y0 = b.y - b.h/2.;
        float y1 = b.y + b.h/2.;
        if (x0 < 0) x0 = 0;
        if (y0 < 0) y0 = 0;
        if (x1 > w) x1 = w;
        if (y1 > h) y1 = h;
        for(int j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[j],
                    x0, y0, x1, y1);
        }
    }
}
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    /* Validate with horizontal-flip test-time augmentation: each image is
     * fed twice (original + mirrored) stacked into one batch-of-2 input,
     * and detections are written in coco/imagenet/voc result format.
     * NOTE(review): fp is closed only in the coco branch; the imagenet
     * stream relies on flush-at-exit — confirm callers exit afterwards. */
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf); // optional class-id remapping
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 2); // batch of 2: the image and its flip
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);
    layer l = net->layers[net->n-1];
    int classes = l.classes;
    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    // Select the output format and open the result file(s).
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        // VOC: one result file per class.
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }
    int m = plist->size;
    int i=0;
    int t;
    float thresh = .005; // very low: keep nearly all boxes for evaluation
    float nms = .45;
    int nthreads = 4; // images prefetched nthreads at a time in background
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
    // Double-depth input: the image and its flip stacked on the channel axis.
    image input = make_image(net->w, net->h, net->c*2);
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;
    // Prime the first round of loader threads.
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // Collect the images loaded in the previous round...
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // ...and immediately start loading the next round.
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            // First half of input: the letterboxed image; second half: the
            // same image flipped horizontally.
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int num = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
            if (nms) do_nms_sort(dets, num, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, num, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // Back up over the trailing ",\n" before closing the JSON array.
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    /* Run single-pass validation over the "valid" image list and write
     * detections in coco/imagenet/voc result format.  Same pipeline as
     * validate_detector_flip() but without flip augmentation (batch of 1).
     * NOTE(review): fp is closed only in the coco branch; the imagenet
     * stream relies on flush-at-exit — confirm callers exit afterwards. */
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf); // optional class-id remapping
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);
    layer l = net->layers[net->n-1];
    int classes = l.classes;
    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    // Select the output format and open the result file(s).
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        // VOC: one result file per class.
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }
    int m = plist->size;
    int i=0;
    int t;
    float thresh = .005; // very low: keep nearly all boxes for evaluation
    float nms = .45;
    int nthreads = 4; // images prefetched nthreads at a time in background
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;
    // Prime the first round of loader threads.
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        // Collect the images loaded in the previous round...
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        // ...and immediately start loading the next round.
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) do_nms_sort(dets, nboxes, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        // Back up over the trailing ",\n" before closing the JSON array.
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector_recall(char *cfgfile, char *weightfile)
{
    /* Measure proposal recall on data/coco_val_5k.list: for every ground-
     * truth box, find the best-IoU detection above `thresh` and count it
     * correct when that IoU exceeds iou_thresh.  Prints running proposal
     * count, average IoU and recall per image. */
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths("data/coco_val_5k.list");
    char **paths = (char **)list_to_array(plist);

    int j, k;

    int m = plist->size;
    int i=0;

    float thresh = .001;
    float iou_thresh = .5;
    float nms = .4;

    int total = 0;
    int correct = 0;
    int proposals = 0;
    float avg_iou = 0;

    for(i = 0; i < m; ++i){
        char *path = paths[i];
        image orig = load_image_color(path, 0, 0);
        image sized = resize_image(orig, net->w, net->h);
        char *id = basecfg(path);
        network_predict(net, sized.data);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
        if (nms) do_nms_obj(dets, nboxes, 1, nms);

        // Derive the label path from the image path.
        char labelpath[4096];
        find_replace(path, "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);

        int num_labels = 0;
        box_label *truth = read_boxes(labelpath, &num_labels);
        for(k = 0; k < nboxes; ++k){
            if(dets[k].objectness > thresh){
                ++proposals;
            }
        }
        for (j = 0; j < num_labels; ++j) {
            ++total;
            box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
            float best_iou = 0;
            /* Bug fix: iterate over the nboxes detections actually returned
             * by get_network_boxes(); the original bound l.w*l.h*l.n could
             * read past the end of dets[]. */
            for(k = 0; k < nboxes; ++k){
                float iou = box_iou(dets[k].bbox, t);
                if(dets[k].objectness > thresh && iou > best_iou){
                    best_iou = iou;
                }
            }
            avg_iou += best_iou;
            if(best_iou > iou_thresh){
                ++correct;
            }
        }

        fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
        /* Leak fixes: detections and label boxes were never released. */
        free(truth);
        free_detections(dets, nboxes);
        free(id);
        free_image(orig);
        free_image(sized);
    }
}
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen, char *idir, char *odir) ////0
//void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen) //1
{
    /* Run detection on images.  Three modes:
     *  - filename given: detect that one image, save it, and exit;
     *  - idir/odir given: detect every file in directory idir and save the
     *    annotated images under odir ("////0" marks this fork's additions);
     *  - otherwise: prompt interactively for image paths. */
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);

    image **alphabet = load_alphabet(); // glyphs used to render box labels
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;
    char buff[256];
    char *input = buff;
    float nms=.45;
    while(1){
        if(filename){
            strncpy(input, filename, 256);
        } else {
            /*printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n");*/ //1
            /////////////0
            if(!idir || !odir) // original behavior: prompt for image paths
            {
                printf("Enter Image Path: ");
                fflush(stdout);
                input = fgets(input, 256, stdin);
                if(!input)
                    return;
                strtok(input, "\n");
            }
            else
            {
                // batch mode: both input and output directories supplied
                //idir && odir
                char imagepath[512];
                char savedir[512];
                struct dirent *imagename; //readdir return
                DIR *dir; /////////
                dir = opendir(idir);
                // iterate over every entry of the input directory
                while((imagename=readdir(dir))!= NULL)
                {
                    // skip the "." and ".." entries
                    if(!strcmp(imagename->d_name,".")||!strcmp(imagename->d_name,".."))
                        continue;
                    sprintf(imagepath,"%s%s",idir,imagename->d_name);
                    image im = load_image_color(imagepath, 0, 0);
                    image sized = letterbox_image(im, net->w, net->h);
                    layer l = net->layers[net->n-1];

                    float *X = sized.data;
                    time=what_time_is_it_now();
                    network_predict(net, X);
                    printf("%s: Predicted in %f seconds.\n", imagepath, what_time_is_it_now()-time);
                    int nboxes = 0;
                    detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
                    if (nms)
                        do_nms_sort(dets, nboxes, l.classes, nms);
                    //****modified0612******//
                    //draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes);
                    draw_detections_person(imagename->d_name, odir, im, dets, nboxes, thresh, names, alphabet, l.classes);
                    free_detections(dets, nboxes);
                    char imagesdir[512];
                    sprintf(imagesdir,"%s%s",odir,"images/");
                    sprintf(savedir,"%s%s",imagesdir,imagename->d_name);
                    //strcat(odir, imagename->d_name);
                    int k = 0;
                    // Strip a trailing ".jpg"-style extension (any run of
                    // the characters j/p/g/.) from the save path.
                    for (k = strlen(savedir)-1; k>=0; k--)
                    {
                        if((savedir[k]!='j')&&(savedir[k]!='p')&&(savedir[k]!='g')&&(savedir[k]!='.'))
                        {
                            break;
                        }
                        else
                        {
                            savedir[k] = '\0';
                        }
                    }
                    save_image(im, savedir);
                    printf("image saved success!\n");
                    free_image(im);
                    free_image(sized);
                }
                closedir(dir);
                break; // batch mode done: leave the outer loop
            }
        }
        image im = load_image_color(input,0,0);
        image sized = letterbox_image(im, net->w, net->h);
        //image sized = resize_image(im, net->w, net->h);
        //image sized2 = resize_max(im, net->w);
        //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
        //resize_network(net, sized.w, sized.h);
        layer l = net->layers[net->n-1];

        float *X = sized.data;
        time=what_time_is_it_now();
        network_predict(net, X);
        printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
        int nboxes = 0;
        detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        //printf("%d\n", nboxes);
        //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
        if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
        //draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes); //1
        draw_detections_person(input, odir, im, dets, nboxes, thresh, names, alphabet, l.classes); ////0
        free_detections(dets, nboxes);
        if(outfile){
            save_image(im, outfile);
        }
        else{
            save_image(im, "predictions");
#ifdef OPENCV
            make_window("predictions", 512, 512, 0);
            show_image(im, "predictions", 0);
#endif
        }

        free_image(im);
        free_image(sized);
        if (filename) break;
    }
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
/*
 * run_detector - CLI entry point for the detector subcommands.
 *
 * Parses optional flags first, then dispatches on argv[2]:
 * test / train / valid / valid2 / recall / demo.
 *
 * NOTE(review): the find_*_arg helpers appear to consume their flag (and
 * value) from argv before the positional reads below — confirm, since
 * datacfg/cfg/weights are taken positionally from argv[3..5] afterwards
 * and reordering these calls would change which tokens are positional.
 */
void run_detector(int argc, char **argv)
{
// optional flags; each has a default used when the flag is absent
char *prefix = find_char_arg(argc, argv, "-prefix", 0);
float thresh = find_float_arg(argc, argv, "-thresh", .5);
float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
int cam_index = find_int_arg(argc, argv, "-c", 0);
int frame_skip = find_int_arg(argc, argv, "-s", 0);
int avg = find_int_arg(argc, argv, "-avg", 3);
if(argc < 4){
fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
return;
}
char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
char *outfile = find_char_arg(argc, argv, "-out", 0);
int *gpus = 0;
int gpu = 0;
int ngpus = 0;
if(gpu_list){
// "-gpus 0,1,2" -> parse the comma-separated list into gpus[]
printf("%s\n", gpu_list);
int len = strlen(gpu_list);
ngpus = 1;
int i;
for(i = 0; i < len; ++i){
if (gpu_list[i] == ',') ++ngpus;
}
// NOTE(review): this calloc is never freed before return — confirm
// whether the leak matters given the process exits after dispatch
gpus = calloc(ngpus, sizeof(int));
for(i = 0; i < ngpus; ++i){
gpus[i] = atoi(gpu_list);
gpu_list = strchr(gpu_list, ',')+1;
}
} else {
// no -gpus flag: fall back to the single global gpu_index
gpu = gpu_index;
gpus = &gpu;
ngpus = 1;
}
int clear = find_arg(argc, argv, "-clear");
int fullscreen = find_arg(argc, argv, "-fullscreen");
int width = find_int_arg(argc, argv, "-w", 0);
int height = find_int_arg(argc, argv, "-h", 0);
int fps = find_int_arg(argc, argv, "-fps", 0);
//int class = find_int_arg(argc, argv, "-class", 0);
// positional arguments (after flag consumption)
char *datacfg = argv[3];
char *cfg = argv[4];
char *weights = (argc > 5) ? argv[5] : 0;
//char *filename = (argc > 6) ? argv[6]: 0; //1
// local modification: input image / directories come from flags, not argv[6]
char *filename = find_char_arg(argc, argv, "-input", 0); ////0
char *idir = find_char_arg(argc, argv, "-idir", 0); ////0
char *odir = find_char_arg(argc, argv, "-odir", 0); ////0
if(0==strcmp(argv[2], "test"))
//test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen); //1
test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen, idir, odir); ////0
else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
else if(0==strcmp(argv[2], "demo")) {
// demo mode needs the class names resolved from the data cfg
list *options = read_data_cfg(datacfg);
int classes = option_find_int(options, "classes", 20);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
}
//else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
//else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
|
explicit_solver_strategy.h | //
// Authors:
// Miguel Angel Celigueta maceli@cimne.upc.edu
// Miquel Santasusana msantasusana@cimne.upc.edu
//
#if !defined(KRATOS_EXPLICIT_SOLVER_STRATEGY)
#define KRATOS_EXPLICIT_SOLVER_STRATEGY
// Project includes
#include "utilities/timer.h"
#include "custom_elements/Particle_Contact_Element.h"
#include "includes/variables.h"
#include "includes/deprecated_variables.h"
/* System includes */
#include <limits>
#include <iostream>
#include <iomanip>
#include <time.h>
/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
#define CUSTOMTIMER 0 // ACTIVATES AND DISABLES ::TIMER:::::
#include "includes/define.h"
#include "utilities/openmp_utils.h"
#include "includes/model_part.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "solving_strategies/schemes/scheme.h"
#include "custom_strategies/schemes/dem_integration_scheme.h"
#include "custom_utilities/create_and_destroy.h"
#include "custom_utilities/dem_fem_utilities.h"
#include "custom_utilities/GeometryFunctions.h"
#include "custom_utilities/inlet.h"
#include "custom_elements/cluster3D.h"
#include "custom_elements/rigid_body_element.h"
////Cfeng
#include "custom_utilities/dem_fem_search.h"
#include "custom_utilities/discrete_particle_configure.h"
#include "custom_utilities/rigid_face_geometrical_object_configure.h"
#ifdef USING_CGAL
#include <CGAL/spatial_sort.h>
#endif
/* Timer defines */
#ifdef CUSTOMTIMER
#define KRATOS_TIMER_START(t) Timer::Start(t);
#define KRATOS_TIMER_STOP(t) Timer::Stop(t);
#else
#define KRATOS_TIMER_START(t)
#define KRATOS_TIMER_STOP(t)
#endif
namespace Kratos {
class ExplicitSolverSettings {
public:
KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverSettings);
ExplicitSolverSettings() {
}
~ExplicitSolverSettings() {
}
ModelPart* r_model_part;
ModelPart* contact_model_part;
ModelPart* fem_model_part;
ModelPart* cluster_model_part;
ModelPart* inlet_model_part;
};
// Explicit time-integration strategy for the DEM application. Owns the
// spheres/contact/FEM/cluster/inlet model parts, the spatial-search helpers
// and the flat particle lists, and declares the per-step pipeline
// (search -> forces -> time integration) implemented in the .cpp.
class KRATOS_API(DEM_APPLICATION) ExplicitSolverStrategy {
public:
// --- container typedefs borrowed from ModelPart ---
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ElementsArrayType::iterator ElementsIterator;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ModelPart::NodesContainerType::ContainerType NodesContainerType;
typedef ModelPart::ElementsContainerType::ContainerType ElementsContainerType;
typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType;
// --- spatial-search result typedefs ---
typedef SpatialSearch::ResultElementsContainerType ResultElementsContainerType;
typedef SpatialSearch::VectorResultElementsContainerType VectorResultElementsContainerType;
typedef SpatialSearch::RadiusArrayType RadiusArrayType;
typedef SpatialSearch::DistanceType DistanceType;
typedef SpatialSearch::VectorDistanceType VectorDistanceType;
typedef SpatialSearch::ResultConditionsContainerType ResultConditionsContainerType;
typedef SpatialSearch::VectorResultConditionsContainerType VectorResultConditionsContainerType;
typedef PointerVectorSet<Properties, IndexedObject> PropertiesContainerType;
typedef PropertiesContainerType::iterator PropertiesIterator;
typedef DiscreteParticleConfigure<3> ElementConfigureType;
typedef RigidFaceGeometricalObjectConfigure<3> RigidFaceGeometricalConfigureType;
typedef Variable<double> ComponentOf3ComponentsVariableType;
/// Pointer definition of ExplicitSolverStrategy
KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverStrategy);
// Default constructor: leaves the strategy unconfigured; only useful for
// serialization / derived-class plumbing.
ExplicitSolverStrategy() {
}
// Main constructor: wires the settings' model parts and the search helpers
// into the strategy. Every model part pointer is validated; a missing one
// aborts via KRATOS_THROW_ERROR because the solver cannot run without it.
ExplicitSolverStrategy(ExplicitSolverSettings& settings,
const double max_delta_time,
const int n_step_search,
const double safety_factor,
const int delta_option,
ParticleCreatorDestructor::Pointer p_creator_destructor,
DEM_FEM_Search::Pointer p_dem_fem_search,
SpatialSearch::Pointer pSpSearch,
Parameters strategy_parameters) {
mParameters = strategy_parameters;
mDeltaOption = delta_option;
mpParticleCreatorDestructor = p_creator_destructor;
mpDemFemSearch = p_dem_fem_search;
mpSpSearch = pSpSearch;
// neighbour search can be disabled entirely from the input parameters
if(mParameters["do_search_neighbours"].GetBool()) mDoSearchNeighbourElements = true;
else mDoSearchNeighbourElements = false;
p_creator_destructor->SetDoSearchNeighbourElements(mDoSearchNeighbourElements);
mMaxTimeStep = max_delta_time;
mNStepSearch = n_step_search;
mSafetyFactor = safety_factor;
// unpack and validate the five model parts from the settings object
mpDem_model_part = &(*(settings.r_model_part));
if (mpDem_model_part == NULL)
KRATOS_THROW_ERROR(std::runtime_error, "Undefined settings.r_model_part in ExplicitSolverStrategy constructor", "")
mpContact_model_part = &(*(settings.contact_model_part));
if (mpContact_model_part == NULL)
KRATOS_THROW_ERROR(std::runtime_error, "Undefined settings.contact_model_part in ExplicitSolverStrategy constructor", "")
mpFem_model_part = &(*(settings.fem_model_part));
if (mpFem_model_part == NULL)
KRATOS_THROW_ERROR(std::runtime_error, "Undefined settings.fem_model_part in ExplicitSolverStrategy constructor", "")
mpCluster_model_part = &(*(settings.cluster_model_part));
if (mpCluster_model_part == NULL)
KRATOS_THROW_ERROR(std::runtime_error, "Undefined settings.cluster_model_part in ExplicitSolverStrategy constructor", "")
mpInlet_model_part = &(*(settings.inlet_model_part));
if (mpInlet_model_part == NULL)
KRATOS_THROW_ERROR(std::runtime_error, "Undefined settings.inlet_model_part in ExplicitSolverStrategy constructor", "")
if(mParameters["RemoveBallsInitiallyTouchingWalls"].GetBool()) mRemoveBallsInitiallyTouchingWallsOption = true;
else mRemoveBallsInitiallyTouchingWallsOption = false;
}
/// Destructor.
virtual ~ExplicitSolverStrategy() {
//Timer::SetOuputFile("TimesPartialRelease");
//Timer::PrintTimingInformation();
}
// Per-axis particle comparators used as traits for CGAL spatial sorting.
struct LessX {
bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[0] < q->GetGeometry()[0].Coordinates()[0];}
};
struct LessY {
bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[1] < q->GetGeometry()[0].Coordinates()[1];}
};
struct LessZ {
bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[2] < q->GetGeometry()[0].Coordinates()[2];}
};
struct SpatialSortingTraits {
typedef SphericParticle* Point_2;
typedef LessX Less_x_2;
typedef LessY Less_y_2;
typedef LessZ Less_z_2;
Less_x_2 less_x_2_object() const {return Less_x_2();}
Less_y_2 less_y_2_object() const {return Less_y_2();}
Less_z_2 less_z_2_object() const { return Less_z_2();}
};
#ifdef USING_CGAL
// Reorders mListOfSphericParticles for spatial locality (CGAL only).
void ReorderParticles() {
SpatialSortingTraits sst;
CGAL::spatial_sort(mListOfSphericParticles.begin(), mListOfSphericParticles.end(), sst);
}
#endif
// Rebuilds a flat vector of raw T* particle pointers from the element
// container; entries whose dynamic type is not T become NULL (dynamic_cast).
template <class T>
void RebuildListOfSphericParticles(ElementsArrayType& pElements, std::vector<T*>& rCustomListOfParticles){
KRATOS_TRY
rCustomListOfParticles.resize(pElements.size());
#pragma omp parallel for
for (int k = 0; k < (int)pElements.size(); k++){
ElementsArrayType::iterator particle_pointer_it = pElements.ptr_begin() + k;
T* spheric_particle = dynamic_cast<T*>(&(*particle_pointer_it));
rCustomListOfParticles[k] = spheric_particle;
}
return;
KRATOS_CATCH("")
}
void RebuildListOfDiscontinuumSphericParticles() {
RebuildListOfSphericParticles<SphericParticle>(GetModelPart().GetCommunicator().LocalMesh().Elements(), mListOfSphericParticles);
}
// --- solver pipeline (implemented in the .cpp) ---
void RebuildPropertiesProxyPointers(std::vector<SphericParticle*>& rCustomListOfSphericParticles);
void SendProcessInfoToClustersModelPart();
void UpdateMaxIdOfCreatorDestructor();
void RepairPointersToNormalProperties(std::vector<SphericParticle*>& rCustomListOfSphericParticles);
virtual void Initialize();
virtual void AttachSpheresToStickyWalls();
virtual void DisplayThreadInfo();
virtual void CalculateMaxTimeStep();
double CalculateMaxInletTimeStep();
virtual void InitializeClusters();
virtual void GetClustersForce();
virtual void GetRigidBodyElementsForce();
virtual double SolveSolutionStep();
void SearchDEMOperations(ModelPart& r_model_part, bool has_mpi = true);
void SearchFEMOperations(ModelPart& r_model_part, bool has_mpi = true) ;
virtual void ForceOperations(ModelPart& r_model_part);
void InitialTimeStepCalculation(); //TODO: remove this one
void GetForce();
void FastGetForce();
virtual void PerformTimeIntegrationOfMotion(int StepFlag = 0);
void InitializeSolutionStep();
virtual void BoundingBoxUtility(bool is_time_to_mark_and_remove = true);
virtual void FinalizeSolutionStep();
void InitializeElements();
void InitializeDEMElements();
void InitializeFEMElements();
//void InitializeRigidBodyElements();
void InitializeFEMWallsAsRigidBodyElements(ModelPart::SubModelPartsContainerType::iterator& sub_model_part);
void MarkToDeleteAllSpheresInitiallyIndentedWithFEM(ModelPart& rSpheresModelPart);
void ComputeNodalArea();
void ComputeNormalPressureVectorField();
virtual void CalculateConditionsRHSAndAdd();
void ClearFEMForces();
void CalculateNodalPressuresAndStressesOnWalls();
void SetFlagAndVariableToNodes(const Kratos::Flags& r_flag_name, ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array);
void SetVariableToNodes(ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array);
void ResetPrescribedMotionFlagsRespectingImposedDofs();
void ApplyPrescribedBoundaryConditions();
void ApplyInitialConditions();
void SetSearchRadiiOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0);
void SetNormalRadiiOnAllParticles(ModelPart& r_model_part);
void SetSearchRadiiWithFemOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0);
virtual void SearchNeighbours();
virtual void ComputeNewNeighboursHistoricalData();
virtual void CreateContactElements();
void InitializeContactElements();
// void ContactInitializeSolutionStep();
void PrepareContactElementsForPrinting();
virtual void ComputeNewRigidFaceNeighboursHistoricalData();
virtual void SearchRigidFaceNeighbours();
void CheckHierarchyWithCurrentNeighbours();
/* This should work only with one iteration, but it with mpi does not */
void CalculateInitialMaxIndentations(ProcessInfo& r_process_info);
void PrepareContactModelPart(ModelPart& r_model_part, ModelPart& mcontacts_model_part);
void PrepareElementsForPrinting();
void SynchronizeHistoricalVariables(ModelPart& r_model_part);
void SynchronizeRHS(ModelPart& r_model_part);
void CleanEnergies();
// --- accessors ---
ModelPart& GetModelPart() { return (*mpDem_model_part);}
ModelPart& GetFemModelPart() { return (*mpFem_model_part);}
ModelPart& GetContactModelPart() { return (*mpContact_model_part);}
ModelPart& GetClusterModelPart() { return (*mpCluster_model_part);}
ModelPart& GetInletModelPart() { return (*mpInlet_model_part);}
ModelPart& GetRigidBodyModelPart() { return (*mpRigidBody_model_part);}
VectorResultElementsContainerType& GetResults() { return (mResults);}
VectorDistanceType& GetResultsDistances() { return (mResultsDistances);}
RadiusArrayType& GetArrayOfAmplifiedRadii() { return (mArrayOfAmplifiedRadii);}
int& GetNStepSearch() { return (mNStepSearch);}
int& GetSearchControl() { return mSearchControl;}
int& GetNumberOfThreads() { return (mNumberOfThreads);}
double& GetMaxTimeStep() { return (mMaxTimeStep);}
double& GetSafetyFactor() { return (mSafetyFactor);}
int& GetDeltaOption() { return (mDeltaOption);}
std::vector<unsigned int>& GetElementPartition() { return (mElementPartition);}
ParticleCreatorDestructor::Pointer& GetParticleCreatorDestructor() { return (mpParticleCreatorDestructor);}
SpatialSearch::Pointer& GetSpSearch() { return (mpSpSearch);}
VectorResultConditionsContainerType& GetRigidFaceResults() { return (mRigidFaceResults);}
VectorDistanceType& GetRigidFaceResultsDistances() { return (mRigidFaceResultsDistances);}
std::vector<unsigned int>& GetConditionPartition() { return (mConditionPartition);}
DEM_FEM_Search::Pointer& GetDemFemSearch() { return (mpDemFemSearch);}
virtual ElementsArrayType& GetElements(ModelPart& r_model_part) { return r_model_part.GetCommunicator().LocalMesh().Elements();}
virtual ElementsArrayType& GetAllElements(ModelPart& r_model_part) {
return r_model_part.Elements();
}
protected:
// --- configuration and state ---
Parameters mParameters;
bool mRemoveBallsInitiallyTouchingWallsOption;
VectorResultElementsContainerType mResults;
VectorDistanceType mResultsDistances;
RadiusArrayType mArrayOfAmplifiedRadii;
int mNStepSearch;
int mSearchControl;
int mNumberOfThreads;
double mMaxTimeStep;
double mSafetyFactor;
int mDeltaOption;
std::vector<unsigned int> mElementPartition;
ParticleCreatorDestructor::Pointer mpParticleCreatorDestructor;
DEM_FEM_Search::Pointer mpDemFemSearch;
SpatialSearch::Pointer mpSpSearch;
bool mDoSearchNeighbourElements;
VectorResultConditionsContainerType mRigidFaceResults;
VectorDistanceType mRigidFaceResultsDistances;
std::vector<unsigned int> mConditionPartition;
// raw model part pointers (not owned; lifetime managed by the caller)
ModelPart *mpFem_model_part;
ModelPart *mpDem_model_part;
ModelPart *mpInlet_model_part;
ModelPart *mpContact_model_part;
ModelPart *mpCluster_model_part;
ModelPart *mpRigidBody_model_part;
std::vector<SphericParticle*> mListOfSphericParticles;
std::vector<SphericParticle*> mListOfGhostSphericParticles;
}; // Class ExplicitSolverStrategy
} // namespace Kratos.
#endif // KRATOS_EXPLICIT_SOLVER_STRATEGY defined
|
DRB018-plusplus-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Data race on outLen due to ++ operation.
Adding private (outLen) can avoid race condition. But it is wrong semantically.
Data races on outLen also cause output[outLen++] to have data races.
Data race pairs (we allow two pairs to preserve the original code pattern):
1. outLen@72 vs. outLen@72
2. output[]@72 vs. output[]@72
*/
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
int input[1000];
int output[1000];
/* Driver for the DataRaceBench DRB018 kernel. Per the header comment above,
 * the race on outLen (and hence on output[outLen++]) is the intentional
 * payload of this benchmark: race detectors are expected to report it.
 * Do NOT "fix" it (e.g. with atomic/critical) — that would defeat the test. */
int main()
{
omprace_init();
int i ;
int inLen=1000 ;
int outLen = 0;
// sequential setup: input[i] = i
for (i=0; i<inLen; ++i)
input[i]= i;
// intentional data race: outLen++ is a non-atomic read-modify-write
// performed concurrently by all threads (race pairs documented above)
#pragma omp parallel for
for (i=0; i<inLen; ++i)
{
output[outLen++] = input[i] ;
}
// because of the race, output[500] is nondeterministic
printf("output[500]=%d\n",output[500]);
omprace_fini();
return 0;
}
|
GB_hcat_slice.c | //------------------------------------------------------------------------------
// GB_hcat_slice: horizontal concatenation of the slices of C
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Horizontal concatenation of slices into the matrix C.
#define GB_FREE_WORK \
{ \
GB_FREE_MEMORY (Cnzs, nthreads+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Cnvecs, nthreads+1, sizeof (int64_t)) ; \
}
#include "GB_mxm.h"
// Assembles C from nthreads hypersparse slices by computing cumulative
// entry/vector counts, allocating C once, then copying each slice into its
// disjoint region of C in parallel.
GrB_Info GB_hcat_slice // horizontal concatenation of the slices of C
(
GrB_Matrix *Chandle, // output matrix C to create
int nthreads, // # of slices to concatenate
GrB_Matrix *Cslice, // array of slices of size nthreads
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (nthreads > 1) ;
ASSERT (Chandle != NULL) ;
ASSERT (*Chandle == NULL) ;
ASSERT (Cslice != NULL) ;
// every slice must be fully built, hypersparse, and dimensionally
// compatible with slice 0
for (int tid = 0 ; tid < nthreads ; tid++)
{
ASSERT_OK (GB_check (Cslice [tid], "a slice of C", GB0)) ;
ASSERT (!GB_PENDING (Cslice [tid])) ;
ASSERT (!GB_ZOMBIES (Cslice [tid])) ;
ASSERT ((Cslice [tid])->is_hyper) ;
// each Cslice [tid] is constructed as its own matrix, with Cslice
// [tid] = A * Bslice [tid]. It is not a slice of an other matrix, so
// Cslice [tid]->is_slice is false.
ASSERT (!(Cslice [tid])->is_slice) ;
ASSERT ((Cslice [tid])->type == (Cslice [0])->type) ;
ASSERT ((Cslice [tid])->vlen == (Cslice [0])->vlen) ;
ASSERT ((Cslice [tid])->vdim == (Cslice [0])->vdim) ;
}
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
// GB_FREE_WORK (defined above) releases both arrays on every exit path
int64_t *restrict Cnzs ; // size nthreads+1
int64_t *restrict Cnvecs ; // size nthreads+1
GB_MALLOC_MEMORY (Cnzs, nthreads+1, sizeof (int64_t)) ;
GB_MALLOC_MEMORY (Cnvecs, nthreads+1, sizeof (int64_t)) ;
if (Cnzs == NULL || Cnvecs == NULL)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// find the size and type of C
//--------------------------------------------------------------------------
// Let cnz_slice [tid] be the number of entries in Cslice [tid], and let
// cnvec_slice [tid] be the number vectors in Cslice [tid]. Then Cnzs and
// Cnvecs are cumulative sums of cnz_slice and cnvec_slice, respectively:
// Cnzs [tid] = sum of cnz_slice [0:tid-1]
// Cnvecs [tid] = sum of cnvec_slice [0:tid-1]
// both arrays are size nthreads+1. Thus, both Cnzs [0] and Cnvecs [0] are
// zero, and their last entries are the total # entries and vectors in C,
// respectively.
// all the slices have the same type and dimension
GrB_Type ctype = (Cslice [0])->type ;
int64_t cvlen = (Cslice [0])->vlen ;
int64_t cvdim = (Cslice [0])->vdim ;
int64_t cnz = 0 ;
int64_t cnvec = 0 ;
int64_t cnvec_nonempty = 0 ;
for (int tid = 0 ; tid < nthreads ; tid++)
{
// compute the cumulative sum of the # entries and # vectors
Cnzs [tid] = cnz ;
Cnvecs [tid] = cnvec ;
cnz += GB_NNZ (Cslice [tid]) ;
cnvec += (Cslice [tid])->nvec ;
// also sum the total number of non-empty vectors in all the slices
cnvec_nonempty += (Cslice [tid])->nvec_nonempty ;
}
Cnzs [nthreads] = cnz ; // total # entries in C
Cnvecs [nthreads] = cnvec ; // total # vectors in C
//--------------------------------------------------------------------------
// create C and allocate all of its space
//--------------------------------------------------------------------------
GrB_Info info ;
GB_CREATE (Chandle, ctype, cvlen, cvdim, GB_Ap_malloc, true,
GB_FORCE_HYPER, GB_Global_hyper_ratio_get ( ), cnvec, cnz, true,
Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
GrB_Matrix C = (*Chandle) ;
int64_t *restrict Ch = C->h ;
int64_t *restrict Cp = C->p ;
int64_t *restrict Ci = C->i ;
GB_void *restrict Cx = C->x ;
size_t csize = ctype->size ;
C->nvec_nonempty = cnvec_nonempty ;
C->nvec = cnvec ;
// final column pointer: one past the last entry
Cp [cnvec] = cnz ;
//--------------------------------------------------------------------------
// copy each slice into C
//--------------------------------------------------------------------------
// one thread per slice; the cumulative sums guarantee that each thread
// writes a disjoint region of Ch, Cp, Ci, and Cx, so no synchronization
// is needed inside the loop
#pragma omp parallel for num_threads(nthreads) schedule(static,1)
for (int tid = 0 ; tid < nthreads ; tid++)
{
// get the Cslice [tid] and its position in C
int64_t *restrict Csliceh = (Cslice [tid])->h ;
int64_t *restrict Cslicep = (Cslice [tid])->p ;
int64_t *restrict Cslicei = (Cslice [tid])->i ;
GB_void *restrict Cslicex = (Cslice [tid])->x ;
int64_t cnz = Cnzs [tid] ;
int64_t cnz_slice = Cnzs [tid+1] - cnz ;
int64_t cnvec = Cnvecs [tid] ;
int64_t cnvec_slice = Cnvecs [tid+1] - cnvec ;
// copy the row indices and values of Cslice [tid] into Ci and Cx
memcpy (Ci + cnz , Cslicei, cnz_slice * sizeof (int64_t)) ;
memcpy (Cx + cnz * csize, Cslicex, cnz_slice * csize) ;
// copy the column indices of Cslice into Ch
memcpy (Ch + cnvec, Csliceh, cnvec_slice * sizeof (int64_t)) ;
// construct the column pointers of C (shift upwards by cnz)
for (int64_t k = 0 ; k < cnvec_slice ; k++)
{
Cp [cnvec + k] = Cslicep [k] + cnz ;
}
}
//--------------------------------------------------------------------------
// free workspace and finalize the matrix
//--------------------------------------------------------------------------
GB_FREE_WORK ;
C->magic = GB_MAGIC ;
ASSERT_OK (GB_check (C, "C from horizontal concatenation", GB0)) ;
return (GrB_SUCCESS) ;
}
|
bellman_ford.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <limits.h>
#include "ompdist/queues.h"
#include "ompdist/graph.h"
#include "ompdist/graph_gen.h"
#include "ompdist/utils.h"
#include "ompdist/msr.h"
#include "config.h"
#define INF INT_MAX
typedef struct {
int distance;
} payload;
typedef struct {
int from;
int y;
} message;
int ROOT;
/**
 * initialize_graph - Attaches a payload to every vertex, marking all
 * distances as unreachable (INF), then sets the root's distance to 0.
 *
 * @g: the graph
 */
void initialize_graph(graph* g) {
    int v;

    for (v = 0; v < g->N; v++) {
        node* cur = elem_at(&g->vertices, v);
        payload* pl = malloc(sizeof(payload));
        pl->distance = INF;
        cur->data = pl;
    }

    /* the source vertex is reachable at distance 0 by definition */
    node* src = elem_at(&g->vertices, ROOT);
    payload* src_pl = src->data;
    src_pl->distance = 0;
}
/**
 * root_message - Kicks off the algorithm by sending a distance-1 offer
 * from the root to each of its neighbors.
 *
 * @g: the graph
 * @recv: the recv queuelist
 */
void root_message(graph* g, queuelist* recv) {
    DEBUG("sending root message\n");

    node* src = elem_at(&g->vertices, ROOT);
    int j;
    for (j = 0; j < src->degree; j++) {
        node* nbr = *((node**) elem_at(&src->neighbors, j));
        message msg;
        msg.from = ROOT;
        msg.y = 1;
        enqueue(recv, nbr->label, &msg);
    }
}
/**
 * messages_in_queue - Checks if there are any messages in the queues of any
 * of the nodes.
 *
 * @N: number of per-node queues to inspect
 * @recv: the recv queuelist
 *
 * Returns 1 if there are any messages, 0 otherwise.
 */
int messages_in_queue(int N, queuelist* recv) {
    int result = 0;

    /* reduction(|:result) gives each thread a private copy (initialized to
     * 0) that is OR-combined at the join. The previous version assigned the
     * shared `result` from multiple threads without synchronization — a data
     * race, even though every writer stored the same value 1. */
    #pragma omp parallel for schedule(SCHEDULING_METHOD) reduction(|:result)
    for (int i = 0; i < N; i++) {
        if (!is_ql_queue_empty(recv, i))
            result = 1;
    }

    DEBUG("messages in queue = %d\n", result);

    return result;
}
/**
 * recv_and_send - Receives messages and passes them on (after incrementing
 * the message distance) to its neighbors.
 *
 * @g: the graph
 * @recv: the recv queuelist
 * @send: the send queuelist
 *
 * Each node drains its own queue, keeping only the smallest offered
 * distance. If that offer improves on the node's current distance, the node
 * relaxes and forwards (distance + 1) to every neighbor except the sender
 * of the winning offer.
 */
void recv_and_send(graph* g, queuelist* recv, queuelist* send) {
    DEBUG("receiving and sending messages\n");

    #pragma omp parallel for schedule(SCHEDULING_METHOD)
    for (int i = 0; i < g->N; i++) {
        node* u = elem_at(&g->vertices, i);
        payload* u_data = u->data;

        /* drain this node's queue, remembering the best (smallest) offer */
        int lowest_y = INT_MAX;
        int lowest_from = 0;
        while (!is_ql_queue_empty(recv, i)) {
            message* m = dequeue(recv, i);
            if (lowest_y > m->y) {
                lowest_y = m->y;
                lowest_from = m->from;
            }
        }

        if (lowest_y != INT_MAX && lowest_y < u_data->distance) {
            u_data->distance = lowest_y;
            for (int j = 0; j < u->degree; j++) {
                node* v = *((node**) elem_at(&u->neighbors, j));
                /* (removed a redundant inner redeclaration of u_data that
                 * shadowed the outer variable and was never used) */
                if (v->label == lowest_from)
                    continue;
                message m = {u->label, lowest_y+1};
                enqueue(send, v->label, &m);
            }
        }
    }
}
/**
 * propagate_messages - Moves messages from the send queuelist to the recv
 * queuelist, making them visible for the next round.
 *
 * @g: the graph
 * @recv: the recv queuelist
 * @send: the send queuelist
 */
void propagate_messages(graph* g, queuelist* recv, queuelist* send) {
    DEBUG("propagating messages from the send queuelist to recv\n");

    #pragma omp parallel for schedule(SCHEDULING_METHOD)
    for (int i = 0; i < g->N; i++) {
        node* v = elem_at(&g->vertices, i);
        int label = v->label;
        /* drain this node's outbound queue into its inbound queue */
        while (!is_ql_queue_empty(send, label)) {
            message* m = dequeue(send, label);
            enqueue(recv, label, m);
        }
    }
}
/**
 * print_solution - Prints every vertex's BFS Bellman-Ford distance and the
 * maximum distance found.
 *
 * @g: a pointer to the graph object
 */
void print_solution(graph* g) {
    int farthest = 0;

    for (int i = 0; i < g->N; i++) {
        node* v = elem_at(&g->vertices, i);
        payload* pl = v->data;
        INFO("%d->distance = %d\n", v->label, pl->distance);
        if (pl->distance > farthest)
            farthest = pl->distance;
    }

    INFO("max_distance = %d\n", farthest);
}
/**
 * Based on Roger Wattenhofer's Principles of Distributed Computing's
 * Algorithm 2.13 (Bellman-Ford BFS) to solve the single-source shortest
 * path problem.
 *
 * Two input modes: a graph file (argv[2]) with an iteration count
 * (argv[3]), or a randomly generated connected graph sized by argv[1..2].
 * Each iteration runs the message-passing rounds until no queue holds a
 * message, accumulating wall time and energy.
 */
int main(int argc, char* argv[]) {
int N;
int M;
graph* g;
int iterate;
int iterations = 1;
if ((iterate = input_through_argv(argc, argv))) {
// file mode: read N, the root vertex, and the edge list
// NOTE(review): fopen/fscanf results are unchecked — a bad path or
// malformed file leads to a NULL deref / garbage sizes; confirm inputs
// are trusted here
FILE* in = fopen(argv[2], "r");
fscanf(in, "%d\n", &N);
g = new_graph(N, 0);
fscanf(in, "%d\n", &ROOT);
g->M = M = read_graph(g, in);
fclose(in);
sscanf(argv[3], "%d", &iterations);
}
else {
// generated mode: defaults N=16, M=64, overridable from argv
N = 16;
M = 64;
if (argc > 1) {
sscanf(argv[1], "%d", &N);
sscanf(argv[2], "%d", &M);
}
g = generate_new_connected_graph(N, M);
ROOT = 0;
}
long long duration = 0;
double total_energy = 0;
for (int i = 0; i < iterations; i++) {
// NOTE(review): recv/send are allocated every iteration and never
// freed — confirm whether a free_queuelist-style teardown exists;
// with many iterations this grows memory unboundedly
queuelist* recv = new_queuelist(N, sizeof(message));
queuelist* send = new_queuelist(N, sizeof(message));
begin_timer();
init_energy_measure();
initialize_graph(g);
root_message(g, recv);
// message-passing rounds: relax, then move send -> recv, until quiescent
while (messages_in_queue(g->N, recv)) {
recv_and_send(g, recv, send);
propagate_messages(g, recv, send);
}
total_energy += total_energy_used();
duration += time_elapsed();
// print_solution(g);
}
// report average time and energy per iteration (file mode only)
if (iterate)
printf("%.2lf %.2lf\n", ((double) duration) / iterations, total_energy / iterations);
return 0;
}
|
novikov_d_rectangles_method_omp.h | // Copyright 2021 Novikov Danil
#ifndef MODULES_TASK_2_NOVIKOV_D_RECTANGLES_METHOD_OMP_NOVIKOV_D_RECTANGLES_METHOD_OMP_H_
#define MODULES_TASK_2_NOVIKOV_D_RECTANGLES_METHOD_OMP_NOVIKOV_D_RECTANGLES_METHOD_OMP_H_
#include <omp.h>
#include <cstring>
#include <cmath>
#include <vector>
#include <iostream>
// Sequential midpoint-rule integration of `function` over the axis-aligned
// box [begin_point, end_point], with number_of_partitions subdivisions per
// dimension.
//
// Throws int(1) on invalid input (non-positive partition count or mismatched
// point dimensions), matching the original contract.
//
// Fix: sector/stride counts are now computed with exact integer
// multiplication instead of std::pow. std::pow works in floating point, and
// a result like 124.999... truncated through a cast to int silently skips
// sectors on some platforms.
template <typename Function>
double rectangles_base(Function function, std::vector <double> begin_point,
std::vector <double> end_point, const int number_of_partitions) {
    if (number_of_partitions <= 0) {
        throw 1;
    }
    if (begin_point.size() != end_point.size()) {
        throw 1;
    }
    int dimension = begin_point.size();

    // per-dimension cell width
    std::vector<double> h(dimension);
    for (int i = 0; i < dimension; ++i) {
        h[i] = (end_point[i] - begin_point[i]) /
            static_cast<double>(number_of_partitions);
    }

    // total number of cells = number_of_partitions^dimension (exact)
    long long sectors = 1;
    for (int i = 0; i < dimension; ++i) {
        sectors *= number_of_partitions;
    }

    std::vector <double> half_point(dimension);
    double result = 0.0;
    for (long long i = 0; i < sectors; ++i) {
        // decode the flat index i into per-dimension cell coordinates:
        // b = n^k and a = n^(k+1) are carried incrementally
        long long b = 1;
        for (int k = 0; k < dimension; k++) {
            long long a = b * number_of_partitions;
            int p = static_cast<int>((i % a) / b);
            // midpoint of cell p along dimension k
            half_point[k] = begin_point[k] + h[k] * p + h[k] * 0.5;
            b = a;
        }
        result += function(half_point);
    }

    // scale the sum of midpoint samples by the cell volume
    for (int i = 0; i < dimension; ++i) {
        result *= h[i];
    }
    return result;
}
// OpenMP-parallel midpoint-rule (rectangles) integration of `function` over
// the box [begin_point, end_point]; same contract as rectangles_base.
// Throws int(1) on invalid arguments (kept for caller compatibility).
//
// FIX: as in the sequential version, the std::pow-based cell count and index
// decoding (double -> int truncation risk) are replaced with exact integer
// arithmetic.  The cells are summed with an OpenMP reduction; each thread
// keeps its own midpoint scratch vector.
template <typename Function>
double rectangles_omp(Function function, std::vector <double> begin_point,
std::vector <double> end_point, const int number_of_partitions) {
    if (number_of_partitions <= 0) {
        throw 1;
    }
    if (begin_point.size() != end_point.size()) {
        throw 1;
    }
    int dimension = begin_point.size();
    std::vector<double> h(dimension);
    for (int i = 0; i < dimension; ++i) {
        h[i] = (end_point[i] - begin_point[i]) /
            static_cast<double>(number_of_partitions);
    }
    double result = 0.0;
    // Total number of cells = number_of_partitions^dimension (exact).
    long long sectors = 1;
    for (int i = 0; i < dimension; ++i) {
        sectors *= number_of_partitions;
    }
#pragma omp parallel for reduction(+:result)
    for (long long i = 0; i < sectors; ++i) {
        std::vector<double> half_point(dimension);  // thread-private scratch
        long long rest = i;
        for (int k = 0; k < dimension; ++k) {
            int p = static_cast<int>(rest % number_of_partitions);
            rest /= number_of_partitions;
            half_point[k] = begin_point[k] + h[k] * p + h[k] * 0.5;
        }
        result += function(half_point);
    }
    // Scale by the volume of one cell.
    for (int i = 0; i < dimension; ++i) {
        result *= h[i];
    }
    return result;
}
#endif // MODULES_TASK_2_NOVIKOV_D_RECTANGLES_METHOD_OMP_NOVIKOV_D_RECTANGLES_METHOD_OMP_H_
|
Par-11-ParallelForSeqStmtsParallelFor.c |
/* Fixture: two separate parallel-for regions bracketing sequential
 * statements; the region structure is preserved deliberately. */
int main(int argc, char **argv) {
    int a[4] = {1, 2, 3, 4};
    int b[4] = {0, 0, 0, 0};
    /* Triple every element of a in parallel. */
    #pragma omp parallel
    {
        #pragma omp for
        for (int idx = 0; idx < 4; ++idx) {
            a[idx] *= 3;
        }
    }
    a[0] = 1;
    int k = a[1] + a[0] * 4;
    /* Add each index to its element in parallel. */
    #pragma omp parallel
    {
        #pragma omp for
        for (int idx = 0; idx < 4; ++idx) {
            a[idx] += idx;
        }
    }
    return 0;
}
|
test.c | #include <stdio.h>
#define NUM_DIVS 1000000
#define NUM_ITS 100
/* Approximate pi by midpoint-rule integration of 4/(1+x^2) over [0,1]. */
double calc_pi()
{
    const double width = 1.0 / NUM_DIVS;
    double total = 0.0;
    int i = 0;
    /* Locals declared inside the loop body are naturally thread-private,
       so only the accumulator needs a reduction clause. */
#pragma omp parallel for reduction(+: total)
    for (i = 0; i < NUM_DIVS; i++)
    {
        double mid = ((double)i + 0.5) * width;
        double height = 4.0 / (1 + mid * mid);
        total += width * height;
    }
    return total;
}
/* Repeatedly recompute pi (a crude timing workload) and print the last
 * estimate. */
int main(int argc, char** argv)
{
    double result = 0.0;
    for (int it = 0; it < NUM_ITS; it++)
    {
        result = calc_pi();
    }
    printf("Pi: %1.16f\n", result);
    return 0;
}
|
GB_binop__min_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__min_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__min_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__min_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_int8)
// A*D function (colscale): GB (_AxD__min_int8)
// D*A function (rowscale): GB (_DxB__min_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__min_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__min_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_int8)
// C=scalar+B GB (_bind1st__min_int8)
// C=scalar+B' GB (_bind1st_tran__min_int8)
// C=A+scalar GB (_bind2nd__min_int8)
// C=A'+scalar GB (_bind2nd_tran__min_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_IMIN (aij, bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMIN (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_INT8 || GxB_NO_MIN_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense int8_t; the operator is
// GB_IMIN (see GB_BINOP above).  The loop lives in the shared template.
void GB (_Cdense_ewise3_accum__min_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense int8_t; cij = GB_IMIN(aij,bij).
// The loop lives in the shared template.
void GB (_Cdense_ewise3_noaccum__min_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C with the MIN operator.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__min_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed as untyped GB_void) into dense C
// with the MIN operator.
GrB_Info GB (_Cdense_accumb__min_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable (the block above already returned); kept as-is in
// this auto-generated file.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by diagonal matrix D using the MIN
// operator on int8_t; only the typed output array is declared here, the
// loop comes from the colscale template.
GrB_Info GB (_AxD__min_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by diagonal matrix D using the MIN
// operator on int8_t; the loop comes from the rowscale template.
GrB_Info GB (_DxB__min_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B with the MIN operator.  The
// alpha/beta scalars are unpacked only when is_eWiseUnion is true; the
// numeric work and the workspace lifetime (GB_WERK_DECLARE /
// GB_FREE_WORKSPACE) are handled by the shared add template.
GrB_Info GB (_AaddB__min_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult where C is sparse/hyper; all numeric work is in the included
// meta template.
GrB_Info GB (_AemultB_08__min_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult for A sparse/hyper and B bitmap/full.  GB_BINOP_FLIP is 0
// for this operator (MIN, defined above), so only the non-flipped branch
// below is compiled in.
GrB_Info GB (_AemultB_02__min_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult with M sparse/hyper and A, B bitmap/full; loop supplied by
// the emult_04 template.
GrB_Info GB (_AemultB_04__min_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is a bitmap matrix; loop supplied by the
// bitmap emult template.
GrB_Info GB (_AemultB_bitmap__min_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = min (x, Bx [p]) for every entry present in B (bind the scalar
// as the first operand).  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__min_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Bx = (int8_t *) Bx_input ;
int8_t x = (*((int8_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// Skip positions where the bitmap says there is no entry.
if (GBB (Bb, p))
{
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMIN (x, bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = min (Ax [p], y) for every entry present in A (bind the scalar
// as the second operand).  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__min_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Skip positions where the bitmap says there is no entry.
if (GBB (Ab, p))
{
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMIN (aij, y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
// C = op (x, A'): transpose A and apply z = min(x, aij) via the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind1st_tran__min_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for any code following this function.
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
// C = op (A', y): transpose A and apply z = min(aij, y) via the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind2nd_tran__min_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRACC_OMP_037_Vector_add_Mult_no_Barrier_yes.c | /*
Vector addition then scalar multiplication with no implicit barrier in between.
Teams distribute for implements no implicit barrier at the end of a structured block. Similar to nowait.
*/
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#define N 100
#define C 512
int a;
int b[C];
int c[C];
int temp[C];
/* Reset the shared vectors: b and temp to all zeros, c to all twos, and
 * the scalar multiplier a to 2. */
int init(){
    for(int idx = 0; idx < C; idx++){
        b[idx] = 0;
        temp[idx] = 0;
        c[idx] = 2;
    }
    a = 2;
    return 0;
}
/* N rounds of vector add (temp = b + c) followed by scalar multiply
 * (b = temp * a) on the device.  The missing barrier between the two
 * distribute loops is this benchmark's intentional race and is kept. */
int add_Mult(){
#pragma omp target map(tofrom:b[0:C]) map(to:c[0:C],temp[0:C],a) device(0)
{
#pragma omp teams
for(int i=0; i<N ;i++){
#pragma omp distribute
for(int i=0; i<C; i++){
temp[i] = b[i] + c[i];
}
#pragma omp distribute
// BUG FIX: the original ran from i==C down to i==1, writing the
// out-of-bounds element b[C] and never updating b[0] (which check()
// reads).  Iterate over the valid range [C-1, 0] instead.
for(int i=C-1; i>=0; i--){
b[i] = temp[i] * a;
}
}
}
return 0;
}
/* Compare every element of b against the value N rounds of
 * add-then-multiply should produce, and report whether any mismatch
 * (i.e. a visible memory access issue) was found. */
int check(){
    bool issue = false;
    int expected = 0;
    for(int i = 0; i < N; i++){
        expected = (expected + 2) * 2;
    }
    for(int i = 0; i < C; i++){
        if(b[i] != expected){
            issue = true;
        }
    }
    printf("Memory Access Issue visible: %s\n", issue ? "true" : "false");
    return 0;
}
// Driver: initialize the shared vectors, run the device kernel, then
// verify and report the result.
int main(){
init();
add_Mult();
check();
return 0;
}
mkl_util.h | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#ifdef INTEL_MKL
#include <string>
#include <vector>
#include <unordered_map>
#include <utility>
#ifdef INTEL_MKL_ML
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#include "mkl_service.h"
#include "mkl_trans.h"
#endif
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#ifndef INTEL_MKL_ML
#include "mkldnn.hpp"
#include "tensorflow/core/lib/core/stringpiece.h"
using mkldnn::engine;
using mkldnn::memory;
using mkldnn::padding_kind;
using mkldnn::primitive;
using mkldnn::reorder;
#endif
#ifdef _WIN32
typedef unsigned int uint;
#endif
namespace tensorflow {
// The file contains a number of utility classes and functions used by MKL
// enabled kernels
// This class encapsulates all the meta data that is associated with an MKL
// tensor. A tensor is an MKL tensor if it was created as the result of an
// MKL operation, and did not go through a conversion to a standard
// Tensorflow tensor.
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims;
typedef enum {
Dim_N = 0,
Dim_C = 1,
Dim_H = 2,
Dim_W = 3,
Dim_O = 0,
Dim_I = 1
} MklDnnDims;
#ifdef INTEL_MKL_ML
class MklShape {
public:
MklShape() {}
TF_DISALLOW_COPY_AND_ASSIGN(MklShape); // Cannot copy
// Releases the owned size/stride/dim-map arrays and deletes both dnn
// layout handles.
~MklShape() {
if (sizes_) delete[] sizes_;
if (strides_) delete[] strides_;
if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);
if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);
if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
}
const bool IsMklTensor() const { return isMklTensor_; }
void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }
void SetDimensions(const size_t dimension) { dimension_ = dimension; }
void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }
// Derive (and own) the MKL layout of one resource of an existing
// MKL primitive.
void SetMklLayout(const void* primitive, size_t resourceType) {
CHECK_EQ(
dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
(dnnResourceType_t)resourceType),
E_SUCCESS);
}
// Record the TF-order sizes/strides and create the matching plain layout.
void SetTfLayout(const size_t dimension, const size_t* sizes,
const size_t* strides) {
dimension_ = dimension;
if (dimension > 0) { // MKl doesn't support zero dimension tensors
sizes_ = new size_t[dimension];
strides_ = new size_t[dimension];
for (int ii = 0; ii < dimension; ii++) {
sizes_[ii] = sizes[ii];
strides_[ii] = strides[ii];
}
CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
E_SUCCESS);
}
}
// Default case - MKL dim ordering is opposite of TF dim ordering
// MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
// TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
// For layers that rely on data_format semantics (conv, pooling etc.)
// or operate only on certain dimensions (relu, concat, split etc.),
// Mkl APIs might require us to reorder these dimensions. In such cases,
// kernels should explicitly set this map
void SetTfDimOrder(const size_t dimension) {
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
for (size_t ii = 0; ii < dimension; ii++) {
tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
}
}
// Install a caller-supplied TF -> MKL dimension map.
void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
for (size_t ii = 0; ii < dimension; ii++) {
tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
}
}
// Build the map from a 4-D TensorFlow data_format (NHWC/NCHW).
void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
CHECK_EQ(dimension, 4);
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
}
const dnnLayout_t GetMklLayout() const { return mklLayout_; }
const dnnLayout_t GetTfLayout() const { return tfLayout_; }
// The layout currently describing the data: MKL layout for MKL tensors,
// TF layout otherwise.
const dnnLayout_t GetCurLayout() const {
return isMklTensor_ ? mklLayout_ : tfLayout_;
}
size_t GetDimension() const { return dimension_; }
const size_t* GetSizes() const { return sizes_; }
int64 dim_size(int index) const { return sizes_[index]; }
// Size of TF dimension 'index', translated through the dim-order map.
int64 tf_dim_size(int index) const {
return sizes_[tf_to_mkl_dim_map_[index]];
}
const size_t* GetStrides() const { return strides_; }
const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Channel dimension.
bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Batch dimension.
bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Width dimension.
bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Height dimension.
bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }
// Check if the TF-Mkl dimension ordering map specifies if the input
// tensor is in NCHW format.
bool IsTensorInNCHWFormat() const {
TensorFormat data_format = FORMAT_NCHW;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
// Check if the TF-Mkl dimension ordering map specifies if the input
// tensor is in NHWC format.
bool IsTensorInNHWCFormat() const {
TensorFormat data_format = FORMAT_NHWC;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
// Convert 'input' (in this tensor's current layout) into 'targetLayout',
// writing the result to 'output'.
void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
void* output) const {
dnnLayout_t curLayout;
if (isMklTensor_)
curLayout = mklLayout_;
else
curLayout = tfLayout_;
dnnPrimitive_t convert;
CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout),
E_SUCCESS);
CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS);
CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS);
}
// The following methods are used for serializing and de-serializing the
// contents of the mklshape object.
// The data is serialized in this order
// isMklTensor_
// dimension_
// sizes_
// strides_
// mklLayout_
// tfLayout_
// tf_to_mkl_dim_map_
#define SIZE_OF_MKL_DNN_BUF \
(dnnLayoutSerializationBufferSize_F32()) // Size of buffer needed to
// serialize dnn_layout pointer
// Size of buffer to hold the serialized object, the size is computed as
// follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) +
// sizeof(strides_)
// + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer)
// + sizeof(tf_to_mkl_dim_map_)
#define SIZE_OF_MKL_SERIAL_DATA(dims) \
(2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)
// First we need to define some macro for offsets into the serial buffer where
// different elements of Mklshape is written/read from
#define IS_MKL_TENSOR_OFFSET 0
// Location from start of buffer where isMklTensor_ is serialized
#define DIMS_OFFSET \
(IS_MKL_TENSOR_OFFSET + sizeof(size_t)) // Location of dimension_
// Location of sizes. Note dim is not used here, left here
// to make macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))
#define STRIDES_OFFSET(dims) \
(SIZES_OFFSET(dims) + dims * sizeof(size_t)) // Location of strides
#define MKL_LAYOUT_OFFSET(dims) \
(STRIDES_OFFSET(dims) + dims * sizeof(size_t)) // Location of mklLayout_
#define TF_LAYOUT_OFFSET(dims) \
(MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF) // Location of tfLayout_
// Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
(TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)
// TODO(agramesh1) make sure to create a const to share with rewrite pass
// for min size of MKL metadata tensor.
// Rebuild this object from a byte buffer produced by SerializeMklShape,
// using the offset macros above.
void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
// Make sure buffer holds at least isMklTensor_
isMklTensor_ =
*reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;
if (isMklTensor_) { // If it is an MKL Tensor then read the rest
dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
<< "Bufsize too small in DeSerialize";
sizes_ = new size_t[dimension_];
strides_ = new size_t[dimension_];
tf_to_mkl_dim_map_ = new size_t[dimension_];
for (int i = 0; i < dimension_; i++) {
sizes_[i] =
reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
strides_[i] = reinterpret_cast<const size_t*>(
buf + STRIDES_OFFSET(dimension_))[i];
tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
}
CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
buf + MKL_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
buf + TF_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
}
}
// Write this object into 'buf' in the field order documented above.
void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
<< "Bufsize too small to Serialize";
*reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
isMklTensor_ ? 1 : 0;
if (isMklTensor_) {
*(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
for (int i = 0; i < dimension_; i++) {
reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
sizes_[i];
reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
strides_[i];
reinterpret_cast<size_t*>(buf +
TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
tf_to_mkl_dim_map_[i];
}
CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
buf + MKL_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
CHECK_EQ(
dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
}
}
private:
bool isMklTensor_ =
false; // Flag to indicate if the tensor is an MKL tensor or not
dnnLayout_t mklLayout_ = nullptr; // Pointer to the MKL layout
dnnLayout_t tfLayout_ = nullptr; // Pointer to layout of corresponding
// Tensorflow tensor, used when conversion from MKL to standard tensor
size_t dimension_ = 0;
size_t* sizes_ = nullptr; // Required by MKL for conversions
size_t* strides_ = nullptr; // Required by MKL for conversions
size_t* tf_to_mkl_dim_map_ =
nullptr; // TF dimension corresponding to this MKL dimension
};
#else
// Forward decl
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype);
class MklDnnShape {
private:
typedef struct {
/// Flag to indicate if the tensor is an MKL tensor or not
bool is_mkl_tensor_ = false;
/// Number of dimensions in Tensorflow format
size_t dimension_ = 0;
/// Required by MKLDNN for conversions
mkldnn_dims_t sizes_; // Required by MKL for conversions
memory::format tf_data_format_ = memory::format::format_undef;
memory::data_type T_ = memory::data_type::data_undef;
// MKL layout
mkldnn_memory_desc_t mkl_md_;
/// TF dimension corresponding to this MKL dimension
mkldnn_dims_t map_;
} MklShapeData;
MklShapeData data_;
typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t;
#define INVALID_DIM_SIZE -1
public:
MklDnnShape() {
for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
++i) {
data_.sizes_[i] = -1;
}
for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
data_.map_[i] = -1;
}
}
~MklDnnShape() {}
TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape); // Cannot copy
/// Helper function to compare memory::desc objects for MklDnn.
/// May be this should go into MklDnn directly.
inline bool CompareMklDnnLayouts(const memory::desc& md1,
const memory::desc& md2) const {
mkldnn_memory_desc_t mdd1 = md1.data;
mkldnn_memory_desc_t mdd2 = md2.data;
const char* d1 = reinterpret_cast<const char*>(&mdd1);
const char* d2 = reinterpret_cast<const char*>(&mdd2);
size_t md_size = sizeof(mdd1);
for (size_t i = 0; i < md_size; i++) {
if (*d1++ != *d2++) {
return false;
}
}
return true;
}
/// Equality function for MklDnnShape objects
/// @return true if both are equal; false otherwise.
inline bool operator==(const MklDnnShape& input_shape) const {
if (this->IsMklTensor() != input_shape.IsMklTensor()) {
return false;
}
// If input tensors are in Mkl layout, then we check for dimensions and
// sizes.
if (this->IsMklTensor()) {
return this->GetTfShape() == input_shape.GetTfShape() &&
CompareMklDnnLayouts(this->GetMklLayout(),
input_shape.GetMklLayout());
}
return true;
}
/// Equality operator for MklDnnShape and TFShape.
/// Returns: true if TF shapes for both are the same, false otherwise
inline bool operator==(const TensorShape& input_shape) const {
if (!this->IsMklTensor()) {
return false;
}
return this->GetTfShape() == input_shape;
}
// True when the underlying tensor is laid out in MKL format.
inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }

// Mark this shape as describing an MKL-layout (true) or TF-layout tensor.
inline void SetMklTensor(bool is_mkl_tensor) {
  data_.is_mkl_tensor_ = is_mkl_tensor;
}

// Set the tensor rank (number of dimensions).
inline void SetDimensions(const size_t dimension) {
  data_.dimension_ = dimension;
}

// Return the size of the MKL dimension named by 'dimension' ('N', 'C', 'H'
// or 'W'); CHECK-fails for any other character or an out-of-range index.
inline size_t GetDimension(char dimension) const {
  int index = GetMklDnnTensorDimIndex(dimension);
  // NOTE(review): 'index' (int) is compared against GetDimension() (size_t);
  // safe while both stay small and non-negative.
  CHECK(index >= 0 && index < this->GetDimension())
      << "Invalid index from the dimension: " << index << ", " << dimension;
  return this->DimSize(index);
}

// Map a dimension letter to its MklDnnDims index; LOG(FATAL) on any other.
inline int32 GetMklDnnTensorDimIndex(char dimension) const {
  switch (dimension) {
    case 'N':
      return MklDnnDims::Dim_N;
    case 'C':
      return MklDnnDims::Dim_C;
    case 'H':
      return MklDnnDims::Dim_H;
    case 'W':
      return MklDnnDims::Dim_W;
    default:
      LOG(FATAL) << "Invalid dimension: " << dimension;
      return -1;  // Avoid compiler warning about missing return value
  }
}

// Tensor rank (number of dimensions).
inline size_t GetDimension() const { return data_.dimension_; }

// Raw pointer to the dimension-size array.
// NOTE(review): assumes the element type of sizes_ is int-compatible --
// confirm against the mkldnn_dims_t definition before relying on it.
inline const int* GetSizes() const {
  return reinterpret_cast<const int*>(&data_.sizes_[0]);
}
// Returns an mkldnn::memory::dims object that contains the sizes of this
// MklDnnShape object.
inline memory::dims GetSizesAsMklDnnDims() const {
  memory::dims retVal;
  if (data_.is_mkl_tensor_) {
    size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
    for (size_t i = 0; i < dimensions; i++) {
      // Skip slots still holding the INVALID_DIM_SIZE sentinel (-1).
      if (data_.sizes_[i] != INVALID_DIM_SIZE)
        retVal.push_back(data_.sizes_[i]);
    }
  } else {
    // Reached only when is_mkl_tensor_ is false, so this always CHECK-fails:
    // calling this on a non-MKL tensor is a fatal error.
    CHECK_EQ(data_.is_mkl_tensor_, true);
  }
  return retVal;
}

// Size of dimension 'index'.
// NOTE(review): only the upper bound is CHECKed; a negative index is not
// rejected here.
inline int64 DimSize(int index) const {
  CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
  return data_.sizes_[index];
}
/// Return TensorShape that describes the Tensorflow shape of the tensor
/// represented by this MklShape.
inline TensorShape GetTfShape() const {
  // Only valid for MKL tensors; fatal otherwise.
  CHECK_EQ(data_.is_mkl_tensor_, true);

  std::vector<int32> shape(data_.dimension_, -1);
  if (data_.tf_data_format_ != memory::format::blocked) {
    // Use the TF->MKL dimension map to emit sizes in TF dimension order.
    for (size_t idx = 0; idx < data_.dimension_; ++idx) {
      shape[idx] = data_.sizes_[TfDimIdx(idx)];
    }
  } else {
    // If Tensorflow shape is in Blocked format, then we don't have dimension
    // map for it. So we just create Tensorflow shape from sizes in the
    // specified order.
    for (size_t idx = 0; idx < data_.dimension_; ++idx) {
      shape[idx] = data_.sizes_[idx];
    }
  }

  TensorShape ts;
  bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
  CHECK_EQ(ret, true);
  return ts;
}
// Set/get the element data type of the tensor.
inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
inline const memory::data_type GetElemType() { return data_.T_; }

// Record the MKL layout from a primitive descriptor (stores the underlying
// raw C memory descriptor).
inline void SetMklLayout(memory::primitive_desc* pd) {
  CHECK_NOTNULL(pd);
  data_.mkl_md_ = pd->desc().data;
}

// Record the MKL layout directly from a memory descriptor.
inline void SetMklLayout(memory::desc* md) {
  CHECK_NOTNULL(md);
  data_.mkl_md_ = md->data;
}

// Rebuild a memory::desc from the stored raw MKL descriptor.
inline const memory::desc GetMklLayout() const {
  return memory::desc(data_.mkl_md_);
}

// The memory::format the TF-side data was declared in (may be blocked).
inline memory::format GetTfDataFormat() const {
  return data_.tf_data_format_;
}
/// We don't create primitive_descriptor for TensorFlow layout now.
/// We use lazy evaluation and create it only when needed. Input format can
/// also be Blocked format.
inline void SetTfLayout(size_t dims, const memory::dims& sizes,
                        memory::format format) {
  // 'sizes' must supply exactly one entry per dimension.
  CHECK_EQ(dims, sizes.size());
  data_.dimension_ = dims;
  for (size_t ii = 0; ii < dims; ii++) {
    data_.sizes_[ii] = sizes[ii];
  }
  data_.tf_data_format_ = format;
  // Blocked format carries no N/C/H/W semantics, so no dimension map is set.
  if (format != memory::format::blocked) {
    SetTfDimOrder(dims, format);
  }
}

// Build a memory::desc describing the TF-side layout (created lazily here,
// on demand, from the stored sizes and format).
inline const memory::desc GetTfLayout() const {
  memory::dims dims;
  for (size_t ii = 0; ii < data_.dimension_; ii++) {
    dims.push_back(data_.sizes_[ii]);
  }

  // Create Blocked memory desc if input TF format was set like that.
  if (data_.tf_data_format_ == memory::format::blocked) {
    auto strides = CalculateTFStrides(dims);
    return CreateBlockedMemDescHelper(dims, strides, data_.T_);
  } else {
    return memory::desc(dims, data_.T_, data_.tf_data_format_);
  }
}

// Layout currently describing the tensor: the MKL layout when in MKL
// format, otherwise the (lazily built) TF layout.
inline const memory::desc GetCurLayout() const {
  return IsMklTensor() ? GetMklLayout() : GetTfLayout();
}
// nhasabni - I've removed SetTfDimOrder that was setting default order in
// case of MKL-ML. We don't need a case of default dimension order because
// when an operator that does not get data_format attribute gets all inputs
// in Tensorflow format, it will produce output in Tensorflow format.

// Copy a caller-provided TF->MKL dimension map verbatim.
inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) {
  CHECK(dimension == data_.dimension_);
  for (size_t ii = 0; ii < dimension; ii++) {
    data_.map_[ii] = map[ii];
  }
}

// Derive the TF->MKL dimension map from a TF data format (NHWC/NCHW).
inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
  // TODO(nhasabni): Why do we restrict this to 4D?
  CHECK_EQ(dimension, 4);
  CHECK(dimension == data_.dimension_);
  // map_[tf_index] = mkl_index, for each of W/H/C/N.
  data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
  data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
  data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
  data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
}

// Same as above, but starting from an MKL-DNN memory format.
inline void SetTfDimOrder(const size_t dimension, memory::format format) {
  TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
  SetTfDimOrder(dimension, data_format);
}

// Raw pointer to the TF->MKL dimension map.
inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; }

// MKL dimension index corresponding to TF dimension 'index'.
// NOTE(review): no bounds check on 'index'.
inline size_t TfDimIdx(int index) const { return data_.map_[index]; }

// Size of TF dimension 'index' (looked up through the dimension map).
inline int64 TfDimSize(int index) const {
  return data_.sizes_[TfDimIdx(index)];
}
/// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
/// corresponds to MKL's Channel dimension.
inline bool IsMklChannelDim(int d) const {
  return TfDimIdx(d) == MklDnnDims::Dim_C;
}

/// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
/// corresponds to MKL's Batch dimension.
inline bool IsMklBatchDim(int d) const {
  return TfDimIdx(d) == MklDnnDims::Dim_N;
}

/// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
/// corresponds to MKL's Width dimension.
inline bool IsMklWidthDim(int d) const {
  return TfDimIdx(d) == MklDnnDims::Dim_W;
}

/// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
/// corresponds to MKL's Height dimension.
inline bool IsMklHeightDim(int d) const {
  return TfDimIdx(d) == MklDnnDims::Dim_H;
}

/// Check if the TF-Mkl dimension ordering map specifies if the input
/// tensor is in NCHW format.
inline bool IsTensorInNCHWFormat() const {
  TensorFormat data_format = FORMAT_NCHW;
  // True only when all four NCHW positions map to their MKL counterparts.
  return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
          IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
          IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
          IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}

/// Check if the TF-Mkl dimension ordering map specifies if the input
/// tensor is in NHWC format.
inline bool IsTensorInNHWCFormat() const {
  TensorFormat data_format = FORMAT_NHWC;
  // True only when all four NHWC positions map to their MKL counterparts.
  return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
          IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
          IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
          IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
/// The following methods are used for serializing and de-serializing the
/// contents of the mklshape object.
/// The data is serialized in this order
/// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_;

/// Size of buffer to hold the serialized object, the size is computed by
/// following above mentioned order
inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }

// Copy the whole MklShapeData struct into 'buf'.
// NOTE(review): assumes 'buf' is suitably aligned for MklShapeData --
// confirm for buffers taken from uint8 tensor storage.
void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
  CHECK(buf_size >= GetSerializeBufferSize())
      << "Buffer size is too small to SerializeMklDnnShape";
  *reinterpret_cast<MklShapeData*>(buf) = data_;
}

// Inverse of SerializeMklDnnShape: restore data_ from 'buf'.
void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
  // Make sure buffer holds at least is_mkl_tensor_.
  CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
      << "Buffer size is too small in DeSerializeMklDnnShape";

  // Peek at the leading flag first; for non-MKL shapes only that flag needs
  // to be present, so the rest of the struct is not read.
  const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
  if (is_mkl_tensor) {  // If it is an MKL Tensor then read the rest
    CHECK(buf_size >= GetSerializeBufferSize())
        << "Buffer size is too small in DeSerializeMklDnnShape";
    data_ = *reinterpret_cast<const MklShapeData*>(buf);
  }
}
};
#endif
// List of MklShape objects. Used in Concat/Split layers.
#ifndef INTEL_MKL_ML
typedef std::vector<MklDnnShape> MklDnnShapeList;
#else
typedef std::vector<MklShape> MklShapeList;
#endif
#ifdef INTEL_MKL_ML
// Check if all tensors specified by MklShapes are MKL tensors.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
  for (const auto& shape : shapes) {
    if (!shape.IsMklTensor()) return false;
  }
  return true;
}
// Convert an MKL-layout tensor into a plain TF tensor (MKL-ML path).
// Returns a newly allocated tensor holding the converted data.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;

  // Build the TF shape by walking dimensions via the TF dimension-index map.
  for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
    // Outermost to innermost dimension
    output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
  }

  // Allocate output tensor.
  // NOTE(review): the Status returned by allocate_temp is ignored; a failed
  // allocation would go unnoticed until the flat<T>() access below.
  context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor);

  dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout());
  void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data());
  void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data());

  // Convert the flat data only when there is something to convert.
  if (mkl_tensor.NumElements() != 0) {
    mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer);
  }
  return output_tensor;
}
#else
using mkldnn::stream;
template <typename T> class MklDnnData;
// Convert an MKL-layout tensor into a plain TF-layout tensor (MKL-DNN path).
// If the input is already a TF tensor it is returned as-is; otherwise a new
// tensor is allocated and the data is reordered into TF layout.
// Any mkldnn exception is fatal (consistent with the rest of this file).
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklDnnShape& mkl_shape) {
  Tensor output_tensor;
  try {
    if (!mkl_shape.IsMklTensor())
      return mkl_tensor;  // return input since it is already TF tensor

    TensorShape output_shape = mkl_shape.GetTfShape();  // fixed stray ';;'

    // Allocate output tensor. Check the Status: the original code ignored a
    // failed allocation, which would leave output_tensor unallocated and
    // crash later inside the reorder/copy below.
    TF_CHECK_OK(context->allocate_temp(DataTypeToEnum<T>::v(), output_shape,
                                       &output_tensor));

    auto cpu_engine = engine(engine::cpu, 0);
    MklDnnData<T> input(&cpu_engine);

    // Get Mkl layout of input tensor.
    auto input_mkl_md = mkl_shape.GetMklLayout();
    auto output_tf_md = mkl_shape.GetTfLayout();
    auto output_tf_pd = memory::primitive_desc(output_tf_md, cpu_engine);
    input.SetUsrMem(input_mkl_md, &mkl_tensor);

    // Reorder from MKL layout into TF layout if the layouts differ.
    if (input.IsReorderNeeded(output_tf_pd)) {
      std::vector<primitive> net;
      CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, &output_tensor, &net),
               true);
      stream(stream::kind::eager).submit(net).wait();
    } else {
      // If not, just forward input tensor to output tensor.
      CHECK(output_tensor.CopyFrom(mkl_tensor, output_shape));
    }
  } catch (mkldnn::error& e) {
    string error_msg = "Status: " + std::to_string(e.status) +
                       ", message: " + string(e.message) + ", in file " +
                       string(__FILE__) + ":" + std::to_string(__LINE__);
    LOG(FATAL) << "Operation received an exception: " << error_msg;
  }
  return output_tensor;
}
#endif
// Get the MKL shape from the second string tensor
#ifdef INTEL_MKL_ML
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
  // Deserialize the MKL shape from the meta-data tensor paired with input n.
  const Tensor& meta_tensor =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklShape(
      meta_tensor.flat<uint8>().data(),
      meta_tensor.flat<uint8>().size() * sizeof(uint8));
}
#else
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
  // Deserialize the MKL shape from the meta-data tensor paired with input n.
  const Tensor& meta_tensor =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklDnnShape(
      meta_tensor.flat<uint8>().data(),
      meta_tensor.flat<uint8>().size() * sizeof(uint8));
}
#endif
// Gets the actual input
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
  return ctext->input(GetTensorDataIndex(n, ctext->num_inputs()));
}

// Fetch the named input list into 'input_tensors'.
// NOTE(review): the Status from input_list() is ignored.
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
                            OpInputList* input_tensors) {
  CHECK_NOTNULL(input_tensors);
  ctext->input_list(name, input_tensors);
}
#ifdef INTEL_MKL_ML
// Deserialize all MKL shapes for the input list 'name' (MKL-ML path).
// NOTE(review): assumes *mkl_shapes is already sized to hold one entry per
// meta tensor; the indexing below is unchecked.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  // Meta tensors of the list are named with an "mkl_" prefix.
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  for (int i = 0; i < input_mkl_tensors.size(); i++) {
    (*mkl_shapes)[i].DeSerializeMklShape(
        input_mkl_tensors[i].flat<uint8>().data(),
        input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
  }
}
#else
// Deserialize all MKL-DNN shapes for the input list 'name'.
// NOTE(review): assumes *mkl_shapes is already sized to hold one entry per
// meta tensor; the indexing below is unchecked.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklDnnShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  // Meta tensors of the list are named with an "mkl_" prefix.
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  for (int i = 0; i < input_mkl_tensors.size(); i++) {
    (*mkl_shapes)[i].DeSerializeMklDnnShape(
        input_mkl_tensors[i].flat<uint8>().data(),
        input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
  }
}
#endif
#ifndef INTEL_MKL_ML
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
  // Sanity check.
  CHECK_NOTNULL(context);
  CHECK_LT(input_idx, context->num_inputs());

  MklDnnShape input_mkl_shape;
  GetMklShape(context, input_idx, &input_mkl_shape);
  if (!input_mkl_shape.IsMklTensor()) {
    // Plain TF tensor: its own shape is authoritative.
    return MklGetInput(context, input_idx).shape();
  }
  // MKL tensor: reconstruct the TF shape from the serialized MKL shape.
  return input_mkl_shape.GetTfShape();
}
#endif
#ifdef INTEL_MKL_ML
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  // 1-D uint8 tensor sized to hold the serialized MklShape.
  second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  // Serialize the shape into the freshly allocated meta tensor.
  mkl_shape.SerializeMklShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklDnnShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  // 1-D uint8 tensor sized to hold the serialized MklDnnShape.
  second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  // Serialize the shape into the freshly allocated meta tensor.
  mkl_shape.SerializeMklDnnShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
#ifdef INTEL_MKL_ML
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  // Data tensor goes into the data slot with the caller-provided TF shape.
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  // Meta tensor goes into the paired meta-data slot.
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklDnnShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  // Data tensor goes into the data slot with the caller-provided TF shape.
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  // Meta tensor goes into the paired meta-data slot.
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklDnnShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
// Allocates a temp tensor and returns the data buffer for temporary storage.
// Currently
#ifndef INTEL_MKL_ML
// Allocate a scratch tensor of T large enough for the primitive's memory
// requirement (pd.get_size() bytes); returns the raw buffer in *buf_out.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           const memory::primitive_desc& pd, void** buf_out) {
  TensorShape tf_shape;
  // +1 element presumably guards against get_size() not being a multiple of
  // sizeof(T) -- the integer division would otherwise round down.
  tf_shape.AddDim(pd.get_size() / sizeof(T) + 1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#else
// MKL-ML variant: size a float scratch buffer from the dnnLayout.
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           dnnLayout_t lt_buff, void** buf_out) {
  TensorShape tf_shape;
  // Same rounding-guard pattern as above, in units of float.
  tf_shape.AddDim(
      dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) /
          sizeof(float) +
      1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
#endif

// Allocate a temp tensor of type T with an explicit shape.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           TensorShape tf_shape) {
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
}
// Compute strides for a 4-D tensor from its sizes, writing them into the
// caller-provided 'strides' array.
// NOTE(review): the meaning of sizes[0..3]/strides[0..3] follows MKL-ML's
// internal dimension ordering (not plain TF order) -- confirm before reuse.
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
                                const size_t* sizes) {
  // MKL requires strides in NCHW
  if (data_format == FORMAT_NHWC) {
    // Channels are the fastest-varying dimension in NHWC data.
    strides[0] = sizes[2];
    strides[1] = sizes[0] * sizes[2];
    strides[2] = 1;
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  } else {
    // NCHW: width is the fastest-varying dimension.
    strides[0] = 1;
    strides[1] = sizes[0];
    strides[2] = sizes[0] * sizes[1];
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  }
}
#ifdef INTEL_MKL_ML
// Convert a 4-D MklShape's sizes into a TensorShape laid out in the given
// TF data format. Fails the op for non-4-D shapes.
inline void MklSizesToTFSizes(OpKernelContext* context,
                              TensorFormat data_format_,
                              const MklShape& mkl_shape,
                              TensorShape* tf_shape) {
  size_t tf_dim = mkl_shape.GetDimension();
  const size_t* tf_sizes = mkl_shape.GetSizes();

  OP_REQUIRES(context, tf_dim == 4,
              errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));
  std::vector<int32> sizes;

  // Batch (N) comes from the last MKL size slot.
  sizes.push_back(tf_sizes[3]);

  if (data_format_ == FORMAT_NHWC) {
    // NHWC order: H, W, C taken from MKL slots 1, 0, 2.
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
    sizes.push_back(tf_sizes[2]);
  } else {
    // NCHW order: C, H, W taken from MKL slots 2, 1, 0.
    sizes.push_back(tf_sizes[2]);
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
  }

  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
#endif
// Map a dimension letter ('N', 'C', 'H', 'W') to its MklDims index;
// LOG(FATAL) for any other character.
inline int32 GetMklTensorDimIndex(char dimension) {
  switch (dimension) {
    case 'N':
      return MklDims::N;
    case 'C':
      return MklDims::C;
    case 'H':
      return MklDims::H;
    case 'W':
      return MklDims::W;
    default:
      LOG(FATAL) << "Invalid dimension: " << dimension;
      return -1;  // Avoid compiler warning about missing return value
  }
}
#ifdef INTEL_MKL_ML
// Size of the named MKL dimension of 'mkl_shape'; CHECK-fails when the
// mapped index falls outside the shape's rank.
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
  int index = GetMklTensorDimIndex(dimension);
  CHECK(index >= 0 && index < mkl_shape.GetDimension())
      << "Invalid index from the dimension: " << index << ", " << dimension;
  return mkl_shape.dim_size(index);
}
#endif
// Forward both the data tensor and its MKL meta-data tensor from input slot
// 'idx_in' to output slot 'idx_out'.
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
                                 int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  // Each logical slot maps to a data index and a paired meta-data index.
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  const Tensor& meta = context->input(idx_meta_in);
  Tensor output(data.dtype());
  Tensor meta_output(meta.dtype());

  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  // CopyFrom presumably shares the underlying buffer (shape kept identical);
  // confirm against Tensor::CopyFrom semantics.
  CHECK(output.CopyFrom(data, data.shape()));
  CHECK(meta_output.CopyFrom(meta, meta.shape()));
  context->set_output(idx_data_out, output);
  context->set_output(idx_meta_out, meta_output);
}
#ifdef INTEL_MKL_ML
// Forward the data tensor of input slot 'idx_in' to output slot 'idx_out'
// under a new shape, and publish a dummy (non-MKL) shape on the output's
// meta-data slot (MKL-ML path).
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);

  // Output is a plain TF tensor, so emit a dummy MKL shape for it.
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#else
// Forward the data tensor of input slot 'idx_in' to output slot 'idx_out'
// under a new shape, and publish a dummy (non-MKL) MklDnnShape on the
// output's meta-data slot (MKL-DNN path).
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);

  // Output is a plain TF tensor, so emit a dummy MKL-DNN shape for it.
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#endif
#ifdef INTEL_MKL_ML
// Forward input 'idx_in' straight to output 'idx_out' (no copy) and emit a
// dummy (non-MKL) meta shape; ref-typed inputs are forwarded as refs
// (MKL-ML path).
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  // Ref inputs must be forwarded as refs; plain inputs as values.
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#else
// Forward input 'idx_in' straight to output 'idx_out' (no copy) and emit a
// dummy (non-MKL) MklDnnShape; ref-typed inputs are forwarded as refs
// (MKL-DNN path).
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  MklDnnShape dnn_shape_output;
  dnn_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);

  // Ref inputs must be forwarded as refs; plain inputs as values.
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif
// Forward both data and meta tensors of slot 'idx_in' to slot 'idx_out',
// preserving ref semantics when the data input is a ref type.
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
                                    int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}
#ifndef INTEL_MKL_ML
// Set a dummy MKLDNN shape (called when the output is in TF format)
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
                                      uint32 idx_data_out) {
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}

// Forward the data tensor of slot 'idx_in' to slot 'idx_out' and publish
// the given MklDnnShape (serialized) as the output's meta tensor.
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
                                                int idx_in, int idx_out,
                                                const MklDnnShape& mkl_shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  AllocateOutputSetMklShape(context, idx_out, mkl_shape);

  // Ref inputs must be forwarded as refs; plain inputs as values.
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif
// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used)
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
                                      uint32 idx_data_in,
                                      uint32_t idx_data_out) {
  uint32 idx_meta_in =
      GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
  uint32 idx_meta_out =
      GetTensorMetaDataIndex(idx_data_out, context->num_outputs());

  // Note: ref-ness is checked on the DATA slot but the forwarding below is
  // applied to the META slot.
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}
#ifdef INTEL_MKL_ML
// Set a dummy MKL shape (called when the output is in TF format)
inline void SetDummyMklShapeOutput(OpKernelContext* context,
                                   uint32 idx_data_out) {
  // A non-MKL MklShape signals "this output is a plain TF tensor".
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}
// We don't need these functions in MKLDNN. We have defined equality operator
// on MklDnnShape class directly.

// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const MklShape* input_shape_1) {
  // Equal iff the ranks match and every dimension size matches.
  const size_t ndims = input_shape_0->GetDimension();
  if (ndims != input_shape_1->GetDimension()) return false;
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) return false;
  }
  return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Compare rank first, then each TF dimension size.
  const size_t ndims = input_shape_0->GetDimension();
  if (ndims != static_cast<size_t>(input_shape_1->dims())) return false;
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i))
      return false;
  }
  return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const MklShape* input_shape_1) {
  // Comparison is symmetric; delegate to the (MklShape, TensorShape) overload.
  return MklCompareShapes(input_shape_1, input_shape_0);
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Equal iff the ranks match and every dimension size matches.
  if (input_shape_0->dims() != input_shape_1->dims()) return false;
  const size_t ndims = input_shape_0->dims();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) return false;
  }
  return true;
}
// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.
// Transpose a float NHWC tensor into NCHW order, one batch slice at a time,
// using MKL's out-of-place matrix transpose (mkl_somatcopy).
// NOTE(review): assumes *output is preallocated with the same N*H*W*C float
// capacity as 'input' -- confirm at call sites.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = input.dim_size(0);
  int64 H = input.dim_size(1);
  int64 W = input.dim_size(2);
  int64 C = input.dim_size(3);

  int64 stride_n = H * W * C;  // elements per batch image
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    // Treat each image as an (H*W) x C row-major matrix and transpose it
    // into C x (H*W), i.e. NCHW layout within the batch slice.
    mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
                  buf_out + n * stride_n, H * W);
  }
}
// Inverse of MklNHWCToNCHW: transpose a float NCHW tensor into NHWC order,
// one batch slice at a time. Dimensions are taken from *output (NHWC).
// NOTE(review): assumes 'input' holds N*C*H*W floats matching *output's
// dimensions -- confirm at call sites.
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = (*output)->dim_size(0);
  int64 H = (*output)->dim_size(1);
  int64 W = (*output)->dim_size(2);
  int64 C = (*output)->dim_size(3);

  int64 stride_n = H * W * C;  // elements per batch image
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    // Treat each image as a C x (H*W) row-major matrix and transpose it
    // into (H*W) x C, i.e. NHWC layout within the batch slice.
    mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
                  buf_out + n * stride_n, C);
  }
}
#endif
// -------------------------------------------------------------------
#ifndef INTEL_MKL_ML
/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
// Note: only declared here; an unsupported T fails at link time because no
// definition exists for the primary template.
template <typename T>
static memory::data_type MklDnnType();

/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
template <>
memory::data_type MklDnnType<float>() {
  return memory::data_type::f32;
}
/// Map TensorFlow's data format into MKL-DNN data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
  switch (format) {
    case FORMAT_NHWC:
      return memory::format::nhwc;
    case FORMAT_NCHW:
      return memory::format::nchw;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      // Unreachable when TF_CHECK_OK aborts; silences missing-return warning.
      return memory::format::format_undef;
  }
}
/// Map MKL-DNN data format to TensorFlow's data format
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format
///          Fails with an error if invalid data format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  if (format == memory::format::nchw) return FORMAT_NCHW;
  if (format == memory::format::nhwc) return FORMAT_NHWC;
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
  // Unreachable when TF_CHECK_OK aborts; silences missing-return warning.
  return FORMAT_NHWC;
}
/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively. So it will preserve the order of dimensions. E.g., if
/// input tensor is in NHWC format, then dims will be in NHWC format
/// also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  const int rank = shape.dims();
  memory::dims mkldnn_dims(rank);
  for (int d = 0; d < rank; ++d) {
    mkldnn_dims[d] = shape.dim_size(d);
  }
  return mkldnn_dims;
}
/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// This function is a specific one than above function. It will map input
/// TensorShape into MKL-DNN dims in NCHW format. So it may not preserve the
/// order of dimensions. E.g., if input tensor is in NHWC format, then dims
/// will be in NCHW format, and not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  // Pull each logical dimension out of 'shape' by its position in 'format'.
  // NOTE(review): dimension sizes are narrowed to int here.
  int n = shape.dim_size(GetTensorDimIndex(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex(format, 'C'));
  int h = shape.dim_size(GetTensorDimIndex(format, 'H'));
  int w = shape.dim_size(GetTensorDimIndex(format, 'W'));

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}
/// Overloaded version of function above. Input parameters are
/// self-explanatory.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  // Pull each logical dimension out of 'in_dims' by its position in 'format'.
  int n = in_dims[GetTensorDimIndex(format, 'N')];
  int c = in_dims[GetTensorDimIndex(format, 'C')];
  int h = in_dims[GetTensorDimIndex(format, 'H')];
  int w = in_dims[GetTensorDimIndex(format, 'W')];

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}
/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function will simply map input shape in MKL-DNN memory::dims format
/// in Tensorflow's TensorShape object by preserving dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  std::vector<int32> shape_vec;
  shape_vec.reserve(dims.size());
  for (const auto& dim : dims) {
    shape_vec.push_back(dim);
  }

  TensorShape ret;
  CHECK_EQ(TensorShapeUtils::MakeShape(shape_vec, &ret).ok(), true);
  return ret;
}
/// Compute strides for a tensor shape given in Tensorflow (row-major) order.
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention
/// the dimension with size 1 is outermost and the dimension with size 4 is
/// innermost, so the strides are {4 * 3 * 2, 4 * 3, 4, 1} = {24, 12, 4, 1}.
///
/// @input  Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  CHECK_GT(dims_tf_order.size(), 0);
  const int rank = dims_tf_order.size();
  memory::dims strides(rank);
  // The innermost dimension has unit stride; each outer stride is the
  // product of all dimension sizes inner to it.
  strides[rank - 1] = 1;
  for (int d = rank - 2; d >= 0; d--) {
    strides[d] = strides[d + 1] * dims_tf_order[d + 1];
  }
  return strides;
}
/// Map TensorFlow's Padding enumeration onto MKL-DNN's padding_kind.
/// MKL-DNN only supports zero padding, so every TF padding mode maps to
/// padding_kind::zero; the input value is intentionally unused.
inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
  // MKL-DNN only supports zero padding.
  return padding_kind::zero;
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
///         function such as CalculateTFStrides to compute strides
///         for given dimensions.
/// @input: dtype - MKL-DNN element data type of the memory.
/// @return: memory::desc object corresponding to blocked memory format
///          for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                               const memory::dims& strides,
                                               memory::data_type dtype) {
  CHECK_EQ(dim.size(), strides.size());
  // We have to construct memory descriptor in a C style. This is not at all
  // ideal but MKLDNN does not offer any API to construct descriptor in
  // blocked format except a copy constructor that accepts
  // mkldnn_memory_desc_t.
  mkldnn_memory_desc_t md;
  md.primitive_kind = mkldnn_memory;
  md.ndims = dim.size();
  md.format = mkldnn_blocked;
  md.data_type = memory::convert_to_c(dtype);
  for (size_t i = 0; i < dim.size(); i++) {
    // Block size 1 in every dimension: data is laid out plainly according
    // to the caller-supplied strides, with no inner blocking.
    md.layout_desc.blocking.block_dims[i] = 1;
    // NOTE(review): per MKL-DNN's blocking descriptor, strides[0] appears to
    // hold the between-block strides and strides[1] the within-block strides
    // (unit here since blocks have size 1) — confirm against mkldnn_types.h.
    md.layout_desc.blocking.strides[1][i] = 1;
    md.layout_desc.blocking.strides[0][i] = strides[i];
    // padding_dims == dims and zero offsets => no physical padding.
    md.layout_desc.blocking.padding_dims[i] = dim[i];
    md.layout_desc.blocking.offset_padding_to_data[i] = 0;
    md.dims[i] = dim[i];
  }
  md.layout_desc.blocking.offset_padding = 0;
  return memory::desc(md);
}
/*
 * Class to represent all the resources corresponding to a tensor in TensorFlow
 * that are required to execute an operation (such as Convolution).
 *
 * Owns the user memory primitive, the optional reorder memory primitive and
 * the op memory descriptor it allocates (freed in the destructor); the engine
 * pointer is borrowed, never freed.
 */
template <typename T>
class MklDnnData {
 private:
  /// MKL-DNN memory primitive for input user memory
  memory* user_memory_;

  /// MKL-DNN memory primitive in case input or output reorder is needed.
  memory* reorder_memory_;

  /// Operations memory descriptor
  memory::desc* op_md_;

  /// CPU engine on which operation will be executed
  const engine* cpu_engine_;

 public:
  explicit MklDnnData(const engine* e)
      : user_memory_(nullptr),
        reorder_memory_(nullptr),
        op_md_(nullptr),
        cpu_engine_(e) {}

  ~MklDnnData() {
    cpu_engine_ = nullptr;  // We don't own this.
    delete (user_memory_);
    delete (reorder_memory_);
    delete (op_md_);
  }

  /// Get the raw data buffer of a tensor, cast to a mutable void*.
  inline void* GetTensorBuffer(const Tensor* tensor) const {
    CHECK_NOTNULL(tensor);
    return const_cast<void*>(
        static_cast<const void*>(tensor->flat<T>().data()));
  }

  /// Set user memory primitive using specified dimensions, memory format and
  /// data_buffer. Function automatically uses element data type by using
  /// input type T used for creating call object.
  ///
  /// In a nutshell, function allows user to describe the input tensor to
  /// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and
  /// memory format HWIO, and the buffer that contains actual values is
  /// pointed by data_buffer.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        void* data_buffer = nullptr) {
    auto md = memory::desc(dim, MklDnnType<T>(), fm);
    SetUsrMem(md, data_buffer);
  }

  /// Same as above, but the buffer comes from a TensorFlow tensor.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, fm, GetTensorBuffer(tensor));
  }

  /// Helper function to create memory descriptor in Blocked format
  ///
  /// @input: Tensor dimensions
  /// @input: strides corresponding to dimensions. One can use utility
  ///         function such as CalculateTFStrides to compute strides
  ///         for given dimensions.
  /// @return: memory::desc object corresponding to blocked memory format
  ///          for given dimensions and strides.
  static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
                                                  const memory::dims& strides) {
    return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
  }

  /// A version of SetUsrMem call that allows user to create memory in blocked
  /// format. So in addition to accepting dimensions, it also accepts strides.
  /// This allows user to create memory for tensor in a format that is not
  /// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6
  /// dimensional tensor as a native format. But by using blocked format, a
  /// user can create memory for 6D tensor.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        void* data_buffer = nullptr) {
    CHECK_EQ(dim.size(), strides.size());
    auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
    SetUsrMem(blocked_md, data_buffer);
  }

  /// Blocked-format variant taking the buffer from a TensorFlow tensor.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, strides, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts memory
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic than the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
    auto pd = memory::primitive_desc(md, *cpu_engine_);
    SetUsrMem(pd, data_buffer);
  }

  /// A version of SetUsrMem with memory descriptor and tensor
  inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(md, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts
  /// primitive descriptor directly. All other SetUsrMem overloads funnel
  /// into this one.
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        void* data_buffer = nullptr) {
    CHECK_NOTNULL(cpu_engine_);
    // Release any previously set user memory so repeated calls do not leak
    // (delete on nullptr is a no-op).
    delete user_memory_;
    // TODO(nhasabni): can we remove dynamic memory allocation?
    if (data_buffer) {
      user_memory_ = new memory(pd, data_buffer);
    } else {
      user_memory_ = new memory(pd);
    }
  }

  /// A version of SetUsrMem with primitive descriptor and tensor
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(pd, GetTensorBuffer(tensor));
  }

  /// Get function for user memory primitive.
  inline const memory* GetUsrMem() const { return user_memory_; }

  /// Get function for primitive descriptor of user memory primitive.
  inline const memory::primitive_desc GetUsrMemPrimDesc() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_primitive_desc();
  }

  /// Get function for descriptor of user memory.
  inline memory::desc GetUsrMemDesc() {
    // Work on a non-const copy so we can call the non-const desc()
    // accessor without the const_cast the previous implementation needed.
    memory::primitive_desc pd = GetUsrMemPrimDesc();
    return pd.desc();
  }

  /// Get function for data buffer of user memory primitive.
  inline void* GetUsrMemDataHandle() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_data_handle();
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(void* data_buffer) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(data_buffer);
    user_memory_->set_data_handle(data_buffer);
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(const Tensor* tensor) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(tensor);
    user_memory_->set_data_handle(GetTensorBuffer(tensor));
  }

  /// Get the memory primitive for input and output of an op. If inputs
  /// to an op require reorders, then this function returns memory primitive
  /// for reorder. Otherwise, it will return memory primitive for user memory.
  ///
  /// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
  /// execute Conv2D, we need memory primitive for I and F. But if reorder is
  /// required for I and F (say I_r is reorder primitive for I; F_r is reorder
  /// primitive for F), then we need I_r and F_r to perform Conv2D.
  inline const memory& GetOpMem() const {
    return reorder_memory_ ? *reorder_memory_ : *user_memory_;
  }

  /// Set memory descriptor of an operation in terms of dimensions and memory
  /// format. E.g., For Conv2D, the dimensions would be same as user dimensions
  /// but memory::format would be mkldnn::any because we want MKL-DNN to
  /// choose the best layout/format for given input dimensions.
  inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
    // Release any previously set descriptor so repeated calls do not leak.
    delete op_md_;
    // TODO(nhasabni): can we remove dynamic memory allocation?
    op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
  }

  /// Get function for memory descriptor for an operation
  inline const memory::desc& GetOpMemDesc() const { return *op_md_; }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// pointed by op_pd.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
    CHECK_NOTNULL(user_memory_);
    return op_pd != user_memory_->get_primitive_desc();
  }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// based on the provided format.
  ///
  /// @input: target_format - memory format of the given input of an
  ///                         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::format& target_format) const {
    CHECK_NOTNULL(user_memory_);
    return target_format !=
           user_memory_->get_primitive_desc().desc().data.format;
  }

  /// Function to create a reorder from memory pointed by from to memory
  /// pointed by to. Returns created primitive.
  inline primitive CreateReorder(const memory* from, const memory* to) const {
    CHECK_NOTNULL(from);
    CHECK_NOTNULL(to);
    return reorder(*from, *to);
  }

  /// Function to handle input reordering
  ///
  /// Check if we need to reorder this input of an operation.
  /// Return true and allocate reorder memory primitive if reorder is needed.
  /// Otherwise, return false and do not allocate reorder memory primitive.
  ///
  /// To check if reorder is needed, this function compares memory primitive
  /// descriptor of an operation (op_pd) for the given input with the
  /// user-specified memory primitive descriptor.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @input: net - net to which to add reorder primitive in case it is
  ///               needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // Free reorder memory from any previous call to avoid leaking it.
      delete reorder_memory_;
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// Overloaded version of above function that accepts memory buffer
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @reorder_data_handle - memory buffer where output of reorder needs to
  ///                        be stored. Primitive does not check if buffer is
  ///                        enough size to write.
  /// @input: net - net to which to add reorder primitive in case it is
  ///               needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // Free reorder memory from any previous call to avoid leaking it.
      delete reorder_memory_;
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// Another overloaded version of CheckReorderToOpMem that accepts Tensor
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @reorder_tensor - Tensor whose buffer is to be used to store output of
  ///                   reorder. Primitive does not check if buffer is
  ///                   enough size to write.
  /// @input: net - net to which to add reorder primitive in case it is
  ///               needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
  }

  /// Function to handle output reorder
  ///
  /// This function performs very similar functionality as input reordering
  /// function above. The only difference is that this function does not add
  /// reorder primitive to the net. The reason for this is: the reorder
  /// primitive for output needs to be added to the list only after operation
  /// has executed. But we need to prepare a temporary buffer in case output
  /// reorder is needed. And this temporary buffer will hold the output of
  /// an operation before it is fed to reorder primitive.
  ///
  /// @input memory primitive descriptor for the given output of an operation
  /// @return: true in case reorder of output is needed; false, otherwise.
  inline bool PrepareReorderToUserMemIfReq(
      const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // Free reorder memory from any previous call to avoid leaking it.
      delete reorder_memory_;
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      return true;
    }
    return false;
  }

  /// Function to actually insert reorder primitive in the net
  ///
  /// This function completes remaining part of output reordering. It inserts
  /// a reordering primitive from the temporary buffer that holds the output
  /// to the user-specified output buffer.
  ///
  /// @input: net - net to which to add reorder primitive
  inline void InsertReorderToUserMem(std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    net->push_back(CreateReorder(reorder_memory_, user_memory_));
  }
};
/// Base class for operations with reuse of DNN primitives
///
/// NOTE(review): concrete cached ops presumably derive from this so that
/// DnnOpFactory below can hold them polymorphically via DnnOp* — confirm
/// against the op implementations.
class DnnOp {
 public:
  virtual ~DnnOp() {}

  // Dummy data. Its size, hard-coded as 256 here, does
  // not matter since MKL should never operate on this buffer.
  unsigned char DummyData[256];
};
const mkldnn::memory::dims NONE_DIMS = {};
// Factory that caches DnnOp primitives per thread, keyed by string, so they
// can be reused across invocations.
template <typename T>
class DnnOpFactory {
public:
DnnOpFactory() {}
~DnnOpFactory() {}
DnnOp* GetOp(const std::string& key) {
auto stream_iter = DnnOpFactory<T>::GetHashMap().find(key);
if (stream_iter == DnnOpFactory<T>::GetHashMap().end()) {
return nullptr;
} else {
return stream_iter->second;
}
}
void SetOp(const std::string& key, DnnOp* op) {
auto stream_iter = DnnOpFactory<T>::GetHashMap().find(key);
CHECK(stream_iter == DnnOpFactory<T>::GetHashMap().end());
DnnOpFactory<T>::GetHashMap()[key] = op;
}
private:
static inline std::unordered_map<std::string, DnnOp*> &GetHashMap() {
static thread_local std::unordered_map<std::string, DnnOp*> map_;
return map_;
}
};
// Utility class that builds the string keys identifying entries in the MKL
// primitive pool. Each appended component is followed by a single 'x'
// delimiter character.
class FactoryKeyCreator {
 public:
  FactoryKeyCreator() { key_.reserve(kMaxKeyLength); }

  ~FactoryKeyCreator() {}

  // Append a string component verbatim.
  void AddAsKey(const string& str) { Append(str); }

  // Append each element of a dims vector as an int component.
  void AddAsKey(const mkldnn::memory::dims& dims) {
    for (unsigned int i = 0; i < dims.size(); i++) {
      AddAsKey<int>(dims[i]);
    }
  }

  // Append an arbitrary value by folding its raw bytes into the key.
  template <typename T>
  void AddAsKey(const T data) {
    const char* bytes = reinterpret_cast<const char*>(&data);
    Append(StringPiece(bytes, sizeof(T)));
  }

  std::string GetKey() { return key_; }

 private:
  string key_;
  const char delimiter = 'x';
  const int kMaxKeyLength = 256;

  // Append one component plus the trailing delimiter.
  void Append(StringPiece s) {
    key_.append(s.ToString());
    key_.append(1, delimiter);
  }
};
#endif // INTEL_MKL_DNN
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
|
quickOMP.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <omp.h>
const int MAX_STRING = 20;
int pivot(int *unarray, int izq, int der);
void Quicksort(int *unarray, int izq, int der);
/*
 * Print the array, sort it with Quicksort, print it again. The thread count
 * is taken from argv[1].
 *
 * Bug fixed: the original opened a parallel region and then ran a
 * `parallel for` in which every iteration (and every thread) called
 * Quicksort on the same shared array concurrently — a data race with
 * undefined results. The sort is now executed exactly once, by a single
 * thread of the team. Also adds an argc check and uses MAX_STRING instead
 * of the hard-coded 20.
 */
int main(int argc, char *argv[]){
    int arreglo[]={1,20,3,56,23,28,14,94,24,19,82,76,59,43,78,64,86,80,94,30};
    int i;

    /* Guard against a missing thread-count argument. */
    if (argc < 2) {
        fprintf(stderr, "uso: %s <numeroDeHilos>\n", argv[0]);
        return 1;
    }
    int numeroDeHilos = strtol(argv[1], NULL, 10);
    if (numeroDeHilos < 1) {
        numeroDeHilos = 1;
    }

    for (i = 0; i < MAX_STRING; i++) {
        printf("\nSin orden ");
        printf("%d ", arreglo[i]);
    }

    /* A team is created as requested, but only one thread performs the
       sort: Quicksort over a shared array is not safe to run from several
       threads at once. */
    #pragma omp parallel num_threads(numeroDeHilos)
    {
        #pragma omp single
        Quicksort(arreglo, 0, MAX_STRING - 1);
    }

    for (i = 0; i < MAX_STRING; i++) {
        printf("\nOrden ");
        printf("%d ", arreglo[i]);
    }
    return 0;
}
/*
 * Lomuto-style partition step: takes unarray[izq] as the pivot value,
 * moves every element strictly smaller than it into the left part of
 * unarray[izq..der], places the pivot value at the boundary, and returns
 * the index where the pivot value ends up.
 */
int pivot(int *unarray, int izq, int der)
{
    int valor_pivote = unarray[izq];
    int frontera = izq;  /* last index of the "smaller than pivot" region */

    for (int j = izq + 1; j <= der; j++) {
        if (unarray[j] < valor_pivote) {
            frontera++;
            int tmp = unarray[j];
            unarray[j] = unarray[frontera];
            unarray[frontera] = tmp;
        }
    }

    /* Move the pivot value into its final slot. */
    int tmp = unarray[izq];
    unarray[izq] = unarray[frontera];
    unarray[frontera] = tmp;
    return frontera;
}
/*
 * Recursive quicksort over unarray[izq..der] (inclusive bounds). The
 * right-hand recursive call of the classic formulation is rewritten as a
 * loop (tail-call elimination); the resulting element order is identical.
 */
void Quicksort(int *unarray, int izq, int der)
{
    while (izq < der) {
        int p = pivot(unarray, izq, der);
        Quicksort(unarray, izq, p - 1);  /* sort the left partition */
        izq = p + 1;                     /* iterate on the right partition */
    }
}
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987-2014 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
declarations whose type would be completed by completing that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
This is used for -Wc++-compat. */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
&& !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For a CONSTRUCTOR, whether some initializer contains a
subexpression meaning it is not a constant expression. */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* Record parser information about an expression that is irrelevant
   for code generation alongside a tree representing its value.
   (The extra fields preserve the pre-folding form of the expression,
   which code generation does not need but diagnostics may.)  */
struct c_expr
{
  /* The value of the expression.  */
  tree value;
  /* Record the original unary/binary operator of an expression, which may
     have been changed by fold, STRING_CST for unparenthesized string
     constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
     (even if parenthesized), for subexpressions, and for non-constant
     initializers, or ERROR_MARK for other expressions (including
     parenthesized expressions).  */
  enum tree_code original_code;
  /* If not NULL, the original type of an expression.  This will
     differ from the type of the value field for an enum constant.
     The type of an enum constant is a plain integer type, but this
     field will be the enum type.  */
  tree original_type;
};
/* Type alias for struct c_expr.  This allows using the structure
   inside the VEC types.  */
typedef struct c_expr c_expr_t;
/* A kind of type specifier.  Note that this information is currently
   only used to distinguish tag definitions, tag references and typeof
   uses.  Stored in c_typespec::kind and c_declspecs::typespec_kind
   below.  */
enum c_typespec_kind {
  /* No typespec.  This appears only in struct c_declspec.  */
  ctsk_none,
  /* A reserved keyword type specifier.  */
  ctsk_resword,
  /* A reference to a tag, previously declared, such as "struct foo".
     This includes where the previous declaration was as a different
     kind of tag, in which case this is only valid if shadowing that
     tag in an inner scope.  */
  ctsk_tagref,
  /* A reference to a tag, not previously declared in a visible
     scope.  */
  ctsk_tagfirstref,
  /* A definition of a tag such as "struct foo { int a; }".  */
  ctsk_tagdef,
  /* A typedef name.  */
  ctsk_typedef,
  /* An ObjC-specific kind of type specifier.  */
  ctsk_objc,
  /* A typeof specifier, or _Atomic ( type-name ).  */
  ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
   passed to declspecs_add_type only.  */
struct c_typespec {
  /* What kind of type specifier this is.  See enum c_typespec_kind
     above for the possibilities.  */
  enum c_typespec_kind kind;
  /* Whether the expression has operands suitable for use in constant
     expressions.  */
  bool expr_const_operands;
  /* The specifier itself.  */
  tree spec;
  /* An expression to be evaluated before the type specifier, in the
     case of typeof specifiers, or NULL otherwise or if no such
     expression is required for a particular typeof specifier.  In
     particular, when typeof is applied to an expression of variably
     modified type, that expression must be evaluated in order to
     determine array sizes that form part of the type, but the
     expression itself (as opposed to the array sizes) forms no part
     of the type and so needs to be recorded separately.  */
  tree expr;
};
/* A storage class specifier. */
enum c_storage_class {
csc_none,
csc_auto,
csc_extern,
csc_register,
csc_static,
csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
"double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
or none of these. */
enum c_typespec_keyword {
cts_none,
cts_void,
cts_bool,
cts_char,
cts_int,
cts_float,
cts_int128,
cts_double,
cts_dfloat32,
cts_dfloat64,
cts_dfloat128,
cts_fract,
cts_accum,
cts_auto_type
};
/* This enum lists all the possible declarator specifiers, storage
   class or attribute that a user can write.  There is at least one
   enumerator per possible declarator specifier in the struct
   c_declspecs below.
   It is used to index the array of declspec locations in struct
   c_declspecs.  */
enum c_declspec_word {
  cdw_typespec /* A catch-all for a typespec.  */,
  cdw_storage_class  /* A catch-all for a storage class */,
  cdw_attributes,
  cdw_typedef,
  cdw_explicit_signed,
  cdw_deprecated,
  cdw_default_int,
  cdw_long,
  cdw_long_long,
  cdw_short,
  cdw_signed,
  cdw_unsigned,
  cdw_complex,
  cdw_inline,
  cdw_noreturn,
  cdw_thread,
  cdw_const,
  cdw_volatile,
  cdw_restrict,
  cdw_saturating,
  cdw_alignas,
  cdw_address_space,
  cdw_number_of_elements /* This one must always be the last
			    enumerator; it gives the size of the
			    locations array in struct c_declspecs.  */
};
/* A sequence of declaration specifiers in C.  When a new declaration
   specifier is added, please update the enum c_declspec_word above
   accordingly.  */
struct c_declspecs {
  /* The location of each declspec word, indexed by enum
     c_declspec_word.  */
  source_location locations[cdw_number_of_elements];
  /* The type specified, if a single type specifier such as a struct,
     union or enum specifier, typedef name or typeof specifies the
     whole type, or NULL_TREE if none or a keyword such as "void" or
     "char" is used.  Does not include qualifiers.  */
  tree type;
  /* Any expression to be evaluated before the type, from a typeof
     specifier.  */
  tree expr;
  /* The attributes from a typedef decl.  */
  tree decl_attr;
  /* When parsing, the attributes.  Outside the parser, this will be
     NULL; attributes (possibly from multiple lists) will be passed
     separately.  */
  tree attrs;
  /* The base-2 log of the greatest alignment required by an _Alignas
     specifier, in bytes, or -1 if no such specifiers with nonzero
     alignment.  */
  int align_log;
  /* The storage class specifier, or csc_none if none.  */
  enum c_storage_class storage_class;
  /* Any type specifier keyword used such as "int", not reflecting
     modifiers such as "short", or cts_none if none.  */
  ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
  /* The kind of type specifier if one has been seen, ctsk_none
     otherwise.  */
  ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3;
  /* Whether any expressions in typeof specifiers may appear in
     constant expressions.  */
  BOOL_BITFIELD expr_const_operands : 1;
  /* Whether any declaration specifiers have been seen at all.  */
  BOOL_BITFIELD declspecs_seen_p : 1;
  /* Whether something other than a storage class specifier or
     attribute has been seen.  This is used to warn for the
     obsolescent usage of storage class specifiers other than at the
     start of the list.  (Doing this properly would require function
     specifiers to be handled separately from storage class
     specifiers.)  */
  BOOL_BITFIELD non_sc_seen_p : 1;
  /* Whether the type is specified by a typedef or typeof name.  */
  BOOL_BITFIELD typedef_p : 1;
  /* Whether the type is explicitly "signed" or specified by a typedef
     whose type is explicitly "signed".  */
  BOOL_BITFIELD explicit_signed_p : 1;
  /* Whether the specifiers include a deprecated typedef.  */
  BOOL_BITFIELD deprecated_p : 1;
  /* Whether the type defaulted to "int" because there were no type
     specifiers.  */
  BOOL_BITFIELD default_int_p : 1;
  /* Whether "long" was specified.  */
  BOOL_BITFIELD long_p : 1;
  /* Whether "long" was specified more than once.  */
  BOOL_BITFIELD long_long_p : 1;
  /* Whether "short" was specified.  */
  BOOL_BITFIELD short_p : 1;
  /* Whether "signed" was specified.  */
  BOOL_BITFIELD signed_p : 1;
  /* Whether "unsigned" was specified.  */
  BOOL_BITFIELD unsigned_p : 1;
  /* Whether "complex" was specified.  */
  BOOL_BITFIELD complex_p : 1;
  /* Whether "inline" was specified.  */
  BOOL_BITFIELD inline_p : 1;
  /* Whether "_Noreturn" was specified.  */
  BOOL_BITFIELD noreturn_p : 1;
  /* Whether "__thread" or "_Thread_local" was specified.  */
  BOOL_BITFIELD thread_p : 1;
  /* Whether "__thread" rather than "_Thread_local" was specified.  */
  BOOL_BITFIELD thread_gnu_p : 1;
  /* Whether "const" was specified.  */
  BOOL_BITFIELD const_p : 1;
  /* Whether "volatile" was specified.  */
  BOOL_BITFIELD volatile_p : 1;
  /* Whether "restrict" was specified.  */
  BOOL_BITFIELD restrict_p : 1;
  /* Whether "_Atomic" was specified.  */
  BOOL_BITFIELD atomic_p : 1;
  /* Whether "_Sat" was specified.  */
  BOOL_BITFIELD saturating_p : 1;
  /* Whether any alignment specifier (even with zero alignment) was
     specified.  */
  BOOL_BITFIELD alignas_p : 1;
  /* The address space that the declaration belongs to.  */
  addr_space_t address_space;
};
/* The various kinds of declarators in C.  Discriminates the union in
   struct c_declarator below.  */
enum c_declarator_kind {
  /* An identifier.  */
  cdk_id,
  /* A function.  */
  cdk_function,
  /* An array.  */
  cdk_array,
  /* A pointer.  */
  cdk_pointer,
  /* Parenthesized declarator with nested attributes.  */
  cdk_attrs
};
/* A tag seen within a parameter list; collected in c_arg_info::tags
   below.  */
typedef struct c_arg_tag_d {
  /* The argument name.  */
  tree id;
  /* The type of the argument.  */
  tree type;
} c_arg_tag;
/* Information about the parameters in a function declarator.  */
struct c_arg_info {
  /* A list of parameter decls.  */
  tree parms;
  /* A list of structure, union and enum tags defined (see c_arg_tag
     above).  */
  vec<c_arg_tag, va_gc> *tags;
  /* A list of argument types to go in the FUNCTION_TYPE.  */
  tree types;
  /* A list of non-parameter decls (notably enumeration constants)
     defined with the parameters.  */
  tree others;
  /* A compound expression of VLA sizes from the parameters, or NULL.
     In a function definition, these are used to ensure that
     side-effects in sizes of arrays converted to pointers (such as a
     parameter int i[n++]) take place; otherwise, they are
     ignored.  */
  tree pending_sizes;
  /* True when these arguments had [*].  */
  BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator.  */
struct c_declarator {
  /* The kind of declarator.  */
  enum c_declarator_kind kind;
  location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
  /* Except for cdk_id, the contained declarator.  For cdk_id, NULL.  */
  struct c_declarator *declarator;
  /* Kind-specific data, discriminated by KIND above.  */
  union {
    /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
       declarator.  */
    tree id;
    /* For functions.  */
    struct c_arg_info *arg_info;
    /* For arrays.  */
    struct {
      /* The array dimension, or NULL for [] and [*].  */
      tree dimen;
      /* The qualifiers inside [].  */
      int quals;
      /* The attributes (currently ignored) inside [].  */
      tree attrs;
      /* Whether [static] was used.  */
      BOOL_BITFIELD static_p : 1;
      /* Whether [*] was used.  */
      BOOL_BITFIELD vla_unspec_p : 1;
    } array;
    /* For pointers, the qualifiers on the pointer type.  */
    int pointer_quals;
    /* For attributes.  */
    tree attrs;
  } u;
};
/* A type name: the declaration specifiers together with the (possibly
   abstract) declarator that jointly denote a type.  */
struct c_type_name {
  /* The declaration specifiers.  */
  struct c_declspecs *specs;
  /* The declarator.  */
  struct c_declarator *declarator;
};
/* A parameter: its declaration specifiers (minus any prefix
   attributes), the attributes, and the declarator.  */
struct c_parm {
  /* The declaration specifiers, minus any prefix attributes.  */
  struct c_declspecs *specs;
  /* The attributes.  */
  tree attrs;
  /* The declarator.  */
  struct c_declarator *declarator;
};
/* Used when parsing an enum.  Initialized by start_enum; tracks the
   running enumerator value between successive build_enumerator calls. */
struct c_enum_contents
{
  /* While defining an enum type, this is 1 plus the last enumerator
     constant value. */
  tree enum_next_value;
  /* Nonzero means that there was overflow computing enum_next_value. */
  int enum_overflow;
};
/* A type of reference to a static identifier in an inline
   function.  Passed to record_inline_static (declared below). */
enum c_inline_static_type {
  /* Identifier with internal linkage used in function that may be an
     inline definition (i.e., file-scope static). */
  csi_internal,
  /* Modifiable object with static storage duration defined in
     function that may be an inline definition (i.e., local
     static). */
  csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
struct c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern bool global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
struct c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *, tree *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *, tree *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern int start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
struct c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern void temp_store_parm_decls (tree, tree);
extern void temp_pop_parm_decls (void);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (source_location,
struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *declspecs_add_alignas (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern tree c_last_sizeof_arg;
extern struct c_switch *c_switch_stack;
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (tree);
extern int same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree);
extern void c_incomplete_type_error (const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern struct c_expr convert_lvalue_to_rvalue (location_t, struct c_expr,
bool, bool);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
extern tree build_component_ref (location_t, tree, tree);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, int, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
tree, tree);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
extern void store_init_value (location_t, tree, tree, tree);
extern void error_init (const char *);
extern void pedwarn_init (location_t, int opt, const char *);
extern void maybe_warn_string_init (tree, struct c_expr);
extern void start_init (tree, tree, int);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void finish_implicit_inits (struct obstack *);
extern void push_init_level (int, struct obstack *);
extern struct c_expr pop_init_level (int, struct obstack *);
extern void set_init_index (tree, tree, struct obstack *);
extern void set_init_label (tree, struct obstack *);
extern void process_init_element (struct c_expr, bool, struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_case (location_t, location_t, tree);
extern void c_finish_case (tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree, bool);
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern void c_finish_omp_cancel (location_t, tree);
extern void c_finish_omp_cancellation_point (location_t, tree);
extern tree c_finish_omp_clauses (tree);
extern tree c_build_va_arg (location_t, tree, tree);
extern tree c_finish_transaction (location_t, tree, int);
extern bool c_tree_equal (tree, tree);
extern tree c_build_function_call_vec (location_t, vec<location_t>, tree,
vec<tree, va_gc> *, vec<tree, va_gc> *);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* Mode used to build pointers (VOIDmode means ptr_mode). */
extern enum machine_mode c_default_pointer_mode;
/* In c-decl.c */
extern void c_finish_incomplete_decl (tree);
extern void c_write_global_declarations (void);
extern tree c_omp_reduction_id (enum tree_code, tree);
extern tree c_omp_reduction_decl (tree);
extern tree c_omp_reduction_lookup (tree, tree);
extern tree c_check_omp_declare_reduction_r (tree *, int *, void *);
/* In c-errors.c */
extern void pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern void pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
#endif /* ! GCC_C_TREE_H */
|
GB_unop__trunc_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__trunc_fc32_fc32
// op(A') function: GB_unop_tran__trunc_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_ctruncf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_ctruncf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_ctruncf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TRUNC || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise: Cx [k] = trunc (Ax [k]), for all
// anz entries, using up to nthreads OpenMP threads.  Cx and Ax may be
// the same array (in-place apply is permitted).
GrB_Info GB_unop_apply__trunc_fc32_fc32
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // read one entry, cast (no-op here: both types are GxB_FC32_t),
        // truncate, and write the result
        GxB_FC32_t t = Ax [k] ;
        Cx [k] = GB_ctruncf (t) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = trunc (A'): transpose and apply the unary operator.  The actual
// work is done by the shared template GB_unop_transpose.c, specialized
// via the GB_* macros defined above in this file.
GrB_Info GB_unop_tran__trunc_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_PHASE_2_OF_2 selects the template phase that writes the output
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Integer factorial: returns n! (fact(0) == fact(1) == 1).  Unsigned
   arithmetic, so large n silently wraps modulo SIZE_MAX+1 — adequate for
   the small Binomial-kernel sizes this is used for. */
static inline size_t fact(size_t n)
{
  size_t result = 1;
  while (n > 1)
    result *= n--;
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
/* Walk a kernel list and return its final element (never NULL for a
   non-NULL input list). */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *last;

  for (last=kernel; last->next != (KernelInfo *) NULL; last=last->next) ;
  return(last);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators. A list is defined as a semi-colon separated list of kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
/* ParseKernelArray():  parse one user-defined kernel array specification
   ("WxH[+X+Y][@><]:num,num,..." or an old-style odd-square list of numbers)
   into a freshly allocated KernelInfo.  Returns NULL on any parse or
   allocation failure (any partial kernel is destroyed first).  The caller
   owns the result and must free it with DestroyKernelInfo(). */
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;

  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  register ssize_t
    i;

  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */

  MagickStatusType
    flags;

  GeometryInfo
    args;

  size_t
    length;

  kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel));
  if (kernel == (KernelInfo *) NULL)
    return(kernel);
  (void) ResetMagickMemory(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature=MagickCoreSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);

  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');

  /* clear flags - for Expanding kernel lists through rotations */
  flags = NoValue;

  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
  */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      /* Clamp the copy length: an over-long geometry prefix must not
         overflow the fixed-size token[] buffer (CERT STR31-C). */
      length=MagickMin((size_t) (p-kernel_string),MagickPathExtent-1);
      memcpy(token, kernel_string, length);
      token[length] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);

      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 1.0;                /* then width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;

      /* Offset Handling and Checks */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
        : (ssize_t) (kernel->height-1)/2;
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));

      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          GetNextToken(p,&p,MagickPathExtent,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }

  /* Read in the kernel values from rest of input string argument */
  kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum=MagickMaximumValue;
  kernel->maximum=(-MagickMaximumValue);
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    GetNextToken(p,&p,MagickPathExtent,token);
    if (*token == ',')
      GetNextToken(p,&p,MagickPathExtent,token);
    if ( LocaleCompare("nan",token) == 0
        || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* this value is not part of neighbourhood */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      ( kernel->values[i] < 0)
        ? ( kernel->negative_range += kernel->values[i] )
        : ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }

  /* sanity check -- no more values in kernel definition */
  GetNextToken(p,&p,MagickPathExtent,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));

#if 0
  /* this was the old method of handling a incomplete kernel */
  if ( i < (ssize_t) (kernel->width*kernel->height) ) {
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
    for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
      kernel->values[i]=0.0;
  }
#else
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
#endif

  /* check that we received at least one real (non-nan) value! */
  if (kernel->minimum == MagickMaximumValue)
    return(DestroyKernelInfo(kernel));

  if ( (flags & AreaValue) != 0 )          /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0);  /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 )  /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0);  /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )     /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);        /* 90 degree mirror rotate */

  return(kernel);
}
/* ParseKernelName():  parse a "name:args" kernel specification, using the
   named built-in kernel types (see AcquireKernelBuiltIn).  Returns NULL if
   the leading token is not a valid built-in kernel name, or if the built-in
   acquisition fails. */
static KernelInfo *ParseKernelName(const char *kernel_string,
  ExceptionInfo *exception)
{
  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  size_t
    length;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  GetNextToken(kernel_string,&p,MagickPathExtent,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL);  /* not a valid named kernel */

  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';');  /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  /* Clamp the copy length: an over-long argument string must not
     overflow the fixed-size token[] buffer (CERT STR31-C). */
  length=MagickMin((size_t) (end-p),MagickPathExtent-1);
  memcpy(token, p, length);
  token[length] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;     /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:  /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 3;                  /* then width = 3 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      if ( (flags & XValue) == 0 )     /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }
  return(kernel);
}
/* AcquireKernelInfo():  parse a kernel string into a linked list of
   KernelInfo structures.  Kernels are separated by ';'; a token starting
   with a letter selects a named built-in kernel (ParseKernelName), anything
   else is a user-defined array (ParseKernelArray).  A leading '@' reads the
   definitions from the named file instead.  Returns NULL on failure, with
   any partially built list destroyed. */
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
  ExceptionInfo *exception)
{
  KernelInfo
    *kernel,
    *new_kernel;

  char
    *kernel_cache,
    token[MagickPathExtent];

  const char
    *p;

  /* NULL input: ParseKernelArray() returns an empty user-defined kernel */
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@filename': read the kernel definition(s) from a file */
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha select a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p,exception);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);
        /* Error handling -- this is not proper error handling! */
        if (new_kernel == (KernelInfo *) NULL)
          {
            /* destroy any kernels already collected before failing */
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            return((KernelInfo *) NULL);
          }
        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }
    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returns one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% A No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimensional Pascal's Triangle
% of values. Used for special forms of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled variants of each of the kernels.
%
% Laplacian:{type}
% Discrete Laplacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surrounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is also at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernels. These 9
% kernels are not normalized, but directly applied to the image. The
% results are then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | 1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was laid out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximately 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels that fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeleton)
% Two types of line ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Traditional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a research paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve connectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) ResetMagickMemory(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivelent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Becuase of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less comples
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) ResetMagickMemory(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunatally we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original. The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel);  /* copy values in structure */
  /* Detach pointers shared with the source kernel right away: the struct
  ** copy above aliased 'next' (and momentarily 'values').  If a later
  ** allocation fails, DestroyKernelInfo(new_kernel) must free only what
  ** this clone owns -- without this, a failed clone would recursively
  ** destroy the SOURCE kernel's 'next' chain. */
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
  if (new_kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  /*
    Free a kernel and every kernel linked after it via 'next'.  Walks the
    list iteratively rather than recursing, so destroying a very long
    kernel list (e.g. one produced by repeated kernel expansion) cannot
    overflow the call stack.  Always returns NULL so callers can write
    kernel=DestroyKernelInfo(kernel);
  */
  assert(kernel != (KernelInfo *) NULL);
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next;

    next=kernel->next;
    kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
    (void) RelinquishMagickMemory(kernel);
    kernel=next;
  }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/* NOTE(review): disabled reference code -- if re-enabled, this will NOT
** compile as-is: 'angle' in the final statement is not declared anywhere
** in this function (presumably it belonged to a caller that tracked the
** kernel's rotation angle; confirm before resurrecting this code). */
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
size_t
y;
register ssize_t
x,r;
register double
*k,t;
/* swap k[x] and k[r], walking each row inward from both ends */
for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
t=k[x], k[x]=k[r], k[r]=t;
/* mirror the kernel origin's x coordinate as well */
kernel->x = kernel->width - kernel->x - 1;
angle = fmod(angle+180.0, 360.0);
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /*
    Append three rotated clones of the current list tail, in the special
    mirror order: 180 degrees (flip), then 90 (transpose), then 180 again
    (flop).  If any clone fails, expansion stops quietly, leaving whatever
    kernels were appended so far.
  */
  static const double
    turns[3] = { 180.0, 90.0, 180.0 };  /* flip, transpose, flop */

  KernelInfo
    *tail;

  size_t
    i;

  tail=kernel;
  for (i=0; i < (sizeof(turns)/sizeof(turns[0])); i++)
  {
    KernelInfo
      *mirror;

    mirror=CloneKernelInfo(tail);
    if (mirror == (KernelInfo *) NULL)
      return;
    RotateKernelInfo(mirror,turns[i]);
    LastKernelInfo(tail)->next=mirror;
    tail=mirror;
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels.
% While 90 degree rotations only work for linear and square kernels
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
const KernelInfo *kernel2)
{
register size_t
i;
/* check size and origin location */
if ( kernel1->width != kernel2->width
|| kernel1->height != kernel2->height
|| kernel1->x != kernel2->x
|| kernel1->y != kernel2->y )
return MagickFalse;
/* check actual kernel values */
for (i=0; i < (kernel1->width*kernel1->height); i++) {
/* Test for Nan equivalence */
if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) )
return MagickFalse;
if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) )
return MagickFalse;
/* Test actual values are equivalent */
if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
return MagickFalse;
}
return MagickTrue;
}
/*
  ExpandRotateKernelInfo() appends rotated copies of the last kernel in the
  list, rotating by 'angle' each step, until the rotation wraps back to a
  kernel identical to the head of the list (or a clone fails).  The final
  redundant clone is destroyed.
*/
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  KernelInfo
    *rotated,
    *tail;

  tail=kernel;
  /* keep cloning and rotating the list tail until the sequence repeats */
  for (rotated=CloneKernelInfo(tail); rotated != (KernelInfo *) NULL;
       rotated=CloneKernelInfo(tail))
  {
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;  /* full circle: this copy duplicates the original */
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  if (rotated != (KernelInfo *) NULL)
    rotated=DestroyKernelInfo(rotated);  /* kernel repeated - junk */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   C a l c   K e r n e l   M e t a   D a t a                                 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculate the KernelInfo meta-data of this kernel only,
% using the kernel values. This should only be used if it is not possible to
% calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
/*
  CalcKernelMetaData() recomputes the min/max and positive/negative range
  meta-data of one kernel from its values, zeroing any value smaller than
  MagickEpsilon in magnitude.  Min/max start at 0.0, so zero is always
  considered part of the value range.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    i;

  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; i < (kernel->width*kernel->height); i++)
  {
    MagickRealType
      value;

    /* snap near-zero values to exactly zero */
    if ( fabs(kernel->values[i]) < MagickEpsilon )
      kernel->values[i] = 0.0;
    value=kernel->values[i];
    /* accumulate signed ranges separately */
    if (value < 0)
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum, value);
    Maximize(kernel->maximum, value);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to as MorphologyImage() (see below) but without
% any user controls. This allows internel programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically, all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine, with the
% appropriate bias, and compose (typically 'UndefinedCompositeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphologyPrimitive() applies one low-level morphology primitive
  (convolve, erode, dilate, hit-and-miss, thinning/thicken, intensity
  erode/dilate, or iterative distance) to 'image', writing the result into
  the pre-allocated, same-sized 'morphology_image'.

  Returns the number of pixels that changed.  On failure the general path
  returns -1 while the special vertical-kernel path returns 0 --
  NOTE(review): this inconsistency looks unintentional; confirm how
  callers interpret the failure value.
*/
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
  const MorphologyMethod method,const KernelInfo *kernel,const double bias,
  ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"

  CacheView
    *image_view,
    *morphology_view;

  OffsetInfo
    offset;

  register ssize_t
    j,
    y;

  size_t
    *changes,
    changed,
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(morphology_image != (Image *) NULL);
  assert(morphology_image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
  /* virtual-pixel width: one full kernel overhang on each side of a row */
  width=image->columns+kernel->width-1;
  offset.x=0;
  offset.y=0;
  switch (method)
  {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
    {
      /*
        Kernel needs to be used with reflection about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
    {
      /* un-reflected kernel: origin offsets used directly */
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
    default:
    {
      /* should be unreachable: caller dispatches only primitive methods */
      assert("Not a Primitive Morphology Method" != (char *) NULL);
      break;
    }
  }
  changed=0;
  /* one change counter per OpenMP thread to avoid write contention */
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changes[j]=0;

  if ((method == ConvolveMorphology) && (kernel->width == 1))
    {
      register ssize_t
        x;

      /*
        Special handling (for speed) of vertical (blur) kernels.  This
        performs its handling in columns rather than in rows.  This is only
        done for convolve as it is the only method that generates very
        large 1-D vertical kernels (such as a 'BlurKernel').
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(progress,status) \
        magick_number_threads(image,morphology_image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          r;

        ssize_t
          center;

        if (status == MagickFalse)
          continue;
        /* read one column, tall enough for the kernel overhang */
        p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
          kernel->height-1,exception);
        q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
          morphology_image->rows,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        /* offset (in quantum units) of the kernel origin within 'p' */
        center=(ssize_t) GetPixelChannels(image)*offset.y;
        for (r=0; r < (ssize_t) image->rows; r++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              alpha,
              gamma,
              pixel;

            PixelChannel
              channel;

            PixelTrait
              morphology_traits,
              traits;

            register const MagickRealType
              *magick_restrict k;

            register const Quantum
              *magick_restrict pixels;

            register ssize_t
              v;

            size_t
              count;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            morphology_traits=GetPixelChannelTraits(morphology_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morphology_traits == UndefinedPixelTrait))
              continue;
            /* copy-through channels and write-masked pixels are untouched */
            if (((traits & CopyPixelTrait) != 0) ||
                (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
              {
                SetPixelChannel(morphology_image,channel,p[center+i],q);
                continue;
              }
            /* convolve uses the reflected kernel: walk it backwards */
            k=(&kernel->values[kernel->height-1]);
            pixels=p;
            pixel=bias;
            gamma=0.0;
            count=0;
            if ((morphology_traits & BlendPixelTrait) == 0)
              /* no alpha blending: plain weighted sum */
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                if (!IsNaN(*k))
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
            else
              /* alpha blending: weight each sample by its alpha */
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                if (!IsNaN(*k))
                  {
                    alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                    pixel+=alpha*(*k)*pixels[i];
                    gamma+=alpha*(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
            if (fabs(pixel-p[center+i]) > MagickEpsilon)
              changes[id]++;
            /* normalise by accumulated weight, scaled for NaN gaps */
            gamma=PerceptibleReciprocal(gamma);
            if (count != 0)
              gamma*=(double) kernel->height/count;
            SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
              pixel),q);
          }
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(morphology_image);
        }
        if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
            /* NOTE(review): progress is reported against image->rows even
               though this loop iterates columns -- confirm intended */
            proceed=SetImageProgress(image,MorphologyTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      morphology_image->type=image->type;
      morphology_view=DestroyCacheView(morphology_view);
      image_view=DestroyCacheView(image_view);
      /* fold per-thread counters into the total */
      for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
        changed+=changes[j];
      changes=(size_t *) RelinquishMagickMemory(changes);
      return(status ? (ssize_t) changed : 0);
    }
  /*
    Normal handling of horizontal or rectangular kernels (row by row).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,morphology_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /* read kernel->height rows, wide enough for horizontal overhang */
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* quantum offset of the kernel origin within the virtual window */
    center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
      GetPixelChannels(image)*offset.x);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          intensity,
          maximum,
          minimum,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          morphology_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        size_t
          count;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        morphology_traits=GetPixelChannelTraits(morphology_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (morphology_traits == UndefinedPixelTrait))
          continue;
        /* copy-through channels and write-masked pixels are untouched */
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          {
            SetPixelChannel(morphology_image,channel,p[center+i],q);
            continue;
          }
        pixels=p;
        maximum=0.0;
        minimum=(double) QuantumRange;
        /* per-method starting value for the accumulator */
        switch (method)
        {
          case ConvolveMorphology: pixel=bias; break;
          case DilateMorphology:
          case ErodeIntensityMorphology:
          {
            pixel=0.0;
            break;
          }
          default:
          {
            pixel=(double) p[center+i];
            break;
          }
        }
        count=0;
        gamma=1.0;
        switch (method)
        {
          case ConvolveMorphology:
          {
            /*
              Weighted average of pixels using reflected kernel.

              For correct working of this operation for asymmetrical
              kernels, the kernel needs to be applied in its reflected
              form.  That is, its values need to be reversed.

              Correlation is actually the same as this but without
              reflecting the kernel, and thus 'lower-level' than
              Convolution.  However as Convolution is the more common
              method used, and it does not really cost us much in terms of
              processing to use a reflected kernel, it is Convolution that
              is implemented.

              Correlation will have its kernel reflected before calling
              this function to do a Convolve.

              For more details of Correlation vs Convolution see
                http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            if ((morphology_traits & BlendPixelTrait) == 0)
              {
                /*
                  No alpha blending.
                */
                for (v=0; v < (ssize_t) kernel->height; v++)
                {
                  for (u=0; u < (ssize_t) kernel->width; u++)
                  {
                    if (!IsNaN(*k))
                      {
                        pixel+=(*k)*pixels[i];
                        count++;
                      }
                    k--;
                    pixels+=GetPixelChannels(image);
                  }
                  /* step to the start of the next kernel row */
                  pixels+=(image->columns-1)*GetPixelChannels(image);
                }
                break;
              }
            /*
              Alpha blending.
            */
            gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                    pixel+=alpha*(*k)*pixels[i];
                    gamma+=alpha*(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case ErodeMorphology:
          {
            /*
              Minimum value within kernel neighbourhood.

              The kernel is not reflected for this operation.  In normal
              Greyscale Morphology, the kernel value should be added to
              the real value; this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                /* only "foreground" kernel entries (>= 0.5) participate */
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    if ((double) pixels[i] < pixel)
                      pixel=(double) pixels[i];
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateMorphology:
          {
            /*
              Maximum value within kernel neighbourhood.

              For correct working of this operation for asymmetrical
              kernels, the kernel needs to be applied in its reflected
              form.  That is, its values need to be reversed.

              In normal Greyscale Morphology, the kernel value should be
              added to the real value; this is currently not done, due to
              the nature of the boolean kernels being used.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k > 0.5))
                  {
                    if ((double) pixels[i] > pixel)
                      pixel=(double) pixels[i];
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case HitAndMissMorphology:
          case ThinningMorphology:
          case ThickenMorphology:
          {
            /*
              Minimum of foreground pixels minus maximum of background
              pixels.

              The kernel is not reflected for this operation, and consists
              of both foreground and background pixel neighbourhoods, 0.0
              for background, and 1.0 for foreground, with either NaN or
              0.5 values for don't care.

              This never produces a meaningless negative result.  Such
              results would cause Thinning/Thicken to not work correctly
              when used against a greyscale image.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if (*k > 0.7)
                      {
                        /* foreground entry: track minimum */
                        if ((double) pixels[i] < pixel)
                          pixel=(double) pixels[i];
                      }
                    else
                      if (*k < 0.3)
                        {
                          /* background entry: track maximum */
                          if ((double) pixels[i] > maximum)
                            maximum=(double) pixels[i];
                        }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            pixel-=maximum;
            if (pixel < 0.0)
              pixel=0.0;
            if (method ==  ThinningMorphology)
              /* subtract the hit-and-miss result from the pixel */
              pixel=(double) p[center+i]-pixel;
            else
              if (method ==  ThickenMorphology)
                /* add the hit-and-miss result to the pixel */
                pixel+=(double) p[center+i]+pixel;
            break;
          }
          case ErodeIntensityMorphology:
          {
            /*
              Select pixel with minimum intensity within kernel
              neighbourhood.  The kernel is not reflected for this
              operation.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity < minimum)
                      {
                        pixel=(double) pixels[i];
                        minimum=intensity;
                      }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateIntensityMorphology:
          {
            /*
              Select pixel with maximum intensity within kernel
              neighbourhood.  The kernel is not reflected for this
              operation.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity > maximum)
                      {
                        pixel=(double) pixels[i];
                        maximum=intensity;
                      }
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case IterativeDistanceMorphology:
          {
            /*
              Compute the iterative distance from the black edge of a
              white image shape.  Essentially white values are decreased
              to the smallest 'distance from edge' that can be found.

              It works by adding kernel values to the neighbourhood and
              selecting the minimum value found.  The kernel is rotated
              before use, so kernel distances match resulting distances
              when a user-provided asymmetric kernel is applied.

              This code is nearly identical to true GrayScale Morphology,
              but not quite:
                GrayDilate: kernel values added, maximum value found,
                  kernel is rotated before use.
                GrayErode: kernel values subtracted and minimum value
                  found, no kernel rotation used.
              Note that the Iterative Distance method is essentially a
              GrayErode, but with negative kernel values and kernel
              rotation applied.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case UndefinedMorphology:
          default:
            break;
        }
        if (fabs(pixel-p[center+i]) > MagickEpsilon)
          changes[id]++;
        /* normalise by accumulated weight, scaled for NaN gaps */
        gamma=PerceptibleReciprocal(gamma);
        if (count != 0)
          gamma*=(double) kernel->height*kernel->width/count;
        SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(morphology_image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
        proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /* fold per-thread counters into the total */
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changed+=changes[j];
  changes=(size_t *) RelinquishMagickMemory(changes);
  return(status ? (ssize_t) changed : -1);
}
/*
  This is almost identical to the MorphologyPrimitive() function above, but
  applies the primitive directly to the actual image using two passes, once
  in each direction, with the results of the previous (and current) row
  being re-used.

  That is, after each row is 'sync'ed' into the image, the next row makes
  use of those values as part of the calculation of the next row.  It then
  repeats, going in the opposite (bottom-up) direction.

  Because of this 're-use of results' this function cannot make use of
  multi-threaded, parallel processing.
*/
/*
  MorphologyPrimitiveDirect() applies a distance-style primitive (Distance
  or Voronoi) in place on 'image', using two sequential sweeps: top-down
  over the upper half of the kernel, then bottom-up over the lower half.
  Each sweep re-reads the rows it has just written, so it must run
  single-threaded.  Returns the number of pixels changed, or -1 on failure.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  CacheView
    *morphology_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  size_t
    width,
    changed;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  changed=0;
  progress=0;
  switch(method)
  {
    case DistanceMorphology:
    case VoronoiMorphology:
    {
      /*
        Kernel reflected about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    default:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
  }
  /*
    Two views into same image, do not thread.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  /* virtual width: one full kernel overhang on each side of a row */
  width=image->columns+kernel->width-1;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    /*
      Read virtual pixels, and authentic pixels, from the same image!  We
      read using virtual to get virtual pixel handling, but write back
      into the same image.

      Only the top half of the kernel is processed as we do a single pass
      downward through the image, iterating the distance function as we go.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
      offset.y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* quantum offset of the kernel origin within the virtual window */
    center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
      GetPixelChannels(image)*offset.x);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        /* copy-through channels and write-masked pixels are untouched */
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* rows above (and including) the origin row */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v <= offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* already-updated pixels to the left on the current row */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* NOTE(review): this variant loops v < offset.y while
               Distance loops v <= offset.y -- confirm the asymmetry is
               intended */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* two passes total, so progress runs over 2*rows */
        proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /*
    Do the reverse pass through the image.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  for (y=(ssize_t) image->rows-1; y >= 0; y--)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    /*
      Read virtual pixels, and authentic pixels, from the same image.  We
      read using virtual to get virtual pixel handling, but write back
      into the same image.

      Only the bottom half of the kernel is processed as we move up the
      image.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
      kernel->y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* walk this row right-to-left, starting at the last pixel */
    p+=(image->columns-1)*GetPixelChannels(image);
    q+=(image->columns-1)*GetPixelChannels(image);
    center=(ssize_t) (offset.x*GetPixelChannels(image));
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* rows below (and including) the origin row */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* already-updated pixels to the right on the current row */
            k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          case VoronoiMorphology:
          {
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* NOTE(review): Distance starts k at
               kernel->width*kernel->y+kernel->x-1 here while Voronoi
               reuses kernel->width*(kernel->y+1)-1 -- confirm intended */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  return(status ? (ssize_t) changed : -1);
}
/*
Apply a Morphology by calling one of the above low level primitive
application functions. This function handles any iteration loops,
composition or re-iteration of results, and compound morphology methods that
is based on multiple low-level (staged) morphology methods.
Basically this provides the complex glue between the requested morphology
method and raw low-level implementation (above).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /*         maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /*         how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /*         number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;  /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);  /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALL THRU */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;  /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;
      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      if (verbose != MagickFalse)
        /* FIX: was "(void) (void) FormatLocaleFile..." - doubled cast typo */
        (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp;  /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  ** erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1: iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2: iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;  /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;  /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel;  /* default use unreflected kernel */
        primitive = method;         /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:   /* just erode */
          case EdgeInMorphology:  /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:   /* just dilate */
          case EdgeOutMorphology:  /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:    /* erode then dilate */
          case TopHatMorphology:  /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel;  /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel;  /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:  /* open, close */
            switch ( stage_loop ) {
              case 1:  /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2:  /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3:  /* Reflect kernel a close */
                this_kernel = rflt_kernel;  /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4:  /* Finish the Close */
                this_kernel = rflt_kernel;  /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:  /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;  /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel. It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel;  /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
              method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;  /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
            this_kernel, bias, exception);
          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n");  /* add end-of-line from previous */
            /* FIX: was "(void) (void) FormatLocaleFile..." - doubled cast typo */
            (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;  /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL;  /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n");  /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) loop for compound methods */

      /* Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image);  /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling: re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;  /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image;  /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup a lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImage() applies a user supplied kernel to the image according to
% the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
% This can also includes the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showkernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showkernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  CompositeOperator
    compose_method;  /* multi-kernel composition override for this call */

  const char
    *setting;        /* user artifact currently being inspected */

  double
    convolve_bias;   /* output bias for Convolve/Correlate methods */

  Image
    *result;         /* image produced by MorphologyApply() */

  KernelInfo
    *active_kernel;  /* kernel actually applied (may be a scaled clone) */

  active_kernel=(KernelInfo *) kernel;
  convolve_bias=0.0;
  compose_method=UndefinedCompositeOp;  /* let the method pick its default */
  /*
    Apply Convolve/Correlate normalization and scaling factors BEFORE the
    ShowKernelInfo() call below, so users can see the effect of their
    'option:convolve:scale' setting.
  */
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      /* fetch the output bias, as it will be needed */
      setting=GetImageArtifact(image,"convolve:bias");
      if (setting != (const char *) NULL)
        {
          if (IsGeometry(setting) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'","convolve:bias",
              setting);
          else
            convolve_bias=StringToDoubleInterval(setting,(double)
              QuantumRange+1.0);
        }
      /* scale the kernel according to the user's wishes */
      setting=GetImageArtifact(image,"convolve:scale");
      if (setting != (const char *) NULL)
        {
          if (IsGeometry(setting) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'","convolve:scale",
              setting);
          else
            {
              /* clone before modifying: the caller's kernel is const */
              if (active_kernel == kernel)
                active_kernel=CloneKernelInfo(kernel);
              if (active_kernel == (KernelInfo *) NULL)
                return((Image *) NULL);
              ScaleGeometryKernelInfo(active_kernel,setting);
            }
        }
    }
  /* display the (normalized) kernel via stderr, if requested */
  setting=GetImageArtifact(image,"morphology:showkernel");
  if (IsStringTrue(setting) != MagickFalse)
    ShowKernelInfo(active_kernel);
  /*
    Override the default handling of multi-kernel morphology results:
      'Undefined' uses the method's default, 'None' (default for 'Convolve')
      re-iterates the previous result, anything else merges the resulting
      images with that compose method.  Default for 'HitAndMiss' is 'Lighten'.
  */
  setting=GetImageArtifact(image,"morphology:compose");
  if (setting != (const char *) NULL)
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickComposeOptions,MagickFalse,setting);
      if (option < 0)
        (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
          "UnrecognizedComposeOperator","'%s' '%s'","morphology:compose",
          setting);
      else
        compose_method=(CompositeOperator) option;
    }
  /* apply the morphology, then clean up any cloned kernel */
  result=MorphologyApply(image,method,iterations,active_kernel,compose_method,
    convolve_bias,exception);
  if (active_kernel != kernel)
    active_kernel=DestroyKernelInfo(active_kernel);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first (recurse down a multi-kernel list) */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle into the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases: some built-in kernel types need no rotation */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }
  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle:
          ** cycle the eight outer cells of the 3x3 grid one step around the
          ** ring (row-major indices 0..8, center cell 4 is untouched).
          */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin: move the origin offset (relative to
          ** the center cell) one step around the same ring */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
                 if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  /* Attempt a 90 degree rotation (transpose-based) */
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          /* track whether the net rotation was +90 or -90 degrees */
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);      /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees:
          ** four-way cyclic exchange of cells, shrinking inward from the
          ** outermost ring (in-place, no scratch array needed).
          */
          { register ssize_t
              i,j,x,y;

            register MagickRealType
              *k,t;

            k=kernel->values;
            for( i=0, x=(ssize_t) kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1;  j<y;   j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also know as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origon
       */
      MagickRealType
        t;

      register MagickRealType
        *k;

      ssize_t
        i,
        j;

      k=kernel->values;
      /* swap elements from both ends of the flattened array towards center */
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0;  i < j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);   /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, posibily with a linear kernel restriction.
   */

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    geometry_info;  /* parsed arguments of the geometry string */

  MagickStatusType
    flags;          /* which geometry components/flags were present */

  /* Parse the user-supplied geometry string (e.g. "-set option:convolve:scale") */
  SetGeometryInfo(&geometry_info);
  flags = ParseGeometry(geometry, &geometry_info);
  if ( (flags & PercentValue) != 0 )     /* '%' flag: values are percentages */
    {
      geometry_info.rho *= 0.01;
      geometry_info.sigma *= 0.01;
    }
  if ( (flags & RhoValue) == 0 )         /* default scaling factor */
    geometry_info.rho = 1.0;
  if ( (flags & SigmaValue) == 0 )       /* default unity-kernel blend */
    geometry_info.sigma = 0.0;
  /* Scale/Normalize the kernel using the first argument and any flags */
  ScaleKernelInfo(kernel, geometry_info.rho, (GeometryFlags) flags);
  /* Add a scaled Unity Kernel, for blending with the original image */
  if ( (flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, geometry_info.sigma);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/brackground
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  double
    pos_scale,  /* divisor applied to positive kernel values */
    neg_scale;  /* divisor applied to negative kernel values */

  ssize_t
    i;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel */
      pos_scale = kernel->positive_range;
  }
  /* Force kernel into a normalized zero-summing kernel */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                  ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                  ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if (!IsNaN(kernel->values[i]))
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;  /* BUG FIX: was '= 1', which discarded the saved
                             maximum instead of completing the swap */
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel defination to
% standard error, generally due to a users 'morphology:showkernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
  const KernelInfo
    *curr;          /* kernel currently being printed */

  size_t
    number;         /* position of curr within the kernel list */

  /*
    Dump every kernel in the (possibly multi-kernel) list to stderr:
    type, geometry, value range, output range, and the cell values.
  */
  number=0;
  curr=kernel;
  while (curr != (const KernelInfo *) NULL)
  {
    size_t
      cell,         /* linear index into curr->values */
      column,
      row;

    (void) FormatLocaleFile(stderr, "Kernel");
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) number );
    (void) FormatLocaleFile(stderr, " \"%s",
          CommandOptionToMnemonic(MagickKernelOptions, curr->type) );
    if ( fabs(curr->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", curr->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
      curr->width,(unsigned long) curr->height,(long) curr->x,(long) curr->y);
    (void) FormatLocaleFile(stderr,
          " with values from %.*lg to %.*lg\n",
          GetMagickPrecision(), curr->minimum,
          GetMagickPrecision(), curr->maximum);
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
          GetMagickPrecision(), curr->negative_range,
          GetMagickPrecision(), curr->positive_range);
    /* classify the kernel by the sum of its value range */
    if ( fabs(curr->positive_range+curr->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(curr->positive_range+curr->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
          GetMagickPrecision(), curr->positive_range+curr->negative_range);
    /* print the kernel cells, one row per line */
    cell=0;
    for (row=0; row < curr->height; row++)
    {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) row );
      for (column=0; column < curr->width; column++, cell++)
        if (IsNaN(curr->values[cell]))
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
              GetMagickPrecision(), (double) curr->values[cell]);
      (void) FormatLocaleFile(stderr,"\n");
    }
    number++;
    curr=curr->next;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n a l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAdditionKernelInfo method is:
%
% void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *k;

  /* Blend a scaled 'Unity' (identity) kernel into every kernel in the
     multi-kernel list: bump the origin element by 'scale', then refresh
     the derived meta-data.  Each list element is independent, so a plain
     iteration is equivalent to the original tail-first recursion. */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
  {
    k->values[k->x+k->y*k->width] += scale;
    CalcKernelMetaData(k); /* recalculate the meta-data */
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simply
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
/* ZeroKernelNans() replaces any special 'nan' marker value in the kernel
 * (and every kernel further down the multi-kernel list) with zero, e.g.
 * before handing the kernel to GPU convolution code.
 *
 * Changes from the original: the list is walked iteratively instead of
 * recursively (no stack growth for long kernel lists), and the deprecated
 * 'register' storage-class specifier is dropped.
 */
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *k;

  size_t
    i;

  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    for (i=0; i < (k->width*k->height); i++)
      if (IsNaN(k->values[i]))
        k->values[i]=0.0;
}
|
block-2.c | // { dg-do compile }
/* GCC OpenMP testsuite case (compile-only): verifies that the compiler
   rejects control-flow transfers that exit or enter the structured block
   of a '#pragma omp for', while still accepting a legal inner 'break'
   (belonging to a nested loop) and 'continue'.  The '{ dg-... }' comments
   are DejaGnu directives consumed by the test harness; do not edit them. */
void foo()
{
  int i, j;
  /* 'break' out of the workshared loop leaves the OMP region: diagnosed. */
#pragma omp for
  for (i = 0; i < 10; ++i)
    break; // { dg-error "break" }
bad1:
  /* 'goto' to a label outside the OMP region: diagnosed as invalid exit. */
#pragma omp for
  for (i = 0; i < 10; ++i)
    goto bad1; // { dg-error "invalid exit" }
  /* 'goto' jumping into an OMP region: diagnosed as invalid entry. */
  goto bad2; // { dg-error "invalid entry" }
#pragma omp for
  for (i = 0; i < 10; ++i)
    {
bad2: ;
    }
  /* Legal: this 'break' belongs to the inner, non-workshared loop. */
#pragma omp for
  for (i = 0; i < 10; ++i)
    for (j = 0; j < 10; ++j)
      if (i == j)
        break;
  /* Legal: 'continue' of the workshared loop itself. */
#pragma omp for
  for (i = 0; i < 10; ++i)
    continue;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two 'struct timeval' values.
 * NOTE: *y is used as scratch and is modified (carry/borrow
 * normalization), exactly as in the classic glibc manual example.
 * Returns 1 if the difference is negative, otherwise 0; result->tv_usec
 * is always non-negative on return. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microseconds so x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
    {
      int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * borrow;
      y->tv_sec += borrow;
    }
  /* Push any excess microseconds back into y's seconds field. */
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int carry = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_usec += 1000000 * carry;
      y->tv_sec -= carry;
    }
  /* The microsecond difference is now certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative overall iff x's (adjusted) seconds precede y's. */
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: allocates the (Nx,Ny,Nz) grids, runs TESTS timed
 * repetitions of the tiled (PLUTO/CLooG-generated) order-1 3D 7-point
 * variable-coefficient stencil, reports per-test and best times, then
 * frees everything.
 * Usage: prog Nx Ny Nz Nt
 *
 * Fixes vs. the original: Nx/Ny/Nz/Nt are zero-initialized (they were
 * read uninitialized — undefined behavior — when fewer arguments were
 * supplied); the boundary planes of A/coef and all of A[1] are zeroed
 * before use (the stencil reads them but they were never written); the
 * top-level A/coef pointers and tile_size are freed (they leaked); the
 * unused local 't' is removed.
 */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  /* Grid extents (+2 ghost layers) and number of time steps; default 0 so
     the compute guard below simply skips work when arguments are missing. */
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays: A holds two time planes, coef the 7 coefficients
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 4;
  tile_size[3] = 128;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables
  srand(42);
  /* Zero every element first so the i=0/j=0/k=0 boundary planes (and the
     whole A[1] plane) are defined before the stencil reads them.  The
     interior is then overwritten with the same pseudo-random sequence as
     before (the rand() call pattern on the interior is unchanged). */
  for (m = 0; m < 2; m++)
    for (i = 0; i < Nz; i++)
      for (j = 0; j < Ny; j++)
        for (k = 0; k < Nx; k++)
          A[m][i][j][k] = 0.0;
  for (m = 0; m < 7; m++)
    for (i = 0; i < Nz; i++)
      for (j = 0; j < Ny; j++)
        for (k = 0; k < Nx; k++)
          coef[m][i][j][k] = 0.0;
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* (The generated dump embedded a glibc preinclude-header license
       comment here; elided as it does not apply to this file.) */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,8);t1++) {
        lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
        ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(16*t2-Nz,4)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(8*t1+Ny+13,4)),floord(16*t2+Ny+12,4)),floord(16*t1-16*t2+Nz+Ny+11,4));t3++) {
            for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-124,128)),ceild(4*t3-Ny-124,128));t4<=min(min(min(min(floord(4*t3+Nx,128),floord(Nt+Nx-4,128)),floord(8*t1+Nx+13,128)),floord(16*t2+Nx+12,128)),floord(16*t1-16*t2+Nz+Nx+11,128));t4++) {
              for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),4*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),4*t3+2),128*t4+126),16*t1-16*t2+Nz+13);t5++) {
                for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                    lbv=max(128*t4,t5+1);
                    ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays, including the top-level pointers and the
  // tile-size list (these previously leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
CALPHADConcSolverTernary.h | #ifndef included_CALPHADConcSolverTernary
#define included_CALPHADConcSolverTernary
#include "NewtonSolver.h"
#include "datatypes.h"
namespace Thermo4PFM
{
/// Newton solver for the ternary CALPHAD two-phase concentration problem:
/// solves a 4-unknown nonlinear system for the two independent species
/// concentrations in each of the two phases (L and S).
class CALPHADConcSolverTernary
    : public NewtonSolver<4, CALPHADConcSolverTernary, JacobianDataType>
{
public:
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp declare target
#endif
    /// Compute "internal" concentrations cL, cS by solving the KKS
    /// (presumably; original comment said "KKK" -- TODO confirm) equations.
    /// conc: initial guess and final solution (concentration in each phase)
    /// tol, max_iters and alpha are forwarded to
    /// NewtonSolver::ComputeSolution; returns its status code.
    int ComputeConcentration(double* const conc, const double tol,
        const int max_iters, const double alpha = 1.)
    {
        return NewtonSolver::ComputeSolution(conc, tol, max_iters, alpha);
    }
    /// Set up model parameter values to be used by the solver,
    /// including compositions "c0"/"c1" and phase fraction "hphi"
    /// to solve for.
    void setup(const double c0, const double c1, const double hphi,
        const double RTinv, const CalphadDataType* const L_AB_L,
        const CalphadDataType* const L_AC_L,
        const CalphadDataType* const L_BC_L,
        const CalphadDataType* const L_AB_S,
        const CalphadDataType* const L_AC_S,
        const CalphadDataType* const L_BC_S,
        const CalphadDataType* const L_ABC_L,
        const CalphadDataType* const L_ABC_S, const CalphadDataType* const fA,
        const CalphadDataType* const fB, const CalphadDataType* const fC);
    /// Evaluate the RHS of the system of equations to solve;
    /// specific to this solver.
    void RHS(const double* const c, double* const fvec);
    /// Evaluate the Jacobian of the system of equations;
    /// specific to this solver.
    void Jacobian(const double* const c, JacobianDataType** const fjac);
#ifdef HAVE_OPENMP_OFFLOAD
#pragma omp end declare target
#endif
private:
    ///
    /// Nominal composition to solve for (two independent components)
    ///
    double c0_[2];
    ///
    /// Phase fraction to solve for
    ///
    double hphi_;
    ///
    /// Energies of the 3 species, in each of the two phases
    ///
    CalphadDataType fA_[2];
    CalphadDataType fB_[2];
    CalphadDataType fC_[2];
    ///
    /// L coefficients for phase L
    ///
    CalphadDataType L_AB_L_[4];
    CalphadDataType L_AC_L_[4];
    CalphadDataType L_BC_L_[4];
    CalphadDataType L_ABC_L_[3];
    ///
    /// L coefficients for phase S
    ///
    CalphadDataType L_AB_S_[4];
    CalphadDataType L_AC_S_[4];
    CalphadDataType L_BC_S_[4];
    CalphadDataType L_ABC_S_[3];
    /// NOTE(review): setup() receives "RTinv"; confirm whether RT_ stores
    /// R*T or its inverse.
    double RT_;
};
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.