source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
kkt.h | #ifndef COMPLIANT_UTILS_KKT_H
#define COMPLIANT_UTILS_KKT_H
#include <Eigen/Core>
#include <Eigen/Sparse>
namespace sofa {
namespace component {
namespace linearsolver {
class AssembledSystem;
}
}
}
// | Q A^T |
// | A C |
// Matrix-free operator for the KKT system
//
//   | Q  A^T |        with x = [ v ; lambda ],
//   | A   C  |        operator() returns [ Q v - A^T lambda ; -A v - C lambda ]
//
// (see call() below for the exact signs actually computed).
struct kkt {
    // NOTE(review): should not these types be templates? That would let
    // utils/kkt.h live outside of sofa.
    typedef sofa::component::linearsolver::AssembledSystem sys_type;
    typedef sys_type::real real;
    typedef sys_type::rmat rmat;
    typedef sys_type::vec vec;

    // Q = P H P : dynamics block projected by P.
    // P is applied through selfadjointView<Upper> — presumably only its
    // upper triangle is stored; TODO confirm (matrixAT below applies sys.P
    // directly, without selfadjointView).
    struct matrixQ {
        const sys_type& sys;

        matrixQ(const sys_type& sys)
            : sys(sys) {
        }

        // scratch storage; mutable so operator() can stay const
        mutable vec tmp, result;

        template<class Vec>
        const vec& operator()(const Vec& x) const {
            result.noalias() = sys.P.selfadjointView<Eigen::Upper>() * x;
            tmp.noalias() = sys.H * result;
            result.noalias() = sys.P.selfadjointView<Eigen::Upper>() * tmp;
            return result;
        }
    };

    // A = J P : constraint Jacobian applied after the projection.
    struct matrixA {
        const sys_type& sys;

        matrixA(const sys_type& sys)
            : sys(sys) {
        }

        // scratch storage; mutable so operator() can stay const
        mutable vec tmp, result;

        template<class Vec>
        const vec& operator()(const Vec& x) const {
            tmp.noalias() = sys.P.selfadjointView<Eigen::Upper>() * x;
            result.noalias() = sys.J * tmp;
            return result;
        }
    };

    // A^T = P J^T.
    // NOTE(review): applies sys.P without selfadjointView, unlike matrixQ
    // and matrixA above — verify this is intentional (i.e. that P is stored
    // in full, not just its upper triangle).
    struct matrixAT {
        const sys_type& sys;

        matrixAT(const sys_type& sys)
            : sys(sys) {
        }

        // scratch storage; mutable so operator() can stay const
        mutable vec tmp, result;

        template<class Vec>
        const vec& operator()(const Vec& x) const {
            tmp.noalias() = sys.J.transpose() * x;
            result.noalias() = sys.P * tmp;
            return result;
        }
    };

    // C (+ damping * I) : compliance block, stored upper-triangular.
    struct matrixC {
        const sys_type& sys;
        const real damping;   // optional regularization added to the diagonal

        matrixC(const sys_type& sys,
                real damping = 0)
            : sys(sys),
              damping(damping) {
        }

        mutable vec result;

        template<class Vec>
        const vec& operator()(const Vec& x) const {
            result.noalias() = sys.C.selfadjointView<Eigen::Upper>() * x;
            if( damping ) result += damping * x;
            return result;
        }
    };

private:
    matrixQ Q;
    matrixA A;
    matrixAT AT;
    matrixC C;

    unsigned m, n;        // m = primal (dynamics) size, n = number of constraints

    mutable vec storage;  // (m+n) result vector returned by operator()

public:
    bool parallel;        // when true, operator() takes the OpenMP-sections path

    kkt( const sys_type& sys, bool parallel = false, real damping = 0)
        : Q(sys),
          A(sys),
          AT(sys),
          C(sys, damping),
          m( sys.m ),
          n( sys.n ),
          parallel(parallel) {
        storage.resize( m + n );
    }

private:
    // sequential product: storage = [ Q v - A^T lambda ; -A v - C lambda ]
    template<class Vec>
    const vec& call(const Vec& x) const {
        storage.head(m) = Q(x.head(m));
        if( n ) {
            storage.head(m) -= AT( x.tail(n) );
            storage.tail(n) = - A( x.head(m) ) - C(x.tail(n));
            // if( damping ) storage.tail(n) -= damping * x.tail(n);
        }
        return storage;
    }

    // OpenMP product: the four sub-block applications are independent (each
    // functor writes only its own .result member), so they run as parallel
    // sections; a second pair of sections combines them into storage.
    template<class Vec>
    const vec& omp_call(const Vec& x) const {
        if( n ) {
#ifdef _OPENMP
#pragma omp parallel sections
#endif
            {
#ifdef _OPENMP
#pragma omp section
#endif
                Q( x.head(m) );
#ifdef _OPENMP
#pragma omp section
#endif
                AT( x.tail(n) );
#ifdef _OPENMP
#pragma omp section
#endif
                A( x.head(m) );
#ifdef _OPENMP
#pragma omp section
#endif
                C( x.tail(n) );
            }
#ifdef _OPENMP
#pragma omp parallel sections
#endif
            {
#ifdef _OPENMP
#pragma omp section
#endif
                storage.head(m) = Q.result - AT.result;
#ifdef _OPENMP
#pragma omp section
#endif
                storage.tail(n) = -A.result - C.result;
                // if( damping ) storage.tail(n) -= damping * x.tail(n);
            }
        } else {
            // no constraints: only the dynamics block contributes
            storage.head(m) = Q(x.head(m));
        }
        return storage;
    }

public:
    // y = K x for the full KKT matrix; dispatches on the parallel flag.
    template<class Vec>
    const vec& operator()(const Vec& x) const {
        return parallel ? omp_call(x) : call(x);
    }
};
// Optimized matrix-free KKT operator: computes the same product as struct
// kkt but applies P to the head of x only once and reuses the result.
// Requires n > 0 (asserted in the constructor).
// NOTE(review): unlike kkt::matrixQ/matrixC, this applies sys.P and sys.C
// directly rather than through selfadjointView<Upper> — confirm that both
// matrices are stored in full (not just the upper triangle) before relying
// on this path.
struct kkt_opt {
    typedef sofa::component::linearsolver::AssembledSystem sys_type;
    typedef sys_type::rmat rmat;
    typedef sys_type::vec vec;

    const sys_type& sys;

    // scratch vectors; mutable so operator() can remain const
    mutable vec result, tmp, Pv, HPv, JTlambda, JPv, Clambda ;

    const unsigned m;   // primal (dynamics) size, sys.m
    const unsigned n;   // number of constraints, sys.n (must be > 0)

    kkt_opt(const sys_type& sys)
        : sys(sys),
          m(sys.m),
          n(sys.n)
    {
        result.resize( m + n );
        assert( n );
        // std::cerr << sys.C << std::endl;
    }

    // For x = [ v ; lambda ], returns
    //   [ P (H P v - J^T lambda) ; -J P v - C lambda ]
    template<class Vec>
    const vec& operator()(const Vec& x) const {
        Pv.noalias() = sys.P * x.head(m);

        // parallelizable: the four products below are mutually independent
        {
            HPv.noalias() = sys.H * Pv;
            JTlambda.noalias() = sys.J.transpose() * x.tail(n);
            JPv.noalias() = sys.J * Pv;
            Clambda.noalias() = sys.C * x.tail(n);
        }

        tmp.noalias() = HPv - JTlambda;
        result.head(sys.m).noalias() = sys.P * tmp;
        result.tail(sys.n).noalias() = -JPv - Clambda;
        return result;
    }
};
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y as a struct timeval.
 *
 * Note: normalizes *y in place as a side effect (this is the classic
 * GNU libc manual idiom for elapsed-time subtraction).
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds from y so the usec subtraction cannot underflow. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Push any excess microseconds in the difference into whole seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization the usec difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
/* Benchmark driver: allocates two time levels of a Nz x Ny x Nx field plus
 * a coefficient array, runs the order-4 25-point wave stencil Nt steps,
 * and reports per-test wall-clock times. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Fix: the dimensions were read uninitialized (undefined behavior)
   * whenever fewer than 3/4 command-line arguments were supplied.
   * Provide small defaults; explicit arguments override them. */
  int Nx = 40, Ny = 40, Nz = 40, Nt = 10;
  if (argc > 3) {
    /* +8 adds the 4-deep halo on each side required by the order-4 stencil */
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* A[0]/A[1] are the two time levels; roc2 holds per-point coefficients.
   * Fix: roc2 was first malloc'd with a single-pointer size and then
   * immediately leaked by a second malloc; allocate it once, correctly. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 4;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  /* Reproducible pseudo-random initialization.  NOTE(review): the index-0
   * planes are deliberately left untouched, matching the original
   * benchmark; they sit outside the 4..N-4 compute region. */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* order-4 central-difference coefficients of the 25-point stencil */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  /* Fix: the top-level pointer array and the tile-size list were leaked */
  free(A);
  free(tile_size);
  return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* result = *x - *y, normalized so tv_usec ends up non-negative.
 * Side effect: *y is normalized in place (standard GNU libc idiom).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    /* borrow enough whole seconds that x->tv_usec - y->tv_usec >= 0 */
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    /* move excess microseconds of the difference into the seconds field */
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 24;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
c_jacobi02.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
Copyright (c) 2004, OmpSCR Group
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of La Laguna nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
FILE: c_jacobi02.c
VERSION: 1.1
DATE: Oct 2004
AUTHORS: Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
This version: Dieter an Mey, Aachen University (RWTH), 1999 - 2003
anmey@rz.rwth-aachen.de
http://www.rwth-aachen.de/People/D.an.Mey.html
COMMENTS TO: ompscr@etsii.ull.es
DESCRIPTION: program to solve a finite difference discretization of Helmholtz equation :
(d2/dx2)u + (d2/dy2)u - alpha u = f using Jacobi iterative method.
COMMENTS: OpenMP version 2: 2 parallel loops in one parallel region (PR)
Directives are used in this code to achieve parallelism.
All do loops are parallelized with default 'static' scheduling.
REFERENCES: http://www.rz.rwth-aachen.de/computing/hpc/prog/par/openmp/jacobi.html
BASIC PRAGMAS: parallel for
USAGE: ./c_jacobi02.par 5000 5000 0.8 1.0 1000
INPUT: n - grid dimension in x direction
m - grid dimension in y direction
alpha - Helmholtz constant (always greater than 0.0)
tol - error tolerance for iterative solver
relax - Successice over relaxation parameter
mits - Maximum iterations for iterative solver
OUTPUT: Residual and error
u(n,m) - Dependent variable (solutions)
f(n,m) - Right hand side function
FILE FORMATS: -
RESTRICTIONS: -
REVISION HISTORY:
**************************************************************************/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "OmpSCR.h"
#define U(i,j) u[(i)*n+(j)]
#define F(i,j) f[(i)*n+(j)]
#define NUM_ARGS 6
#define NUM_TIMERS 1
int n, m, mits;
double tol, relax, alpha;
void jacobi (int n, int m, double dx, double dy,
double alpha, double omega,
double *u, double *f,
double tol, int maxit );
/******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/******************************************************
 * Initializes the solution U (to zero) and the RHS F for
 *   (d2/dx2)u + (d2/dy2)u - alpha*u = f
 * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2).
 * Also returns the grid spacings *dx, *dy on [-1,1].
 ******************************************************/
void initialize(
            int n,
            int m,
            double alpha,
            double *dx,
            double *dy,
            double *u,
            double *f)
{
  int i,j;
  /* Fix: xx and yy were declared int, silently truncating the grid
   * coordinates toward zero and corrupting the right-hand side F.
   * error_check() already declares them double; do the same here. */
  double xx,yy;

  *dx = 2.0 / (n-1);
  *dy = 2.0 / (m-1);

  /* Initialize initial condition and RHS.
   * NOTE(review): the (i-1)/(j-1) offset places x = -1 at i = 1, not
   * i = 0; this matches the original benchmark -- verify intent. */
  for (j=0; j<m; j++){
    for (i=0; i<n; i++){
      xx = -1.0 + *dx * (i-1);
      yy = -1.0 + *dy * (j-1);
      U(j,i) = 0.0;
      F(j,i) = -alpha * (1.0 - xx*xx) * (1.0 - yy*yy)
               - 2.0 * (1.0 - xx*xx) - 2.0 * (1.0 - yy*yy);
    }
  }
}
/************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
/************************************************************
 * Checks the error between the numerical solution U and the
 * exact solution u(x,y) = (1-x^2)*(1-y^2), printing the RMS
 * error normalized by the grid size.
 ************************************************************/
void error_check(
          int n,
          int m,
          double alpha,
          double dx,
          double dy,
          double *u,
          double *f)
{
  int i,j;
  double xx, yy, temp, error;

  /* Recompute the spacings the same way initialize() does.
   * Fix: dy was computed as 2.0/(n-2) -- the wrong dimension and an
   * off-by-one; the y spacing is 2.0/(m-1), matching initialize(). */
  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);
  error = 0.0;

  /* accumulate squared deviation from the exact solution */
  for (j=0; j<m; j++){
    for (i=0; i<n; i++){
      xx = -1.0 + dx * (i-1);
      yy = -1.0 + dy * (j-1);
      temp = U(j,i) - (1.0 - xx*xx) * (1.0 - yy*yy);
      error += temp*temp;
    }
  }

  error = sqrt(error)/(n*m);
  printf("Solution Error : %g\n", error);
}
/* Driver: reads the six solver parameters through the OmpSCR harness,
 * allocates the grids, runs the Jacobi solver under timer 0, and reports
 * elapsed time, MFlops, and the solution error. */
int main(int argc, char **argv){
  double *u, *f, dx, dy;   /* solution array, right-hand side, grid spacings */
  double dt, mflops;
  int NUMTHREADS;
  char *PARAM_NAMES[NUM_ARGS] = {"Grid dimension: X dir =", "Grid dimension: Y dir =", "Helmhotlz constant =",
                "Successive over-relaxation parameter =",
                "error tolerance for iterative solver =", "Maximum iterations for solver ="};
  char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"};
  char *DEFAULT_VALUES[NUM_ARGS] = {"5000", "5000", "0.8", "1.0", "1e-7", "1000"};

  NUMTHREADS = omp_get_max_threads();
  OSCR_init (NUMTHREADS, "Jacobi Solver v1", "Use 'jacobi02' <n> <m> <alpha> <relax> <tol> <mits>", NUM_ARGS,
    PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
    argc, argv);

  /* arguments are 1-indexed in the OSCR harness; these assign the
   * file-scope globals n, m, alpha, relax, tol, mits */
  n = OSCR_getarg_int(1);
  m = OSCR_getarg_int(2);
  alpha = OSCR_getarg_double(3);
  relax = OSCR_getarg_double(4);
  tol = OSCR_getarg_double(5);
  mits = OSCR_getarg_int(6);

  printf("-> %d, %d, %g, %g, %g, %d\n",
   n, m, alpha, relax, tol, mits);

  u = (double *) OSCR_malloc(n*m*sizeof(double));
  f = (double *) OSCR_malloc(n*m*sizeof(double));

  /* arrays are allocated and initialized */
  initialize(n, m, alpha, &dx, &dy, u, f);

  /* Solve Helmholtz equation under timer 0 */
  OSCR_timer_start(0);
  jacobi(n, m, dx, dy, alpha, relax, u,f, tol, mits);
  OSCR_timer_stop(0);
  dt = OSCR_timer_read(0);
  printf(" elapsed time : %12.6f\n", dt);

  /* 13 flops per interior point per iteration -- TODO confirm the count */
  mflops = (0.000001*mits*(m-2)*(n-2)*13) / dt;
  printf(" MFlops : %12.6g (%d, %d, %d, %g)\n",mflops, mits, m, n, dt);

  /* compare the computed solution against the exact (1-x^2)(1-y^2) */
  error_check(n, m, alpha, dx, dy, u, f);

  OSCR_report(1, TIMERS_NAMES);
  return 0;
}
/*
subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************
*/
/* Jacobi (weighted) iteration for the Helmholtz problem; see the block
 * comment above for the parameter contract.  Iterates until the normalized
 * residual drops below tol or maxit sweeps have run; u is updated in place.
 *
 * Fix: all array indexing used stride m (i + m*j), but u and f are laid
 * out with stride n -- see the U/F macros, u[(row)*n+(col)] -- and uold is
 * allocated with n*m elements.  With stride m the code read and wrote out
 * of bounds whenever n != m (it only happened to work for square grids).
 * All accesses now use stride n, consistent with initialize()/error_check().
 */
void jacobi ( const int n, const int m, double dx, double dy, double alpha,
        double omega, double *u, double *f, double tol, int maxit )
{
  int i,j,k;
  double error, resid, ax, ay, b;
  double *uold;    /* scratch copy of u for the sweep */

  uold = (double *)OSCR_malloc(sizeof(double) * n *m);

  ax = 1.0/(dx * dx);                    /* X-direction coef */
  ay = 1.0/(dy*dy);                      /* Y-direction coef */
  b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha;  /* Central coeff */

  error = 10.0 * tol;  /* force at least one sweep */
  k = 1;

  while (k <= maxit && error > tol) {
    error = 0.0;

#pragma omp parallel private(resid, i)
    {
      /* copy new solution into old (j is the omp-for index: implicitly private) */
#pragma omp for
      for (j=0; j<m; j++)
        for (i=0; i<n; i++)
          uold[i + n*j] = u[i + n*j];

      /* compute stencil, residual and update */
#pragma omp for reduction(+:error)
      for (j=1; j<m-1; j++)
        for (i=1; i<n-1; i++){
          resid =(
            ax * (uold[i-1 + n*j] + uold[i+1 + n*j])
            + ay * (uold[i + n*(j-1)] + uold[i + n*(j+1)])
            + b * uold[i + n*j] - f[i + n*j]
          ) / b;

          /* update solution */
          u[i + n*j] = uold[i + n*j] - omega * resid;

          /* accumulate residual error */
          error = error + resid*resid;
        }
    } /* end parallel */

    /* error check */
    k++;
    error = sqrt(error) /(n*m);
  } /* while */

  printf("Total Number of Iterations %d\n", k);
  printf("Residual %.15f\n", error);

  free(uold);
}
|
GB_builder.c | //------------------------------------------------------------------------------
// GB_builder: build a matrix from tuples
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// CALLED BY: GB_build, GB_wait, GB_transpose, GB_concat_hyper
// This function is called by GB_build to build a matrix T for GrB_Matrix_build
// or GrB_Vector_build, by GB_wait to build a matrix T from the list of pending
// tuples, and by GB_transpose to transpose a matrix or vector. Duplicates can
// appear if called by GB_build or GB_wait, but not GB_transpose.
// The indices are provided either as (I_input,J_input) or (I_work,J_work), not
// both. The values are provided as S_input or S_work, not both. On return,
// the *work arrays are either transplanted into T, or freed, since they are
// temporary workspaces.
// The work is done in major 5 Steps, some of which can be skipped, depending
// on how the tuples are provided (*_work or *_input), and whether or not they
// are sorted, or have duplicates. If vdim <= 1, some work is skipped (for
// GrB_Vectors, and single-vector GrB_Matrices). Let e be the # of tuples on
// input. Let p be the # of threads used.
// STEP 1: copy user input. O(e/p) read/write per thread, or skipped.
// STEP 2: sort the tuples. Time: O((e log e)/p), read/write, or skipped if
// the tuples are already sorted.
// STEP 3: count vectors and duplicates. O(e/p) reads, per thread, if no
// duplicates, or skipped if already done. O(e/p) read/writes
// per thread if duplicates appear.
// STEP 4: construct T->h and T->p. O(e/p) reads per thread, or skipped if
// T is a vector.
// STEP 5: assemble the tuples. O(e/p) read/writes per thread, or O(1) if the
// values can be transplanted into T as-is.
// For GrB_Matrix_build: If the input (I_input, J_input, S_input) is already
// sorted with no duplicates, and no typecasting needs to be done, then Step 1
// still must be done (each thread does O(e/p) reads of (I_input,J_input) and
// writes to I_work), but Step 1 also does the work for Step 3. Step 2 and 3
// are skipped. Step 4 does O(e/p) reads per thread (J_input only). Then
// I_work is transplanted into T->i. Step 5 does O(e/p) read/writes per thread
// to copy Sx into T->x.
// For GrB_Vector_build: as GrB_Matrix_build, Step 1 does O(e/p) read/writes
// per thread. The input is always a vector, so vdim == 1 always holds. Step
// 2 is skipped if the indices are already sorted, and Step 3 does no work at
// all unless duplicates appear. Step 4 takes no time, for any vector. Step 5
// does O(e/p) reads/writes per thread.
// For GB_wait: the pending tuples are provided as I_work, J_work, and S_work,
// so Step 1 is skipped (no need to check for invalid indices). The input
// J_work may be null (vdim can be anything, since GB_wait is used for both
// vectors and matrices). The tuples might be in sorted order already, which
// is known precisely from A->Pending->sorted. Step 2 does
// O((e log e)/p) work to sort the tuples. Duplicates may appear, and
// out-of-order tuples are likely. Step 3 does O(e/p) read/writes. Step 4
// does O(e/p) reads per thread of (I_work,J_work), or just I_work. Step 5
// does O(e/p) read/writes per thread, or O(1) time if S_work can be
// transplanted into T->x.
// For GB_transpose: uses I_work, J_work, and either S_input (if no op applied
// to the values) or S_work (if an op was applied to the A->x values). This is
// only done for matrices, not vectors, so vdim > 1 will always hold. The
// indices are valid so Step 1 is skipped. The tuples are not sorted, so Step
// 2 takes O((e log e)/p) time to do the sort. There are no duplicates, so
// Step 3 only does O(e/p) reads of J_work to count the vectors in each slice.
// Step 4 only does O(e/p) reads of J_work to compute T->h and T->p. Step 5
// does O(e/p) read/writes per thread, but it uses the simpler case in
// GB_reduce_build_template since no duplicates can appear. It is unlikely
// able to transplant S_work into T->x since the input will almost always be
// unsorted.
// For GB_concat_hyper: uses I_work, J_work, and S_work. No duplicates
// appear. Tuples are not sorted on input. I_work is transplanted into C->i.
// J_work and S_work are freed on output. S_work is not transplanted into
// C->x.
// For iso inputs/outputs: T and Sx have the same iso property. If
// they are iso, then dup is always NULL. Duplicates may or may not appear
// if T and Sx are iso.
// (1) GrB_Matrix_build, GrB_Vector_build, and GB_wait do not pass in an iso
// Sx array, where Sx is S_input for GrB*build, and S_work for GB_wait.
// Sx and Tx are not iso. Duplicates may appear. dup is always present
// for GrB*build, but may be either NULL or non-NULL for GB_wait.
// (2) GxB_Matrix_build_Scalar and GxB_Vector_build_Scalar: always construct
// iso matrices. For those methods Sx and Tx are always iso, and no dup
// operator is passed in (dup is NULL here, which is the implied 2nd
// operator). Duplicates may appear.
// (3) GB_transpose and GB_concat_hyper can pass in Sx as iso or
// non-iso, and always passes in dup as NULL since there are no
// duplicates. Sx and Tx are either both iso, or both non-iso.
// This method always returns T as hypersparse, and T is iso if and only
// if Sx is iso.
#include "GB_build.h"
#include "GB_sort.h"
#include "GB_binop.h"
#ifndef GBCUDA_DEV
#include "GB_red__include.h"
#endif
#define GB_I_WORK(t) (((t) < 0) ? -1 : I_work [t])
#define GB_J_WORK(t) (((t) < 0) ? -1 : ((J_work == NULL) ? 0 : J_work [t]))
#define GB_K_WORK(t) (((t) < 0) ? -1 : ((K_work == NULL) ? t : K_work [t]))
#define GB_FREE_WORKSPACE \
{ \
GB_WERK_POP (Work, int64_t) ; \
GB_FREE (I_work_handle, *I_work_size_handle) ; \
GB_FREE (J_work_handle, *J_work_size_handle) ; \
GB_FREE (S_work_handle, *S_work_size_handle) ; \
GB_FREE_WORK (&K_work, K_work_size) ; \
}
//------------------------------------------------------------------------------
// GB_builder
//------------------------------------------------------------------------------
GrB_Info GB_builder // build a matrix from tuples
(
GrB_Matrix T, // matrix to build, static or dynamic header
const GrB_Type ttype, // type of output matrix T
const int64_t vlen, // length of each vector of T
const int64_t vdim, // number of vectors in T
const bool is_csc, // true if T is CSC, false if CSR
int64_t **I_work_handle, // for (i,k) or (j,i,k) tuples
size_t *I_work_size_handle,
int64_t **J_work_handle, // for (j,i,k) tuples
size_t *J_work_size_handle,
GB_void **S_work_handle, // array of values of tuples, size ijslen,
// or size 1 if S is iso
size_t *S_work_size_handle,
bool known_sorted, // true if tuples known to be sorted
bool known_no_duplicates, // true if tuples known to not have dupl
int64_t ijslen, // size of I_work and J_work arrays
const bool is_matrix, // true if T a GrB_Matrix, false if vector
const int64_t *restrict I_input,// original indices, size nvals
const int64_t *restrict J_input,// original indices, size nvals
const GB_void *restrict S_input,// array of values of tuples, size nvals,
// or size 1 if S_input or S_work are iso
const bool S_iso, // true if S_input or S_work are iso
const int64_t nvals, // number of tuples, and size of K_work
const GrB_BinaryOp dup, // binary function to assemble duplicates,
// if NULL use the SECOND operator to
// keep the most recent duplicate.
const GrB_Type stype, // the type of S_work or S_input
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (T != NULL) ; // T is a static or dynamic header on input
ASSERT (nvals >= 0) ;
ASSERT_TYPE_OK (ttype, "ttype for builder", GB0) ;
ASSERT_BINARYOP_OK_OR_NULL (dup, "dup for builder", GB0) ;
ASSERT (I_work_handle != NULL) ;
ASSERT (J_work_handle != NULL) ;
ASSERT (S_work_handle != NULL) ;
ASSERT (!GB_OP_IS_POSITIONAL (dup)) ;
ASSERT (I_work_size_handle != NULL) ;
ASSERT (J_work_size_handle != NULL) ;
ASSERT (S_work_size_handle != NULL) ;
//--------------------------------------------------------------------------
// get Sx
//--------------------------------------------------------------------------
GB_void *restrict S_work = (*S_work_handle) ;
const GB_void *restrict Sx = (S_work == NULL) ? S_input : S_work ;
ASSERT (GB_IMPLIES (nvals > 0, Sx != NULL)) ;
ASSERT (GB_IMPLIES (S_iso, ttype == stype)) ;
ASSERT (GB_IMPLIES (S_iso, dup == NULL)) ;
//==========================================================================
// symbolic phase of the build =============================================
//==========================================================================
// The symbolic phase sorts the tuples and finds any duplicates. The
// output matrix T is constructed (not including T->i and T->x), and T->h
// and T->p are computed. Then I_work is transplanted into T->i, or T->i is
// allocated. T->x is then allocated. It is not computed until the
// numeric phase.
// When this function returns, I_work is either freed or transplanted into
// T->i. J_work is freed, and the I_work and J_work pointers (in the
// caller) are set to NULL by setting their handles to NULL. Note that
// J_work may already be NULL on input, if T has one or zero vectors
// (J_work_handle is always non-NULL however).
GrB_Info info ;
int64_t *restrict I_work = (*I_work_handle) ;
int64_t *restrict J_work = (*J_work_handle) ;
int64_t *restrict K_work = NULL ; size_t K_work_size = 0 ;
ASSERT (*J_work_size_handle == GB_Global_memtable_size (J_work)) ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (nvals, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_WERK_DECLARE (Work, int64_t) ;
GB_WERK_PUSH (Work, 5*(nthreads+1), int64_t) ;
if (Work == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
memset (Work, 0, Work_nitems * sizeof (int64_t)) ;
int64_t *restrict tstart_slice = Work ; // nthreads+1
int64_t *restrict tnvec_slice = Work + (nthreads+1) ; // nthreads+1
int64_t *restrict tnz_slice = Work + 2*(nthreads+1) ; // nthreads+1
int64_t *restrict kbad = Work + 3*(nthreads+1) ; // nthreads
int64_t *restrict ilast_slice = Work + 4*(nthreads+1) ; // nthreads
//--------------------------------------------------------------------------
// partition the tuples for the threads
//--------------------------------------------------------------------------
// Thread tid handles tuples tstart_slice [tid] to tstart_slice [tid+1]-1.
// Each thread handles about the same number of tuples. This partition
// depends only on nvals.
GB_eslice (tstart_slice, nvals, nthreads) ;
// tstart_slice [tid]: first tuple in slice tid
// tnvec_slice [tid]: # of vectors that start in a slice. If a vector
// starts in one slice and ends in another, it is
// counted as being in the first slice.
// tnz_slice [tid]: # of entries in a slice after removing duplicates
// sentinel values for the final cumulative sum
tnvec_slice [nthreads] = 0 ;
tnz_slice [nthreads] = 0 ;
// this becomes true if the first pass computes tnvec_slice and tnz_slice,
// and if the (I_input,J_input) tuples were found to be already sorted with
// no duplicates present.
bool tnvec_and_tnz_slice_computed = false ;
//--------------------------------------------------------------------------
// STEP 1: copy user input and check if valid
//--------------------------------------------------------------------------
// If the indices are provided by (I_input,J_input), then import them into
// (I_work,J_work) and check if they are valid, and sorted. If the input
// happens to be already sorted, then duplicates are detected and the # of
// vectors in each slice is counted.
if (I_work == NULL)
{
//----------------------------------------------------------------------
// allocate I_work
//----------------------------------------------------------------------
// allocate workspace to load and sort the index tuples:
// vdim <= 1: I_work and K_work for (i,k) tuples, where i = I_input [k]
// vdim > 1: also J_work for (j,i,k) tuples where i = I_input [k] and
// j = J_input [k]. If the tuples are found to be already sorted on
// input, then J_work is not allocated, and J_input is used instead.
// The k value in the tuple gives the position in the original set of
// tuples: I_input [k] and Sx [k] when vdim <= 1, and also J_input [k]
// for matrices with vdim > 1.
// The workspace I_work and J_work are allocated here but freed (or
// transplanted) inside GB_builder. K_work is allocated, used, and
// freed in GB_builder.
ASSERT (J_work == NULL) ;
I_work = GB_MALLOC (nvals, int64_t, I_work_size_handle) ;
(*I_work_handle) = I_work ;
ijslen = nvals ;
if (I_work == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// create the tuples to sort, and check for any invalid indices
//----------------------------------------------------------------------
known_sorted = true ;
bool no_duplicates_found = true ;
if (nvals == 0)
{
// nothing to do
}
else if (is_matrix)
{
//------------------------------------------------------------------
// C is a matrix; check both I_input and J_input
//------------------------------------------------------------------
ASSERT (J_input != NULL) ;
ASSERT (I_work != NULL) ;
ASSERT (vdim >= 0) ;
ASSERT (I_input != NULL) ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(&&:known_sorted) reduction(&&:no_duplicates_found)
for (tid = 0 ; tid < nthreads ; tid++)
{
kbad [tid] = -1 ;
int64_t my_tnvec = 0 ;
int64_t kstart = tstart_slice [tid] ;
int64_t kend = tstart_slice [tid+1] ;
int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ;
int64_t jlast = (kstart == 0) ? -1 : J_input [kstart-1] ;
for (int64_t k = kstart ; k < kend ; k++)
{
// get k-th index from user input: (i,j)
int64_t i = I_input [k] ;
int64_t j = J_input [k] ;
if (i < 0 || i >= vlen || j < 0 || j >= vdim)
{
// halt if out of bounds
kbad [tid] = k ;
break ;
}
// check if the tuples are already sorted
known_sorted = known_sorted &&
((jlast < j) || (jlast == j && ilast <= i)) ;
// check if this entry is a duplicate of the one before it
no_duplicates_found = no_duplicates_found &&
(!(jlast == j && ilast == i)) ;
// copy the tuple into I_work. J_work is done later.
I_work [k] = i ;
if (j > jlast)
{
// vector j starts in this slice (but this is
// valid only if J_input is sorted on input)
my_tnvec++ ;
}
// log the last index seen
ilast = i ; jlast = j ;
}
// these are valid only if I_input and J_input are sorted on
// input, with no duplicates present.
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = kend - kstart ;
}
// collect the report from each thread
for (int tid = 0 ; tid < nthreads ; tid++)
{
if (kbad [tid] >= 0)
{
// invalid index
int64_t i = I_input [kbad [tid]] ;
int64_t j = J_input [kbad [tid]] ;
int64_t row = is_csc ? i : j ;
int64_t col = is_csc ? j : i ;
int64_t nrows = is_csc ? vlen : vdim ;
int64_t ncols = is_csc ? vdim : vlen ;
GB_FREE_WORKSPACE ;
GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS,
"index (" GBd "," GBd ") out of bounds,"
" must be < (" GBd ", " GBd ")",
row, col, nrows, ncols) ;
}
}
// if the tuples were found to be already in sorted order, and if
// no duplicates were found, then tnvec_slice and tnz_slice are now
            // valid. Otherwise, they can only be computed after sorting.
tnvec_and_tnz_slice_computed = known_sorted && no_duplicates_found ;
//------------------------------------------------------------------
// allocate J_work, if needed
//------------------------------------------------------------------
if (vdim > 1 && !known_sorted)
{
// copy J_input into J_work, so the tuples can be sorted
J_work = GB_MALLOC (nvals, int64_t, J_work_size_handle) ;
(*J_work_handle) = J_work ;
if (J_work == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_memcpy (J_work, J_input, nvals * sizeof (int64_t), nthreads);
}
else
{
// J_work is a shallow copy of J_input. The pointer is not
// copied into (*J_work_handle), so it will not be freed.
// J_input is not modified, even though it is typecast to the
// int64_t *J_work, since J_work is not modified in this case.
J_work = (int64_t *) J_input ;
}
}
else
{
//------------------------------------------------------------------
// C is a typecasted GrB_Vector; check only I_input
//------------------------------------------------------------------
ASSERT (I_input != NULL) ;
ASSERT (J_input == NULL) ;
ASSERT (vdim == 1) ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(&&:known_sorted) reduction(&&:no_duplicates_found)
for (tid = 0 ; tid < nthreads ; tid++)
{
kbad [tid] = -1 ;
int64_t kstart = tstart_slice [tid] ;
int64_t kend = tstart_slice [tid+1] ;
int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ;
for (int64_t k = kstart ; k < kend ; k++)
{
// get k-th index from user input: (i)
int64_t i = I_input [k] ;
if (i < 0 || i >= vlen)
{
// halt if out of bounds
kbad [tid] = k ;
break ;
}
// check if the tuples are already sorted
known_sorted = known_sorted && (ilast <= i) ;
// check if this entry is a duplicate of the one before it
no_duplicates_found = no_duplicates_found &&
(!(ilast == i)) ;
// copy the tuple into the work arrays to be sorted
I_work [k] = i ;
// log the last index seen
ilast = i ;
}
}
// collect the report from each thread
for (int tid = 0 ; tid < nthreads ; tid++)
{
if (kbad [tid] >= 0)
{
// invalid index
int64_t i = I_input [kbad [tid]] ;
GB_FREE_WORKSPACE ;
GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS,
"index (" GBd ") out of bounds, must be < (" GBd ")",
i, vlen) ;
}
}
}
//----------------------------------------------------------------------
// determine if duplicates are possible
//----------------------------------------------------------------------
// The input is now known to be sorted, or not. If it is sorted, and
// if no duplicates were found, then it is known to have no duplicates.
// Otherwise, duplicates might appear, but a sort is required first to
// check for duplicates.
known_no_duplicates = known_sorted && no_duplicates_found ;
}
//--------------------------------------------------------------------------
// STEP 2: sort the tuples in ascending order
//--------------------------------------------------------------------------
// If the tuples are known to already be sorted, Step 2 is skipped. In
// that case, K_work is NULL (not allocated), which implicitly means that
// K_work [k] = k for all k = 0:nvals-1. K_work is always NULL if Sx and
// Tx are iso.
if (!known_sorted)
{
//----------------------------------------------------------------------
// allocate K_work workspace (not needed if T and Sx are iso)
//----------------------------------------------------------------------
if (!S_iso)
{
// create the k part of each tuple
K_work = GB_MALLOC_WORK (nvals, int64_t, &K_work_size) ;
if (K_work == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
// The k part of each tuple (i,k) or (j,i,k) records the original
// position of the tuple in the input list. This allows an
// unstable sorting algorithm to be used. Since k is unique, it
// forces the result of the sort to be stable regardless of whether
// or not the sorting algorithm is stable. It also keeps track of
// where the numerical value of the tuple can be found; it is in
// Sx[k] for the tuple (i,k) or (j,i,k), regardless of where the
// tuple appears in the list after it is sorted.
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < nvals ; k++)
{
K_work [k] = k ;
}
}
//----------------------------------------------------------------------
// sort all the tuples
//----------------------------------------------------------------------
if (vdim > 1)
{
//------------------------------------------------------------------
// sort a set of (j,i,k) tuples
//------------------------------------------------------------------
if (S_iso)
{
// K_work is NULL; only sort (j,i)
info = GB_msort_2 (J_work, I_work, nvals, nthreads) ;
}
else
{
info = GB_msort_3 (J_work, I_work, K_work, nvals, nthreads) ;
}
#ifdef GB_DEBUG
if (info == GrB_SUCCESS)
{
int64_t ilast = -1 ;
int64_t jlast = -1 ;
for (int64_t k = 0 ; k < nvals ; k++)
{
int64_t i = I_work [k] ;
int64_t j = J_work [k] ;
ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ;
ilast = i ;
jlast = j ;
}
}
#endif
}
else
{
//------------------------------------------------------------------
// sort a set of (i,k) tuples
//------------------------------------------------------------------
if (S_iso)
{
// K_work is NULL; only sort (i)
info = GB_msort_1 (I_work, nvals, nthreads) ;
}
else
{
info = GB_msort_2 (I_work, K_work, nvals, nthreads) ;
}
#ifdef GB_DEBUG
if (info == GrB_SUCCESS)
{
int64_t ilast = -1 ;
for (int64_t k = 0 ; k < nvals ; k++)
{
int64_t i = I_work [k] ;
ASSERT (ilast <= i) ;
ilast = i ;
}
}
#endif
}
if (info != GrB_SUCCESS)
{
// out of memory in GB_msort_*
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// STEP 3: count vectors and duplicates in each slice
//--------------------------------------------------------------------------
// Duplicates are located, counted and their indices negated. The # of
// vectors in each slice is counted. If the indices are known to not have
// duplicates, then only the vectors are counted. Counting the # of
// vectors is skipped if already done by Step 1.
if (known_no_duplicates)
{
//----------------------------------------------------------------------
// no duplicates: just count # vectors in each slice
//----------------------------------------------------------------------
// This is much faster, particularly if the # of vectors in each slice
// has already been computed.
#ifdef GB_DEBUG
{
// assert that there are no duplicates
int64_t ilast = -1, jlast = -1 ;
for (int64_t t = 0 ; t < nvals ; t++)
{
int64_t i = GB_I_WORK (t), j = GB_J_WORK (t) ;
bool is_duplicate = (i == ilast && j == jlast) ;
ASSERT (!is_duplicate) ;
ilast = i ; jlast = j ;
}
}
#endif
if (vdim <= 1)
{
// all tuples appear in at most one vector, and there are no
// duplicates, so there is no need to scan I_work or J_work.
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
tnvec_slice [tid] = 0 ;
tnz_slice [tid] = tend - tstart ;
}
tnvec_slice [0] = (nvals == 0) ? 0 : 1 ;
}
else
{
// count the # of unique vector indices in J_work. No need to scan
// I_work since there are no duplicates to be found. Also no need
// to compute them if already found in Step 1.
if (!tnvec_and_tnz_slice_computed)
{
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = 0 ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t j = J_work [t] ;
if (j > jlast)
{
// vector j starts in this slice
my_tnvec++ ;
jlast = j ;
}
}
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = tend - tstart ;
}
}
}
}
else
{
//----------------------------------------------------------------------
// look for duplicates and count # vectors in each slice
//----------------------------------------------------------------------
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t tstart = tstart_slice [tid] ;
ilast_slice [tid] = GB_I_WORK (tstart-1) ;
}
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = 0 ;
int64_t my_ndupl = 0 ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t ilast = ilast_slice [tid] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t i = I_work [t] ;
int64_t j = GB_J_WORK (t) ;
// tuples are now sorted but there may be duplicates
ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ;
// check if (j,i,k) is a duplicate
if (i == ilast && j == jlast)
{
// flag the tuple as a duplicate
I_work [t] = -1 ;
my_ndupl++ ;
// the sort places earlier duplicate tuples (with smaller
// k) after later ones (with larger k).
ASSERT (GB_K_WORK (t-1) < GB_K_WORK (t)) ;
}
else
{
// this is a new tuple
if (j > jlast)
{
// vector j starts in this slice
my_tnvec++ ;
jlast = j ;
}
ilast = i ;
}
}
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = (tend - tstart) - my_ndupl ;
}
}
//--------------------------------------------------------------------------
// find total # of vectors and duplicates in all tuples
//--------------------------------------------------------------------------
// Replace tnvec_slice with its cumulative sum, after which each slice tid
// will be responsible for the # vectors in T that range from tnvec_slice
// [tid] to tnvec_slice [tid+1]-1.
GB_cumsum (tnvec_slice, nthreads, NULL, 1, NULL) ;
int64_t tnvec = tnvec_slice [nthreads] ;
// Replace tnz_slice with its cumulative sum
GB_cumsum (tnz_slice, nthreads, NULL, 1, NULL) ;
// find the total # of final entries, after assembling duplicates
int64_t tnz = tnz_slice [nthreads] ;
int64_t ndupl = nvals - tnz ;
//--------------------------------------------------------------------------
// allocate T; always hypersparse
//--------------------------------------------------------------------------
// allocate T; allocate T->p and T->h but do not initialize them.
// T is always hypersparse. The header T always exists on input, as
// either a static or dynamic header.
info = GB_new (&T, // always hyper, existing header
ttype, vlen, vdim, GB_Ap_malloc, is_csc,
GxB_HYPERSPARSE, GB_ALWAYS_HYPER, tnvec, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_WORKSPACE ;
return (info) ;
}
ASSERT (T->p != NULL) ;
ASSERT (T->h != NULL) ;
ASSERT (T->b == NULL) ;
ASSERT (T->i == NULL) ;
ASSERT (T->x == NULL) ;
T->iso = S_iso ; // OK: T is iso if and only if Sx is iso
bool do_burble = (vlen > 1 || vdim > 1) && (nvals > 1) ;
if (do_burble)
{
if (S_iso)
{
GBURBLE ("(iso build) ") ;
}
else
{
GBURBLE ("(build) ") ;
}
}
//--------------------------------------------------------------------------
// STEP 4: construct the vector pointers and hyperlist for T
//--------------------------------------------------------------------------
// Step 4 scans the J_work indices and constructs T->h and T->p.
int64_t *restrict Th = T->h ;
int64_t *restrict Tp = T->p ;
if (vdim <= 1)
{
//----------------------------------------------------------------------
// special case for vectors
//----------------------------------------------------------------------
ASSERT (tnvec == 0 || tnvec == 1) ;
if (tnvec > 0)
{
Th [0] = 0 ;
Tp [0] = 0 ;
}
}
else if (ndupl == 0)
{
//----------------------------------------------------------------------
// no duplicates appear
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = tnvec_slice [tid] ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t j = GB_J_WORK (t) ;
if (j > jlast)
{
// vector j starts in this slice
Th [my_tnvec] = j ;
Tp [my_tnvec] = t ;
my_tnvec++ ;
jlast = j ;
}
}
}
}
else
{
//----------------------------------------------------------------------
// it is known that at least one duplicate appears
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnz = tnz_slice [tid] ;
int64_t my_tnvec = tnvec_slice [tid] ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t i = I_work [t] ;
int64_t j = GB_J_WORK (t) ;
if (i >= 0)
{
// this is a new tuple
if (j > jlast)
{
// vector j starts in this slice
Th [my_tnvec] = j ;
Tp [my_tnvec] = my_tnz ;
my_tnvec++ ;
jlast = j ;
}
my_tnz++ ;
}
}
}
}
// log the end of the last vector
T->nvec_nonempty = tnvec ;
T->nvec = tnvec ;
Tp [tnvec] = tnz ;
ASSERT (T->nvec == T->plen) ;
T->magic = GB_MAGIC ;
//--------------------------------------------------------------------------
// free J_work if it exists
//--------------------------------------------------------------------------
ASSERT (J_work_handle != NULL) ;
GB_FREE (J_work_handle, *J_work_size_handle) ;
J_work = NULL ;
//--------------------------------------------------------------------------
// allocate T->i
//--------------------------------------------------------------------------
if (ndupl == 0)
{
// shrink I_work from size ijslen to size tnz
if (tnz < ijslen)
{
// this cannot fail since the size is shrinking.
bool ok ;
GB_REALLOC (I_work, tnz, int64_t, I_work_size_handle, &ok, Context);
ASSERT (ok) ;
}
// transplant I_work into T->i
T->i = I_work ; T->i_size = (*I_work_size_handle) ;
I_work = NULL ;
(*I_work_handle) = NULL ;
(*I_work_size_handle) = 0 ;
}
else
{
// duplicates exist, so allocate a new T->i. I_work must be freed later
T->i = GB_MALLOC (tnz, int64_t, &(T->i_size)) ;
if (T->i == NULL)
{
// out of memory
GB_phbix_free (T) ;
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
}
int64_t *restrict Ti = T->i ;
//==========================================================================
// numerical phase of the build: assemble any duplicates
//==========================================================================
// The tuples have been sorted. Assemble any duplicates with a switch
// factory of built-in workers, or four generic workers. The vector
// pointers T->p and hyperlist T->h (if hypersparse) have already been
// computed.
// If there are no duplicates, T->i holds the row indices of the tuple.
// Otherwise, the row indices are still in I_work. K_work holds the
// positions of each tuple in the array Sx. The tuples are sorted so that
// duplicates are adjacent to each other and they appear in the order they
    // appeared in the original tuples.  This method assembles the duplicates
    // and computes T->i and T->x from I_work, K_work, and Sx.  If no
    // duplicates appear, T->i is already computed, and Sx just needs to be
    // copied and permuted into T->x.
// The (i,k,Sx[k]) tuples are held in two integer arrays: (1) I_work or
// T->i, and (2) K_work, and an array Sx of numerical values. Sx has not
// been sorted, nor even accessed yet. It is identical to the original
// unsorted tuples. The (i,k,Sx[k]) tuple holds the row index i, the
// position k, and the value Sx [k]. This entry becomes T(i,j) = Sx [k] in
// the matrix T, and duplicates (if any) are assembled via the dup
// operator.
//--------------------------------------------------------------------------
// get opcodes and check types
//--------------------------------------------------------------------------
// With GB_build, there can be 1 to 2 different types.
// T->type is identical to the types of x,y,z for z=dup(x,y).
// dup is never NULL and all its three types are the same
    //      The type of Sx (stype) can differ but must be compatible
// with T->type
// With GB_wait, there can be 1 to 5 different types:
// The pending tuples are in Sx, of type stype which must be
// compatible with dup->ytype and T->type
// z = dup (x,y): can be NULL or have 1 to 3 different types
// T->type: must be compatible with all above types.
    // dup may be NULL, in which case it is assumed to be the implicit SECOND
// operator, with all three types equal to T->type
GrB_Type xtype, ytype, ztype ;
GxB_binary_function fdup ;
#ifndef GBCUDA_DEV
GB_Opcode opcode ;
#endif
GB_Type_code tcode = ttype->code ;
const size_t tsize = ttype->size ;
bool op_2nd ;
ASSERT_TYPE_OK (ttype, "ttype for build_factory", GB0) ;
if (dup == NULL)
{
//----------------------------------------------------------------------
// dup is the implicit SECOND operator
//----------------------------------------------------------------------
// z = SECOND (x,y) where all three types are the same as ttype
// T(i,j) = (ttype) Sx(k) will be done for all tuples.
#ifndef GBCUDA_DEV
opcode = GB_SECOND_binop_code ;
#endif
xtype = ttype ;
ytype = ttype ;
ztype = ttype ;
fdup = NULL ;
op_2nd = true ;
ASSERT (GB_op_is_second (dup, ttype)) ;
}
else
{
//----------------------------------------------------------------------
// dup is an explicit operator
//----------------------------------------------------------------------
// T(i,j) = (ttype) Sx[k] will be done for the first tuple.
// for subsequent tuples: T(i,j) += Sx[k], via the dup operator and
// typecasting:
//
// y = (dup->ytype) Sx[k]
// x = (dup->xtype) T(i,j)
// z = (dup->ztype) dup (x,y)
// T(i,j) = (ttype) z
ASSERT_BINARYOP_OK (dup, "dup for build_factory", GB0) ;
ASSERT (!S_iso) ;
#ifndef GBCUDA_DEV
opcode = dup->opcode ;
#endif
xtype = dup->xtype ;
ytype = dup->ytype ;
ztype = dup->ztype ;
fdup = dup->binop_function ;
op_2nd = GB_op_is_second (dup, ttype) ;
}
//--------------------------------------------------------------------------
// get the sizes and codes of each type
//--------------------------------------------------------------------------
GB_Type_code zcode = ztype->code ;
GB_Type_code xcode = xtype->code ;
GB_Type_code ycode = ytype->code ;
ASSERT (GB_Type_compatible (ttype, stype)) ; // T(i,j) = (ttype) Sx
ASSERT (GB_Type_compatible (ytype, stype)) ; // y = (ytype) Sx
ASSERT (GB_Type_compatible (xtype, ttype)) ; // x = (xtype) T(i,j)
ASSERT (GB_Type_compatible (ttype, ztype)) ; // T(i,j) = (ttype) z
size_t zsize = ztype->size ;
size_t xsize = xtype->size ;
size_t ysize = ytype->size ;
// no typecasting if all 5 types are the same
bool nocasting = (ttype == stype) &&
(ttype == xtype) && (ttype == ytype) && (ttype == ztype) ;
ASSERT_TYPE_OK (ttype, "ttype for build_factory", GB0) ;
ASSERT_TYPE_OK (stype, "stype for build_factory", GB0) ;
ASSERT_TYPE_OK (xtype, "xtype for build_factory", GB0) ;
ASSERT_TYPE_OK (ytype, "ytype for build_factory", GB0) ;
ASSERT_TYPE_OK (ztype, "ztype for build_factory", GB0) ;
//--------------------------------------------------------------------------
// STEP 5: assemble the tuples
//--------------------------------------------------------------------------
bool copy_S_into_T = (nocasting && known_sorted && ndupl == 0) ;
if (copy_S_into_T && S_work != NULL)
{
//----------------------------------------------------------------------
// transplant S_work into T->x
//----------------------------------------------------------------------
// No typecasting is needed, the tuples were originally in sorted
// order, and no duplicates appear. All that is required is to copy Sx
// into Tx. Sx can be directly transplanted into T->x since Sx is
// provided as S_work. GB_builder must either transplant or free
// S_work. The transplant can be used by GB_wait, whenever the tuples
// are already sorted, with no duplicates, and no typecasting is
// needed, since S_work is always A->Pending->x. T and Sx may be iso
// or non-iso.
T->x = S_work ; T->x_size = (*S_work_size_handle) ;
S_work = NULL ;
(*S_work_handle) = NULL ;
(*S_work_size_handle) = 0 ;
int64_t tx_size_required = tnz * tsize ;
if (2 * tx_size_required < T->x_size)
{
// shrink the size of T->x
bool ok = true ;
GB_REALLOC (T->x, tx_size_required, GB_void, &(T->x_size), &ok,
Context) ;
}
}
else
{
//----------------------------------------------------------------------
// allocate T->x
//----------------------------------------------------------------------
T->x = GB_XALLOC (false, S_iso, tnz, tsize, &(T->x_size)) ; // x:OK
if (T->x == NULL)
{
// out of memory
GB_phbix_free (T) ;
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_void *restrict Tx = (GB_void *) T->x ;
ASSERT (GB_IMPLIES (nvals > 0, Sx != NULL)) ;
if (nvals == 0)
{
// nothing to do
}
else if (copy_S_into_T)
{
//------------------------------------------------------------------
// copy Sx into T->x
//------------------------------------------------------------------
// No typecasting is needed, the tuples were originally in sorted
// order, and no duplicates appear. All that is required is to
// copy Sx into Tx. Sx cannot be transplanted into T->x since
// S_work is NULL and S_input cannot be modified by GB_builder.
ASSERT (S_work == NULL) ;
ASSERT (Sx == S_input) ;
GB_memcpy (Tx, Sx, (S_iso ? 1 : nvals) * tsize, nthreads) ;
}
else if (nocasting)
{
//------------------------------------------------------------------
// assemble the values, Sx, into T, no typecasting needed
//------------------------------------------------------------------
// Sx (either S_work or S_input) must be permuted and copied into
// T->x, since the tuples had to be sorted, or duplicates appear.
// Any duplicates are now assembled.
// There are 44 common cases of this function for built-in types
// and 8 associative operators: MIN, MAX, PLUS, TIMES for 10 types
            // (all but boolean); and OR, AND, XOR, and EQ for boolean.
// In addition, the FIRST and SECOND operators are hard-coded, for
// another 22 workers, since SECOND is used by GB_wait and since
// FIRST is useful for keeping the first tuple seen. It is
// controlled by the GB_INCLUDE_SECOND_OPERATOR definition, so they
// do not appear in GB_reduce_to_* where the FIRST and SECOND
// operators are not needed.
// Early exit cannot be exploited, so the terminal is ignored.
bool done = false ;
if (S_iso)
{
//--------------------------------------------------------------
// T and Sx are iso; set iso value and delete duplicates
//--------------------------------------------------------------
memcpy (Tx, Sx, tsize) ;
#define GB_ISO_BUILD
#include "GB_reduce_build_template.c"
done = true ;
}
else
{
//--------------------------------------------------------------
// T and Sx are not iso; call in the workers
//--------------------------------------------------------------
#ifndef GBCUDA_DEV
//----------------------------------------------------------
// define the worker for the switch factory
//----------------------------------------------------------
#define GB_INCLUDE_SECOND_OPERATOR
#define GB_red(opname,aname) \
GB (_red_build_ ## opname ## aname)
#define GB_RED_WORKER(opname,aname,atype) \
{ \
info = GB_red (opname, aname) ((atype *) Tx, Ti, \
(atype *) Sx, nvals, ndupl, I_work, K_work, \
tstart_slice, tnz_slice, nthreads) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//----------------------------------------------------------
// launch the switch factory
//----------------------------------------------------------
// controlled by opcode and typecode
GB_Type_code typecode = tcode ;
#include "GB_red_factory.c"
#endif
}
//------------------------------------------------------------------
// generic worker
//------------------------------------------------------------------
if (!done)
{
if (do_burble) GBURBLE ("(generic build) ") ;
//--------------------------------------------------------------
// no typecasting, but use the fdup function pointer and memcpy
//--------------------------------------------------------------
// Either the fdup operator or type of Sx and T are
// user-defined, or fdup is not an associative operator handled
// by the GB_red_factory, or some combination of these
// conditions. User-defined types cannot be typecasted, so
// this handles all user-defined types.
// Tx [p] = (ttype) Sx [k], but with no typecasting
#undef GB_CAST_ARRAY_TO_ARRAY
#define GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
memcpy (Tx +((p)*tsize), Sx +((k)*tsize), tsize) ;
if (op_2nd)
{
//----------------------------------------------------------
// dup is the SECOND operator, with no typecasting
//----------------------------------------------------------
// Tx [p] += (ttype) Sx [k], but 2nd op and no typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k)
#include "GB_reduce_build_template.c"
}
else
{
//----------------------------------------------------------
// dup is another operator, with no typecasting needed
//----------------------------------------------------------
// Tx [p] += (ttype) Sx [k], but with no typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
fdup (Tx +((p)*tsize), Tx +((p)*tsize), Sx+((k)*tsize));
#include "GB_reduce_build_template.c"
}
}
}
else
{
//------------------------------------------------------------------
// assemble the values Sx into T, typecasting as needed
//------------------------------------------------------------------
if (do_burble)
{
GBURBLE ("(generic build with typecast) ") ;
}
// If T and Sx are iso, no typecasting is ever done, so this method
// is not used in that case.
ASSERT (!S_iso) ;
// Sx (either S_work or S_input) must be permuted and copied into
// T->x, since the tuples had to be sorted, or duplicates appear.
// Any duplicates are now assembled. Not all of the 5 types are
// the same, but all of them are built-in since user-defined types
// cannot be typecasted.
const GB_Type_code scode = stype->code ;
const size_t ssize = stype->size ;
GB_cast_function cast_S_to_T = GB_cast_factory (tcode, scode) ;
GB_cast_function cast_S_to_Y = GB_cast_factory (ycode, scode) ;
GB_cast_function cast_T_to_X = GB_cast_factory (xcode, tcode) ;
GB_cast_function cast_Z_to_T = GB_cast_factory (tcode, zcode) ;
ASSERT (scode <= GB_FC64_code) ;
ASSERT (tcode <= GB_FC64_code) ;
ASSERT (xcode <= GB_FC64_code) ;
ASSERT (ycode <= GB_FC64_code) ;
ASSERT (zcode <= GB_FC64_code) ;
// Tx [p] = (ttype) Sx [k], with typecasting
#undef GB_CAST_ARRAY_TO_ARRAY
#define GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
cast_S_to_T (Tx +((p)*tsize), Sx +((k)*ssize), ssize) ;
if (op_2nd)
{
//--------------------------------------------------------------
// dup operator is the SECOND operator, with typecasting
//--------------------------------------------------------------
// Tx [p] += (ttype) Sx [k], but 2nd op, with typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k)
#include "GB_reduce_build_template.c"
}
else
{
//--------------------------------------------------------------
// dup is another operator, with typecasting required
//--------------------------------------------------------------
// Tx [p] += Sx [k], with typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
{ \
/* ywork = (ytype) Sx [k] */ \
GB_void ywork [GB_VLA(ysize)] ; \
cast_S_to_Y (ywork, Sx +((k)*ssize), ssize) ; \
/* xwork = (xtype) Tx [p] */ \
GB_void xwork [GB_VLA(xsize)] ; \
cast_T_to_X (xwork, Tx +((p)*tsize), tsize) ; \
/* zwork = f (xwork, ywork) */ \
GB_void zwork [GB_VLA(zsize)] ; \
fdup (zwork, xwork, ywork) ; \
/* Tx [tnz-1] = (ttype) zwork */ \
cast_Z_to_T (Tx +((p)*tsize), zwork, zsize) ; \
}
#include "GB_reduce_build_template.c"
}
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
T->jumbled = false ;
ASSERT_MATRIX_OK (T, "T built", GB0) ;
ASSERT (GB_IS_HYPERSPARSE (T)) ;
return (GrB_SUCCESS) ;
}
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
typedef struct _TransformPacket
{
  /*
    One row of a 3x3 color-transform lookup table: the contribution of a
    single input channel to each of the three output channels (used by
    the x_map/y_map/z_map tables in sRGBTransformImage()).
  */
  MagickRealType
    x,
    y,
    z;
} TransformPacket;
/*
Forward declarations.
*/
static MagickBooleanType
TransformsRGBImage(Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o l o r s p a c e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageColorspaceType() returns the potential type of image:
% sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
% To ensure the image type matches its potential, use SetImageColorspaceType():
%
% (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
% exception);
%
% The format of the GetImageColorspaceType method is:
%
% ColorspaceType GetImageColorspaceType(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    If the pixels are effectively gray (bilevel, grayscale, or grayscale
    with alpha), report GRAY; otherwise report the declared colorspace.
  */
  type=IdentifyImageType(image,exception);
  if ((type == BilevelType) || (type == GrayscaleType) ||
      (type == GrayscaleAlphaType))
    return(GRAYColorspace);
  return(image->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ s R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sRGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the sRGBTransformImage method is:
%
% MagickBooleanType sRGBTransformImage(Image *image,
%      const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ConvertRGBToCMY(const double red,const double green,
  const double blue,double *cyan,double *magenta,double *yellow)
{
  /*
    CMY is the complement of RGB: each channel is the distance from full
    intensity, normalized to [0,1] by QuantumScale.
  */
  *cyan=(QuantumRange-red)*QuantumScale;
  *magenta=(QuantumRange-green)*QuantumScale;
  *yellow=(QuantumRange-blue)*QuantumScale;
}
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  /*
    CAT02 chromatic-adaptation matrix (XYZ -> LMS cone response).  Each
    row sums to 1, so the white point (1,1,1) maps to (1,1,1).
  */
  *L=x*0.7328+y*0.4296-z*0.1624;
  *M=(x*(-0.7036)+y*1.6975+z*0.0061);
  *S=x*0.0030+y*0.0136+z*0.9834;
}
static void ConvertRGBToLMS(const double red,const double green,
  const double blue,double *L,double *M,double *S)
{
  double
    x,
    y,
    z;

  /*
    sRGB -> CIE XYZ -> LMS, using XYZ as the connection space.
  */
  ConvertRGBToXYZ(red,green,blue,&x,&y,&z);
  ConvertXYZToLMS(x,y,z,L,M,S);
}
static void ConvertRGBToLab(const double red,const double green,
  const double blue,double *L,double *a,double *b)
{
  double
    x,
    y,
    z;

  /*
    sRGB -> CIE XYZ -> CIE L*a*b*.
  */
  ConvertRGBToXYZ(red,green,blue,&x,&y,&z);
  ConvertXYZToLab(x,y,z,L,a,b);
}
static void ConvertRGBToLuv(const double red,const double green,
  const double blue,double *L,double *u,double *v)
{
  double
    x,
    y,
    z;

  /*
    sRGB -> CIE XYZ -> CIE L*u*v*.
  */
  ConvertRGBToXYZ(red,green,blue,&x,&y,&z);
  ConvertXYZToLuv(x,y,z,L,u,v);
}
static void ConvertRGBToxyY(const double red,const double green,
  const double blue,double *low_x,double *low_y,double *cap_Y)
{
  double
    norm,
    X,
    Y,
    Z;

  /*
    CIE xyY: chromaticity coordinates x and y are X and Y normalized by
    the sum X+Y+Z; Y (luminance) passes through unchanged.
    PerceptibleReciprocal() guards against division by zero.
  */
  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  norm=PerceptibleReciprocal(X+Y+Z);
  *low_x=X*norm;
  *low_y=Y*norm;
  *cap_Y=Y;
}
static void inline ConvertXYZToJzazbz(const double X,const double Y,
const double Z,const double white_luminance,double *Jz,double *az,double *bz)
{
#define Jzazbz_b 1.15 /* https://observablehq.com/@jrus/jzazbz */
#define Jzazbz_g 0.66
#define Jzazbz_c1 (3424.0/4096.0)
#define Jzazbz_c2 (2413.0/128.0)
#define Jzazbz_c3 (2392.0/128.0)
#define Jzazbz_n (2610.0/16384.0)
#define Jzazbz_p (1.7*2523.0/32.0)
#define Jzazbz_d (-0.56)
#define Jzazbz_d0 (1.6295499532821566e-11)
double
gamma,
Iz,
L,
Lp,
M,
Mp,
S,
Sp,
Xp,
Yp,
Zp;
Xp=(Jzazbz_b*X-Z*(Jzazbz_b-1));
Yp=(Jzazbz_g*Y-X*(Jzazbz_g-1));
Zp=Z;
L=0.41478972*Xp+0.579999*Yp+0.0146480*Zp;
M=(-0.2015100)*Xp+1.120649*Yp+0.0531008*Zp;
S=(-0.0166008)*Xp+0.264800*Yp+0.6684799*Zp;
gamma=pow(L/white_luminance,Jzazbz_n);
Lp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
gamma=pow(M/white_luminance,Jzazbz_n);
Mp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
gamma=pow(S/white_luminance,Jzazbz_n);
Sp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
Iz=0.5*Lp+0.5*Mp;
*az=3.52400*Lp-4.066708*Mp+0.542708*Sp+0.5;
*bz=0.199076*Lp+1.096799*Mp-1.295875*Sp+0.5;
*Jz=((Jzazbz_d+1.0)*Iz)/(Jzazbz_d*Iz+1.0)-Jzazbz_d0;
}
static inline void ConvertJzazbzToXYZ(const double Jz,const double az,
  const double bz,const double white_luminance,double *X,double *Y,double *Z)
{
  double
    a_offset,
    b_offset,
    intensity,
    l,
    l_p,
    m,
    m_p,
    ratio,
    s,
    s_p,
    x_p,
    y_p,
    z_p;

  /*
    Invert the lightness encoding and the 0.5 bias on the opponent axes.
  */
  ratio=Jz+Jzazbz_d0;
  intensity=ratio/(Jzazbz_d-Jzazbz_d*ratio+1.0);
  a_offset=az-0.5;
  b_offset=bz-0.5;
  l_p=intensity+0.138605043271539*a_offset+0.0580473161561189*b_offset;
  m_p=intensity-0.138605043271539*a_offset-0.0580473161561189*b_offset;
  s_p=intensity-0.0960192420263189*a_offset-0.811891896056039*b_offset;
  /*
    Undo the PQ-style non-linearity on each cone response.
  */
  ratio=pow(l_p,1.0/Jzazbz_p);
  l=white_luminance*pow((Jzazbz_c1-ratio)/(Jzazbz_c3*ratio-Jzazbz_c2),1.0/
    Jzazbz_n);
  ratio=pow(m_p,1.0/Jzazbz_p);
  m=white_luminance*pow((Jzazbz_c1-ratio)/(Jzazbz_c3*ratio-Jzazbz_c2),1.0/
    Jzazbz_n);
  ratio=pow(s_p,1.0/Jzazbz_p);
  s=white_luminance*pow((Jzazbz_c1-ratio)/(Jzazbz_c3*ratio-Jzazbz_c2),1.0/
    Jzazbz_n);
  /*
    Back to XYZ; *X is computed first because *Y depends on it.
  */
  x_p=1.92422643578761*l-1.00479231259537*m+0.037651404030618*s;
  y_p=0.350316762094999*l+0.726481193931655*m-0.065384422948085*s;
  z_p=(-0.0909828109828476)*l-0.312728290523074*m+1.52276656130526*s;
  *X=(x_p+(Jzazbz_b-1.0)*z_p)/Jzazbz_b;
  *Y=(y_p+(Jzazbz_g-1.0)**X)/Jzazbz_g;
  *Z=z_p;
}
static void ConvertRGBToJzazbz(const double red,const double green,
  const double blue,const double white_luminance,double *Jz,double *az,
  double *bz)
{
  double
    X,
    Y,
    Z;

  /*
    sRGB -> CIE XYZ -> Jzazbz.  Fix: the arguments to ConvertRGBToXYZ()
    are passed in declaration order (red,green,blue); the previous call
    transposed green and blue, swapping those channels in the transform.
  */
  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToJzazbz(X,Y,Z,white_luminance,Jz,az,bz);
}
static void ConvertJzazbzToRGB(const double Jz,const double az,
  const double bz,const double white_luminance,double *red,double *green,
  double *blue)
{
  double
    X,
    Y,
    Z;

  /*
    Jzazbz -> CIE XYZ -> sRGB.  Fix: the output pointers to
    ConvertXYZToRGB() are passed in declaration order (red,green,blue);
    the previous call transposed green and blue, swapping those channels
    on the way back from Jzazbz.
  */
  ConvertJzazbzToXYZ(Jz,az,bz,white_luminance,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static void ConvertRGBToYDbDr(const double red,const double green,
  const double blue,double *Y,double *Db,double *Dr)
{
  /*
    YDbDr (SECAM): luma plus two color-difference channels; Db and Dr
    are offset by 0.5 to fit the unsigned normalized range.
  */
  *Y=(red*0.298839+green*0.586811+blue*0.114350)*QuantumScale;
  *Db=0.5+(red*(-0.450)-green*0.883+blue*1.333)*QuantumScale;
  *Dr=0.5+(red*(-1.333)+green*1.116+blue*0.217)*QuantumScale;
}
static void ConvertRGBToYIQ(const double red,const double green,
  const double blue,double *Y,double *I,double *Q)
{
  /*
    YIQ (NTSC): luma plus in-phase/quadrature chroma; I and Q are offset
    by 0.5 to fit the unsigned normalized range.
  */
  *Y=(red*0.298839+green*0.586811+blue*0.114350)*QuantumScale;
  *I=0.5+(red*0.595716-green*0.274453-blue*0.321263)*QuantumScale;
  *Q=0.5+(red*0.211456-green*0.522591+blue*0.311135)*QuantumScale;
}
static void ConvertRGBToYPbPr(const double red,const double green,
  const double blue,double *Y,double *Pb,double *Pr)
{
  /*
    YPbPr with ITU-R BT.601 luma weights; Pb and Pr are offset by 0.5 to
    fit the unsigned normalized range.
  */
  *Y=(red*0.298839+green*0.586811+blue*0.114350)*QuantumScale;
  *Pb=0.5+(red*(-0.1687367)-green*0.331264+blue*0.5)*QuantumScale;
  *Pr=0.5+(red*0.5-green*0.418688-blue*0.081312)*QuantumScale;
}
static void ConvertRGBToYCbCr(const double red,const double green,
  const double blue,double *Y,double *Cb,double *Cr)
{
  /*
    YCbCr here is numerically identical to YPbPr: both use the same
    BT.601 weights and store the chroma channels offset by 0.5.
  */
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}
static void ConvertRGBToYUV(const double red,const double green,
  const double blue,double *Y,double *U,double *V)
{
  /*
    YUV (PAL): luma plus two chroma channels; U and V are offset by 0.5
    to fit the unsigned normalized range.
  */
  *Y=(red*0.298839+green*0.586811+blue*0.114350)*QuantumScale;
  *U=0.5+(red*(-0.147)-green*0.289+blue*0.436)*QuantumScale;
  *V=0.5+(red*0.615-green*0.515-blue*0.100)*QuantumScale;
}
static MagickBooleanType sRGBTransformImage(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define sRGBTransformImageTag  "RGBTransform/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    primary_info;

  register ssize_t
    i;

  ssize_t
    y;

  TransformPacket
    *x_map,
    *y_map,
    *z_map;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Callers must not request the source colorspace or an undefined one.
  */
  assert(colorspace != sRGBColorspace);
  assert(colorspace != TransparentColorspace);
  assert(colorspace != UndefinedColorspace);
  status=MagickTrue;
  progress=0;
  /*
    Colorspaces handled per-pixel return directly from their case; the
    table-driven (matrix) colorspaces fall through to the code after the
    switch.
  */
  switch (colorspace)
  {
    case CMYKColorspace:
    {
      PixelInfo
        zero;

      /*
        Convert RGB to CMYK colorspace.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      GetPixelInfo(image,&zero);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixel=zero;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          GetPixelInfoPixel(image,q,&pixel);
          ConvertRGBToCMYK(&pixel);
          SetPixelViaPixelInfo(image,&pixel,q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->type=image->alpha_trait == UndefinedPixelTrait ?
        ColorSeparationType : ColorSeparationAlphaType;
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LinearGRAYColorspace:
    {
      /*
        Transform image from sRGB to GRAY.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            gray;

          /*
            ITU-R BT.709 luma weights; DecodePixelGamma() linearizes the
            result for the linear-gray colorspace.
          */
          gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
            0.072186*GetPixelBlue(image,q);
          SetPixelGray(image,ClampToQuantum(DecodePixelGamma(gray)),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      image->type=GrayscaleType;
      return(status);
    }
    case GRAYColorspace:
    {
      /*
        Transform image from sRGB to GRAY.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            gray;

          /*
            ITU-R BT.709 luma weights; unlike LinearGRAY, the gamma
            encoding of the source is kept as-is.
          */
          gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
            0.072186*GetPixelBlue(image,q);
          SetPixelGray(image,ClampToQuantum(gray),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      image->type=GrayscaleType;
      return(status);
    }
    case CMYColorspace:
    case HCLColorspace:
    case HCLpColorspace:
    case HSBColorspace:
    case HSIColorspace:
    case HSLColorspace:
    case HSVColorspace:
    case HWBColorspace:
    case JzazbzColorspace:
    case LabColorspace:
    case LCHColorspace:
    case LCHabColorspace:
    case LCHuvColorspace:
    case LMSColorspace:
    case LuvColorspace:
    case xyYColorspace:
    case XYZColorspace:
    case YCbCrColorspace:
    case YDbDrColorspace:
    case YIQColorspace:
    case YPbPrColorspace:
    case YUVColorspace:
    {
      const char
        *value;

      double
        white_luminance;

      /*
        Transform image from sRGB to target colorspace.
      */
      /*
        The "white-luminance" image property overrides the default peak
        luminance (10000.0) that is passed to the Jzazbz transform.
      */
      white_luminance=10000.0;
      value=GetImageProperty(image,"white-luminance",exception);
      if (value != (const char *) NULL)
        white_luminance=StringToDouble(value,(char **) NULL);
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            blue,
            green,
            red,
            X,
            Y,
            Z;

          red=(double) GetPixelRed(image,q);
          green=(double) GetPixelGreen(image,q);
          blue=(double) GetPixelBlue(image,q);
          /*
            Dispatch to the per-pixel converter for the target space;
            all converters write normalized channels into X, Y, Z.
          */
          switch (colorspace)
          {
            case CMYColorspace:
            {
              ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLColorspace:
            {
              ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HCLpColorspace:
            {
              ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSBColorspace:
            {
              ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSIColorspace:
            {
              ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSLColorspace:
            {
              ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HSVColorspace:
            {
              ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
              break;
            }
            case HWBColorspace:
            {
              ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
              break;
            }
            case JzazbzColorspace:
            {
              ConvertRGBToJzazbz(red,green,blue,white_luminance,&X,&Y,&Z);
              break;
            }
            case LabColorspace:
            {
              ConvertRGBToLab(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LCHColorspace:
            case LCHabColorspace:
            {
              /* LCH defaults to the CIELAB-based variant. */
              ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LCHuvColorspace:
            {
              ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LMSColorspace:
            {
              ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
              break;
            }
            case LuvColorspace:
            {
              ConvertRGBToLuv(red,green,blue,&X,&Y,&Z);
              break;
            }
            case xyYColorspace:
            {
              ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
              break;
            }
            case XYZColorspace:
            {
              ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YCbCrColorspace:
            {
              ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YDbDrColorspace:
            {
              ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YIQColorspace:
            {
              ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YPbPrColorspace:
            {
              ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
              break;
            }
            case YUVColorspace:
            {
              ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
              break;
            }
            default:
            {
              /* Fallback: pass sRGB through, normalized to [0,1]. */
              X=QuantumScale*red;
              Y=QuantumScale*green;
              Z=QuantumScale*blue;
              break;
            }
          }
          /*
            Rescale the normalized channels back to the quantum range.
          */
          SetPixelRed(image,ClampToQuantum(QuantumRange*X),q);
          SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q);
          SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case LogColorspace:
    {
#define DisplayGamma  (1.0/1.7)
#define FilmGamma  0.6
#define ReferenceBlack  95.0
#define ReferenceWhite  685.0

      const char
        *value;

      double
        black,
        density,
        film_gamma,
        gamma,
        reference_black,
        reference_white;

      Quantum
        *logmap;

      /*
        Transform RGB to Log colorspace.
      */
      /*
        Each parameter defaults to the constants above and may be
        overridden by the corresponding image property.
      */
      density=DisplayGamma;
      gamma=DisplayGamma;
      value=GetImageProperty(image,"gamma",exception);
      if (value != (const char *) NULL)
        gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
      film_gamma=FilmGamma;
      value=GetImageProperty(image,"film-gamma",exception);
      if (value != (const char *) NULL)
        film_gamma=StringToDouble(value,(char **) NULL);
      reference_black=ReferenceBlack;
      value=GetImageProperty(image,"reference-black",exception);
      if (value != (const char *) NULL)
        reference_black=StringToDouble(value,(char **) NULL);
      reference_white=ReferenceWhite;
      value=GetImageProperty(image,"reference-white",exception);
      if (value != (const char *) NULL)
        reference_white=StringToDouble(value,(char **) NULL);
      logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
        sizeof(*logmap));
      if (logmap == (Quantum *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
        film_gamma);
      /*
        Precompute the linear -> log transfer curve as a lookup table.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
        logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+
          log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/
          film_gamma))/1024.0));
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /*
          x is a countdown of the remaining pixels, not a column index.
        */
        for (x=(ssize_t) image->columns; x != 0; x--)
        {
          double
            blue,
            green,
            red;

          red=(double) DecodePixelGamma((MagickRealType)
            GetPixelRed(image,q));
          green=(double) DecodePixelGamma((MagickRealType)
            GetPixelGreen(image,q));
          blue=(double) DecodePixelGamma((MagickRealType)
            GetPixelBlue(image,q));
          SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q);
          SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))],
            q);
          SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      logmap=(Quantum *) RelinquishMagickMemory(logmap);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    case RGBColorspace:
    case scRGBColorspace:
    {
      /*
        Transform image from sRGB to linear RGB.
      */
      if (image->storage_class == PseudoClass)
        {
          if (SyncImage(image,exception) == MagickFalse)
            return(MagickFalse);
          if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
            return(MagickFalse);
        }
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            blue,
            green,
            red;

          red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q));
          green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q));
          blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q));
          SetPixelRed(image,ClampToQuantum(red),q);
          SetPixelGreen(image,ClampToQuantum(green),q);
          SetPixelBlue(image,ClampToQuantum(blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
        return(MagickFalse);
      return(status);
    }
    default:
      break;
  }
  /*
    The remaining colorspaces are linear matrix transforms driven by
    per-channel lookup tables.  Allocate the tables.
  */
  x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*x_map));
  y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*y_map));
  z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
    sizeof(*z_map));
  if ((x_map == (TransformPacket *) NULL) ||
      (y_map == (TransformPacket *) NULL) ||
      (z_map == (TransformPacket *) NULL))
    {
      if (x_map != (TransformPacket *) NULL)
        x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
      if (y_map != (TransformPacket *) NULL)
        y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
      if (z_map != (TransformPacket *) NULL)
        z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(&primary_info,0,sizeof(primary_info));
  switch (colorspace)
  {
    case OHTAColorspace:
    {
      /*
        Initialize OHTA tables:

          I1 = 0.33333*R+0.33334*G+0.33333*B
          I2 = 0.50000*R+0.00000*G-0.50000*B
          I3 =-0.25000*R+0.50000*G-0.25000*B

        I and Q, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.33333*(double) i);
        x_map[i].y=(MagickRealType) (0.50000*(double) i);
        x_map[i].z=(MagickRealType) (-0.25000*(double) i);
        y_map[i].x=(MagickRealType) (0.33334*(double) i);
        y_map[i].y=(MagickRealType) (0.00000*(double) i);
        y_map[i].z=(MagickRealType) (0.50000*(double) i);
        z_map[i].x=(MagickRealType) (0.33333*(double) i);
        z_map[i].y=(MagickRealType) (-0.50000*(double) i);
        z_map[i].z=(MagickRealType) (-0.25000*(double) i);
      }
      break;
    }
    case Rec601YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.601):

          Y =  0.2988390*R+0.5868110*G+0.1143500*B
          Cb= -0.1687367*R-0.3312640*G+0.5000000*B
          Cr=  0.5000000*R-0.4186880*G-0.0813120*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.298839*(double) i);
        x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].x=(MagickRealType) (0.586811*(double) i);
        y_map[i].y=(MagickRealType) (-0.331264*(double) i);
        y_map[i].z=(MagickRealType) (-0.418688*(double) i);
        z_map[i].x=(MagickRealType) (0.114350*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        z_map[i].z=(MagickRealType) (-0.081312*(double) i);
      }
      break;
    }
    case Rec709YCbCrColorspace:
    {
      /*
        Initialize YCbCr tables (ITU-R BT.709):

          Y =  0.212656*R+0.715158*G+0.072186*B
          Cb= -0.114572*R-0.385428*G+0.500000*B
          Cr=  0.500000*R-0.454153*G-0.045847*B

        Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
        through QuantumRange.
      */
      primary_info.y=(double) (MaxMap+1.0)/2.0;
      primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (0.212656*(double) i);
        x_map[i].y=(MagickRealType) (-0.114572*(double) i);
        x_map[i].z=(MagickRealType) (0.500000*(double) i);
        y_map[i].x=(MagickRealType) (0.715158*(double) i);
        y_map[i].y=(MagickRealType) (-0.385428*(double) i);
        y_map[i].z=(MagickRealType) (-0.454153*(double) i);
        z_map[i].x=(MagickRealType) (0.072186*(double) i);
        z_map[i].y=(MagickRealType) (0.500000*(double) i);
        z_map[i].z=(MagickRealType) (-0.045847*(double) i);
      }
      break;
    }
    case YCCColorspace:
    {
      /*
        Initialize YCC tables:

          Y =  0.298839*R+0.586811*G+0.114350*B
          C1= -0.298839*R-0.586811*G+0.88600*B
          C2=  0.70100*R-0.586811*G-0.114350*B

        YCC is scaled by 1.3584.  C1 zero is 156 and C2 is at 137.
      */
      primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
      primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
      /*
        The transfer curve is piecewise: a linear segment below 1.8% of
        MaxMap and a scaled power-law segment above it.
      */
      for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
      {
        x_map[i].x=0.005382*i;
        x_map[i].y=(-0.003296)*i;
        x_map[i].z=0.009410*i;
        y_map[i].x=0.010566*i;
        y_map[i].y=(-0.006471)*i;
        y_map[i].z=(-0.007880)*i;
        z_map[i].x=0.002052*i;
        z_map[i].y=0.009768*i;
        z_map[i].z=(-0.001530)*i;
      }
      for ( ; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=0.298839*(1.099*i-0.099);
        x_map[i].y=(-0.298839)*(1.099*i-0.099);
        x_map[i].z=0.70100*(1.099*i-0.099);
        y_map[i].x=0.586811*(1.099*i-0.099);
        y_map[i].y=(-0.586811)*(1.099*i-0.099);
        y_map[i].z=(-0.586811)*(1.099*i-0.099);
        z_map[i].x=0.114350*(1.099*i-0.099);
        z_map[i].y=0.88600*(1.099*i-0.099);
        z_map[i].z=(-0.114350)*(1.099*i-0.099);
      }
      break;
    }
    default:
    {
      /*
        Linear conversion tables.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        x_map[i].x=(MagickRealType) (1.0*(double) i);
        x_map[i].y=(MagickRealType) 0.0;
        x_map[i].z=(MagickRealType) 0.0;
        y_map[i].x=(MagickRealType) 0.0;
        y_map[i].y=(MagickRealType) (1.0*(double) i);
        y_map[i].z=(MagickRealType) 0.0;
        z_map[i].x=(MagickRealType) 0.0;
        z_map[i].y=(MagickRealType) 0.0;
        z_map[i].z=(MagickRealType) (1.0*(double) i);
      }
      break;
    }
  }
  /*
    Convert from sRGB.
  */
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Convert DirectClass image.
      */
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        register unsigned int
          blue,
          green,
          red;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /*
            Each output channel is a table-driven linear combination of
            the three input channels plus the colorspace offset.
          */
          red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelRed(image,q)));
          green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelGreen(image,q)));
          blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
            GetPixelBlue(image,q)));
          pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
            primary_info.x;
          pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
            primary_info.y;
          pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
            primary_info.z;
          SetPixelRed(image,ScaleMapToQuantum(pixel.red),q);
          SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q);
          SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q);
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,sRGBTransformImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case PseudoClass:
    {
      register unsigned int
        blue,
        green,
        red;

      /*
        Convert PseudoClass image: only the colormap entries need to be
        transformed.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        PixelInfo
          pixel;

        red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
        green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
        blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
        pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
        pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
        pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
        image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
        image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
        image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
      }
      (void) SyncImage(image,exception);
      break;
    }
  }
  /*
    Relinquish resources.
  */
  z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
  y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
  x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
  if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
    return(MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
%      const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    type;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->colorspace == colorspace)
    return(MagickTrue);
  /*
    Reset colorimetric state to neutral defaults; the branches below
    specialize it per colorspace family.
  */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Gray colorspaces force a grayscale type; linear gray carries no
        gamma correction.
      */
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.000;
      type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      /*
        Linear colorspaces carry no gamma correction.
      */
      image->gamma=1.000;
    else
      {
        /*
          Otherwise assume sRGB primaries and the D65 white point.
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  /*
    Synchronize the pixel cache with the new colorspace, then restore
    the (possibly updated) image type.
  */
  status=SyncImagePixelCache(image,exception);
  image->type=type;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageGray() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities and changes the type of the image to
% bi-level or grayscale.
%
% The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *option;

  ImageType
    gray_type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Already flagged gray?  Nothing more to do.
  */
  if (IsImageGray(image) != MagickFalse)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Honor an explicit opt-out of automatic grayscale detection.
  */
  option=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(option) != MagickFalse)
    return(MagickFalse);
  gray_type=IdentifyImageGray(image,exception);
  if (gray_type == UndefinedType)
    return(MagickFalse);
  /*
    Every pixel has equal R, G, and B: demote the image to grayscale.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=gray_type;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMonochrome() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange and changes the type of the image to bi-level.
%
% The format of the SetImageMonochrome method is:
%
% MagickBooleanType SetImageMonochrome(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  const char
    *option;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Already flagged bi-level?  Nothing more to do.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Honor an explicit opt-out of automatic grayscale detection.
  */
  option=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(option) != MagickFalse)
    return(MagickFalse);
  if (IdentifyImageMonochrome(image,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Every pixel is either 0 or QuantumRange: demote to bi-level gray.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace, changing the
% image data to reflect the new colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    No pixel conversion is needed when the colorspace already matches;
    SetImageColorspace still runs to normalize colorspace metadata.
  */
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Discard embedded ICC/ICM color profiles; presumably stale once the
    pixels are transformed.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Convert the reference image from an alternate colorspace to sRGB.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    if (TransformsRGBImage(image,exception) == MagickFalse)
      return(MagickFalse);
  /*
    Convert the reference image from sRGB to the requested colorspace.
  */
  if (sRGBTransformImage(image,colorspace,exception) == MagickFalse)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m s R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformsRGBImage() converts the reference image from an alternate
% colorspace to sRGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values
% to be [0..QuantumRange].
%
% The format of the TransformsRGBImage method is:
%
% MagickBooleanType TransformsRGBImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,double *red,double *green,double *blue)
{
  /*
    CMY is subtractive: complement each channel and scale to quantum range.
  */
  *red=(1.0-cyan)*QuantumRange;
  *green=(1.0-magenta)*QuantumRange;
  *blue=(1.0-yellow)*QuantumRange;
}
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  /*
    Map LMS cone-response coordinates to CIE XYZ with a fixed 3x3 linear
    transform (the inverse of the forward XYZ->LMS matrix).
  */
  *X=L*1.096123820835514+M*(-0.278869000218287)+S*0.182745179382773;
  *Y=L*0.454369041975359+M*0.473533154307412+S*0.072097803717229;
  *Z=L*(-0.009627608738429)+M*(-0.005698031216113)+S*1.015325639954543;
}
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,double *red,double *green,double *blue)
{
  double
    cie_x,
    cie_y,
    cie_z;

  /*
    Two-step conversion: LMS -> XYZ -> RGB.
  */
  ConvertLMSToXYZ(L,M,S,&cie_x,&cie_y,&cie_z);
  ConvertXYZToRGB(cie_x,cie_y,cie_z,red,green,blue);
}
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,double *red,double *green,double *blue)
{
  double
    scaled_u,
    scaled_v,
    X,
    Y,
    Z;

  /*
    Undo the [0,1] normalization of the Luv channels, then convert
    Luv -> XYZ -> RGB.
  */
  scaled_u=354.0*u-134.0;
  scaled_v=262.0*v-140.0;
  ConvertLuvToXYZ(100.0*L,scaled_u,scaled_v,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline ssize_t RoundToYCC(const double value)
{
  /*
    Round half-up and clamp to the YCC lookup-table domain [0,1388].
  */
  if (value >= 1388.0)
    return(1388);
  if (value <= 0.0)
    return(0);
  return((ssize_t) (value+0.5));
}
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,double *red,double *green,double *blue)
{
  double
    a_rescaled,
    b_rescaled,
    X,
    Y,
    Z;

  /*
    Undo the [0,1] normalization (a and b are biased by 0.5), then convert
    Lab -> XYZ -> RGB.
  */
  a_rescaled=255.0*(a-0.5);
  b_rescaled=255.0*(b-0.5);
  ConvertLabToXYZ(100.0*L,a_rescaled,b_rescaled,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,double *red,double *green,double *blue)
{
  double
    gamma,
    X,
    Y,
    Z;

  /*
    Recover XYZ from chromaticity (x,y) and luminance Y:
    X=Y*x/y, Z=Y*(1-x-y)/y; PerceptibleReciprocal() guards against y == 0.
  */
  gamma=PerceptibleReciprocal(low_y);
  Y=cap_Y;
  X=gamma*cap_Y*low_x;
  Z=gamma*cap_Y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  double *red,double *green,double *blue)
{
  double
    pb,
    pr;

  /*
    Normalized YPbPr (luma in [0,1], chroma biased by 0.5) to RGB, scaled
    to the quantum range.
  */
  pb=Pb-0.5;
  pr=Pr-0.5;
  *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*pb+
    1.4019995886561440468*pr);
  *green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*pb-
    0.71413649331646789076*pr);
  *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*pb+
    2.1453384174593273e-06*pr);
}
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,double *red,double *green,double *blue)
{
  /*
    YCbCr uses the same normalized encoding as YPbPr here (luma in [0,1],
    chroma biased by 0.5), so simply forward to the YPbPr conversion.
  */
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  double *red,double *green,double *blue)
{
  double
    i,
    q;

  /*
    Normalized YIQ (chroma biased by 0.5) to RGB, scaled to the quantum
    range.
  */
  i=I-0.5;
  q=Q-0.5;
  *red=QuantumRange*(Y+0.9562957197589482261*i+0.6210244164652610754*q);
  *green=QuantumRange*(Y-0.2721220993185104464*i-0.6473805968256950427*q);
  *blue=QuantumRange*(Y-1.1069890167364901945*i+1.7046149983646481374*q);
}
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  double *red,double *green,double *blue)
{
  double
    db,
    dr;

  /*
    Normalized YDbDr (chroma biased by 0.5) to RGB, scaled to the quantum
    range.
  */
  db=Db-0.5;
  dr=Dr-0.5;
  *red=QuantumRange*(Y+9.2303716147657e-05*db-0.52591263066186533*dr);
  *green=QuantumRange*(Y-0.12913289889050927*db+0.26789932820759876*dr);
  *blue=QuantumRange*(Y+0.66467905997895482*db-7.9202543533108e-05*dr);
}
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  double *red,double *green,double *blue)
{
  double
    u,
    v;

  /*
    Normalized YUV (chroma biased by 0.5) to RGB, scaled to the quantum
    range.
  */
  u=U-0.5;
  v=V-0.5;
  *red=QuantumRange*(Y-3.945707070708279e-05*u+1.1398279671717170825*v);
  *green=QuantumRange*(Y-0.3946101641414141437*u-0.5805003156565656797*v);
  *blue=QuantumRange*(Y+2.0319996843434342537*u-4.813762626262513e-04*v);
}
static MagickBooleanType TransformsRGBImage(Image *image,
ExceptionInfo *exception)
{
#define TransformsRGBImageTag "Transform/Image"
static const float
YCCMap[1389] =
{
0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000f
};
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
progress=0;
switch (image->colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Transform image from CMYK to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertCMYKToRGB(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
{
/*
Transform linear GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
gray=EncodePixelGamma(gray);
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
{
/*
      Transform GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case CMYColorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case JzazbzColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
const char
*value;
double
white_luminance;
/*
Transform image from source colorspace to sRGB.
*/
white_luminance=10000.0;
value=GetImageProperty(image,"white-luminance",exception);
if (value != (const char *) NULL)
white_luminance=StringToDouble(value,(char **) NULL);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
X=QuantumScale*GetPixelRed(image,q);
Y=QuantumScale*GetPixelGreen(image,q);
Z=QuantumScale*GetPixelBlue(image,q);
switch (image->colorspace)
{
case CMYColorspace:
{
ConvertCMYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLColorspace:
{
ConvertHCLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSBColorspace:
{
ConvertHSBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSIColorspace:
{
ConvertHSIToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSLColorspace:
{
ConvertHSLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSVColorspace:
{
ConvertHSVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HWBColorspace:
{
ConvertHWBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case JzazbzColorspace:
{
ConvertJzazbzToRGB(X,Y,Z,white_luminance,&red,&green,&blue);
break;
}
case LabColorspace:
{
ConvertLabToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LMSColorspace:
{
ConvertLMSToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LuvColorspace:
{
ConvertLuvToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case xyYColorspace:
{
ConvertxyYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case XYZColorspace:
{
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YCbCrColorspace:
{
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YDbDrColorspace:
{
ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YIQColorspace:
{
ConvertYIQToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YPbPrColorspace:
{
ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YUVColorspace:
{
ConvertYUVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
default:
{
red=QuantumRange*X;
green=QuantumRange*Y;
blue=QuantumRange*Z;
break;
}
}
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to sRGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
film_gamma);
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++)
logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/
film_gamma)-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=QuantumRange;
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))];
green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))];
blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))];
SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
red)),q);
SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
green)),q);
SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
blue)),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (image->colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I and Q, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*(double) i;
y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap);
z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap);
x_map[i].y=0.99999975910502514331*(double) i;
y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap);
z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap);
x_map[i].z=1.00000124040004623180*(double) i;
y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap);
z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*i);
y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*i);
y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*i);
y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap));
z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.3584000*(double) i);
y_map[i].x=(MagickRealType) 0.0000000;
z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].y=(MagickRealType) (1.3584000*(double) i);
y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].z=(MagickRealType) (1.3584000*(double) i);
y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].z=(MagickRealType) 0.0000000;
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert to sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(image,q));
green=ScaleQuantumToMap(GetPixelGreen(image,q));
blue=ScaleQuantumToMap(GetPixelBlue(image,q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
SetPixelRed(image,ClampToQuantum(pixel.red),q);
SetPixelGreen(image,ClampToQuantum(pixel.green),q);
SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransformsRGBImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
image->colormap[i].red=(double) ClampToQuantum(pixel.red);
image->colormap[i].green=(double) ClampToQuantum(pixel.green);
image->colormap[i].blue=(double) ClampToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
/* ======================== isx_original.c ======================== */
/*
Copyright (c) 2015, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#define _POSIX_C_SOURCE 199309L
#include <shmem.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <math.h>
#include <string.h>
#include <unistd.h> // sleep()
#include <sys/stat.h>
#include <stdint.h>
#include <inttypes.h>
#include "params.h"
#include "isx_original.h"
#include "timer.h"
#include "pcg_basic.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
#define ROOT_PE 0
// Needed for shmem collective operations
int pWrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
double dWrk[_SHMEM_REDUCE_SYNC_SIZE];
long long int llWrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
long pSync[_SHMEM_REDUCE_SYNC_SIZE];
uint64_t NUM_PES; // Number of parallel workers
uint64_t TOTAL_KEYS; // Total number of keys across all PEs
uint64_t NUM_KEYS_PER_PE; // Number of keys generated on each PE
uint64_t NUM_BUCKETS; // The number of buckets in the bucket sort
uint64_t BUCKET_WIDTH; // The size of each bucket
uint64_t MAX_KEY_VAL; // The maximum possible generated key value
volatile int whose_turn;
long long int receive_offset = 0;
long long int my_bucket_size = 0;
#include <sys/time.h>
#include <time.h>
/*
 * Return the current monotonic clock reading in nanoseconds.
 * CLOCK_MONOTONIC is immune to wall-clock adjustments, so the value is
 * only meaningful for measuring elapsed intervals, not absolute time.
 */
static unsigned long long current_time_ns() {
  struct timespec ts = {0, 0};
  clock_gettime(CLOCK_MONOTONIC, &ts);
  const unsigned long long seconds_as_ns =
      (unsigned long long)ts.tv_sec * 1000000000ULL;
  return seconds_as_ns + (unsigned long long)ts.tv_nsec;
}
#define PARALLEL_FOR_MODE SHMEM_PARALLEL_FOR_RECURSIVE_MODE
#define CHUNKS_COUNT_LOCAL_KEYS (actual_num_workers)
#define CHUNKS_MAKE_INPUT CHUNKS_PER_PE
int actual_num_workers;
int** local_bucket_sizes_chunk;
int ** my_local_key_counts;
KEY_TYPE*** my_local_bucketed_keys_chunk;
int** local_bucket_offsets_chunk;
/*
* This variable sets the maximum number of chunks allowed
* to participate in computation per pe.
*/
int CHUNKS_PER_PE=1;
#define GET_VIRTUAL_RANK(rank, chunk) ((rank * actual_num_workers) + (chunk))
#define SHMEM_BARRIER_AT_START { timer_start(&timers[TIMER_BARRIER_START]); shmem_barrier_all(); timer_stop(&timers[TIMER_BARRIER_START]); }
#define SHMEM_BARRIER_AT_EXCHANGE { timer_start(&timers[TIMER_BARRIER_EXCHANGE]); shmem_barrier_all(); timer_stop(&timers[TIMER_BARRIER_EXCHANGE]); }
#define SHMEM_BARRIER_AT_END { timer_start(&timers[TIMER_BARRIER_END]); shmem_barrier_all(); timer_stop(&timers[TIMER_BARRIER_END]); }
// This is done due to current limitation that entrypoint function
// cannot accept arguments. This will be resolved in future version of
// AsyncSHMEM
int m_argc;
char** m_argv;
#define EXTRA_STATS
#ifdef EXTRA_STATS
float avg_time=0, avg_time_all2all = 0;
#endif
// #define KEY_BUFFER_SIZE (1uLL<<28uLL)
#define KEY_BUFFER_SIZE ((1uLL<<28uLL) + 70000)
// The receive array for the All2All exchange
// KEY_TYPE my_bucket_keys[KEY_BUFFER_SIZE];
KEY_TYPE *my_bucket_keys;
#ifdef PERMUTE
int * permute_array;
#endif
/*
 * Worker entry point: parse options, set up SHMEM collectives, run the
 * sort, and write the timing log.  Inputs arrive via the m_argc/m_argv
 * globals because this callback cannot take user arguments.
 */
void entrypoint(void *arg) {
  (void) arg;  /* required by the callback signature; unused */
  char * const results_log = parse_params(m_argc, m_argv);
  init_shmem_sync_array(pSync);
  bucket_sort();
  log_times(results_log);
}
/*
 * Program entry: initializes SHMEM, runs the benchmark through
 * entrypoint(), and (with EXTRA_STATS) prints MMTk-style timing totals
 * gathered across all PEs.  Returns 0 unconditionally; verification
 * status is reported by bucket_sort()/verify_results() via the log.
 */
int main (int argc, char ** argv) {
  shmem_init ();
  /* Stash the arguments in globals: the AsyncSHMEM entrypoint callback
     cannot accept arguments (see note above the globals). */
  m_argc = argc;
  m_argv = argv;
#ifdef EXTRA_STATS
  _timer_t stage_time;
  if(shmem_my_pe() == 0) {
    printf("\n-----\nmkdir timedrun fake\n\n");
    timer_start(&stage_time);
  }
#endif
#if defined(_SHMEM_WORKERS)
  shmem_workers_init(entrypoint, NULL);
#else
  entrypoint(NULL);
#endif
#ifdef EXTRA_STATS
  if(shmem_my_pe() == 0) {
    just_timer_stop(&stage_time);
    /* Whole-run wall clock, in seconds. */
    double tTime = ( stage_time.stop.tv_sec - stage_time.start.tv_sec ) + ( stage_time.stop.tv_nsec - stage_time.start.tv_nsec )/1E9;
    avg_time *= 1000;           /* seconds -> milliseconds */
    avg_time_all2all *= 1000;
    printf("\n============================ MMTk Statistics Totals ============================\n");
    if(NUM_ITERATIONS == 1) { //TODO: fix time calculation below for more number of iterations
      printf("time.mu\tt.ATA_KEYS\tt.MAKE_INPUT\tt.COUNT_BUCKET_SIZES\tt.BUCKETIZE\tt.COMPUTE_OFFSETS\tt.LOCAL_SORT\tBARRIER_AT_START\tBARRIER_AT_EXCHANGE\tBARRIER_AT_END\tnWorkers\tnPEs\n");
      double TIMES[TIMER_NTIMERS];
      memset(TIMES, 0x00, sizeof(double) * TIMER_NTIMERS);
      /* Sum each timer over all PEs (all_times may be NULL for timers
         that were never collected). */
      for(uint64_t i=0; i<NUM_PES; i++) {
        for(int t = 0; t < TIMER_NTIMERS; ++t){
          if(timers[t].all_times != NULL){
            TIMES[t] += timers[t].all_times[i];
          }
        }
      }
      for(int t = 0; t < TIMER_NTIMERS; ++t){
        printf("%.3f\t", (TIMES[t]/NUM_PES)*1000);
      }
      /* BUGFIX: NUM_PES is uint64_t; printing it with %d is undefined
         behavior in a varargs call.  Use the PRIu64 format macro. */
      printf("%d\t%" PRIu64 "\n",actual_num_workers,NUM_PES);
      printf("Total time: %.3f\n",(TIMES[0]/NUM_PES)*1000);
    }
    else {
      printf("time.mu\ttimeAll2All\tnWorkers\tnPEs\n");
      /* BUGFIX: same %d -> PRIu64 fix for NUM_PES as above. */
      printf("%.3f\t%.3f\t%d\t%" PRIu64 "\n",avg_time,avg_time_all2all,actual_num_workers,NUM_PES);
      printf("Total time: %.3f\n",avg_time);
    }
    printf("------------------------------ End MMTk Statistics -----------------------------\n");
    printf("===== TEST PASSED in %.3f msec =====\n",(tTime*1000));
  }
#endif
  shmem_finalize ();
  return 0;
}
// Parses all of the command line input and definitions in params.h
// to set all necessary runtime values and options
static char * parse_params(const int argc, char ** argv)
{
  /* Expect exactly: <program> <num keys> <log_file>. */
  if(argc != 3)
  {
    if( shmem_my_pe() == 0){
      printf("Usage: \n");
      printf(" ./%s <total num keys(strong) | keys per pe(weak)> <log_file>\n",argv[0]);
    }
    shmem_finalize();
    exit(1);
  }
  /* Optional override of the per-PE chunk count via the environment. */
  const char* chunks_env = getenv("ISX_PE_CHUNKS");
  CHUNKS_PER_PE = chunks_env ? atoi(chunks_env) : 1;
#if defined(_OPENMP)
#pragma omp parallel
  actual_num_workers = omp_get_num_threads();
#elif defined(_SHMEM_WORKERS)
  actual_num_workers = shmem_n_workers();
#else
  /* Sequential build: one worker, no chunking. */
  CHUNKS_PER_PE = 1;
  actual_num_workers = 1;
#endif
  NUM_PES = (uint64_t) shmem_n_pes();
  MAX_KEY_VAL = DEFAULT_MAX_KEY;
  NUM_BUCKETS = NUM_PES;
  BUCKET_WIDTH = (uint64_t) ceil((double)MAX_KEY_VAL/NUM_BUCKETS);
  char * log_file = argv[2];
  char scaling_msg[64];
  switch(SCALING_OPTION){
    case STRONG:
      {
        /* BUGFIX: atoi() truncates counts above INT_MAX even though the
           target is uint64_t; parse the full 64-bit range instead. */
        TOTAL_KEYS = (uint64_t) strtoull(argv[1], NULL, 10);
        NUM_KEYS_PER_PE = (uint64_t) ceil((double)TOTAL_KEYS/NUM_PES);
        sprintf(scaling_msg,"STRONG");
        break;
      }
    case WEAK:
      {
        NUM_KEYS_PER_PE = (uint64_t) strtoull(argv[1], NULL, 10) * actual_num_workers;
        sprintf(scaling_msg,"WEAK");
        break;
      }
    case WEAK_ISOBUCKET:
      {
        NUM_KEYS_PER_PE = (uint64_t) strtoull(argv[1], NULL, 10) * actual_num_workers;
        BUCKET_WIDTH = ISO_BUCKET_WIDTH;
        MAX_KEY_VAL = (uint64_t) (NUM_PES * actual_num_workers * BUCKET_WIDTH);
        sprintf(scaling_msg,"WEAK_ISOBUCKET");
        break;
      }
    default:
      {
        if(shmem_my_pe() == 0){
          printf("Invalid scaling option! See params.h to define the scaling option.\n");
        }
        shmem_finalize();
        exit(1);
        break;
      }
  }
  /* Sanity-check the derived configuration before any allocation. */
  assert(NUM_KEYS_PER_PE % actual_num_workers == 0);
  assert(MAX_KEY_VAL > 0);
  assert(NUM_KEYS_PER_PE > 0);
  assert(NUM_PES > 0);
  assert(MAX_KEY_VAL > NUM_PES);
  assert(NUM_BUCKETS > 0);
  assert(BUCKET_WIDTH > 0);
  if(shmem_my_pe() == 0){
    printf("ISx v%1d.%1d\n",MAJOR_VERSION_NUMBER,MINOR_VERSION_NUMBER);
#ifdef PERMUTE
    printf("Random Permute Used in ATA.\n");
#endif
    printf("  Number of Keys per PE: %" PRIu64 "\n", NUM_KEYS_PER_PE);
    printf("  Number of Chunks per PE (ISX_PE_CHUNKS): %d\n",CHUNKS_PER_PE);
#if defined(_OPENMP)
    printf("  OpenMP Version, total workers: %d\n",actual_num_workers);
#elif defined(_SHMEM_WORKERS)
    printf("  AsyncSHMEM Version, total workers: %d\n",actual_num_workers);
#else
    printf("  AsyncSHMEM Sequential version\n");
#endif
    printf("  Max Key Value: %" PRIu64 "\n", MAX_KEY_VAL);
    printf("  Bucket Width: %" PRIu64 "\n", BUCKET_WIDTH);
    printf("  Number of Iterations: %u\n", NUM_ITERATIONS);
    printf("  Number of PEs: %" PRIu64 "\n", NUM_PES);
    printf("  %s Scaling!\n",scaling_msg);
  }
  return log_file;
}
/*
* The primary compute function for the bucket sort
* Executes the sum of NUM_ITERATIONS + BURN_IN iterations, as defined in params.h
* Only iterations after the BURN_IN iterations are timed
* Only the final iteration calls the verification function
*/
static int bucket_sort(void)
{
  int err = 0;
  init_timers(NUM_ITERATIONS);
#ifdef PERMUTE
  create_permutation_array();
#endif
  /* Symmetric (SHMEM) receive buffer for the all-to-all key exchange. */
  my_bucket_keys = (KEY_TYPE*) shmem_malloc(KEY_BUFFER_SIZE * sizeof(KEY_TYPE));
  assert(my_bucket_keys);
  // if (shmem_my_pe() == 0) fprintf(stderr, "mallocing %llu bytes\n", KEY_BUFFER_SIZE * sizeof(KEY_TYPE));
  /* Per-worker histogram arrays reused across all iterations (zeroed at
     the top of each iteration below). */
  my_local_key_counts = malloc(CHUNKS_COUNT_LOCAL_KEYS * sizeof(int*));
  assert(my_local_key_counts);
  for(int i=0; i<CHUNKS_COUNT_LOCAL_KEYS; i++) {
    my_local_key_counts[i] = malloc(BUCKET_WIDTH * sizeof(int));
    assert(my_local_key_counts[i]);
  }
  for(uint64_t i = 0; i < (NUM_ITERATIONS + BURN_IN); ++i)
  {
    /* NOTE(review): the loop variable `i` below shadows the iteration
       counter `i` of the enclosing loop; harmless but easy to misread. */
    for(int i=0; i<CHUNKS_COUNT_LOCAL_KEYS; i++) memset(my_local_key_counts[i], 0x00, BUCKET_WIDTH * sizeof(int));
    /* Per-chunk bucket-size arrays; the inner arrays are allocated by
       count_local_bucket_sizes()'s chunk tasks and freed below. */
    local_bucket_sizes_chunk = malloc(CHUNKS_PER_PE* sizeof(int*));
    assert(local_bucket_sizes_chunk);
    // Reset timers after burn in
    if(i == BURN_IN){ init_timers(NUM_ITERATIONS); }
    SHMEM_BARRIER_AT_START;
    timer_start(&timers[TIMER_TOTAL]);
    KEY_TYPE * my_keys = make_input();
    int * local_bucket_sizes = count_local_bucket_sizes(my_keys);
    int * send_offsets;
    int * local_bucket_offsets = compute_local_bucket_offsets(local_bucket_sizes,
                                                                   &send_offsets);
    KEY_TYPE * my_local_bucketed_keys =  bucketize_local_keys(my_keys, local_bucket_offsets);
    /* NOTE(review): this local deliberately shadows the global
       my_bucket_keys; exchange_keys() presumably returns (a view into)
       the symmetric buffer — confirm before refactoring. */
    KEY_TYPE * my_bucket_keys = exchange_keys(send_offsets,
                                              local_bucket_sizes,
                                              my_local_bucketed_keys);
    /* receive_offset was advanced during the exchange; it now equals the
       number of keys this PE received. */
    my_bucket_size = receive_offset;
    count_local_keys(my_bucket_keys);
    SHMEM_BARRIER_AT_END;
    timer_stop(&timers[TIMER_TOTAL]);
    /* NOTE(review): these frees assume count_local_bucket_sizes()
       populated every chunk slot; see its NUM_BUCKETS == 1 fast path. */
    for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
      free(local_bucket_sizes_chunk[chunk]);
    }
    free(local_bucket_sizes_chunk);
    // Only the last iteration is verified
    if(i == NUM_ITERATIONS) {
      err = verify_results(my_bucket_keys);
    }
    // Reset receive_offset used in exchange_keys
    receive_offset = 0;
    free(my_local_bucketed_keys);
    free(my_keys);
    free(local_bucket_sizes);
    free(local_bucket_offsets);
    free(send_offsets);
    shmem_barrier_all();
  }
  for(int i=0; i<CHUNKS_COUNT_LOCAL_KEYS; i++) free(my_local_key_counts[i]);
  free(my_local_key_counts);
  return err;
}
#if defined(_SHMEM_WORKERS)
/*
 * Task body for one chunk of key generation (AsyncSHMEM workers build).
 * `args` points at the per-PE key array pointer; each chunk fills its
 * own contiguous slice using a chunk-seeded PCG32 stream.
 */
void make_input_async(void *args, int chunk) {
  KEY_TYPE * restrict const keys = *((KEY_TYPE **) args);
  const uint64_t chunk_len = NUM_KEYS_PER_PE / CHUNKS_MAKE_INPUT;
  const uint64_t first = chunk * chunk_len;
  pcg32_random_t rng = seed_my_chunk(chunk);
  for(uint64_t k = 0; k < chunk_len; ++k) {
    keys[first + k] = pcg32_boundedrand_r(&rng, MAX_KEY_VAL);
  }
}
#endif
/*
* Generates uniformly random keys [0, MAX_KEY_VAL] on each rank using the time and rank
* number as a seed
*/
static KEY_TYPE * make_input(void)
{
  /* Fill a freshly malloc'd array of NUM_KEYS_PER_PE random keys; caller
     owns (and frees) the result.  Generation is split into
     CHUNKS_MAKE_INPUT chunks, each seeded via seed_my_chunk(). */
  timer_start(&timers[TIMER_INPUT]);
  KEY_TYPE * restrict const my_keys = malloc(NUM_KEYS_PER_PE * sizeof(KEY_TYPE));
  assert(my_keys);
#if defined(_SHMEM_WORKERS)
  int lowBound = 0;
  int highBound = CHUNKS_MAKE_INPUT;
  int stride = 1;
  int tile_size = 1;
  int loop_dimension = 1;
  shmem_task_scope_begin();
  shmem_parallel_for_nbi(make_input_async, (void*)(&my_keys), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE);
  shmem_task_scope_end();
#else
  /* BUGFIX: `chunk` was declared only under _OPENMP, so the sequential
     build (no OpenMP, no SHMEM workers) used an undeclared identifier
     and failed to compile.  Declare it unconditionally. */
  int chunk;
#if defined(_OPENMP)
#pragma omp parallel for private(chunk) schedule (dynamic,1)
#endif
  for(chunk=0; chunk<CHUNKS_MAKE_INPUT; chunk++) {
    const uint64_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_MAKE_INPUT;
    const uint64_t start_index = chunk * keys_per_chunk;
    const uint64_t max_index = start_index + keys_per_chunk;
    /* Independent RNG stream per chunk keeps results deterministic
       regardless of scheduling order. */
    pcg32_random_t rng = seed_my_chunk(chunk);
    KEY_TYPE * restrict my_keys_1D = &(my_keys[start_index]);
    for(uint64_t i=start_index; i<max_index; i++) {
      *my_keys_1D = pcg32_boundedrand_r(&rng, MAX_KEY_VAL);
      my_keys_1D += 1;
    }
  }
#endif
  timer_stop(&timers[TIMER_INPUT]);
#ifdef DEBUG
  wait_my_turn();
  char msg[1024];
  const int my_rank = shmem_my_pe();
  sprintf(msg,"Rank %d: Initial Keys: ", my_rank);
  for(uint64_t i = 0; i < NUM_KEYS_PER_PE; ++i){
    if(i < PRINT_MAX)
      sprintf(msg + strlen(msg),"%d ", my_keys[i]);
  }
  sprintf(msg + strlen(msg),"\n");
  printf("%s",msg);
  fflush(stdout);
  my_turn_complete();
#endif
  return my_keys;
}
#if defined(_SHMEM_WORKERS)
void count_local_bucket_sizes_async(void* args, int chunk) {
KEY_TYPE const * restrict const my_keys = (KEY_TYPE *) args;
local_bucket_sizes_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int));
assert(local_bucket_sizes_chunk[chunk]);
memset(local_bucket_sizes_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int));
int * restrict const local_bucket_sizes = local_bucket_sizes_chunk[chunk];
const uint32_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_PER_PE;
const uint32_t start_index = chunk * keys_per_chunk;
KEY_TYPE const * restrict const my_keys_1D = &(my_keys[start_index]);
for(uint64_t i = 0; i < keys_per_chunk; ++i){
const uint32_t bucket_index = my_keys_1D[i]/BUCKET_WIDTH;
local_bucket_sizes[bucket_index]++;
}
}
#endif
/*
* Computes the size of each bucket by iterating all keys and incrementing
* their corresponding bucket's size
*/
static inline int * count_local_bucket_sizes(KEY_TYPE const * restrict const my_keys)
{
  /* Returns a malloc'd array of NUM_BUCKETS per-bucket key counts for
     this PE; caller frees it.  Counting runs chunk-parallel into
     local_bucket_sizes_chunk[] and is reduced here. */
  int * restrict const local_bucket_sizes = malloc(NUM_BUCKETS * sizeof(int));
  assert(local_bucket_sizes);
  memset(local_bucket_sizes, 0x00, NUM_BUCKETS * sizeof(int));
  timer_start(&timers[TIMER_BCOUNT]);
  init_array(local_bucket_sizes, NUM_BUCKETS);
  if(NUM_BUCKETS == 1) {
    /* Single bucket: every key lands in it, no counting needed. */
    local_bucket_sizes[0] = NUM_KEYS_PER_PE;
    /* BUGFIX: bucket_sort() unconditionally frees every entry of
       local_bucket_sizes_chunk after this call, but this fast path
       previously left those slots uninitialized, causing invalid
       free()s.  Allocate zeroed chunk arrays so the frees are valid. */
    for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
      local_bucket_sizes_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int));
      assert(local_bucket_sizes_chunk[chunk]);
      memset(local_bucket_sizes_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int));
    }
  }
  else {
#if defined(_SHMEM_WORKERS)
    int lowBound = 0;
    int highBound = CHUNKS_PER_PE;
    int stride = 1;
    int tile_size = 1;
    int loop_dimension = 1;
    shmem_task_scope_begin();
    shmem_parallel_for_nbi(count_local_bucket_sizes_async, (void*)(my_keys), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE);
    shmem_task_scope_end();
#else
    /* BUGFIX: `chunk` was declared only under _OPENMP, so the sequential
       build used an undeclared identifier and failed to compile. */
    int chunk;
#if defined(_OPENMP)
#pragma omp parallel for private(chunk) schedule (dynamic,1)
#endif
    for(chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
      local_bucket_sizes_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int));
      assert(local_bucket_sizes_chunk[chunk]);
      memset(local_bucket_sizes_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int));
      int * restrict const local_bucket_sizes = local_bucket_sizes_chunk[chunk];
      const uint32_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_PER_PE;
      const uint32_t start_index = chunk * keys_per_chunk;
      KEY_TYPE const * restrict const my_keys_1D = &(my_keys[start_index]);
      for(uint64_t i = 0; i < keys_per_chunk; ++i){
        const uint32_t bucket_index = my_keys_1D[i]/BUCKET_WIDTH;
        local_bucket_sizes[bucket_index]++;
      }
    }
#endif
    /* Reduce the per-chunk histograms into the result array. */
    for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
      for(int i=0; i<NUM_BUCKETS; i++) {
        local_bucket_sizes[i] += local_bucket_sizes_chunk[chunk][i];
      }
    }
  }
  timer_stop(&timers[TIMER_BCOUNT]);
#ifdef DEBUG
  wait_my_turn();
  char msg[1024];
  const int my_rank = shmem_my_pe();
  sprintf(msg,"Rank %d: local bucket sizes: ", my_rank);
  for(uint64_t i = 0; i < NUM_BUCKETS; ++i){
    if(i < PRINT_MAX)
      sprintf(msg + strlen(msg),"%d ", local_bucket_sizes[i]);
  }
  sprintf(msg + strlen(msg),"\n");
  printf("%s",msg);
  fflush(stdout);
  my_turn_complete();
#endif
  return local_bucket_sizes;
}
/*
 * Builds the exclusive prefix sum of local_bucket_sizes, giving each bucket's
 * starting position in the locally bucketed key array.
 * A second copy of the offsets is returned through 'send_offsets' because the
 * primary copy is mutated later by the bucketize function.
 */
static inline int * compute_local_bucket_offsets(int const * restrict const local_bucket_sizes,
                                                 int ** restrict send_offsets)
{
    int * restrict const local_bucket_offsets = malloc(NUM_BUCKETS * sizeof(int));
    assert(local_bucket_offsets);
    timer_start(&timers[TIMER_BOFFSET]);
    (*send_offsets) = malloc(NUM_BUCKETS * sizeof(int));
    assert(*send_offsets);
    // O(NUM_BUCKETS) scan is tiny, so it is intentionally left sequential.
    int running_total = 0;
    for(uint64_t i = 0; i < NUM_BUCKETS; i++){
        local_bucket_offsets[i] = running_total;
        (*send_offsets)[i] = running_total;
        running_total += local_bucket_sizes[i];
    }
    timer_stop(&timers[TIMER_BOFFSET]);
#ifdef DEBUG
    wait_my_turn();
    char msg[1024];
    const int my_rank = shmem_my_pe();
    sprintf(msg,"Rank %d: local bucket offsets: ", my_rank);
    for(uint64_t i = 0; i < NUM_BUCKETS; ++i){
        if(i < PRINT_MAX)
            sprintf(msg + strlen(msg),"%d ", local_bucket_offsets[i]);
    }
    sprintf(msg + strlen(msg),"\n");
    printf("%s",msg);
    fflush(stdout);
    my_turn_complete();
#endif
    return local_bucket_offsets;
}
#if defined(_SHMEM_WORKERS)
/*
 * Worker body for bucketize_local_keys: allocates this chunk's per-bucket
 * arrays (sized by the counts in local_bucket_sizes_chunk, produced by
 * count_local_bucket_sizes) and scatters the chunk's slice of my_keys into
 * them.  Writes only chunk-private state, so workers need no locking.
 */
void bucketize_local_keys_async(void* args, int chunk) {
KEY_TYPE const * restrict const my_keys = (KEY_TYPE*) args;
my_local_bucketed_keys_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(KEY_TYPE*));
assert(my_local_bucketed_keys_chunk[chunk]);
local_bucket_offsets_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int));
assert(local_bucket_offsets_chunk[chunk]);
memset(local_bucket_offsets_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int));
for(int bucket=0; bucket<NUM_BUCKETS; bucket++) {
my_local_bucketed_keys_chunk[chunk][bucket] = malloc(sizeof(KEY_TYPE) * local_bucket_sizes_chunk[chunk][bucket]);
assert(my_local_bucketed_keys_chunk[chunk][bucket]);
}
/* NOTE(review): keys past CHUNKS_PER_PE * keys_per_chunk are skipped when
 * NUM_KEYS_PER_PE is not divisible by CHUNKS_PER_PE -- confirm divisibility. */
const uint32_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_PER_PE;
const uint32_t start_index = chunk * keys_per_chunk;
KEY_TYPE const * restrict const my_keys_1D = &(my_keys[start_index]);
int * restrict local_bucket_offsets_chunk_1D = local_bucket_offsets_chunk[chunk];
int const * restrict const local_bucket_sizes_chunk_1D = local_bucket_sizes_chunk[chunk];
KEY_TYPE** restrict my_local_bucketed_keys_chunk_2D = my_local_bucketed_keys_chunk[chunk];
for(uint64_t i = 0; i < keys_per_chunk; ++i){
const KEY_TYPE key = my_keys_1D[i];
const uint32_t bucket_index = key / BUCKET_WIDTH;
/* Claim the next free slot in this chunk's bucket. */
uint32_t index = local_bucket_offsets_chunk_1D[bucket_index]++;
assert(index < local_bucket_sizes_chunk_1D[bucket_index]);
my_local_bucketed_keys_chunk_2D[bucket_index][index] = key;
}
}
#endif
/*
 * Places local keys into their corresponding local bucket.
 * The contents of each bucket are not sorted.
 *
 * Each chunk scatters its slice of my_keys into per-chunk, per-bucket scratch
 * arrays (sized by local_bucket_sizes_chunk), which are then concatenated
 * into the returned contiguous array.  The caller owns the returned buffer.
 * Side effect: local_bucket_offsets is advanced one past each bucket's keys.
 */
static inline KEY_TYPE * bucketize_local_keys(KEY_TYPE const * restrict const my_keys,
                                              int * restrict const local_bucket_offsets)
{
    KEY_TYPE * restrict const my_local_bucketed_keys = malloc(NUM_KEYS_PER_PE * sizeof(KEY_TYPE));
    assert(my_local_bucketed_keys);
    timer_start(&timers[TIMER_BUCKETIZE]);
    my_local_bucketed_keys_chunk = malloc(CHUNKS_PER_PE* sizeof(KEY_TYPE**));
    assert(my_local_bucketed_keys_chunk);
    local_bucket_offsets_chunk = (int**) malloc(CHUNKS_PER_PE* sizeof(int*));
    assert(local_bucket_offsets_chunk);
#if defined(_SHMEM_WORKERS)
    int lowBound = 0;
    int highBound = CHUNKS_PER_PE;
    int stride = 1;
    int tile_size = 1;
    int loop_dimension = 1;
    shmem_task_scope_begin();
    shmem_parallel_for_nbi(bucketize_local_keys_async, (void*)(my_keys), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE);
    shmem_task_scope_end();
#else
    /* BUG FIX: 'chunk' was declared only under #if defined(_OPENMP); a build
     * with neither _SHMEM_WORKERS nor _OPENMP failed to compile. */
    int chunk;
#if defined(_OPENMP)
#pragma omp parallel for private(chunk) schedule (dynamic,1)
#endif
    for(chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
        my_local_bucketed_keys_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(KEY_TYPE*));
        assert(my_local_bucketed_keys_chunk[chunk]);
        local_bucket_offsets_chunk[chunk] = malloc(NUM_BUCKETS * sizeof(int));
        assert(local_bucket_offsets_chunk[chunk]);
        memset(local_bucket_offsets_chunk[chunk], 0x00, NUM_BUCKETS * sizeof(int));
        for(int bucket=0; bucket<NUM_BUCKETS; bucket++) {
            my_local_bucketed_keys_chunk[chunk][bucket] = malloc(sizeof(KEY_TYPE) * local_bucket_sizes_chunk[chunk][bucket]);
            assert(my_local_bucketed_keys_chunk[chunk][bucket]);
        }
        const uint32_t keys_per_chunk = NUM_KEYS_PER_PE / CHUNKS_PER_PE;
        const uint32_t start_index = chunk * keys_per_chunk;
        KEY_TYPE const * restrict const my_keys_1D = &(my_keys[start_index]);
        int * restrict local_bucket_offsets_chunk_1D = local_bucket_offsets_chunk[chunk];
        int const * restrict const local_bucket_sizes_chunk_1D = local_bucket_sizes_chunk[chunk];
        KEY_TYPE** restrict my_local_bucketed_keys_chunk_2D = my_local_bucketed_keys_chunk[chunk];
        for(uint64_t i = 0; i < keys_per_chunk; ++i){
            const KEY_TYPE key = my_keys_1D[i];
            const uint32_t bucket_index = key / BUCKET_WIDTH;
            /* Claim the next free slot in this chunk's bucket. */
            uint32_t index = local_bucket_offsets_chunk_1D[bucket_index]++;
            assert(index < local_bucket_sizes_chunk_1D[bucket_index]);
            my_local_bucketed_keys_chunk_2D[bucket_index][index] = key;
        }
    }
#endif
    /* Concatenate the per-chunk buckets into the output in bucket-major
     * order; local_bucket_offsets ends one past each bucket's last key. */
    for(int bucket=0; bucket<NUM_BUCKETS; bucket++) {
        uint32_t index = local_bucket_offsets[bucket];
        for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
            memcpy(&(my_local_bucketed_keys[index]), my_local_bucketed_keys_chunk[chunk][bucket], sizeof(KEY_TYPE) * local_bucket_sizes_chunk[chunk][bucket]);
            index += local_bucket_sizes_chunk[chunk][bucket];
        }
        local_bucket_offsets[bucket] = index;
    }
    // free the per-chunk scratch structures
    for(int chunk=0; chunk<CHUNKS_PER_PE; chunk++) {
        for(int bucket=0; bucket<NUM_BUCKETS; bucket++) {
            free(my_local_bucketed_keys_chunk[chunk][bucket]);
        }
        free(local_bucket_offsets_chunk[chunk]);
        free(my_local_bucketed_keys_chunk[chunk]);
    }
    free(my_local_bucketed_keys_chunk);
    free(local_bucket_offsets_chunk);
    timer_stop(&timers[TIMER_BUCKETIZE]);
#ifdef DEBUG
    wait_my_turn();
    char msg[1024];
    const int my_rank = shmem_my_pe();
    sprintf(msg,"Rank %d: local bucketed keys: ", my_rank);
    for(uint64_t i = 0; i < NUM_KEYS_PER_PE; ++i){
        if(i < PRINT_MAX)
            sprintf(msg + strlen(msg),"%d ", my_local_bucketed_keys[i]);
    }
    sprintf(msg + strlen(msg),"\n");
    printf("%s",msg);
    fflush(stdout);
    my_turn_complete();
#endif
    return my_local_bucketed_keys;
}
#if defined(_SHMEM_WORKERS)
/*
 * Argument bundle for exchange_keys_async; fields mirror the locals of the
 * local-copy phase in exchange_keys.
 */
typedef struct exchange_keys_async_t {
KEY_TYPE const * restrict const my_local_bucketed_keys; // source array holding this PE's own bucket
const long long int max_bucket_size; // number of keys in this PE's own bucket
const long long int send_offsets_start; // read offset of own bucket in the source
const long long int write_offset_into_self; // reserved start in my_bucket_keys
} exchange_keys_async_t;
/*
 * Worker body: copies one worker's even share of this PE's own bucket into
 * my_bucket_keys with memcpy; the last worker also takes the division
 * remainder.
 */
void exchange_keys_async(void* args, int chunk) {
exchange_keys_async_t* arg = (exchange_keys_async_t*) args;
KEY_TYPE const * restrict const my_local_bucketed_keys = arg->my_local_bucketed_keys;
const long long int max_bucket_size = arg->max_bucket_size;
const long long int send_offsets_start = arg->send_offsets_start;
const long long int write_offset_into_self = arg->write_offset_into_self;
/* Even share per worker (integer division); remainder handled below. */
const long long int chunks = max_bucket_size / actual_num_workers;
const long long int write_offset_into_self_worker = write_offset_into_self + (chunk * chunks);
const long long int send_offsets_start_worker = send_offsets_start + (chunk * chunks);
long long int send_size = chunks;
if(chunk+1 == actual_num_workers) {
long long int leftover = max_bucket_size - (chunks * actual_num_workers);
send_size += leftover;
}
memcpy(&my_bucket_keys[write_offset_into_self_worker],&my_local_bucketed_keys[send_offsets_start_worker],
send_size*sizeof(KEY_TYPE));
}
#endif
/*
 * Each PE sends the contents of its local buckets to the PE that owns that bucket.
 * Remote buckets go via shmem_int_put after atomically reserving space in the
 * target PE's receive buffer; the PE's own bucket is copied locally with
 * memcpy (split across workers when available).
 */
static inline KEY_TYPE * exchange_keys(int const * restrict const send_offsets,
                                       int const * restrict const local_bucket_sizes,
                                       KEY_TYPE const * restrict const my_local_bucketed_keys)
{
    timer_start(&timers[TIMER_ATA_KEYS]);
    const int my_rank = shmem_my_pe();
    unsigned int total_keys_sent = 0;
    unsigned long long start_time = current_time_ns();
    for(uint64_t i = 0; i < NUM_PES; ++i){
#ifdef PERMUTE
        const int target_pe = permute_array[i];
#elif INCAST
        const int target_pe = i;
#else
        const int target_pe = (my_rank + i) % NUM_PES;
#endif
        // Local keys already written with memcpy
        if(target_pe == my_rank){ continue; }
        const int read_offset_from_self = send_offsets[target_pe];
        const int my_send_size = local_bucket_sizes[target_pe];
        /* Atomically reserve my_send_size slots in the target PE's buffer. */
        const long long int write_offset_into_target = shmem_longlong_fadd(&receive_offset, (long long int)my_send_size, target_pe);
        /* BUG FIX: the diagnostics below passed int values to %llu, which is
         * undefined behavior; arguments are now cast to match the format. */
        if (write_offset_into_target + my_send_size > KEY_BUFFER_SIZE) {
            fprintf(stderr, "%lld %lld\n", (long long int)(write_offset_into_target + my_send_size), (long long int)KEY_BUFFER_SIZE);
            exit(1);
        }
        if (read_offset_from_self + my_send_size > NUM_KEYS_PER_PE) {
            fprintf(stderr, "%lld %lld\n", (long long int)(read_offset_from_self + my_send_size), (long long int)NUM_KEYS_PER_PE);
            exit(1);
        }
        if (shmem_my_pe() == 0) {
            /* NOTE(review): rank 0 prints once per remote PE -- looks like
             * leftover debug tracing; confirm it is wanted in production. */
            fprintf(stderr, "Putting %d integers to PE %d\n", my_send_size, target_pe);
        }
        shmem_int_put(&(my_bucket_keys[write_offset_into_target]),
                      &(my_local_bucketed_keys[read_offset_from_self]),
                      my_send_size, target_pe);
#ifdef DEBUG
        printf("Rank: %d Target: %d Offset into target: %lld Offset into myself: %d Send Size: %d\n",
               my_rank, target_pe, write_offset_into_target, read_offset_from_self, my_send_size);
#endif
        total_keys_sent += my_send_size;
    }
    unsigned long long intermediate_time = current_time_ns();
    // Keys destined for local key buffer can be written with memcpy
    const long long int write_offset_into_self = shmem_longlong_fadd(&receive_offset, (long long int)local_bucket_sizes[my_rank], my_rank);
    const long long int send_offsets_start = send_offsets[my_rank];
    const long long int chunks = local_bucket_sizes[my_rank] / actual_num_workers;
    const long long int max_bucket_size = local_bucket_sizes[my_rank];
#if defined(_SHMEM_WORKERS)
    int lowBound = 0;
    int highBound = actual_num_workers;
    int stride = 1;
    int tile_size = 1;
    int loop_dimension = 1;
    exchange_keys_async_t args = {my_local_bucketed_keys, max_bucket_size, send_offsets_start, write_offset_into_self};
    shmem_task_scope_begin();
    shmem_parallel_for_nbi(exchange_keys_async, (void*)(&args), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE);
    shmem_task_scope_end();
#else
    /* BUG FIX: 'chunk' was declared only under #if defined(_OPENMP); a build
     * with neither _SHMEM_WORKERS nor _OPENMP failed to compile. */
    int chunk;
#if defined(_OPENMP)
#pragma omp parallel for private(chunk) schedule (dynamic,1)
#endif
    for(chunk=0; chunk<actual_num_workers; chunk++) {
        const long long int write_offset_into_self_worker = write_offset_into_self + (chunk * chunks);
        const long long int send_offsets_start_worker = send_offsets_start + (chunk * chunks);
        long long int send_size = chunks;
        if(chunk+1 == actual_num_workers) {
            /* Last worker also copies the division remainder. */
            long long int leftover = max_bucket_size - (chunks * actual_num_workers);
            send_size += leftover;
        }
        memcpy(&my_bucket_keys[write_offset_into_self_worker],&my_local_bucketed_keys[send_offsets_start_worker],
               send_size*sizeof(KEY_TYPE));
    }
#endif
    unsigned long long end_time = current_time_ns();
    if (shmem_my_pe() == 0) {
        fprintf(stderr, "Time slices = %llu %llu\n", intermediate_time - start_time, end_time - intermediate_time);
    }
#ifdef BARRIER_ATA
    SHMEM_BARRIER_AT_EXCHANGE;
#endif
    timer_stop(&timers[TIMER_ATA_KEYS]);
    timer_count(&timers[TIMER_ATA_KEYS], total_keys_sent);
#ifdef DEBUG
    wait_my_turn();
    char msg[1024];
    sprintf(msg,"Rank %d: Bucket Size %lld | Total Keys Sent: %u | Keys after exchange:",
            my_rank, receive_offset, total_keys_sent);
    for(long long int i = 0; i < receive_offset; ++i){
        if(i < PRINT_MAX)
            sprintf(msg + strlen(msg),"%d ", my_bucket_keys[i]);
    }
    sprintf(msg + strlen(msg),"\n");
    printf("%s",msg);
    fflush(stdout);
    my_turn_complete();
#endif
    return my_bucket_keys;
}
#if defined(_SHMEM_WORKERS)
/* Argument bundle for count_local_keys_async. */
typedef struct count_local_keys_async_t {
int max_chunks; // keys handled per worker (my_bucket_size / actual_num_workers)
int my_min_key; // smallest key value owned by this PE's bucket
} count_local_keys_async_t;
/*
 * Worker body: histograms one contiguous slice of my_bucket_keys into this
 * chunk's private my_local_key_counts[chunk] array, indexed key - my_min_key.
 */
void count_local_keys_async(void* args, int chunk) {
count_local_keys_async_t* arg = (count_local_keys_async_t*) args;
const int max_chunks = arg->max_chunks;
const int my_min_key = arg->my_min_key;
const int start_index = chunk * max_chunks;
int * restrict my_local_key_counts_1D = my_local_key_counts[chunk];
int const * restrict const my_bucket_keys_1D = &(my_bucket_keys[start_index]);
for(int i=0; i<max_chunks; i++) {
/* Shift keys so counting starts at index 0. */
const unsigned int key_index = my_bucket_keys_1D[i] - my_min_key;
assert(my_bucket_keys_1D[i] >= my_min_key);
assert(key_index < BUCKET_WIDTH);
my_local_key_counts_1D[key_index]++;
}
}
#endif
/*
 * Counts the occurence of each key in my bucket.
 * Key indices into the count array are the key's value minus my bucket's
 * minimum key value to allow indexing from 0.
 * my_bucket_keys: All keys in my bucket unsorted [my_rank * BUCKET_WIDTH, (my_rank+1)*BUCKET_WIDTH)
 *
 * Results land in the global my_local_key_counts[chunk][] arrays (one
 * histogram per chunk); the function itself returns NULL.
 */
static inline int* count_local_keys(KEY_TYPE const * restrict const my_bucket_keys)
{
    timer_start(&timers[TIMER_SORT]);
    const int my_rank = shmem_my_pe();
    const int my_min_key = my_rank * BUCKET_WIDTH;
    /* NOTE(review): the slice size divides by actual_num_workers while the
     * loops iterate CHUNKS_COUNT_LOCAL_KEYS chunks; these must be equal for
     * every key to be counted exactly once -- confirm that invariant. */
    const int max_chunks = (int) my_bucket_size / actual_num_workers;
#if defined(_SHMEM_WORKERS)
    int lowBound = 0;
    int highBound = CHUNKS_COUNT_LOCAL_KEYS;
    int stride = 1;
    int tile_size = 1;
    int loop_dimension = 1;
    count_local_keys_async_t args = {max_chunks, my_min_key};
    shmem_task_scope_begin();
    shmem_parallel_for_nbi(count_local_keys_async, (void*)(&args), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE);
    shmem_task_scope_end();
#else
    /* BUG FIX: 'chunk' was declared only under #if defined(_OPENMP); a build
     * with neither _SHMEM_WORKERS nor _OPENMP failed to compile. */
    int chunk;
#if defined(_OPENMP)
#pragma omp parallel for private(chunk) schedule (static,1)
#endif
    for(chunk=0; chunk<CHUNKS_COUNT_LOCAL_KEYS; chunk++) {
        const int start_index = chunk * max_chunks;
        int * restrict my_local_key_counts_1D = my_local_key_counts[chunk];
        int const * restrict const my_bucket_keys_1D = &(my_bucket_keys[start_index]);
        for(int i=0; i<max_chunks; i++) {
            /* Shift keys so counting starts at index 0. */
            const unsigned int key_index = my_bucket_keys_1D[i] - my_min_key;
            assert(my_bucket_keys_1D[i] >= my_min_key);
            assert(key_index < BUCKET_WIDTH);
            my_local_key_counts_1D[key_index]++;
        }
    }
#endif
    // Sequential tail: keys left over when my_bucket_size does not divide
    // evenly are credited to the last chunk's histogram.
    const int leftover = my_bucket_size - (max_chunks * CHUNKS_COUNT_LOCAL_KEYS);
    if(leftover) {
        const int chunk = CHUNKS_COUNT_LOCAL_KEYS - 1;
        for(int i=(my_bucket_size-leftover); i<my_bucket_size; i++) {
            const unsigned int key_index = my_bucket_keys[i] - my_min_key;
            assert(my_bucket_keys[i] >= my_min_key);
            assert(key_index < BUCKET_WIDTH);
            my_local_key_counts[chunk][key_index]++;
        }
    }
    timer_stop(&timers[TIMER_SORT]);
#ifdef DEBUG
    wait_my_turn();
    char msg[4096];
    sprintf(msg,"Rank %d: Bucket Size %lld | Local Key Counts:", my_rank, my_bucket_size);
    for(int chunk=0; chunk<actual_num_workers; chunk++) {
        for(uint64_t i = 0; i < BUCKET_WIDTH; ++i){
            if(i < PRINT_MAX)
                sprintf(msg + strlen(msg),"%d ", my_local_key_counts[chunk][i]);
        }
    }
    sprintf(msg + strlen(msg),"\n");
    printf("%s",msg);
    fflush(stdout);
    my_turn_complete();
#endif
    return NULL;
}
/* Argument bundle for verify_results_async. */
typedef struct verify_results_async_t {
int max_chunks; // keys checked per worker
int my_min_key; // inclusive lower bound of this PE's key range
int my_max_key; // inclusive upper bound of this PE's key range
} verify_results_async_t;
/*
 * Worker body: checks one contiguous slice of my_bucket_keys against this
 * PE's key bounds.
 * NOTE(review): a failure is only printed here -- no error flag is set, so
 * verify_results cannot observe it; confirm this is intended.
 */
void verify_results_async(void* args, int chunk) {
verify_results_async_t* arg = (verify_results_async_t*) args;
const int max_chunks = arg->max_chunks;
const int my_min_key = arg->my_min_key;
const int my_max_key = arg->my_max_key;
const int start_index = chunk * max_chunks;
const int max_index = start_index + max_chunks;
for(int i=start_index; i<max_index; i++) {
const int key = my_bucket_keys[i];
if((key < my_min_key) || (key > my_max_key)){
printf("Rank %d Failed Verification!\n",shmem_my_pe());
printf("Key: %d is outside of bounds [%d, %d]\n", key, my_min_key, my_max_key);
}
}
}
/*
 * Verifies the correctness of the sort.
 * Ensures all keys are within a PE's bucket boundaries.
 * Ensures the final number of keys is equal to the initial.
 * Collective: every PE must call this.  Returns nonzero on failure.
 */
static int verify_results(KEY_TYPE const * restrict const my_local_keys)
{
    shmem_barrier_all();
    int error = 0;
    const int my_rank = shmem_my_pe();
    const int my_min_key = my_rank * BUCKET_WIDTH;
    const int my_max_key = (my_rank+1) * BUCKET_WIDTH - 1;
    const int max_chunks = (int) my_bucket_size / actual_num_workers;
#if defined(_SHMEM_WORKERS)
    int lowBound = 0;
    int highBound = actual_num_workers;
    int stride = 1;
    int tile_size = 1;
    int loop_dimension = 1;
    verify_results_async_t args = {max_chunks, my_min_key, my_max_key};
    shmem_task_scope_begin();
    shmem_parallel_for_nbi(verify_results_async, (void*)(&args), NULL, lowBound, highBound, stride, tile_size, loop_dimension, SHMEM_PARALLEL_FOR_FLAT_MODE);
    shmem_task_scope_end();
#else
    /* BUG FIX: 'chunk' was declared only under #if defined(_OPENMP); a build
     * with neither _SHMEM_WORKERS nor _OPENMP failed to compile. */
    int chunk;
#if defined(_OPENMP)
#pragma omp parallel for private(chunk) schedule (static,1)
#endif
    // Verify all keys are within bucket boundaries
    /* NOTE(review): like the _SHMEM_WORKERS path, this loop only prints on a
     * bad key and does not set 'error' (the sequential tail below does) --
     * confirm that asymmetry is intended. */
    for(chunk=0; chunk<actual_num_workers; chunk++) {
        const int start_index = chunk * max_chunks;
        const int max_index = start_index + max_chunks;
        for(int i=start_index; i<max_index; i++) {
            const int key = my_bucket_keys[i];
            if((key < my_min_key) || (key > my_max_key)){
                printf("Rank %d Failed Verification!\n",shmem_my_pe());
                printf("Key: %d is outside of bounds [%d, %d]\n", key, my_min_key, my_max_key);
            }
        }
    }
#endif
    // Sequential tail: check the keys left over from the even split.
    const int leftover = my_bucket_size - (max_chunks * actual_num_workers);
    if(leftover) {
        for(int i=(my_bucket_size-leftover); i<my_bucket_size; i++) {
            const int key = my_local_keys[i];
            if((key < my_min_key) || (key > my_max_key)){
                printf("Rank %d Failed Verification!\n",my_rank);
                printf("Key: %d is outside of bounds [%d, %d]\n", key, my_min_key, my_max_key);
                error = 1;
            }
        }
    }
    // Verify the sum of the key population equals the expected bucket size
    long long int bucket_size_test = 0;
    for(int chunk=0; chunk<CHUNKS_COUNT_LOCAL_KEYS; chunk++) {
        for(uint64_t i = 0; i < BUCKET_WIDTH; ++i){
            bucket_size_test += my_local_key_counts[chunk][i];
        }
    }
    if(bucket_size_test != my_bucket_size){
        printf("Rank %d Failed Verification!\n",my_rank);
        printf("Actual Bucket Size: %lld Should be %lld\n", bucket_size_test, my_bucket_size);
        error = 1;
    }
    // Verify the final number of keys equals the initial number of keys.
    // 'static' keeps the reduction target symmetric across PEs.
    static long long int total_num_keys = 0;
    shmem_longlong_sum_to_all(&total_num_keys, &my_bucket_size, 1, 0, 0, NUM_PES, llWrk, pSync);
    shmem_barrier_all();
    if(total_num_keys != (long long int)(NUM_KEYS_PER_PE * NUM_PES)){
        if(my_rank == ROOT_PE){
            printf("Verification Failed!\n");
            printf("Actual total number of keys: %lld Expected %" PRIu64 "\n", total_num_keys, NUM_KEYS_PER_PE * NUM_PES );
            error = 1;
        }
    }
    return error;
}
/*
 * Gathers every PE's timing data (collective -- all PEs participate in the
 * gathers) and appends it to 'log_file' as tab-separated rows.  Only ROOT_PE
 * performs the file I/O, writing the header row on first creation.
 */
static void log_times(char * log_file)
{
    for(uint64_t t = 0; t < TIMER_NTIMERS; ++t){
        timers[t].all_times = gather_rank_times(&timers[t]);
        timers[t].all_counts = gather_rank_counts(&timers[t]);
    }
    if(shmem_my_pe() != ROOT_PE){
        return;
    }
    // Header (run info + column names) goes in only when the file is new.
    const int first_write = (file_exists(log_file) != 1);
    FILE * fp = fopen(log_file, "a+b");
    if(fp == NULL){
        perror("Error opening log file:");
        exit(1);
    }
    if(first_write){
        print_run_info(fp);
        print_timer_names(fp);
    }
    print_timer_values(fp);
    report_summary_stats();
    fclose(fp);
}
/*
 * Computes the average total time and average all2all time per PE across all
 * gathered records and prints them to stdout.
 */
static void report_summary_stats(void)
{
    if(timers[TIMER_TOTAL].seconds_iter > 0) {
        const uint32_t num_records = NUM_PES * timers[TIMER_TOTAL].seconds_iter;
        double accum = 0.0;
        for(uint64_t r = 0; r < num_records; ++r){
            accum += timers[TIMER_TOTAL].all_times[r];
        }
        const double mean = accum / num_records;
#ifdef EXTRA_STATS
        avg_time = mean;
#endif
        printf("Average total time (per PE): %f seconds\n", mean);
    }
    if(timers[TIMER_ATA_KEYS].seconds_iter >0) {
        const uint32_t num_records = NUM_PES * timers[TIMER_ATA_KEYS].seconds_iter;
        double accum = 0.0;
        for(uint64_t r = 0; r < num_records; ++r){
            accum += timers[TIMER_ATA_KEYS].all_times[r];
        }
        const double mean = accum / num_records;
#ifdef EXTRA_STATS
        avg_time_all2all = mean;
#endif
        printf("Average all2all time (per PE): %f seconds\n", mean);
    }
}
/*
 * Writes one header row to 'fp' containing a label for every timer column
 * that will appear in the data rows (seconds and/or counts per timer).
 */
static void print_timer_names(FILE * fp)
{
    for(uint64_t t = 0; t < TIMER_NTIMERS; ++t){
        const int have_seconds = (timers[t].seconds_iter > 0);
        const int have_counts = (timers[t].count_iter > 0);
        if(have_seconds){
            fprintf(fp, "%s (sec)\t", timer_names[t]);
        }
        if(have_counts){
            fprintf(fp, "%s_COUNTS\t", timer_names[t]);
        }
    }
    fprintf(fp,"\n");
}
/*
 * Prints all the relevant runtime parameters (PE count, key range, iteration
 * count, scaling mode, all2all pattern) as one row to the file 'fp'.
 */
static void print_run_info(FILE * fp)
{
    fprintf(fp,"SHMEM\t");
    fprintf(fp,"NUM_PES %" PRIu64 "\t", NUM_PES);
    fprintf(fp,"Max_Key %" PRIu64 "\t", MAX_KEY_VAL);
    fprintf(fp,"Num_Iters %u\t", NUM_ITERATIONS);
    switch(SCALING_OPTION){
        case STRONG: {
            fprintf(fp,"Strong Scaling: %" PRIu64 " total keys\t", NUM_KEYS_PER_PE * NUM_PES);
            break;
        }
        case WEAK: {
            fprintf(fp,"Weak Scaling: %" PRIu64 " keys per PE\t", NUM_KEYS_PER_PE);
            break;
        }
        case WEAK_ISOBUCKET: {
            /* BUG FIX: the format string had a stray 'u' after the PRIu64
             * expansion ("%" PRIu64 "u..."), printing e.g. "1048576u". */
            fprintf(fp,"Weak Scaling Constant Bucket Width: %" PRIu64 " keys per PE \t", NUM_KEYS_PER_PE);
            fprintf(fp,"Constant Bucket Width: %" PRIu64 "\t", BUCKET_WIDTH);
            break;
        }
        default:
        {
            fprintf(fp,"Invalid Scaling Option!\t");
            break;
        }
    }
#ifdef PERMUTE
    fprintf(fp,"Randomized All2All\t");
#elif INCAST
    fprintf(fp,"Incast All2All\t");
#else
    fprintf(fp,"Round Robin All2All\t");
#endif
    fprintf(fp,"\n");
}
/*
 * Writes the gathered timing data to 'fp': one tab-separated row per
 * (PE, iteration) record, with a column per available timer value/count.
 */
static void print_timer_values(FILE * fp)
{
    const unsigned int num_records = NUM_PES * NUM_ITERATIONS;
    for(uint64_t rec = 0; rec < num_records; ++rec) {
        for(int t = 0; t < TIMER_NTIMERS; ++t){
            if(timers[t].all_times != NULL){
                fprintf(fp,"%f\t", timers[t].all_times[rec]);
            }
            if(timers[t].all_counts != NULL){
                fprintf(fp,"%u\t", timers[t].all_counts[rec]);
            }
        }
        fprintf(fp,"\n");
    }
}
/*
 * Aggregates the per PE timing information: collects every PE's recorded
 * seconds into one symmetric array of NUM_PES * seconds_iter doubles via
 * shmem_fcollect64.  Returns NULL when this timer recorded nothing;
 * otherwise returns the shmem-allocated gathered array (freed at teardown).
 */
static double * gather_rank_times(_timer_t * const timer)
{
    if(timer->seconds_iter > 0) {
        assert(timer->seconds_iter == timer->num_iters);
        const unsigned int num_records = NUM_PES * timer->seconds_iter;
#ifdef OPENSHMEM_COMPLIANT
        double * my_times = shmem_malloc(timer->seconds_iter * sizeof(double));
        assert(my_times);
        /* BUG FIX: the size_t byte counts were printed with %llu, which is
         * undefined behavior where size_t != unsigned long long; use %zu. */
        if (shmem_my_pe() == 0) fprintf(stderr, "mallocing %zu bytes\n", timer->seconds_iter * sizeof(double));
#else
        double * my_times = shmalloc(timer->seconds_iter * sizeof(double));
#endif
        memcpy(my_times, timer->seconds, timer->seconds_iter * sizeof(double));
#ifdef OPENSHMEM_COMPLIANT
        double * all_times = shmem_malloc( num_records * sizeof(double));
        assert(all_times);
        if (shmem_my_pe() == 0) fprintf(stderr, "mallocing %zu bytes\n", num_records * sizeof(double));
#else
        double * all_times = shmalloc( num_records * sizeof(double));
#endif
        /* fcollect requires every PE to contribute the same element count. */
        shmem_barrier_all();
        shmem_fcollect64(all_times, my_times, timer->seconds_iter, 0, 0, NUM_PES, pSync);
        shmem_barrier_all();
#ifdef OPENSHMEM_COMPLIANT
        shmem_free(my_times);
#else
        shfree(my_times);
#endif
        return all_times;
    }
    else{
        return NULL;
    }
}
/*
 * Aggregates the per PE timing 'count' information into one symmetric array
 * of NUM_PES * num_iters unsigned ints via shmem_collect32.  Returns NULL
 * when this timer recorded no counts; otherwise returns the shmem-allocated
 * gathered array.
 */
static unsigned int * gather_rank_counts(_timer_t * const timer)
{
    if(timer->count_iter > 0){
        /* NOTE(review): sizes use num_iters while the guard checks
         * count_iter (gather_rank_times asserts they match) -- confirm the
         * same invariant holds for counts. */
        const unsigned int num_records = NUM_PES * timer->num_iters;
#ifdef OPENSHMEM_COMPLIANT
        unsigned int * my_counts = shmem_malloc(timer->num_iters * sizeof(unsigned int));
        assert(my_counts);
        /* BUG FIX: the size_t byte counts were printed with %llu, which is
         * undefined behavior where size_t != unsigned long long; use %zu. */
        if (shmem_my_pe() == 0) fprintf(stderr, "mallocing %zu bytes\n", timer->num_iters * sizeof(unsigned int));
#else
        unsigned int * my_counts = shmalloc(timer->num_iters * sizeof(unsigned int));
#endif
        memcpy(my_counts, timer->count, timer->num_iters*sizeof(unsigned int));
#ifdef OPENSHMEM_COMPLIANT
        unsigned int * all_counts = shmem_malloc( num_records * sizeof(unsigned int) );
        assert(all_counts);
        if (shmem_my_pe() == 0) fprintf(stderr, "mallocing %zu bytes\n", num_records * sizeof(unsigned int));
#else
        unsigned int * all_counts = shmalloc( num_records * sizeof(unsigned int) );
#endif
        shmem_barrier_all();
        shmem_collect32(all_counts, my_counts, timer->num_iters, 0, 0, NUM_PES, pSync);
        shmem_barrier_all();
#ifdef OPENSHMEM_COMPLIANT
        shmem_free(my_counts);
#else
        shfree(my_counts);
#endif
        return all_counts;
    }
    else{
        return NULL;
    }
}
/*
 * Seeds a PCG32 PRNG deterministically for one worker chunk, derived from
 * the (rank, chunk) pair via GET_VIRTUAL_RANK.  (Note: despite the old
 * comment, no time-based entropy is mixed in -- seeding is reproducible.)
 */
static inline pcg32_random_t seed_my_chunk(int chunk)
{
    pcg32_random_t rng;
    const unsigned int rank = shmem_my_pe();
    const unsigned int virtual_rank = GET_VIRTUAL_RANK(rank, chunk);
    pcg32_srandom_r(&rng, (uint64_t) virtual_rank, (uint64_t) virtual_rank);
    return rng;
}
/*
 * Seeds a PCG32 PRNG deterministically from this PE's rank, used as both the
 * initial state and the sequence selector.  (No time-based entropy is used,
 * so runs are reproducible.)
 */
static inline pcg32_random_t seed_my_rank(void)
{
    pcg32_random_t rng;
    const unsigned int rank = shmem_my_pe();
    pcg32_srandom_r(&rng, (uint64_t) rank, (uint64_t) rank);
    return rng;
}
/*
 * Fills the SHMEM collective-sync work array with the required initial value
 * and synchronizes all PEs so the array is safe to use in collectives.
 */
static void init_shmem_sync_array(long * restrict const pSync)
{
    for(uint64_t idx = 0; idx < _SHMEM_REDUCE_SYNC_SIZE; ++idx){
        pSync[idx] = _SHMEM_SYNC_VALUE;
    }
    shmem_barrier_all();
}
/*
 * Tests whether or not a file exists.
 * Returns 1 if file exists
 * Returns 0 if file does not exist
 */
static int file_exists(char * filename)
{
    struct stat buffer;
    return (stat(filename, &buffer) == 0) ? 1 : 0;
}
#ifdef DEBUG
/*
 * Serializes debug printing across PEs: resets the turn counter, then blocks
 * until 'whose_turn' equals this PE's rank (turns advance in rank order via
 * my_turn_complete).
 */
static void wait_my_turn()
{
shmem_barrier_all();
whose_turn = 0;
shmem_barrier_all();
const int my_rank = shmem_my_pe();
shmem_int_wait_until((int*)&whose_turn, SHMEM_CMP_EQ, my_rank);
/* Brief pause -- presumably to let the previous rank's output drain. */
sleep(1);
}
/*
 * Passes the printing turn to the next rank, then rejoins the global barrier
 * once every PE has finished printing.
 */
static void my_turn_complete()
{
const int my_rank = shmem_my_pe();
const int next_rank = my_rank+1;
if(my_rank < (NUM_PES-1)){ // Last rank updates no one
shmem_int_put((int *) &whose_turn, &next_rank, 1, next_rank);
}
shmem_barrier_all();
}
#endif
#ifdef PERMUTE
/*
 * Builds the global permute_array: the identity mapping over all PEs,
 * shuffled into a random order for the exchange_keys all2all pattern.
 */
static void create_permutation_array()
{
    permute_array = (int *) malloc( NUM_PES * sizeof(int) );
    assert(permute_array);
    for(uint64_t pe = 0; pe < NUM_PES; ++pe){
        permute_array[pe] = pe;
    }
    shuffle(permute_array, NUM_PES, sizeof(int));
}
/*
 * Randomly shuffles a generic array of 'n' elements of 'size' bytes each
 * with a Fisher-Yates style pass driven by rand().
 *
 * Fixes vs. the previous version:
 *  - the swap scratch buffer was a VLA (char tmp[size]); a large element
 *    size risks stack overflow, so it is now heap-allocated;
 *  - removed the redundant 'stride = size * sizeof(char)' (sizeof(char)==1).
 */
static void shuffle(void * array, size_t n, size_t size)
{
    if(n <= 1){
        return;
    }
    char * const arr = (char *) array;
    char * const tmp = (char *) malloc(size);
    assert(tmp);
    for(size_t i = 0; i < (n - 1); ++i){
        /* Pick j in [i, n); the classic rand()-based formula (retains its
         * slight bias, as in the original). */
        const size_t rnd = (size_t) rand();
        const size_t j = i + rnd/(RAND_MAX/(n - i) + 1);
        memcpy(tmp, arr + j*size, size);
        memcpy(arr + j*size, arr + i*size, size);
        memcpy(arr + i*size, tmp, size);
    }
    free(tmp);
}
#endif
|
GB_unop__identity_uint16_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_uint64)
// op(A') function: GB (_unop_tran__identity_uint16_uint64)
// C type: uint16_t
// A type: uint64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
// A (input) matrix entry type
#define GB_ATYPE \
uint64_t
// C (output) matrix entry type
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// C entry accessor
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
// (0 here: the op is identity but uint64_t is still typecast to uint16_t)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies the identity op with a uint64_t -> uint16_t typecast to each of the
// anz entries of Ax, writing into Cx.  (Auto-generated file: do not edit.)
GrB_Info GB (_unop_apply__identity_uint16_uint64)
(
uint16_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// shared loop index for the OpenMP parallel loops below
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// dense/full case: every entry is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
uint16_t z = (uint16_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
uint16_t z = (uint16_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The shared transpose kernel in GB_unop_transpose.c is specialized via the
// GB_* macros defined above.  (Auto-generated file: do not edit.)
GrB_Info GB (_unop_tran__identity_uint16_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pi_task.c | /*
This program will numerically compute the integral of
4/(1+x*x)
from 0 to 1. The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.
This version of the program uses a divide and concquer algorithm
with tasks and taskwait.
History: Written by Tim Mattson, 10/2013
*/
#include <omp.h>
#include <stdio.h>
static long num_steps = 1024*1024*1024;
#define MIN_BLK 1024*1024*256
#define MAX 4
/*
 * Recursively integrates 4/(1+x*x) over the index range [Nstart, Nfinish)
 * with the midpoint rule.  Ranges below MIN_BLK are summed directly; larger
 * ranges are halved and each half evaluated as an OpenMP task, joined with
 * taskwait.
 */
double pi_comp(int Nstart,int Nfinish,double step)
{
    double sum1 = 0.0, sum2 = 0.0;
    if (Nfinish - Nstart < MIN_BLK) {
        /* Base case: serial midpoint-rule summation. */
        double acc = 0.0;
        for (int i = Nstart; i < Nfinish; i++) {
            const double x = (i + 0.5) * step;
            acc += 4.0 / (1.0 + x * x);
        }
        return acc;
    }
    /* Same split point as the original divide step: Nfinish - iblk/2. */
    const int mid = Nfinish - (Nfinish - Nstart) / 2;
#pragma omp task shared(sum1)
    sum1 = pi_comp(Nstart, mid, step);
#pragma omp task shared(sum2)
    sum2 = pi_comp(mid, Nfinish, step);
#pragma omp taskwait
    return sum1 + sum2;
}
/*
 * Driver: for thread counts 1..MAX, times the task-based pi computation over
 * num_steps intervals and prints the result and elapsed wall time.
 * (Fix: removed the unused local 'i'.)
 */
int main ()
{
    double step, pi, sum;
    double init_time, final_time;
    step = 1.0/(double) num_steps;
    for (int j=1; j<=MAX; j++){
        omp_set_num_threads(j);
        init_time = omp_get_wtime();
#pragma omp parallel
        {
            /* One thread spawns the root task; the team executes the task tree. */
#pragma omp single
            {
                printf("num threads=%d",omp_get_num_threads());
                sum = pi_comp(0,num_steps,step);
            }
        }
        pi = step * sum;
        final_time = omp_get_wtime() - init_time;
        printf(" for %ld steps pi = %f in %f secs\n",num_steps,pi,final_time);
    }
}
|
GxB_IndexUnaryOp_ztype_name.c | //------------------------------------------------------------------------------
// GxB_IndexUnaryOp_ztype_name: return the type_name of z for z=f(x,thunk)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GxB_IndexUnaryOp_ztype_name // return the name of the type of z
(
char *type_name, // name of the type (char array of size at least
// GxB_MAX_NAME_LEN, owned by the user application).
const GrB_IndexUnaryOp op
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GxB_IndexUnaryOp_ztype_name (type_name, op)") ;
GB_RETURN_IF_NULL (type_name) ;
GB_RETURN_IF_NULL_OR_FAULTY (op) ;
ASSERT_INDEXUNARYOP_OK (op, "op for ztype_name", GB0) ;
//--------------------------------------------------------------------------
// get the type_name
//--------------------------------------------------------------------------
// copy the op's output-type name into the caller-owned buffer; names are
// stored at a fixed GxB_MAX_NAME_LEN width, so the copy is always in bounds
memcpy (type_name, op->ztype->name, GxB_MAX_NAME_LEN) ;
// NOTE(review): presumably flushes the copied name for other threads, a
// pattern seen across GraphBLAS API getters -- confirm intent
#pragma omp flush
return (GrB_SUCCESS) ;
}
|
jac_solv_par_for.c | /*
** PROGRAM: jacobi Solver ... parallel region containing for constructs
**
** PURPOSE: This program will explore use of a jacobi iterative
** method to solve a system of linear equations (Ax= b).
**
** Here is the basic idea behind the method. Rewrite
** the matrix A as a Lower Triangular (L), upper triangular
** (U) and diagonal matrix (D)
**
** Ax = (L + D + U)x = b
**
** Carry out the multiplication and rearrange:
**
** Dx = b - (L+U)x --> x = (b-(L+U)x)/D
**
** We can do this iteratively
**
** x_new = (b-(L+U)x_old)/D
**
** USAGE: Run without arguments to use the default SIZE.
**
** ./jac_solv
**
** Run with a single argument for the order of the A
** matrix ... for example
**
** ./jac_solv 2500
**
** HISTORY: Written by Tim Mattson, Oct 2015
*/
#include<omp.h>
#include<math.h>
#include "mm_utils.h" //a library of basic matrix utilities functions
//and some key constants used in this program
//(such as TYPE)
#define TOLERANCE 0.001
#define DEF_SIZE 1000
#define MAX_ITERS 5000
#define LARGE 1000000.0
//#define DEBUG 1 // output a small subset of intermediate values
//#define VERBOSE 1
/*
** Jacobi iterative solver for Ax = b (parallel region + for constructs).
**
** argv[1] (optional): order Ndim of the A matrix; defaults to DEF_SIZE.
**
** Fix: the final report used to print sqrt(err) AFTER err had already been
** square-rooted, i.e. it printed the fourth root of the residual norm.
** It now prints err itself (the 2-norm of A*x - b).
*/
int main(int argc, char **argv)
{
  int Ndim; // A[Ndim][Ndim]
  int i, j, iters;
  double start_time, elapsed_time;
  TYPE conv, tmp, err, chksum;
  TYPE *A, *b, *x1, *x2, *xnew, *xold, *xtmp;

  // set matrix dimensions and allocate memory for matrices
  if (argc == 2) {
    Ndim = atoi(argv[1]);
  }
  else {
    Ndim = DEF_SIZE;
  }

  printf(" jacobi solver parallel (parallel + for version): ndim = %d\n", Ndim);
  A  = (TYPE *) malloc(Ndim*Ndim*sizeof(TYPE));
  b  = (TYPE *) malloc(Ndim*sizeof(TYPE));
  x1 = (TYPE *) malloc(Ndim*sizeof(TYPE));
  x2 = (TYPE *) malloc(Ndim*sizeof(TYPE));
  if (!A || !b || !x1 || !x2)
  {
    printf("\n memory allocation error\n");
    exit(-1);
  }

  // generate our diagonally dominant matrix, A
  init_diag_dom_near_identity_matrix(Ndim, A);

#ifdef VERBOSE
  mm_print(Ndim, Ndim, A);
#endif

  //
  // Initialize x to zero and give b some non-zero random values in [0, 0.5]
  //
  for (i = 0; i < Ndim; i++) {
    x1[i] = (TYPE)0.0;
    x2[i] = (TYPE)0.0;
    b[i]  = (TYPE)(rand() % 51) / 100.0;
  }

  start_time = omp_get_wtime();

  //
  // jacobi iterative solver
  //
  conv  = LARGE;
  iters = 0;
  xnew  = x1;
  xold  = x2;

  // One parallel region spans the whole iteration so the thread team is
  // created only once; worksharing constructs inside the while loop do the
  // actual parallel work.  Loop indices i,j are privatized on the for
  // constructs; everything the while condition reads is updated under
  // single/reduction with the required barriers.
#pragma omp parallel default(none) private(tmp) \
    shared (Ndim, conv, iters, b, A, xnew, xold, xtmp)
  {
    // note: i am comparing against the convergence squared. This saves a
    // sqrt and an extra barrier.
    while ((conv > TOLERANCE*TOLERANCE) && (iters < MAX_ITERS))
    {
#pragma omp single
      {
        xtmp = xnew; // don't copy arrays.
        xnew = xold; // just swap pointers.
        xold = xtmp;
      }

#ifdef DEBUG
      printf("thread %d, iters=%d conv=%f\n",
             omp_get_thread_num(), iters, (float)conv);
#endif

      // nowait is safe: the barrier at the end of the following single
      // construct synchronizes the team before conv is reset.
#pragma omp for private(i,j) nowait
      for (i = 0; i < Ndim; i++) {
        xnew[i] = (TYPE) 0.0;
        for (j = 0; j < Ndim; j++) {
          // branch-free form of "skip the diagonal term":
          // the (i != j) factor zeroes the diagonal contribution.
          xnew[i] += A[i*Ndim + j]*xold[j] * (i != j);
        }
        xnew[i] = (b[i] - xnew[i]) / A[i*Ndim + i];
      }

#pragma omp single
      {
        iters++;
        conv = 0.0;
      }

      //
      // test convergence
      //
#pragma omp for private(tmp) reduction(+:conv)
      for (i = 0; i < Ndim; i++) {
        tmp = xnew[i] - xold[i];
        conv += tmp*tmp;
      }

#ifdef DEBUG
      printf(" conv = %f \n", (float)conv);
#endif

    }
  }
  conv = sqrt((double)conv);
  elapsed_time = omp_get_wtime() - start_time;
  printf(" Convergence = %g with %d iterations and %f seconds\n",
         (float)conv, iters, (float)elapsed_time);

  //
  // test answer by multiplying my computed value of x by
  // the input A matrix and comparing the result with the
  // input b vector.
  //
  err = (TYPE) 0.0;
  chksum = (TYPE) 0.0;

  for (i = 0; i < Ndim; i++) {
    xold[i] = (TYPE) 0.0;
    for (j = 0; j < Ndim; j++)
      xold[i] += A[i*Ndim + j]*xnew[j];
    tmp = xold[i] - b[i];
#ifdef DEBUG
    printf(" i=%d, diff = %f, computed b = %f, input b= %f \n",
           i, (float)tmp, (float)xold[i], (float)b[i]);
#endif
    chksum += xnew[i];
    err += tmp*tmp;
  }
  err = sqrt((double)err);
  // err already holds the residual 2-norm; do not sqrt it a second time
  printf("jacobi solver: err = %f, solution checksum = %f \n",
         (float)err, (float)chksum);

  free(A);
  free(b);
  free(x1);
  free(x2);
}
|
HOG2.h | #ifndef HOG2_H
#define HOG2_H
#include "../../Assertions.h"
#include "../ImageChannel.h"
#include "../ImagePoint.h"
#include "../Derivative.h"
#include "../../math/statistics/Histogram.h"
#include "../Kernel.h"
#include "../KernelFactory.h"
#include "../../geo/Size2.h"
#include "../../geo/Point2.h"
namespace K {
/**
* calculate the histogram-of-gradients at a given
* location using a provided region (size)
*
* // http://stackoverflow.com/questions/32417531/hog-what-is-done-in-the-contrast-normalization-step
* // http://www.geocities.ws/talh_davidc/#cst_extract
*
* - no smoothing beforehand!
* - [0:180] degree region!
* - a 270 degree gradient is the same as a 90 degree gradient -> modulo
*
*/
/**
 * Histogram-of-oriented-gradients (HOG) feature extractor.
 *
 * Calculates the histogram-of-gradients at a given location using a
 * provided region (size).
 *
 * // http://stackoverflow.com/questions/32417531/hog-what-is-done-in-the-contrast-normalization-step
 * // http://www.geocities.ws/talh_davidc/#cst_extract
 *
 * - no smoothing beforehand!
 * - [0:180] degree region!
 * - a 270 degree gradient is the same as a 90 degree gradient -> modulo
 *
 * Fixes in this revision:
 *  - getCellPoints(): vertical half-extent was computed from cellSize.w
 *    instead of cellSize.h (wrong region for non-square cells)
 *  - getCellPoints(): removed leftover std::cout debug output
 *  - getHistogram(): removed the dead `if (1 == 1)` branch
 *  - removed large commented-out dead code
 */
class HOG2 {

private:

	/** magnitude + direction of one image gradient */
	struct HOGGradient {
		float magnitude;
		float direction;		// in radians [0:2pi] 0 = left, pi/2 = up
	};

	/** get half the given value. rounded down! */
	static inline int half(const int i) {return i/2;}

public:

	/** shape of the pixel region a cell histogram is built from */
	enum Pattern {
		RECTANGULAR,
		CIRCULAR,
	};

	/** one pixel within a cell, weighted by a gaussian impact factor */
	struct CellPoint : public ImagePoint {
		float impact;		// from gaussian, to downweight edge-pixels
		CellPoint(const int x, const int y, const float impact) : ImagePoint(x,y), impact(impact) {;}
	};

	/** the (partial) contribution of one gradient to one histogram bin */
	struct Contribution {
		int bin;
		float weight;
		Contribution() : bin(0), weight(0) {;}
		Contribution(int bin, float weight) : bin(bin), weight(weight) {;}
	};

	/** a gradient contributes to (at most) two adjacent bins */
	struct Contributions {
		Contribution c1;
		Contribution c2;
		Contributions(const Contribution c1, const Contribution c2) : c1(c1), c2(c2) {;}
	};

	/** helper class to describe the feature-area based on the HOG settings */
	struct Area {

		// upper left coordinate for the area-of-interest
		int sx;
		int sy;

		// first [upper left] block's center
		int cx;
		int cy;

		// number of x and y blocks within the window
		int wx;
		int wy;

		const Size2i blockStride;

		Area(const Point2i pos, const Size2i blockSize, const Size2i winSize, const Size2i blockStride) : blockStride(blockStride) {

			// upper left coordinate for the area-of-interest
			sx = pos.x - half(winSize.w);
			sy = pos.y - half(winSize.h);

			// first [upper left] block's center
			cx = sx + half(blockSize.w);
			cy = sy + half(blockSize.h);

			// number of x and y blocks within the window
			wx = ((winSize.w - blockSize.w) / blockStride.w) + 1;
			wy = ((winSize.h - blockSize.h) / blockStride.h) + 1;

		}

		/** get the center for the nx-th/ny-th block */
		Point2i getBlockCenter(const int nx, const int ny) const {
			return Point2i(
				cx + nx*blockStride.w,
				cy + ny*blockStride.h
			);
		}

	};

	/** feature vector with helpers for normalization and comparison */
	struct Vector : public std::vector<float> {

		Vector() {
			;
		}

		/** reserve (not resize!) the given number of entries */
		Vector(const size_t size) {
			this->reserve(size);
		}

		/** ensure the vector has a length of 1 */
		void normalize() {
			float length = 0;
			for (float f : *this) {length += f*f;}
			length += 0.2f;	// this constant serves two purposes: prevent length = 0, and prevent near-0 vectors from getting too long
			length = std::sqrt(length);
			for (float& f : *this) {f /= length;}
		}

		/** euclidean length of this vector */
		float length() const {
			float length = 0;
			for (float f : *this) {length += f*f;}
			return std::sqrt(length);
		}

		/** euclidean distance to the given vector [assumes equal size] */
		float distance(const Vector& o) const {
			float sum = 0;
			for (size_t i = 0; i < size(); ++i) {
				const float d = (*this)[i] - o[i];
				sum += d*d;
			}
			return std::sqrt(sum);
		}

	};

private:

	/** the size for each cell [cell is the smallest element] */
	const Size2i cellSize;

	/** the number of bins to use within each cell */
	const int bins;

	/** number of degrees per bin */
	const float degPerBin;

	/** currently we pre-calculate everything at pixel-level [highest accuracy] */
	const int stride = 1;

	/** the size for each block [containing several cells]. must be a multiple of the cellSize */
	const Size2i blockSize;

	/** number of float-values per cell */
	const int valuesPerCell;

	/** number of float-values per block */
	const int valuesPerBlock;

	/** sigma to (slightly) downweight edge pixels */
	const float sigma = 5.0f;

	/** downweight each block's edge pixels [more importance to the center] */
	K::Kernel gauss;

	/** histogram for each cell */
	DataMatrix<Vector> cells;

	/** histogram for each block [multiple cells] */
	DataMatrix<Vector> blocks;

public:

	/** ctor. pre-calculates cell and block histograms for the whole image */
	HOG2(const ImageChannel& img, const Size2i cellSize = Size2i(8,8), const int bins = 9, const Size2i blockSize = Size2i(16,16)) :
		cellSize(cellSize), bins(bins), degPerBin(180.0f / (float)bins), blockSize(blockSize),
		valuesPerCell(bins),
		valuesPerBlock(valuesPerCell*(blockSize.w/cellSize.w)*(blockSize.h/cellSize.h)) {

		if (blockSize.w != blockSize.h) {throw Exception("currently, only square blocks are supported");}

		// perform some sanity checks
		if (blockSize.w % cellSize.w != 0) {throw Exception("blockSize must be a multiple of cellSize");}
		if (blockSize.h % cellSize.h != 0) {throw Exception("blockSize must be a multiple of cellSize");}

		// TODO
		gauss = K::KernelFactory::gauss2D(0.5, cellSize.w);

		// TODO: searching stride? (currently 1px, but requires many [unnecessary] calculations)
		precalc(img);

	}

	/** get the histogram for the cell around [=centered at] (x,y) */
	const Vector& getCell(const int x, const int y) const {
		if ((x < cellSize.w / 2) || (y < cellSize.h / 2)) {throw Exception("block position out of bounds");}
		return cells.getConstRef(x/stride, y/stride);
	}

	/** get the histogram for the block around [=centered at] (x,y) */
	const Vector& getBlock(const int x, const int y) const {
		if ((x < blockSize.w / 2) || (y < blockSize.h / 2)) {throw Exception("window position out of bounds");}
		return blocks.getConstRef(x/stride, y/stride);
	}

	/** get a feature-vector for the given location (x,y) = center and size(w,h) */
	Vector getFeature(const Point2i pos, const Size2i winSize, const Size2i blockStride = Size2i(8,8)) const {

		const int w = winSize.w;
		const int h = winSize.h;

		// sanity checks
		if (w % cellSize.w != 0) {throw Exception("window-width must be a multiple of the cell-width");}
		if (h % cellSize.h != 0) {throw Exception("window-height must be a multiple of the cell-height");}
		if ((winSize.w - blockSize.w) % blockStride.w != 0) {throw Exception("err");}
		if ((winSize.h - blockSize.h) % blockStride.h != 0) {throw Exception("err");}

		// concatenate all pre-normalized block vectors within the window
		const Area a = Area(pos, blockSize, winSize, blockStride);
		const size_t reserve = a.wx*a.wy*valuesPerBlock;
		Vector feature;
		feature.resize(reserve);
		float* data = feature.data();

		for (int y = 0; y < a.wy; ++y) {
			for (int x = 0; x < a.wx; ++x) {
				const Point2i pt = a.getBlockCenter(x, y);
				const Vector& block = getBlock(pt.x, pt.y);
				_assertNot0(block.size(), "invalid number of values in window detected");
				// raw copy is faster than insert() here [pre-sized output]
				memcpy(data, block.data(), block.size()*sizeof(float));
				data += block.size();
			}
		}

		_assertEqual(reserve, feature.size(), "invalid feature size");
		return feature;

	}

public:

	// FOR TESTING

	/** convert from degrees to bin number [float!] */
	inline float degToBin(const float deg) const {
		// sanity check
		if (deg < 0) {throw Exception("degrees out of bounds");}
		if (deg > 360) {throw Exception("degrees out of bounds");}
		return deg / degPerBin;
	}

	/** convert orientation + magnitude to a bin-contribution.
	 *  the magnitude is split linearly between the two adjacent bins;
	 *  bin indices wrap around via modulo [0:180 degree symmetry] */
	Contributions getContribution(const float deg, const float mag) const {
		const float bin = degToBin(deg);
		Contribution c1, c2;
		c1.bin = (int) std::floor(bin);
		c2.bin = (int) std::ceil(bin);
		const float alpha = (bin - (float)c1.bin);
		c1.bin %= bins;
		c2.bin %= bins;
		c1.weight = mag * (1.0f - alpha);
		c2.weight = mag * (alpha);
		return Contributions(c1,c2);
	}

private:

	/** atan2 mapped onto [0:2pi] instead of [-pi:+pi] */
	inline float atan360(const float dy, const float dx) const {
		const float rad = std::atan2(dy, dx);
		return (rad >= 0.0) ? (rad) : (2.0f*(float)M_PI+rad);
	}

	/** perform one-time calculations for fast lookups */
	void precalc(const ImageChannel& img) {
		buildCells(img);
		buildBlocks(img);
	}

	/**
	 * step1)
	 * calculate HOG cells [usually 8x8] around each "pixel" of the input image
	 * TODO: do not calculate for each pixel [++i] but for [i+=stride]
	 * that will be used during the matching phase. this is less accurate but faster
	 */
	void buildCells(const ImageChannel& img) {

		const int w = img.getWidth();
		const int h = img.getHeight();

		// number of blocks to calculate
		const int nx = img.getWidth() / stride;
		const int ny = img.getHeight() / stride;

		// get derivative images (x and y)
		const K::ImageChannel imgX = Derivative::getXcen(img);	// [-1: 0: +1]
		const K::ImageChannel imgY = Derivative::getYcen(img);	// [-1: 0: +1]

		// buffer containing HOG-Block-Histogram for every stride-th-pixel within the image
		cells = DataMatrix<Vector>(nx, ny);

		// list of all pixels that belong to a HOG-window (centered at 0,0)
		const std::vector<CellPoint> region = getCellPoints(Pattern::RECTANGULAR);

		// border to skip [half block size]
		const int w2 = half(cellSize.w);
		const int h2 = half(cellSize.h);

		// build HOG-Histogram for each block centered at (x,y) with stride-th increment
		for (int y = h2; y <= h-h2; y += stride) {
			#pragma omp parallel for
			for (int x = w2; x <= w-w2; x += stride) {
				cells.set(x/stride, y/stride, getHistogram(imgX, imgY, x,y, region));
			}
		}

	}

	/**
	 * step2)
	 * calculate HOG blocks [=several cells] [usually 16x16 (or 2x2 cells)] around each "pixel" of the input image
	 */
	void buildBlocks(const ImageChannel& img) {

		const int w = img.getWidth();
		const int h = img.getHeight();

		// number of windows to calculate
		const int nx = img.getWidth() / stride;
		const int ny = img.getHeight() / stride;

		// buffer containing HOG-Window-Vector for every stride-th-pixel within the image
		blocks = DataMatrix<Vector>(nx, ny);

		const int bsw2 = half(blockSize.w);
		const int bsh2 = half(blockSize.h);

		// build combined/normalized Histogram for each Window centered at (x,y)
		for (int y = bsh2; y <= h-bsh2; y += stride) {
			#pragma omp parallel for
			for (int x = bsw2; x <= w-bsw2; x += stride) {

				// upper left coordinate for the area-of-interest
				const int sx = x - half(blockSize.w);
				const int sy = y - half(blockSize.h);

				// first block's center
				const int cx = sx + half(cellSize.w);
				const int cy = sy + half(cellSize.h);

				// number of cells within each block
				const int cellsX = blockSize.w / cellSize.w;
				const int cellsY = blockSize.h / cellSize.h;

				// build the block
				const size_t reserve = cellsX*cellsY*bins;
				Vector block;
				block.resize(reserve);
				float* data = block.data();

				// fetch each cell that belongs to the block
				for (int y1 = 0; y1 < cellsY; ++y1) {
					for (int x1 = 0; x1 < cellsX; ++x1) {
						const int xx = cx + x1*cellSize.w;
						const int yy = cy + y1*cellSize.h;
						const Vector& cell = getCell(xx, yy);
						// raw copy is faster than insert() here [pre-sized output]
						memcpy(data, cell.data(), cell.size()*sizeof(float));
						data += cell.size();
					}
				}

				_assertEqual(reserve, block.size(), "invalid number of entries in block");

				// normalize the window
				block.normalize();

				// store
				blocks.set(x/stride, y/stride, block);

			}
		}

	}

	/** build the bin-histogram for the cell centered at (x,y) from the given region's gradients */
	Vector getHistogram(const K::ImageChannel& imgX, const K::ImageChannel& imgY, const int x, const int y, const std::vector<CellPoint>& region) const {

		// output histogram
		Vector res;
		res.resize(bins);

		// process each pixel
		for (size_t i = 0; i < region.size(); ++i) {

			const CellPoint p = region[i];

			// point within the image
			const int x1 = x+p.x;
			const int y1 = y+p.y;

			// clamp: silently skip points outside of the image
			if (x1 < 0 || x1 >= imgX.getWidth()) {continue;}
			if (y1 < 0 || y1 >= imgX.getHeight()) {continue;}

			// calculate the centered derivatives
			const auto dx = imgX.get(x1, y1);		// gradient's magnitude in x direction
			const auto dy = imgY.get(x1, y1);		// gradient's magnitude in y direction

			// calculate magnitude and direction of the gradient
			const float mag = std::sqrt( (dx*dx) + (dy*dy) );	// gradient's overall magnitude
			const float dir = atan360(dy, dx);					// the gradient's direction in radians
			const float deg = dir * 180.0f / (float)M_PI;		// in degree

			// calculate bin-contribution [max 2 bins]
			// hereby add the impact factor based on the blur-window
			const Contributions c = getContribution(deg, mag * p.impact);

			// split the contribution between the two adjacent bins
			res[c.c1.bin] += c.c1.weight;
			res[c.c2.bin] += c.c2.weight;

		}

		// done
		return res;

	}

public:

	/** a list of all pixels within a cell. (0,0) = cell-center */
	std::vector<CellPoint> getCellPoints(const Pattern p) const {

		std::vector<CellPoint> region;
		CellPoint dst(0,0,0);
		const ImagePoint center(0,0);

		if (p == CIRCULAR && cellSize.w != cellSize.h) {
			throw Exception("CIRCULAR pattern requires cellSize.w == cellSize.h");
		}

		const int sw2 = half(cellSize.w);
		const int sh2 = half(cellSize.h);	// BUGFIX: was half(cellSize.w)

		// if the cell-size is odd, the end is included -> +1
		const int ew2 = sw2 + ( (cellSize.w % 2 == 1) ? 1 : 0);
		const int eh2 = sh2 + ( (cellSize.h % 2 == 1) ? 1 : 0);

		// process a square region...
		for (dst.y = -sh2; dst.y < +eh2; ++dst.y) {
			for (dst.x = -sw2; dst.x < +ew2; ++dst.x) {

				// impact factor based on sigma and distance from center
				// ensure the impact pattern is symmetric, also for even block sizes [no real center]
				const float dx = (float)dst.x + ( (cellSize.w % 2 == 0) ? 0.5f : 0.0f );
				const float dy = (float)dst.y + ( (cellSize.h % 2 == 0) ? 0.5f : 0.0f );
				dst.impact = std::exp( - ((dx*dx) + (dy*dy)) / (2.0f*sigma*sigma) );

				// pattern?
				if (p == RECTANGULAR) {
					region.push_back(dst);
				} else if (p == CIRCULAR) {
					// ...but use only points within a radius around the center
					// NOTE(review): the radius is the FULL cell width, which admits
					// almost the whole square -- half(cellSize.w) may be intended; verify
					const float d = center.distance(dst);
					if (d <= cellSize.w) {region.push_back(dst);}
				}

			}
		}

		return region;

	}

};
}
#endif // HOG2_H
|
GB_reduce_to_vector.c | //------------------------------------------------------------------------------
// GB_reduce_to_vector: reduce a matrix to a vector using a binary op
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// CALLS: GB_build
// C<M> = accum (C,reduce(A)) where C is n-by-1. Reduces a matrix A or A'
// to a vector.
#include "GB_reduce.h"
#include "GB_build.h"
#include "GB_ek_slice.h"
#include "GB_accum_mask.h"
#ifndef GBCOMPACT
#include "GB_red__include.h"
#endif
#define GB_FREE_WORK \
{ \
GB_FREE_MEMORY (Wfirst_space, ntasks, zsize) ; \
GB_FREE_MEMORY (Wlast_space, ntasks, zsize) ; \
GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice, ntasks) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_MATRIX_FREE (&T) ; \
}
GrB_Info GB_reduce_to_vector // C<M> = accum (C,reduce(A))
(
GrB_Matrix C, // input/output for results, size n-by-1
const GrB_Matrix M, // optional M for C, unused if NULL
const GrB_BinaryOp accum, // optional accum for z=accum(C,T)
const GrB_BinaryOp reduce, // reduce operator for T=reduce(A)
const GB_void *terminal, // for early exit (NULL if none)
const GrB_Matrix A, // first input: matrix A
const GrB_Descriptor desc, // descriptor for C, M, and A
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
// C may be aliased with M and/or A
GB_RETURN_IF_NULL_OR_FAULTY (C) ;
GB_RETURN_IF_FAULTY (M) ;
GB_RETURN_IF_FAULTY (accum) ;
GB_RETURN_IF_NULL_OR_FAULTY (A) ;
GB_RETURN_IF_FAULTY (desc) ;
ASSERT_MATRIX_OK (C, "C input for reduce_BinaryOp", GB0) ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for reduce_BinaryOp", GB0) ;
ASSERT_BINARYOP_OK_OR_NULL (accum, "accum for reduce_BinaryOp", GB0) ;
ASSERT_BINARYOP_OK (reduce, "reduce for reduce_BinaryOp", GB0) ;
ASSERT_MATRIX_OK (A, "A input for reduce_BinaryOp", GB0) ;
ASSERT_DESCRIPTOR_OK_OR_NULL (desc, "desc for reduce_BinaryOp", GB0) ;
GrB_Matrix T = NULL ;
int ntasks = 0 ;
size_t zsize = 0 ;
int64_t *pstart_slice = NULL, *kfirst_slice = NULL, *klast_slice = NULL ;
GB_void *GB_RESTRICT Wfirst_space = NULL ;
GB_void *GB_RESTRICT Wlast_space = NULL ;
// get the descriptor
GB_GET_DESCRIPTOR (info, desc, C_replace, Mask_comp, Mask_struct,
A_transpose, xx1, xx2) ;
// C and M are n-by-1 GrB_Vector objects, typecasted to GrB_Matrix
ASSERT (GB_VECTOR_OK (C)) ;
ASSERT (GB_IMPLIES (M != NULL, GB_VECTOR_OK (M))) ;
// check domains and dimensions for C<M> = accum (C,T)
GrB_Type ttype = reduce->ztype ;
GB_OK (GB_compatible (C->type, C, M, accum, ttype, Context)) ;
// check types of reduce
if (reduce->xtype != reduce->ztype || reduce->ytype != reduce->ztype)
{
// all 3 types of z = reduce (x,y) must be the same. reduce must also
// be associative but there is no way to check this in general.
return (GB_ERROR (GrB_DOMAIN_MISMATCH, (GB_LOG,
"All domains of reduction operator must be identical;\n"
"operator is: [%s] = %s ([%s],[%s])", reduce->ztype->name,
reduce->name, reduce->xtype->name, reduce->ytype->name))) ;
}
// T = reduce (T,A) must be compatible
if (!GB_Type_compatible (A->type, reduce->ztype))
{
return (GB_ERROR (GrB_DOMAIN_MISMATCH, (GB_LOG,
"incompatible type for reduction operator z=%s(x,y):\n"
"input matrix A of type [%s]\n"
"cannot be typecast to reduction operator of type [%s]",
reduce->name, A->type->name, reduce->ztype->name))) ;
}
// check the dimensions
int64_t n = GB_NROWS (C) ;
if (A_transpose)
{
if (n != GB_NCOLS (A))
{
return (GB_ERROR (GrB_DIMENSION_MISMATCH, (GB_LOG,
"w=reduce(A'): length of w is "GBd";\n"
"it must match the number of columns of A, which is "GBd".",
n, GB_NCOLS (A)))) ;
}
}
else
{
if (n != GB_NROWS(A))
{
return (GB_ERROR (GrB_DIMENSION_MISMATCH, (GB_LOG,
"w=reduce(A): length of w is "GBd";\n"
"it must match the number of rows of A, which is "GBd".",
n, GB_NROWS (A)))) ;
}
}
// quick return if an empty mask is complemented
GB_RETURN_IF_QUICK_MASK (C, C_replace, M, Mask_comp) ;
//--------------------------------------------------------------------------
// delete any lingering zombies and assemble any pending tuples
//--------------------------------------------------------------------------
// GB_WAIT (C) ;
GB_WAIT (M) ;
GB_WAIT (A) ;
ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
//--------------------------------------------------------------------------
// handle the CSR/CSC format of A
//--------------------------------------------------------------------------
// the result vector T is in CSC format
if (!(A->is_csc))
{
A_transpose = !A_transpose ;
}
//--------------------------------------------------------------------------
// T = reduce (A) or reduce (A')
//--------------------------------------------------------------------------
// T is created below so that it can be typecasted to a GrB_Vector when
// done: non-hypersparse n-by-1 matrix in CSC format.
// T = reduce_to_vector (A) or reduce_to_vector (A'), which is T = sum (A')
// or sum (A), in MATLAB notation, except where where 'sum' is any
// associative operator.
// By default, T(i) = op (A (i,:)) is a vector whose length is the same as
// the number of rows of A. T(i) is the reduction of all entries in the
// ith row of A. If A_transpose is true, the T is computed as if A were
// transposed first, and thus its length is equal to the number of vectors
// of the input matrix A. The use of A_transpose is the opposite of
// MATLAB, since sum(A) in MATLAB sums up the columns of A, and sum(A')
// sums up the rows of A..
// T is an n-by-1 GrB_Matrix that represents the vector. It is computed
// as a GrB_Matrix so it can be passed to GB_accum_mask without
// typecasting.
ASSERT (n == ((A_transpose) ? A->vdim : A->vlen)) ;
//--------------------------------------------------------------------------
// scalar workspace
//--------------------------------------------------------------------------
size_t asize = A->type->size ;
GB_Type_code acode = A->type->code ;
const int64_t *GB_RESTRICT Ai = A->i ;
const GB_void *GB_RESTRICT Ax = A->x ;
int64_t anvec = A->nvec ;
int64_t anz = GB_NNZ (A) ;
zsize = reduce->ztype->size ;
GB_Type_code zcode = reduce->ztype->code ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// T = reduce(A) or reduce(A')
//--------------------------------------------------------------------------
GxB_binary_function freduce = reduce->function ;
GB_cast_function cast_A_to_Z = GB_cast_factory (zcode, acode) ;
bool nocasting = (A->type == reduce->ztype) ;
if (A_transpose)
{
//----------------------------------------------------------------------
// T = reduce(A'), where T(j) = reduce (A (:,j))
//----------------------------------------------------------------------
// Each vector A(:,j) is reduced to the scalar T(j)
//----------------------------------------------------------------------
// allocate T, including T->p, T->i, and T->x. T is not hypersparse.
//----------------------------------------------------------------------
// since T is a GrB_Vector, it is CSC and not hypersparse
GB_CREATE (&T, ttype, n, 1, GB_Ap_calloc, true,
GB_FORCE_NONHYPER, GB_HYPER_DEFAULT, 1, anvec, true, Context) ;
GB_OK (info) ;
ASSERT (GB_VECTOR_OK (T)) ;
T->p [0] = 0 ;
T->p [1] = anvec ;
int64_t *GB_RESTRICT Ti = T->i ;
GB_void *GB_RESTRICT Tx = T->x ;
T->nvec_nonempty = (anvec > 0) ? 1 : 0 ;
T->magic = GB_MAGIC ;
//----------------------------------------------------------------------
// symbolic phase
//----------------------------------------------------------------------
// Construct the pattern of T. The kth vector in A creates one entry
// in T, but it is flagged as a zombie if it is empty.
int64_t nzombies = 0 ;
const int64_t *GB_RESTRICT Ah = A->h ;
const int64_t *GB_RESTRICT Ap = A->p ;
int nth = GB_nthreads (anvec, chunk, nthreads_max) ;
int64_t k ;
#pragma omp parallel for num_threads(nth) schedule(static) \
reduction(+:nzombies)
for (k = 0 ; k < anvec ; k++)
{
// if A(:,j) is empty, then the entry in T becomes a zombie
int64_t j = (Ah == NULL) ? k : Ah [k] ;
int64_t jnz = Ap [k+1] - Ap [k] ;
if (jnz == 0)
{
// A(:,j) is empty: T(j) is a zombie
Ti [k] = GB_FLIP (j) ;
nzombies++ ;
}
else
{
// A(:,j) has at least one entry; T(j) is live
Ti [k] = j ;
}
}
if (A->nvec_nonempty < 0)
{
A->nvec_nonempty = anvec - nzombies ;
}
ASSERT (A->nvec_nonempty == (anvec - nzombies)) ;
T->nzombies = nzombies ;
//----------------------------------------------------------------------
// slice the entries of A for the numeric phase
//----------------------------------------------------------------------
// Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1
// and vectors kfirst_slice [tid] to klast_slice [tid]. The first and
// last vectors may be shared with prior slices and subsequent slices.
ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
ntasks = GB_IMIN (ntasks, anz) ;
ntasks = GB_IMAX (ntasks, 1) ;
GB_MALLOC_MEMORY (Wfirst_space, ntasks, zsize) ;
GB_MALLOC_MEMORY (Wlast_space, ntasks, zsize) ;
if (Wfirst_space == NULL || Wlast_space == NULL ||
!GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, A, ntasks))
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// numeric phase: launch the switch factory
//----------------------------------------------------------------------
bool done = false ;
#ifndef GBCOMPACT
#define GB_red(opname,aname) GB_red_eachvec_ ## opname ## aname
#define GB_RED_WORKER(opname,aname,atype) \
{ \
info = GB_red (opname, aname) ((atype *) Tx, A, \
kfirst_slice, klast_slice, pstart_slice, \
Wfirst_space, Wlast_space, ntasks, nthreads) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
if (nocasting)
{
// controlled by opcode and typecode. No typecasting is done.
GB_Opcode opcode = reduce->opcode ;
GB_Type_code typecode = acode ;
ASSERT (typecode <= GB_UDT_code) ;
#include "GB_red_factory.c"
}
#endif
//----------------------------------------------------------------------
// generic worker: with typecasting
//----------------------------------------------------------------------
if (!done)
{
GB_BURBLE_MATRIX (A, "generic ") ;
#define GB_ATYPE GB_void
#define GB_CTYPE GB_void
// ztype s ;
#define GB_SCALAR(s) \
GB_void s [GB_VLA(zsize)]
// ztype s = (ztype) Ax [p], with typecast
#define GB_CAST_ARRAY_TO_SCALAR(s,Ax,p) \
cast_A_to_Z (s, Ax +((p)*asize), zsize) ; \
// s += (ztype) Ax [p], with typecast
#define GB_ADD_CAST_ARRAY_TO_SCALAR(s, Ax, p) \
GB_void awork [GB_VLA(zsize)] ; \
cast_A_to_Z (awork, Ax +((p)*asize), zsize) ; \
freduce (s, s, awork) ;
// W [k] = s, no typecast
#define GB_COPY_SCALAR_TO_ARRAY(W,k,s) \
memcpy (W +((k)*zsize), s, zsize) ;
// W [k] = S [i], no typecast
#define GB_COPY_ARRAY_TO_ARRAY(W,k,S,i) \
memcpy (W +((k)*zsize), S +((i)*zsize), zsize) ;
// W [k] += S [i], no typecast
#define GB_ADD_ARRAY_TO_ARRAY(W,k,S,i) \
freduce (W +((k)*zsize), W +((k)*zsize), S +((i)*zsize)) ;
// W [k] += s, no typecast
#define GB_ADD_SCALAR_TO_ARRAY(W,k,s) \
freduce (W +((k)*zsize), W +((k)*zsize), s) ;
// break if terminal value reached
#define GB_BREAK_IF_TERMINAL(t) \
if (terminal != NULL) \
{ \
if (memcmp (t, terminal, zsize) == 0) break ; \
}
#include "GB_reduce_each_vector.c"
}
//----------------------------------------------------------------------
// wrapup: delete any zombies
//----------------------------------------------------------------------
ASSERT_MATRIX_OK (T, "T before wait", GB_FLIP (GB0)) ;
if (nzombies > 0)
{
ASSERT (GB_VECTOR_OK (T)) ;
ASSERT (!GB_PENDING (T)) ;
ASSERT (GB_ZOMBIES (T)) ;
GB_OK (GB_wait (T, Context)) ;
}
ASSERT_MATRIX_OK (T, "T output = reduce_each_vector (A)", GB0) ;
}
else
{
//----------------------------------------------------------------------
// T = reduce(A), where T(i) = reduce (A (i,:))
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// select the method
//----------------------------------------------------------------------
// When A_transpose is false (after flipping it to account for the
// CSR/CSC format), n is A->vlen, the vector length of A. This is
// the number of rows of a CSC matrix, or the # of columns of a CSR
// matrix. The matrix A itself requires O(vdim+anz) memory if
// non-hypersparse and O(anz) if hypersparse. This does not depend on
// A->vlen. So if the vector length is really huge (when anz << n),
// the bucket method would fail. Thus, the qsort method, below, is
// used when A is very sparse.
if (GB_CHOOSE_QSORT_INSTEAD_OF_BUCKET (anz, n))
{
//------------------------------------------------------------------
// qsort method
//------------------------------------------------------------------
// memory usage is O(anz) and time is O(anz*log(anz)). This is
// more efficient than the bucket method, below, when A is very
// hypersparse. The time and memory complexity does not depend
// on n.
// since T is a GrB_Vector, it is not hypersparse
GB_NEW (&T, ttype, n, 1, GB_Ap_null, true, GB_FORCE_NONHYPER,
GB_HYPER_DEFAULT, 1, Context) ;
GB_OK (info) ;
// GB_build treats Ai and Ax as read-only; they must not be modified
GB_OK (GB_build
(
T, // construct result in the T vector
(GrB_Index *) Ai, // indices inside the vector
NULL, // vector indices (none)
Ax, // values, of size anz
anz, // number of tuples
reduce, // reduction operator
acode, // type code of the Ax array
false, // the input is a vector
false, // indices do not need to be checked
Context
)) ;
ASSERT (T->nvec_nonempty == GB_nvec_nonempty (T, NULL)) ;
}
else
{
//------------------------------------------------------------------
// bucket method
//------------------------------------------------------------------
// Determine number of threads to use for constructing the buckets.
// Each thread requires O(n) workspace, so this method does not
// scale well when there are many threads compared to anz. Total
// workspace is O(n*ntasks), so limit the # of threads used so that
// at most anz workspace is used. Each thread takes a single task.
ntasks = (n > 0) ? (anz / n) : 1 ;
ntasks = GB_IMIN (ntasks, nthreads) ;
ntasks = GB_IMAX (ntasks, 1) ;
int nth = ntasks ; // one thread per task
//------------------------------------------------------------------
// slice the entries for each thread
//------------------------------------------------------------------
// Thread tid does entries pstart_slice [tid] to
// pstart_slice [tid+1]-1. No need to compute kfirst or klast.
GB_MALLOC_MEMORY (pstart_slice, ntasks+1, sizeof (int64_t)) ;
if (pstart_slice == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
GB_eslice (pstart_slice, anz, ntasks) ;
//------------------------------------------------------------------
// sum across each index: T(i) = reduce (A (i,:))
//------------------------------------------------------------------
// Early exit cannot be exploited; ignore the terminal value.
#undef GB_red
#define GB_red(opname,aname) GB_red_eachindex_ ## opname ## aname
#undef GB_RED_WORKER
#define GB_RED_WORKER(opname,aname,atype) \
{ \
info = GB_red (opname, aname) (&T, ttype, A, pstart_slice, \
ntasks, nthreads, Context) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
bool done = false ;
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
#ifndef GBCOMPACT
if (nocasting)
{
// controlled by opcode and typecode. No typecasting
GB_Opcode opcode = reduce->opcode ;
GB_Type_code typecode = acode ;
ASSERT (typecode <= GB_UDT_code) ;
#include "GB_red_factory.c"
if (! (info == GrB_SUCCESS || info == GrB_NO_VALUE))
{
// out of memory
GB_FREE_ALL ;
return (info) ;
}
}
#endif
//------------------------------------------------------------------
// generic worker
//------------------------------------------------------------------
if (!done)
{
// if this fails, the template frees all workspace with the
// GB_FREE_ALL macro, defined above.
GB_BURBLE_MATRIX (A, "generic ") ;
#include "GB_reduce_each_index.c"
}
}
ASSERT_MATRIX_OK (T, "T output for T = reduce_each_index (A)", GB0) ;
}
//--------------------------------------------------------------------------
// C<M> = accum (C,T): accumulate the results into C via the mask
//--------------------------------------------------------------------------
GB_FREE_WORK ;
return (GB_accum_mask (C, M, NULL, accum, &T, C_replace, Mask_comp,
Mask_struct, Context)) ;
}
|
GB_binop__lxor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_int8)
// A*D function (colscale): GB (_AxD__lxor_int8)
// D*A function (rowscale): GB (_DxB__lxor_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_int8)
// C=scalar+B GB (_bind1st__lxor_int8)
// C=scalar+B' GB (_bind1st_tran__lxor_int8)
// C=A+scalar GB (_bind2nd__lxor_int8)
// C=A'+scalar GB (_bind2nd_tran__lxor_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_INT8 || GxB_NO_LXOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; op is LXOR on int8.
// The loop body is supplied by the included template; GB_DISABLE is set
// when this operator/type combination is compiled out (see top of file).
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense; B is sliced by B_ek_slicing into B_ntasks tasks
// run on B_nthreads threads.  Body comes from the included template.
GrB_Info GB (_Cdense_accumB__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed as a void pointer of
// type int8_t).  Body comes from the included template.
// Fix: the generated code carried two `return (GrB_SUCCESS)` statements,
// one inside the scalar block and an unreachable one after it; keep a
// single return after the block, matching the sibling _Cdense_accumB.
GrB_Info GB (_Cdense_accumb__lxor_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: multiply each column j of A by the diagonal
// entry D(j,j).  A is sliced by A_ek_slicing; template supplies the loop.
GrB_Info GB (_AxD__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// output values are written directly into C->x
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: multiply each row i of B by the diagonal entry
// D(i,i).  Template supplies the loop.
GrB_Info GB (_DxB__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// output values are written directly into C->x
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (set union of patterns), op LXOR int8.
// TaskList/C_ntasks describe the precomputed parallel task schedule;
// C_to_M/C_to_A/C_to_B map vectors of C to vectors of M, A, and B.
GrB_Info GB (_AaddB__lxor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection of patterns),
// op LXOR int8.  The task schedule is precomputed by the caller.
GrB_Info GB (_AemultB_01__lxor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for LXOR (commutative, see macro above), so only the
// unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
// M is sliced by M_ek_slicing; template supplies the loop.
GrB_Info GB (_AemultB_03__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
// ewise_method selects the masking variant inside the template.
GrB_Info GB (_AemultB_bitmap__lxor_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = lxor (x, Bx [p]) for all anz entries, binding the scalar x as
// the first operand.  Entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__lxor_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap (GBB is 1 when Bb is NULL)
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = ((x != 0) != (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = lxor (Ax [p], y) for all anz entries, binding the scalar y as
// the second operand.  Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__lxor_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap (GBB is 1 when Ab is NULL)
if (!GBB (Ab, p)) continue ;
int8_t aij = Ax [p] ;
Cx [p] = ((aij != 0) != (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
// C = op (x, A'): transpose A while applying cij = lxor (x, aij), with the
// scalar x bound as the first operand (see GB_CAST_OP above).
GrB_Info GB (_bind1st_tran__lxor_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): this trailing redefinition is emitted by the code
// generator; at the end of the function body it has no effect here.
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// C = op (A', y): transpose A while applying cij = lxor (aij, y), with the
// scalar y bound as the second operand (see GB_CAST_OP above).
GrB_Info GB (_bind2nd_tran__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
actividad4.c |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "ctimer.h"
#include <omp.h>
int main( int argc, char *argv[] ) {
  int v, i;

  /* Parse command line: number of vectors, optional maximum vector size. */
  if( argc<2 ) {
    printf("Usage: %s n_vectores [tam_max] \n",argv[0]);
    return 1;
  }
  int n_vectores;
  sscanf(argv[1],"%d",&n_vectores);
  int tam_max = 10000;
  if( argc>2 ) {
    sscanf(argv[2],"%d",&tam_max);
  }

  /* Build n_vectores vectors of random size (< tam_max; may be 0), filled
   * with random values in [-tam[v], tam[v]). */
  double **M = (double **) malloc (n_vectores*sizeof(double*));
  int *tam = (int *) malloc (n_vectores*sizeof(int));
  for( v=0; v<n_vectores; v++ ) {
    tam[v] = rand()%tam_max;
    M[v] = (double *) malloc (tam[v]*sizeof(double));
    for( i = 0; i<tam[v]; i++ ) {
      M[v][i] = (double) rand()/RAND_MAX * 2.0*tam[v] - 1.0*tam[v];
    }
  }

  double elapsed, ucpu, scpu;
  ctimer(&elapsed,&ucpu,&scpu);
  double *media = (double *) malloc (n_vectores*sizeof(double));
  double *desvt = (double *) malloc (n_vectores*sizeof(double));
  /*********************************************************/
  /* BEGINNING OF THE CODE TO INCLUDE */
  #pragma omp parallel
  {
    #pragma omp single
    {
      /* One task per vector: mean of M[v].
       * Bug fixes vs. the original: the accumulator must use `+=` (it was
       * overwritten each iteration), and an empty vector must not divide
       * by zero. */
      for( v=0; v<n_vectores; v++ ) {
        #pragma omp task firstprivate(v) private(i) shared(M,tam,media)
        {
          double parc = 0.0;  /* task-local accumulator */
          for( i = 0; i<tam[v]; i++ ) {
            parc += M[v][i];
          }
          media[v] = (tam[v] > 0) ? parc/tam[v] : 0.0;
        }
      }
      /* All means must be complete before the deviation tasks read them. */
      #pragma omp taskwait
      /* One task per vector: standard deviation of M[v].  The original
       * created a single task around the whole loop (no parallelism, and
       * `v` was read before being set); now each vector gets its own task
       * and the squared deviations are accumulated, not overwritten. */
      for( v=0; v<n_vectores; v++ ) {
        #pragma omp task firstprivate(v) private(i) shared(M,tam,media,desvt)
        {
          double parc = 0.0;
          for( i = 0; i<tam[v]; i++ ) {
            parc += pow((M[v][i]-media[v]),2);
          }
          desvt[v] = (tam[v] > 0) ? sqrt(parc/tam[v]) : 0.0;
        }
      }
    }
  }
  /* END OF THE CODE TO INCLUDE */
  /*********************************************************/
  ctimer(&elapsed,&ucpu,&scpu);
  printf("Tiempo = %f segundos\n",elapsed);

  /* Persist results; check that the output file actually opened. */
  FILE *fp;
  fp = fopen("media_desvt","wb");
  if( fp != NULL ) {
    fwrite (media, sizeof(double), n_vectores, fp);
    fwrite (desvt, sizeof(double), n_vectores, fp);
    fclose(fp);
  } else {
    fprintf(stderr,"Error: cannot open output file media_desvt\n");
  }

  free(desvt);
  free(media);
  free(tam);
  for( i = 0; i<n_vectores; i++ ) {
    free(M[i]);
  }
  free(M);
  return 0;
}
|
gbdt.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_
#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/threading.h>
#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>
#include "score_updater.hpp"
namespace LightGBM {
using json11::Json;
/*!
* \brief GBDT algorithm implementation. including Training, prediction, bagging.
*/
class GBDT : public GBDTBase {
public:
/*!
* \brief Constructor
*/
GBDT();
/*!
* \brief Destructor
*/
~GBDT();
/*!
* \brief Initialization logic
* \param gbdt_config Config for boosting
* \param train_data Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void Init(const Config* gbdt_config, const Dataset* train_data,
const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Merge model from other boosting object; the other model's trees
*        are inserted in front of this object's trees
* \param other Another boosting object; must actually be a GBDT
*/
void MergeFrom(const Boosting* other) override {
  // The caller guarantees `other` is a GBDT.  static_cast keeps the
  // downcast compile-checked against the class hierarchy, whereas the
  // previous reinterpret_cast would silently accept any pointer type.
  auto other_gbdt = static_cast<const GBDT*>(other);
  // Move the current trees aside; the merged list is rebuilt below.
  auto original_models = std::move(models_);
  models_ = std::vector<std::unique_ptr<Tree>>();
  models_.reserve(other_gbdt->models_.size() + original_models.size());
  // Deep-copy the other model's trees first so they end up in front.
  for (const auto& tree : other_gbdt->models_) {
    models_.push_back(std::unique_ptr<Tree>(new Tree(*tree)));
  }
  num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  // Then re-append deep copies of this object's original trees.
  for (const auto& tree : original_models) {
    models_.push_back(std::unique_ptr<Tree>(new Tree(*tree)));
  }
  num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
}
// Shuffle the iterations in [start_iter, end_iter) into a random order,
// rebuilding models_ from deep copies of the original trees; iterations
// outside the range keep their positions.  end_iter <= 0 means "to the end".
void ShuffleModels(int start_iter, int end_iter) override {
int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
// clamp the requested range to [0, total_iter]
start_iter = std::max(0, start_iter);
if (end_iter <= 0) {
end_iter = total_iter;
}
end_iter = std::min(total_iter, end_iter);
auto original_models = std::move(models_);
// identity permutation over all iterations
std::vector<int> indices(total_iter);
for (int i = 0; i < total_iter; ++i) {
indices[i] = i;
}
// fixed seed: the shuffle is deterministic across calls
Random tmp_rand(17);
for (int i = start_iter; i < end_iter - 1; ++i) {
// NOTE(review): j is drawn from (i, end_iter), never i itself, so an
// iteration can never stay in place — this is not a uniform
// Fisher-Yates shuffle; confirm whether that bias is intended.
int j = tmp_rand.NextShort(i + 1, end_iter);
std::swap(indices[i], indices[j]);
}
// rebuild models_ in permuted order, deep-copying each tree
models_ = std::vector<std::unique_ptr<Tree>>();
for (int i = 0; i < total_iter; ++i) {
for (int j = 0; j < num_tree_per_iteration_; ++j) {
int tree_idx = indices[i] * num_tree_per_iteration_ + j;
auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
models_.push_back(std::move(new_tree));
}
}
}
/*!
* \brief Reset the training data
* \param train_data New Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Reset Boosting Config
* \param gbdt_config Config for boosting
*/
void ResetConfig(const Config* gbdt_config) override;
/*!
* \brief Adding a validation dataset
* \param valid_data Validation dataset
* \param valid_metrics Metrics for validation dataset
*/
void AddValidDataset(const Dataset* valid_data,
const std::vector<const Metric*>& valid_metrics) override;
/*!
* \brief Perform a full training procedure
* \param snapshot_freq frequency of snapshot
* \param model_output_path path of model file
*/
void Train(int snapshot_freq, const std::string& model_output_path) override;
void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;
/*!
* \brief Training logic
* \param gradients nullptr for using default objective, otherwise use self-defined boosting
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
/*!
* \brief Rollback one iteration
*/
void RollbackOneIter() override;
/*!
* \brief Get current iteration
* \return Number of completed boosting iterations: total trees divided by
*         the number of trees trained per iteration
*/
int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }
/*!
* \brief Can use early stopping for prediction or not
* \return True if cannot use early stopping for prediction
*/
bool NeedAccuratePrediction() const override {
  // Without an objective function we cannot know, so conservatively
  // require accurate prediction; otherwise defer to the objective.
  return objective_function_ == nullptr ||
         objective_function_->NeedAccuratePrediction();
}
/*!
* \brief Get evaluation result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return evaluation result
*/
std::vector<double> GetEvalAt(int data_idx) const override;
/*!
* \brief Get current training score
* \param out_len length of returned score
* \return training score
*/
const double* GetTrainingScore(int64_t* out_len) override;
/*!
* \brief Get size of prediction at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return The size of prediction
*/
int64_t GetNumPredictAt(int data_idx) const override {
// data_idx 0 selects the training data; k > 0 selects the (k-1)-th
// validation dataset
CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
data_size_t num_data = train_data_->num_data();
if (data_idx > 0) {
num_data = valid_score_updater_[data_idx - 1]->num_data();
}
// one prediction per (row, class) pair
return num_data * num_class_;
}
/*!
* \brief Get prediction result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \param result used to store prediction result, should allocate memory before call this function
* \param out_len length of returned score
*/
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;
/*!
* \brief Get number of prediction values for one data row
* \param start_iteration Start index of the iteration to predict
* \param num_iteration Number of used iterations (<= 0 means all remaining)
* \param is_pred_leaf True if predicting leaf index
* \param is_pred_contrib True if predicting feature contribution
* \return number of prediction values per row
*/
inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
  if (is_pred_leaf) {
    // one leaf index per class per used iteration
    const int max_iteration = GetCurrentIteration();
    const int begin = std::min(std::max(start_iteration, 0), max_iteration);
    const int remaining = max_iteration - begin;
    const int used = (num_iteration > 0) ? std::min(remaining, num_iteration) : remaining;
    return num_class_ * used;
  }
  if (is_pred_contrib) {
    // +1 for 0-based indexing, +1 for baseline
    return num_tree_per_iteration_ * (max_feature_idx_ + 2);
  }
  // plain prediction: one value per class
  return num_class_;
}
void PredictRaw(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void Predict(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void PredictLeafIndex(const double* features, double* output) const override;
void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;
void PredictContrib(const double* features, double* output) const override;
void PredictContribByMap(const std::unordered_map<int, double>& features,
std::vector<std::unordered_map<int, double>>* output) const override;
/*!
* \brief Dump model to json format string
* \param start_iteration The model will be saved start from
* \param num_iteration Number of iterations that want to dump, -1 means dump all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Json format string of model
*/
std::string DumpModel(int start_iteration, int num_iteration,
int feature_importance_type) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \return if-else format codes of model
*/
std::string ModelToIfElse(int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToIfElse(int num_iteration, const char* filename) const override;
/*!
* \brief Save model to file
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToFile(int start_iteration, int num_iterations,
int feature_importance_type,
const char* filename) const override;
/*!
* \brief Save model to string
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Non-empty string if succeeded
*/
std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override;
/*!
* \brief Restore from a serialized buffer
*/
bool LoadModelFromString(const char* buffer, size_t len) override;
/*!
* \brief Calculate feature importances
* \param num_iteration Number of model that want to use for feature importance, -1 means use all
* \param importance_type: 0 for split, 1 for gain
* \return vector of feature_importance
*/
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;
/*!
* \brief Calculate upper bound value
* \return upper bound value
*/
double GetUpperBoundValue() const override;
/*!
* \brief Calculate lower bound value
* \return lower bound value
*/
double GetLowerBoundValue() const override;
/*!
* \brief Get max feature index of this model
* \return Max feature index of this model
*/
inline int MaxFeatureIdx() const override { return max_feature_idx_; }
/*!
* \brief Get feature names of this model
* \return Feature names of this model (returned by value, i.e. a copy)
*/
inline std::vector<std::string> FeatureNames() const override { return feature_names_; }
/*!
* \brief Get index of label column
* \return index of label column
*/
inline int LabelIdx() const override { return label_idx_; }
/*!
* \brief Get number of weak sub-models (total trees over all iterations)
* \return Number of weak sub-models
*/
inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }
/*!
* \brief Get number of trees per iteration
* \return number of trees per iteration
*/
inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }
/*!
* \brief Get number of classes
* \return Number of classes
*/
inline int NumberOfClasses() const override { return num_class_; }
// Set up the [start_iteration_for_pred_, +num_iteration_for_pred_) window
// used by the Predict* methods; both inputs are clamped to the number of
// available iterations.  num_iteration <= 0 means "all remaining".
inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override {
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
// clamp the start to [0, total iterations]
start_iteration = std::max(start_iteration, 0);
start_iteration = std::min(start_iteration, num_iteration_for_pred_);
if (num_iteration > 0) {
num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration);
} else {
num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration;
}
start_iteration_for_pred_ = start_iteration;
if (is_pred_contrib) {
// contribution prediction needs up-to-date max depth for every tree
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
models_[i]->RecomputeMaxDepth();
}
}
}
inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
return models_[tree_idx]->LeafOutput(leaf_idx);
}
inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
models_[tree_idx]->SetLeafOutput(leaf_idx, val);
}
  /*!
  * \brief Get Type name of this boosting object
  * \return Literal name of the weak-learner type ("tree")
  */
  const char* SubModelName() const override { return "tree"; }
  // Whether this model was trained with linear models in the tree leaves.
  bool IsLinear() const override { return linear_tree_; }
protected:
virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) {
if (objective_function != nullptr) {
return objective_function->IsConstantHessian();
} else {
return false;
}
}
/*!
* \brief Print eval result and check early stopping
*/
virtual bool EvalAndCheckEarlyStopping();
/*!
* \brief reset config for bagging
*/
void ResetBaggingConfig(const Config* config, bool is_change_dataset);
/*!
* \brief Implement bagging logic
* \param iter Current iteration
*/
virtual void Bagging(int iter);
virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
/*!
* \brief calculate the object function
*/
virtual void Boosting();
/*!
* \brief updating score after tree was trained
* \param tree Trained tree of this iteration
* \param cur_tree_id Current tree for multiclass training
*/
virtual void UpdateScore(const Tree* tree, const int cur_tree_id);
/*!
* \brief eval results for one metric
*/
virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;
/*!
* \brief Print metric result of current iteration
* \param iter Current iteration
* \return best_msg if met early_stopping
*/
std::string OutputMetric(int iter);
double BoostFromAverage(int class_id, bool update_scorer);
/*! \brief current iteration */
int iter_;
/*! \brief Pointer to training data */
const Dataset* train_data_;
/*! \brief Config of gbdt */
std::unique_ptr<Config> config_;
/*! \brief Tree learner, will use this class to learn trees */
std::unique_ptr<TreeLearner> tree_learner_;
/*! \brief Objective function */
const ObjectiveFunction* objective_function_;
/*! \brief Store and update training data's score */
std::unique_ptr<ScoreUpdater> train_score_updater_;
/*! \brief Metrics for training data */
std::vector<const Metric*> training_metrics_;
/*! \brief Store and update validation data's scores */
std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
/*! \brief Metric for validation data */
std::vector<std::vector<const Metric*>> valid_metrics_;
/*! \brief Number of rounds for early stopping */
int early_stopping_round_;
/*! \brief Only use first metric for early stopping */
bool es_first_metric_only_;
/*! \brief Best iteration(s) for early stopping */
std::vector<std::vector<int>> best_iter_;
/*! \brief Best score(s) for early stopping */
std::vector<std::vector<double>> best_score_;
/*! \brief output message of best iteration */
std::vector<std::vector<std::string>> best_msg_;
/*! \brief Trained models(trees) */
std::vector<std::unique_ptr<Tree>> models_;
/*! \brief Max feature index of training data*/
int max_feature_idx_;
#ifdef USE_CUDA
/*! \brief First order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> hessians_;
#else
/*! \brief First order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_;
#endif
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_;
/*! \brief Number of in-bag data */
data_size_t bag_data_cnt_;
/*! \brief Number of training data */
data_size_t num_data_;
/*! \brief Number of trees per iterations */
int num_tree_per_iteration_;
/*! \brief Number of class */
int num_class_;
/*! \brief Index of label column */
data_size_t label_idx_;
/*! \brief number of used model */
int num_iteration_for_pred_;
/*! \brief Start iteration of used model */
int start_iteration_for_pred_;
/*! \brief Shrinkage rate for one iteration */
double shrinkage_rate_;
/*! \brief Number of loaded initial models */
int num_init_iteration_;
/*! \brief Feature names */
std::vector<std::string> feature_names_;
std::vector<std::string> feature_infos_;
std::unique_ptr<Dataset> tmp_subset_;
bool is_use_subset_;
std::vector<bool> class_need_train_;
bool is_constant_hessian_;
std::unique_ptr<ObjectiveFunction> loaded_objective_;
bool average_output_;
bool need_re_bagging_;
bool balanced_bagging_;
std::string loaded_parameter_;
std::vector<int8_t> monotone_constraints_;
const int bagging_rand_block_ = 1024;
std::vector<Random> bagging_rands_;
ParallelPartitionRunner<data_size_t, false> bagging_runner_;
Json forced_splits_json_;
bool linear_tree_;
};
} // namespace LightGBM
#endif // LightGBM_BOOSTING_GBDT_H_
|
graphProcessing.h | /*
FINISH TEMPFLATPATH CODE
AS WRITTEN, THESE FUNCTIONS WILL ONLY WORK WITH GRAPHS THAT ARE IMPLEMENTED IN THE boost NAMESPACE.
*/
#define LP 1
#define PERFDEBUG 0
//#define FULLDEBUG 1
#ifdef _OPENMP
#include <omp.h>
#endif
#include <boost/regex.hpp>
#include <iostream>
#include <fstream>
#include <string>
#include <assert.h>
#include <staticCFG.h>
/**
*@file graphProcessing.h
*Brief Overview of Algorithm:
***********************
*Current Implementation
***********************
*This implementation uses BOOSTs graph structure to analyze the paths of the graph
*The path analyzer sends the user paths to be evaluated by the "analyzePath" function that is user defined
**************************
*Further Improvements: TODO
**************************
@todo utilize BOOST visitors to take advantage of the BOOST graph structures abilities
***************
*Contact Info
***************
*Finally, blame can be assigned to and questions can be forwarded to the author, though response is not guaranteed
*if I'm still at Lawrence
*hoffman34 AT llnl DOT gov
*@author Michael Hoffman
*/
#include <boost/graph/adjacency_list.hpp>
#include <boost/bind.hpp>
#include <boost/foreach.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/graph/graphviz.hpp>
#include <boost/graph/dominator_tree.hpp>
#include <boost/graph/reverse_graph.hpp>
#include <boost/graph/transpose_graph.hpp>
#include <boost/algorithm/string.hpp>
#include <vector>
#include <algorithm>
#include <utility>
#include <iostream>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/time.h>
/**
 * Abstract path analyzer over a BOOST graph (CFG). Users subclass it,
 * implement analyzePath(), and call constructPathAnalyzer() to have every
 * path (with all loop permutations) delivered to analyzePath().
 * Vertices and edges are mirrored to integer ids via the *intmap members.
 */
template <class CFG>
class SgGraphTraversal
{
    public:
    typedef typename boost::graph_traits<CFG>::vertex_descriptor Vertex;
    typedef typename boost::graph_traits<CFG>:: edge_descriptor Edge;
    void constructPathAnalyzer(CFG* g, bool unbounded=false, Vertex end=0, Vertex begin=0, bool ns = true);
    // User-supplied callback; invoked once per discovered path.
    virtual void analyzePath(std::vector<Vertex>& pth) = 0;
    std::vector<int> getInEdges(int& node, CFG*& g);
    std::vector<int> getOutEdges(int& node, CFG*& g);
    int getTarget(int& n, CFG*& g);
    int getSource(int& n, CFG*& g);
    // Bidirectional integer<->descriptor mirrors for vertices and edges.
    std::map<Vertex, int> vertintmap;
    std::map<Edge, int> edgeintmap;
    std::map<int, Vertex> intvertmap;
    std::map<int, Edge> intedgemap;
    SgGraphTraversal();
    virtual ~SgGraphTraversal();
    SgGraphTraversal( SgGraphTraversal &);
    SgGraphTraversal &operator=( SgGraphTraversal &);
    int pathnum;
    void firstPrepGraph(CFG*& g);
    private:
    // Diagnostic counters used during traversal.
    int normals;
    int abnormals;
    bool needssafety;
    int recursed;
    int checkedfound;
   // typedef typename boost::graph_traits<CFG>::vertex_descriptor Vertex;
   // typedef typename boost::graph_traits<CFG>:: edge_descriptor Edge;
   // std::vector<int> getInEdges(int& node, CFG*& g);
   // std::vector<int> getOutEdges(int& node, CFG*& g);
    void prepareGraph(CFG*& g);
    void findClosuresAndMarkersAndEnumerate(CFG*& g);
   // void constructPathAnalyzer(CFG* g, bool unbounded=false, Vertex end=0, Vertex begin=0, bool ns = true);
   // virtual void analyzePath(std::vector<Vertex>& pth) = 0;
   // void firstPrepGraph(CFG*& g);
    int stoppedpaths;
    // Core traversal engines: bfsTraversePath collects loop-free paths and
    // per-loop sub-paths; uTraversePath expands loop permutations.
    std::set<std::vector<int> > traversePath(int begin, int end, CFG*& g, bool loop=false);
    std::set<std::vector<int> > uTraversePath(int begin, int end, CFG*& g, bool loop, std::map<int, std::vector<std::vector<int> > >& localLoops);
    std::vector<std::vector<int> > bfsTraversePath(int begin, int end, CFG*& g, bool loop=false);
    // Path compression helpers (zipPath2 is deprecated).
    std::vector<int> unzipPath(std::vector<int>& path, CFG*& g, int start, int end);
    std::vector<int> zipPath(std::vector<int>& path, CFG*& g, int start, int end);
    std::vector<int> zipPath2(std::vector<int>& path, CFG*& g);
    // Debug/visualization output (graphviz-style dumps).
    void printCFGNode(int& cf, std::ofstream& o);
    void printCFGNodeGeneric(int& cf, std::string prop, std::ofstream& o);
    void printCFGEdge(int& cf, CFG*& cfg, std::ofstream& o);
    void printHotness(CFG*& g);
    void printPathDot(CFG*& g);
    void computeOrder(CFG*& g, const int& begin);
    void computeSubGraphs(const int& begin, const int &end, CFG*& g, int depthDifferential);
    //int getTarget(int& n, CFG*& g);
    //int getSource(int& n, CFG*& g);
    std::vector<int> sources;
    std::vector<int> sinks;
    std::vector<int> recursiveLoops;
    std::vector<int> recurses;
    std::map<int, int> ptsNum;
    bool borrowed;
    std::set<int> badloop;
    std::map<int, std::vector<std::vector<int> > > totalLoops;
   // int pathnum;
    std::map<int, std::string> nodeStrings;
    int sourcenum;
    unsigned long long evaledpaths;
    int badpaths;
    int workingthreadnum;
    bool workingthread;
    // Cache: loop head -> all paths through that loop (see uTraversePath).
    std::map<int, std::set<std::vector<int> > > loopStore;
    // Hand-off buffer between bfsTraversePath and uTraversePath.
    std::vector<std::vector<int> > pathStore;
    std::map<int, std::vector<int> > subpathglobal;
    std::map<std::vector<int>, int> subpathglobalinv;
    int nextsubpath;
    std::vector<int> orderOfNodes;
   // std::map<Vertex, int> vertintmap;
   // std::map<Edge, int> edgeintmap;
   // std::map<int, Vertex> intvertmap;
   // std::map<int, Edge> intedgemap;
    std::vector<std::map<Vertex, Vertex> > SubGraphGraphMap;
    std::vector<std::map<Vertex, Vertex> > GraphSubGraphMap;
    std::vector<CFG*> subGraphVector;
    void getVertexPath(std::vector<int> path, CFG*& g, std::vector<Vertex>& vertexPath );
    void storeCompact(std::vector<int> path);
    // Next free integer ids for vertex/edge enumeration.
    int nextNode;
    int nextEdge;
    // markers: nodes with multiple out-edges; closures: loop-closing nodes.
    std::vector<int> markers;
    std::vector<int> closures;
    std::map<int, int> markerIndex;
    std::map<int, std::vector<int> > pathsAtMarkers;
    typedef typename boost::graph_traits<CFG>::vertex_iterator vertex_iterator;
    typedef typename boost::graph_traits<CFG>::out_edge_iterator out_edge_iterator;
    typedef typename boost::graph_traits<CFG>::in_edge_iterator in_edge_iterator;
    typedef typename boost::graph_traits<CFG>::edge_iterator edge_iterator;
    // True when traversal is bounded by an explicit end vertex.
    bool bound;
   // SgGraphTraversal();
   // virtual ~SgGraphTraversal();
   // SgGraphTraversal( SgGraphTraversal &);
   // SgGraphTraversal &operator=( SgGraphTraversal &);
};
/** Default constructor. All bookkeeping state is initialized later by
    constructPathAnalyzer()/prepareGraph(), not here. */
template<class CFG>
SgGraphTraversal<CFG>::
SgGraphTraversal()
{
}
/** Copy assignment.
    NOTE(review): this copies nothing — it ignores `other` and returns *this
    unchanged, so the maps/counters of the target are left as-is. Confirm no
    caller relies on a real (deep) copy before changing this. */
template<class CFG>
SgGraphTraversal<CFG> &
SgGraphTraversal<CFG>::
operator=( SgGraphTraversal &other)
{
    return *this;
}
#ifndef SWIG
/** Trivial destructor; no resources beyond standard containers are owned.
    Hidden from SWIG wrapper generation. */
template<class CFG>
SgGraphTraversal<CFG>::
~SgGraphTraversal()
{
}
#endif
/**
Gets the source of an edge
SgGraphTraversal::getSource
Input:
@param[edge] int& integer representation of edge in question
@param[g] CFG*& the CFG used
*/
template<class CFG>
inline int
SgGraphTraversal<CFG>::
getSource(int& edge, CFG*& g)
{
    // Map the integer id back to a BGL edge, then return the integer id of
    // the vertex the edge leaves from.
    Edge bglEdge = intedgemap[edge];
    return vertintmap[boost::source(bglEdge, *g)];
}
/**
Gets the target of an edge
SgGraphTraversal::getTarget
Input:
@param[edge] int& integer representation of edge in question
@param[g] the CFG*& CFG used
*/
template<class CFG>
inline int
SgGraphTraversal<CFG>::
getTarget(int& edge, CFG*& g)
{
    // Map the integer id back to a BGL edge, then return the integer id of
    // the vertex the edge points to.
    Edge bglEdge = intedgemap[edge];
    return vertintmap[boost::target(bglEdge, *g)];
}
/**
Gets out edges with integer inputs, internal use only
SgGraphTraversal::getInEdges
Input:
@param[node] int, integer representation of the node to get the in edges from
@param[g] CFG* g, CFG
*/
template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
getInEdges(int& node, CFG*& g)
{
    // Collect the integer ids of every edge entering `node`.
    std::vector<int> result;
    in_edge_iterator cur, last;
    boost::tie(cur, last) = boost::in_edges(intvertmap[node], *g);
    while (cur != last)
    {
        result.push_back(edgeintmap[*cur]);
        ++cur;
    }
    return result;
}
/**
Gets out edges with integer inputs, internal use only
SgGraphTraversal::getOutEdges
Input:
@param[node] int, integer representation of the node to get the out edges from
@param[g] CFG* g, CFG
*/
template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
getOutEdges(int &node, CFG*& g)
{
    // Collect the integer ids of every edge leaving `node`.
    std::vector<int> result;
    out_edge_iterator cur, last;
    boost::tie(cur, last) = boost::out_edges(intvertmap[node], *g);
    while (cur != last)
    {
        result.push_back(edgeintmap[*cur]);
        ++cur;
    }
    return result;
}
/**
Condenses a path to its endpoints plus any interior loop-closure nodes.
Currently deprecated.
Input:
@param[pth] std::vector<int> the original path
@param[g] CFG*, the ambient graph
Output:
zipped path (empty when pth is empty; a single-node path stays a single node)
*/
template<class CFG>
inline
std::vector<int>
SgGraphTraversal<CFG>::
zipPath2(std::vector<int>& pth, CFG*& g) {
    std::vector<int> npth;
    // Guard: the previous version indexed pth[0]/pth.back() unconditionally
    // (undefined behavior on an empty path) and computed pth.size()-1 with an
    // unsigned type, which underflows for an empty vector.
    if (pth.empty()) {
        return npth;
    }
    npth.push_back(pth.front());
    // Keep interior nodes only when they close a loop.
    for (size_t i = 1; i + 1 < pth.size(); i++) {
        if (find(closures.begin(), closures.end(), pth[i]) != closures.end()) {
            npth.push_back(pth[i]);
        }
    }
    // Append the final node; the previous version duplicated a single-node
    // path into [x, x] here.
    if (pth.size() > 1) {
        npth.push_back(pth.back());
    }
    return npth;
}
/**
Condenses paths to simply the first and last node and the ordered set of edges
taken at nodes with more than 1 outedge
Input:
@param[pth] std::vector<int>, the original path
@param[g] CFG*, the ambient graph
@param[start] integer representation of the first node
@param[end] integer representation of the last node
*/
template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
zipPath(std::vector<int>& pth, CFG*& g, int start, int end) {
    // Compressed form: [first node, last node, edge chosen at each marker...]
    std::vector<int> zipped;
    zipped.push_back(pth.front());
    zipped.push_back(pth.back());
    for (unsigned int pos = 0; pos < pth.size()-1; pos++) {
        // Only marker nodes (more than one out-edge) need their choice recorded.
        if (find(markers.begin(), markers.end(), pth[pos]) == markers.end()) {
            continue;
        }
        std::vector<int> outs = getOutEdges(pth[pos], g);
        for (unsigned int e = 0; e < outs.size(); e++) {
            if (getTarget(outs[e], g) == pth[pos+1]) {
                zipped.push_back(outs[e]);
            }
        }
    }
    return zipped;
}
/**
unzips the paths zipped by zipPath
Input:
@param[pzipped] the zipped path
@param[CFG] the ambient graph
@param[start] the integer representation of the first node (used to check that zipPath is working correctly)
@param[end] the integer representation of the end node
*/
template<class CFG>
std::vector<int>
SgGraphTraversal<CFG>::
unzipPath(std::vector<int>& pzipped, CFG*& g, int start, int end) {
    // Sanity: zipped form must begin with [start, end] (end == -1 => unbounded).
    ROSE_ASSERT(pzipped[0] == start && (pzipped[1] == end || end == -1));
    // Strip the two endpoint entries; what remains is the ordered edge choices.
    std::vector<int> zipped;
    for (unsigned int i = 2; i < pzipped.size(); i++) {
        zipped.push_back(pzipped[i]);
    }
    std::vector<int> unzipped;
    unzipped.push_back(start);
    std::vector<int> oeds = getOutEdges(start, g);
    if (oeds.size() == 0) {
        // start is a sink: the path is just [start].
        return unzipped;
    }
    for (unsigned int i = 0; i < zipped.size(); i++) {
        oeds = getOutEdges(unzipped.back(), g);
        // Follow forced moves (exactly one out-edge) without consuming a choice.
        while (oeds.size() == 1) {
            if (getTarget(oeds[0], g) == end && unzipped.size() != 1) {
                unzipped.push_back(end);
                return unzipped;
            }
            unzipped.push_back(getTarget(oeds[0], g));
            oeds = getOutEdges(unzipped.back(), g);
        }
        if (oeds.size() == 0) {
            return unzipped;
        }
        // At a branch point: consume the next recorded edge choice. The
        // recorded edge must leave the node we are currently at.
        if (oeds.size() > 1 && (unzipped.back() != end || (unzipped.size() == 1 && unzipped.back() == end))) {
            ROSE_ASSERT(getSource(zipped[i], g) == unzipped.back());
            unzipped.push_back(getTarget(zipped[i], g));
        }
    }
    // All choices consumed; keep following forced moves until end (or a branch).
    std::vector<int> oeds2 = getOutEdges(unzipped.back(), g);
    if (unzipped.back() != end && oeds2.size() != 0) {
        while (oeds2.size() == 1 && unzipped.back() != end) {
            unzipped.push_back(getTarget(oeds2[0], g));
            oeds2 = getOutEdges(unzipped.back(), g);
        }
    }
    return unzipped;
}
/*
Example Time
Example:
timeval tim;
gettimeofday(&tim, NULL);
double t1=tim.tv_sec+(tim.tv_usec/1000000.0);
do_something_long();
gettimeofday(&tim, NULL);
double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
printf("%.6lf seconds elapsed\n", t2-t1);
*/
/**
The function responsible for collecting all paths without loops, and all paths within loops that do not include other loops
then sending those to uTraverse to assemble them into all paths with any combination of loops
Input:
@param[begin] integer representation of the first node
@param[end] integer representation of the last node (or -1 if its not bounded)
@param[g] CFG*, the ambient CFG
@param[loop] boolean expressing whether or not we are calculating paths contained within a loop
*/
template<class CFG>
std::vector<std::vector<int> >
SgGraphTraversal<CFG>::
bfsTraversePath(int begin, int end, CFG*& g, bool loop) {
    //perfdebug allows for examining the speed of traversal
    // NOTE(review): PERFDEBUG is #define'd to 0 at the top of this file, so
    // "#ifdef PERFDEBUG" is always true — confirm "#if PERFDEBUG" was intended.
    #ifdef PERFDEBUG
    //timeval tim;
    //gettimeofday(&tim, NULL);
    //double tim1 = tim.tv_sec+(tim.tv_usec/1000000.0);
    #endif
    bool recursedloop = loop;
    // PtP maps a "join" node to all sub-paths that reach it; paths are split
    // there and re-spliced later to keep memory bounded.
    std::map<int, std::vector<std::vector<int> > > PtP;
    std::set<int> nodes;
    // BFS frontier of partial paths, starting from [begin].
    std::vector<std::vector<int> > pathContainer;
    //std::vector<std::vector<int> > oldPaths;
    std::vector<int> completedLoops;
    std::vector<std::vector<int> > npc;
    std::vector<int> bgpath;
    bgpath.push_back(begin);
    pathContainer.push_back(bgpath);
    std::vector<std::vector<int> > newPathContainer;
    std::vector<std::vector<int> > paths;
    // Loop-head nodes discovered on this level, resolved recursively below.
    std::vector<int> localLoops;
    std::map<int, std::vector<std::vector<int> > > globalLoopPaths;
    //std::cout << "at the while" << std::endl;
    //To keep
    while (pathContainer.size() != 0 /*|| oldPaths.size() != 0*/) {
        /*
        unsigned int mpc = 50000;
        if (pathContainer.size() == 0) {
        unsigned int mxl = 0;
        if (oldPaths.size() > mpc) {
        mxl = mpc/2;
        }
        else {
        mxl = oldPaths.size();
        }
        for (unsigned int k = 0; k < mxl; k++) {
        pathContainer.push_back(oldPaths.back());
        oldPaths.pop_back();
        }
        }
        if (pathContainer.size() > mpc) {
        unsigned int j = 0;
        while (j < mpc) {
        npc.push_back(pathContainer.back());
        pathContainer.pop_back();
        j++;
        }
        oldPaths.insert(oldPaths.end(), pathContainer.begin(), pathContainer.end());
        pathContainer = npc;
        npc.clear();
        }
        */
        //iterating through the currently discovered subpaths to build them up
        for (unsigned int i = 0; i < pathContainer.size(); i++) {
            std::vector<int> npth = pathContainer[i];
            std::vector<int> oeds = getOutEdges(npth.back(), g);
            std::vector<int> ieds = getInEdges(npth.back(), g);
            npth = pathContainer[i];
            oeds = getOutEdges(npth.back(), g);
            // Completion test: bounded => reached `end`; unbounded => reached a
            // sink; recursing on a loop => returned to the loop head.
            if ((!recursedloop && ((bound && npth.back() == end && npth.size() != 1) || (!bound && oeds.size() == 0))) || (recursedloop && npth.back() == end && npth.size() != 1)) {
                std::vector<int> newpth;
                newpth = (pathContainer[i]);
                std::vector<int> movepath = newpth;//zipPath(newpth, g);
                if (recursedloop && newpth.back() == end && newpth.size() != 1) {
                    paths.push_back(movepath);
                }
                else if (!recursedloop) {
                    if (bound && newpth.size() != 1 && newpth.back() == end) {
                        paths.push_back(movepath);
                    }
                    else if (!bound) {
                        paths.push_back(movepath);
                    }
                }
            }
            else {
                // Extend the partial path along every out-edge.
                std::vector<int> oeds = getOutEdges(pathContainer[i].back(), g);
                for (unsigned int j = 0; j < oeds.size(); j++) {
                    int tg = getTarget(oeds[j], g);
                    std::vector<int> newpath = (pathContainer[i]);
                    //we split up paths into pieces so that they don't take up a lot of memory, basically this is when we run into a path
                    //more than once, so we attach all paths that go to that path to that particular node via PtP
                    if (nodes.find(tg) != nodes.end() && find(newpath.begin(), newpath.end(), tg) == newpath.end() && tg != end) {
                        if (PtP.find(tg) == PtP.end()) {
                            // First time this join node is hit: continue BFS from it alone.
                            std::vector<int> nv;
                            nv.push_back(tg);
                            newPathContainer.push_back(nv);
                            PtP[tg].push_back(/*zipPath(*(*/newpath);//, g, newpath.front(), newpath.back()));
                        }
                        else {
                            PtP[tg].push_back(/*zipPath(*/newpath);//, g, newpath.front(), newpath.back()));
                        }
                    }
                    else if (find(newpath.begin(), newpath.end(), getTarget(oeds[j], g)) == newpath.end() || getTarget(oeds[j], g) == end) {
                        // Target not yet on this path (or is `end`): plain extension.
                        newpath.push_back(tg);
                        std::vector<int> ieds = getInEdges(tg, g);
                        if (ieds.size() > 1) {//find(closures.begin(), closures.end(), tg) != closures.end()) {
                            nodes.insert(tg);
                        }
                        newPathContainer.push_back(newpath);
                    }
                    else if (tg == end && recursedloop) {
                        newpath.push_back(tg);
                        newPathContainer.push_back(newpath);
                    }
                    else {//if (find(newpath.begin(), newpath.end(), tg) != newpath.end() && tg != end) {
                        // Target already on this path: a loop head, unless it is
                        // already being processed (completedLoops/recurses).
                        std::vector<int> ieds = getInEdges(tg, g);
                        if (ieds.size() > 1/*find(closures.begin(), closures.end(), tg) != closures.end()*/ && find(completedLoops.begin(), completedLoops.end(), tg) == completedLoops.end() /*&& find(localLoops.begin(), localLoops.end(), tg) == localLoops.end()*/ && find(recurses.begin(), recurses.end(), tg) == recurses.end()) {
                            localLoops.push_back(tg);
                            nodes.insert(tg);
                        }
                       // else if (find(recurses.begin(), recurses.end(), tg) != recurses.end()) {
                       // }
                    }
                    //else {
                    // std::cout << "problem" << std::endl;
                    // ROSE_ASSERT(false);
                    // }
                }
            }
        }
        pathContainer = newPathContainer;
        newPathContainer.clear();
    }
   // std::cout << "done while" << std::endl;
    pathContainer.clear();
    // Re-splice the path fragments stored in PtP: repeatedly prepend every
    // fragment that reaches a path's front node until no growth is possible.
    std::vector<std::vector<int> > finnpts;
    std::vector<std::vector<int> > npts;
    while (true) {
        if (paths.size() > 1000000) {
            std::cout << "too many paths, consider a subgraph" << std::endl;
            ROSE_ASSERT(false);
        }
        //#pragma omp parallel for schedule(guided)
        for (unsigned int qq = 0; qq < paths.size(); qq++) {
            std::vector<int> pq = paths[qq];
            std::vector<int> qp;
            int ppf = paths[qq].front();
            if (PtP.find(ppf) != PtP.end()) {
                for (unsigned int kk = 0; kk < PtP[ppf].size(); kk++) {
                    std::vector<int> newpath = /*unzipPath(*/PtP[ppf][kk];//, g, PtP[ppf][kk][0], PtP[ppf][kk][1]);
                    // Reject splices that would introduce a repeated node
                    // (i.e. an unintended loop) into the combined path.
                    bool good = true;
                    if (newpath.back() == newpath.front() && newpath.front() != begin && newpath.size() > 1) {
                        good = false;
                    }
                    else {
                       // if (find(pq.begin(), pq.end(), newpath.front()) != pq.end() && newpath.front() != begin) {
                       // good = false;
                       // }
                       // else {
                        for (unsigned int kk1 = 0; kk1 < newpath.size(); kk1++) {
                            /*
                            if (newpath.front() == newpath.back()) {
                            good = false;
                            break;
                            }
                            else */if (find(pq.begin(), pq.end(), newpath[kk1]) != pq.end() && newpath[kk1] != begin) {
                                good = false;
                                break;
                            }
                        }
                        //}
                    }
                    if (good) {
                        newpath.insert(newpath.end(), pq.begin(), pq.end());
                        #pragma omp critical
                        {
                        npts.push_back(newpath);
                        }
                    }
                }
            }
            else {
                // Front has no stored fragments: the path is fully assembled.
                std::vector<int> ppq = pq;// zipPath(pq, g, pq.front(), pq.back());
                #pragma omp critical
                {
                finnpts.push_back(ppq);
                }
            }
        }
        if (npts.size() == 0) {
            break;
        }
        else {
            paths = npts;
            npts.clear();
        }
    }
    paths = finnpts;
    finnpts.clear();
    // Resolve each discovered loop head: reuse cached loop paths when
    // available, otherwise recurse with loop=true to enumerate them.
    for (unsigned int k = 0; k < localLoops.size(); k++) {
        int lk = localLoops[k];
        std::vector<std::vector<int> > loopp;
        if (loopStore.find(localLoops[k]) != loopStore.end()) {
            loopp.insert(loopp.end(), loopStore[localLoops[k]].begin(), loopStore[localLoops[k]].end());
        }
        else {
            std::map<int, std::vector<std::vector<int> > > localLoopPaths;
            completedLoops.push_back(lk);
            recurses.push_back(lk);
            loopp = bfsTraversePath(lk, lk, g, true);
            recurses.pop_back();
        }
        for (unsigned int ik = 0; ik < loopp.size(); ik++) {
            if (find(globalLoopPaths[lk].begin(), globalLoopPaths[lk].end(), loopp[ik]) == globalLoopPaths[lk].end()) {
                globalLoopPaths[localLoops[k]].push_back(loopp[ik]);
            }
        }
    }
    // Hand the assembled loop-free paths to uTraversePath via pathStore
    // (`borrowed` tells it to take ownership of that buffer).
    borrowed = true;
    std::vector<std::vector<int> > lps2;
    unsigned int maxpaths = 1000;
    unsigned int pathdivisor = 1;//paths.size()/maxpaths;///paths.size();
    //if (pathdivisor < 1) {
    pathdivisor = 1;
    maxpaths = paths.size();
   // }
    /*
    for (unsigned int j = 0; j < pathdivisor+1; j++) {
    std::vector<std::vector<int> > npaths;
    std::vector<int> dummyvec;
    unsigned int mxpths;
    if (j < pathdivisor) {
    mxpths = maxpaths;
    }
    else {
    mxpths = paths.size() % pathdivisor;
    }
    for (unsigned int k = 0; k < mxpths; k++) {
    npaths.push_back(paths.back());//unzipPath(paths.back(), g, begin, end));
    paths.pop_back();
    }
    */
    pathStore = paths;
    paths.clear();
    if (!recursedloop) {
        uTraversePath(begin, end, g, false, globalLoopPaths);
    }
    else {
        recursed++;
        std::set<std::vector<int> > lps = uTraversePath(begin, end, g, true, globalLoopPaths);
        recursed--;
        for (std::set<std::vector<int> >::iterator ij = lps.begin(); ij != lps.end(); ij++) {
            std::vector<int> ijk = (*ij);
            lps2.push_back(*ij);
        }
    }
    //}
    #ifdef PERFDEBUG
   // timeval tim;
    //std::cout << "begin: " << begin << " end: " << end << std::endl;
    //gettimeofday(&tim, NULL);
    //double tim2 = tim.tv_sec+(tim.tv_usec/1000000);
    //double timeRet = tim2 - tim1;
    //std::cout << "bfs time elapsed: " << timeRet << std::endl;
    #endif
    // Empty unless this call was a loop recursion (loop paths for the caller).
    return lps2;
}
/**
This function calculates all the permutations of loops on paths
it also throws away duplicate paths
Input:
@param[begin] integer representation of first node
@param[end] integer representation of the final node
@param[g] ambient CFG
@param[globalLoopPaths] connects an integer representation of a node to all possible loops starting at that node
*/
template<class CFG>
std::set<std::vector<int> >
SgGraphTraversal<CFG>::
uTraversePath(int begin, int end, CFG*& g, bool loop, std::map<int, std::vector<std::vector<int> > >& globalLoopPaths) {
    //std::cout << "uTraverse" << std::endl;
    //int doubledpaths = 0;
    int newmil = 1;
    //#ifdef LP
    //if (loop && loopStore.find(begin) != loopStore.end()) {
    // return loopStore[begin];
    //}
    //#endif
    // NOTE(review): PERFDEBUG is #define'd to 0 above, so "#ifdef PERFDEBUG"
    // is always true — confirm "#if PERFDEBUG" was intended.
    #ifdef PERFDEBUG
    //timeval tim;
    //gettimeofday(&tim, NULL);
    //double t1 = tim.tv_sec+(tim.tv_usec/1000000);
    #endif
    std::set<std::vector<int> > newpaths;
    std::set<std::vector<int> > npaths;
    pathnum = 0;
    std::vector<int> path;
    std::vector<std::vector<int> > paths;
    int truepaths = 0;
    std::vector<std::vector<int> > checkpaths;
    std::vector<std::vector<int> > npathchecker;
    std::map<int, int> currents;
    //int nnumpaths = 0;
    // Result set when loop==true: every permutation of loops over each base path.
    std::set<std::vector<int> > loopPaths;
    //bool threadsafe = true;
    bool done = false;
    std::set<std::vector<int> > fts;
    //double ttfors = 0;
    //double tperms = 0;
    while (true) {
        //std::cout << "paths.size() " << paths.size() << std::endl;
        if (paths.size() > 1000000) {
            std::cout << "nearly 1 million paths with no loops, stopping" << std::endl;
            return loopPaths;
            std::cout << "ended early" << std::endl;
        }
        if (done || borrowed) {
            if (borrowed) {
                // Base (loop-free) paths are handed over by bfsTraversePath.
                paths = pathStore;
                pathStore.clear();
            }
            //std::cout << "paths.size(): " << paths.size() << std::endl;
            if (paths.size() != 0) {
            }
            else {
                return loopPaths;
            }
           // #pragma omp parallel
           // {
            // Each base path is expanded independently, in parallel.
            #pragma omp parallel for schedule(guided)
            for (unsigned int qqq = 0; qqq < paths.size(); qqq++) {
               // std::cout << "pathcheck" << std::endl;
                //int pathevals = 0;
                //std::vector<int> zpt = zipPath2(paths[qqq], g);
                //std::set<std::vector<int> > boxpaths;
                std::set<std::vector<int> > movepaths;
                std::vector<int> path;// = paths[qqq];
                path = paths[qqq];//unzipPath(paths[qqq], g, begin, end);
                truepaths++;
                // permnums = product over loop-insertion points of
                // (number of usable loops at that point + 1); i.e. the total
                // number of loop permutations of this base path.
                int permnums = 1;
                std::vector<int> perms;
                std::vector<unsigned int> qs;
                std::map<int, std::vector<std::vector<int> > > localLoops;
                std::vector<int> takenLoops;
                takenLoops.push_back(path[0]);
                bool taken = false;
                //timeval timfor;
                int lost = 0;
                //gettimeofday(&timfor, NULL);
                //double t1for = timfor.tv_sec + (timfor.tv_usec/1000000);
                // Find every interior node of the path that heads a loop, and
                // collect the loops that don't revisit an earlier loop head.
                for (unsigned int q = 1; q < path.size()-1; q++) {
                    //if (find(closures.begin(), closures.end(), path[q]) != closures.end()) {
                    if (globalLoopPaths.find(path[q]) != globalLoopPaths.end() /*&& find(lloops.begin(), lloops.end(), path[q]) != lloops.end()*/ && globalLoopPaths[path[q]].size() != 0 /*&& path[q] != begin && path[q] != end*/) {
                        for (unsigned int qp1 = 0; qp1 < globalLoopPaths[path[q]].size(); qp1++) {
                            std::vector<int> gp = globalLoopPaths[path[q]][qp1]; //unzipPath(globalLoopPaths[path[q]][qp1],g,path[q],path[q]);
                           // std::vector<int> zgp = zipPath2(globalLoopPaths[zpt[q]][qp1], g);
                            for (unsigned int qp2 = 0; qp2 < takenLoops.size(); qp2++) {
                                if (find(gp.begin(),gp.end(), takenLoops[qp2]) != gp.end()) {
                                    taken = true;
                                }
                            }
                            if (!taken) {
                                localLoops[path[q]].push_back(gp);
                            }
                            else {
                                lost++;
                                taken = false;
                            }
                        }
                        if (localLoops[path[q]].size() != 0) {
                            takenLoops.push_back(path[q]);
                            permnums *= (localLoops[path[q]].size()+1);
                            perms.push_back(permnums);
                            qs.push_back(path[q]);
                        }
                    }
                }
                //}
                //if (loop) {
                //std::cout << "lostloop: " << lost << std::endl;
                //}
                //else {
                //std::cout << "lostpath: " << lost << std::endl;
                //}
                //std::cout << "endpathcheck" << std::endl;
                //std::cout << "rest" << std::endl;
                //std::cout << "permnums: " << permnums << std::endl;
                //gettimeofday(&timfor, NULL);
                //double t2for = timfor.tv_sec + (timfor.tv_usec/1000000);
                //double ttfor = t2for - t1for;
                //#pragma omp atomic
                //ttfors += ttfor;
                //std::set<std::vector<int> > movepaths2;
                std::set<std::vector<int> > movepathscheck;
                //timeval timperms;
                //gettimeofday(&timperms, NULL);
               // double t1perm = timperms.tv_sec + (timperms.tv_usec/1000000);
                std::vector<int> nvec;
                std::vector<std::vector<int> > boxpaths(permnums, nvec);
                //#pragma omp parallel for schedule(guided)
                // Enumerate permutation i of the loop choices (mixed-radix
                // decode of i via the cumulative products stored in perms).
                for (int i = 1; i <= permnums; i++) {
                    //bool goodthread = false;
                    std::vector<int> loopsTaken;
                    //bool stop = false;
                    unsigned int j = 0;
                    std::vector<int> npath;
                    while (true) {
                        if (j == perms.size() || perms[j] > i) {
                            break;
                        }
                        else {
                            j++;
                        }
                    }
                    int pn = i;
                    // pL[k] = which loop (or -1 for none) to insert at the
                    // k-th insertion point for this permutation.
                    std::vector<int> pL;
                    for (unsigned int j1 = 0; j1 <= j; j1++) {
                        pL.push_back(-1);
                    }
                    for (unsigned int k = j; k > 0; k--) {
                        int l = 1;
                        while (perms[k-1]*l < pn) {
                            l++;
                        }
                        pL[k] = l-2;
                        pn -= (perms[k-1]*(l-1));
                    }
                    pL[0] = pn-2;
                    unsigned int q2 = 0;
                    // Rebuild the path, splicing in the chosen loop bodies.
                    for (unsigned int q1 = 0; q1 < path.size(); q1++) {
                        if (q2 < qs.size()) {
                            if (qs.size() != 0 && (unsigned)path[q1] == qs[q2] && (size_t)q2 != pL.size()) {
                                if (pL[q2] == -1) {
                                    npath.push_back(path[q1]);
                                }
                                else {
                                   // if (!stop) {
                                    npath.insert(npath.end(), localLoops[path[q1]][pL[q2]].begin(),
                                                 localLoops[path[q1]][pL[q2]].end());
                                   // }
                                }
                                q2++;
                            }
                            else {
                                npath.push_back(path[q1]);
                            }
                        }
                        else {
                            npath.push_back(path[q1]);
                        }
                    }
                    #ifdef FULLDEBUG
                    std::cout << "path: " << std::endl;
                    for (int qe = 0; qe < npath.size(); qe++) {
                        std::cout << ", " << npath[qe];
                    }
                    std::cout << std::endl;
                    std::cout << "permnum: " << i << std::endl;
                    #endif
                   // bool addit = false;
                    //if (!stop) {
                   // if (loop && npath.front() == npath.back()) {
                   // addit = true;
                   // }
                   // else if (!loop && bound && npath.front() == begin && npath.back() == end && npath.size() != 1) {
                   // addit = true;
                   // }
                   // else if (!loop && !bound) {
                   // addit = true;
                   // }
                   // if (!addit) {
                   // std::cout << "bad path" << std::endl;
                   // }
                    //bool extra = false;
                    //if (addit && !loop) {
                    //if (movepathscheck.find(npath) == movepathscheck.end()) {
                    //int mpc = movepathscheck.size();
                    //std::set<std::vector<int> > movepathspre = movepathscheck;
                   // movepaths2.insert(npath);
                    //movepathscheck.insert(npath);
                    //ROSE_ASSERT(movepathscheck.size() == mpc || movepathspre.find(npath) == movepathspre.end());
                    //if (movepathscheck.size() == mpc) {
                   // extra = true;
                   // }
                    //}
                    //else {
                    //#pragma omp atomic
                   // doubledpaths++;
                   // }
                    //}
                    //if (!workingthread || threadsafe) {
                    //if ((newpaths.size() > 1 || i == permnums || threadsafe)) {
                   // }
                   // }
                   // }
                    //if (!extra)
                   // {
                    //if (movepaths2.size() > 0) //|| i == permnums || threadsafe)
                   // #pragma omp critical
                   // {
                    boxpaths[i-1] = npath;
                   // }
                   // }
                    //std::cout << "endrest" << std::endl;
                }
                evaledpaths += boxpaths.size();
                if (evaledpaths > newmil*100000ull) {
                    //std::cout << "evaledpaths: " << evaledpaths << std::endl;
                    newmil++;
                }
               // #pragma omp critical
               // {
                // Deliver: normal traversal sends every permutation to the
                // user's analyzePath(); loop recursion collects them instead.
                if (!loop) {
                    for (std::vector<std::vector<int> >::iterator box = boxpaths.begin(); box != boxpaths.end(); box++) {
                        std::vector<Vertex> verts;
                        getVertexPath((*box), g, verts);
                        #pragma omp critical
                        {
                        analyzePath(verts);
                        }
                    }
                }
                else {
                    #pragma omp critical
                    {
                    loopPaths.insert(boxpaths.begin(), boxpaths.end());;
                    }
                }
            }
        }
        //}
        /*
        #pragma omp atomic
        evaledpaths++;
        //pathevals++;
        if (evaledpaths % 10000 == 0 && evaledpaths != 0) {
        std::cout << "evaled paths: " << evaledpaths << std::endl;
        }
        if (!loop) {
        std::vector<Vertex> verts;
        getVertexPath(npath, g, verts);
        #pragma omp critical
        {
        #ifdef FULLDEBUG
        for (unsigned int aa = 0; aa < npath.size(); aa++) {
        if (ptsNum.find(npath[aa]) != ptsNum.end()) {
        ptsNum[npath[aa]] += 1;
        }
        else {
        ptsNum[npath[aa]] = 1;
        }
        }
        #endif
        analyzePath(verts);
        }
        }
        else if (loop)
        {
        //std::vector<int> zpth = zipPath(npath, g, npath.front(), npath.back());
        #pragma omp critical
        {
        loopPaths.insert(npath);//zipPath(npath, g, npath.front(), npath.back()));
        }
        }
        else {
        }
        }
        */
       // movepaths2.clear();
       // std::cout << "permnums: " << permnums << std::endl;
       // std::cout << "evaledpaths final: " << pathevals << std::endl;
        //gettimeofday(&timperms, NULL);
        //double t2perm = timperms.tv_sec+(timperms.tv_usec/1000000);
        //#pragma omp atomic
        //tperms += t2perm - t1perm;
       // }
        //}
        //}
        //}
        #ifdef PERFDEBUG
        //gettimeofday(&tim, NULL);
       // double t2 = tim.tv_sec+(tim.tv_usec/1000000.0);
       // double tperm = t2 - t1perm
        //double tX = t2 - t1;
        //std::cout << "begin: " << begin << " end: " << end << std::endl;
       // std::cout << "uTraverse time: " << tX << std::endl;
       // std::cout << "tperms: " << tperms << std::endl;
       // std::cout << "ttfors: " << ttfors << std::endl;
       // std::cout << "doubledpaths: " << doubledpaths << std::endl;
        #endif
        #ifdef LP
        // Cache the enumerated loop paths so later calls can reuse them.
        if (loop) {
            #ifdef PERFDEBUG
           // std::cout << "loopPaths: " << loopPaths.size() << std::endl;
            #endif
            loopStore[begin] = loopPaths;
        }
        #endif
        return loopPaths;
    }
}
/**
This is the function that is used by the user directly to start the algorithm. It is immediately available to the user
SgGraphTraversal::constructPathAnalyzer
Input:
@param[begin] Vertex, starting node
@param[end] Vertex, endnode
@param[g] CFG* g, CFG calculated previously
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
constructPathAnalyzer(CFG* g, bool unbounded, Vertex begin, Vertex end, bool ns) {
    // Reset all per-run bookkeeping counters before the traversal starts.
    abnormals = 0;
    normals = 0;
    needssafety = ns;   // was an if/else that only copied the flag
    checkedfound = 0;
    recursed = 0;
    nextsubpath = 0;
    borrowed = true;
    stoppedpaths = 0;
    evaledpaths = 0;
    badpaths = 0;
    sourcenum = 0;
    // Enumerate nodes/edges and classify markers/closures/sources/sinks.
    prepareGraph(g);
    workingthread = false;
    workingthreadnum = -1;
    if (!unbounded) {
        // Bounded search: a single traversal from begin to end.
        bound = true;
        recursiveLoops.clear();
        recurses.clear();
        // The returned path set was stored in a local and never used; the
        // traversal's side effects (analyzePath / loopPaths) are what matter.
        bfsTraversePath(vertintmap[begin], vertintmap[end], g);
    }
    else {
        // Unbounded search: traverse from every source node of the graph.
        bound = false;
        for (unsigned int j = 0; j < sources.size(); j++) {
            sourcenum = sources[j];
            recursiveLoops.clear();
            recurses.clear();
            bfsTraversePath(sources[j], -1, g);  // -1 sentinel: no fixed end node
        }
    }
    printHotness(g);
}
/** DEPRECATED
This is a function to construct subgraphs for parallelization
SgGraphTraversal::computeSubGraphs
Input:
@param[begin] const int, starting point
@param[end] const int ending point
@param[g] const CFG*, control flow graph to compute
@param[depthDifferential] int, used to specify how large the subgraph should be
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
computeSubGraphs(const int& begin, const int &end, CFG*& g, int depthDifferential) {
    int minDepth = 0;
    int maxDepth = minDepth + depthDifferential;
    int currSubGraph = 0;
    std::set<int> foundNodes;
    while (true) {
        // Seed the current subgraph with the first node of this depth window.
        Vertex windowStart = boost::add_vertex(*subGraphVector[currSubGraph]);
        GraphSubGraphMap[currSubGraph][intvertmap[orderOfNodes[minDepth]]] = intvertmap[windowStart];
        SubGraphGraphMap[currSubGraph][intvertmap[windowStart]] = intvertmap[orderOfNodes[minDepth]];
        for (int i = minDepth; i <= maxDepth; i++) {
            Vertex v = GraphSubGraphMap[currSubGraph][intvertmap[orderOfNodes[i]]];
            std::vector<int> outEdges = getOutEdges(orderOfNodes[i], g);
            for (unsigned int j = 0; j < outEdges.size(); j++) {
                Vertex u;
                // BUGFIX: the original test was inverted -- it looked up a
                // mapping for targets that had never been added, and re-added
                // vertices that already existed.  Reuse the mapping only when
                // the target is already known.
                if (foundNodes.find(getTarget(outEdges[j], g)) != foundNodes.end()) {
                    u = GraphSubGraphMap[currSubGraph][intvertmap[getTarget(outEdges[j], g)]];
                }
                else {
                    u = boost::add_vertex(*subGraphVector[currSubGraph]);
                    foundNodes.insert(getTarget(outEdges[j], g));
                    SubGraphGraphMap[currSubGraph][u] = intvertmap[getTarget(outEdges[j], g)];
                    GraphSubGraphMap[currSubGraph][intvertmap[getTarget(outEdges[j], g)]] = u;
                }
                Edge edge;
                bool ok;
                boost::tie(edge, ok) = boost::add_edge(v, u, *subGraphVector[currSubGraph]);
            }
        }
        minDepth = maxDepth;
        if ((unsigned int) minDepth == orderOfNodes.size()-1) {
            break;
        }
        maxDepth += depthDifferential;
        if ((unsigned int) maxDepth > orderOfNodes.size()-1)
        {
            maxDepth = orderOfNodes.size()-1;
        }
        // BUGFIX: the original pushed an uninitialized pointer into
        // subGraphVector; allocate the next subgraph before advancing to it.
        CFG* newSubGraph = new CFG();
        subGraphVector.push_back(newSubGraph);
        currSubGraph++;
    }
    return;
}
/*
These should NOT be used by the user. They are simply for writing interesting information on the DOT graphs of the CFG
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
printCFGNodeGeneric(int &cf, std::string prop, std::ofstream& o) {
    // Emit a single DOT node statement carrying the node number and its
    // associated property string.
    const std::string color = "black";
    o << cf
      << " [label=\"" << " num:" << cf << " prop: " << prop
      << "\", color=\"" << color
      << "\", style=\"" << "solid" << "\"];\n";
}
template<class CFG>
void
SgGraphTraversal<CFG>::
printCFGNode(int& cf, std::ofstream& o)
{
#ifdef FULLDEBUG
    // Debug build: label every node with its entry in the ptsNum map.
    int pts = ptsNum[cf];
    const std::string color = "black";
    o << cf
      << " [label=\"" << " pts: " << pts
      << "\", color=\"" << color
      << "\", style=\"" << "solid" << "\"];\n";
#endif
#ifndef FULLDEBUG
    // Normal build: label every node with its integer id only.
    const std::string color = "black";
    o << cf
      << " [label=\"" << " num:" << cf
      << "\", color=\"" << color
      << "\", style=\"" << "solid" << "\"];\n";
#endif
}
template<class CFG>
void
SgGraphTraversal<CFG>::
printCFGEdge(int& cf, CFG*& cfg, std::ofstream& o)
{
    // Emit one DOT edge statement, labelled with the endpoint ids.
    const int from = getSource(cf, cfg);
    const int to = getTarget(cf, cfg);
    o << from << " -> " << to
      << " [label=\"" << from << " " << to
      << "\", style=\"" << "solid" << "\"];\n";
}
template<class CFG>
void
SgGraphTraversal<CFG>::
printHotness(CFG*& g)
{
    // Dump the whole CFG as a DOT file ("hotness0.dot").
    // NOTE(review): currhot is a local, so successive calls overwrite the
    // same file; promote it to a member if distinct dumps are wanted.
    const CFG* gc = g;
    int currhot = 0;
    std::ofstream mf;
    std::stringstream filenam;
    filenam << "hotness" << currhot << ".dot";
    std::string fn = filenam.str();
    mf.open(fn.c_str());
    mf << "digraph defaultName { \n";
    vertex_iterator v, vend;
    edge_iterator e, eend;
    for (boost::tie(v, vend) = vertices(*gc); v != vend; ++v)
    {
        printCFGNode(vertintmap[*v], mf);
    }
    for (boost::tie(e, eend) = edges(*gc); e != eend; ++e)
    {
        printCFGEdge(edgeintmap[*e], g, mf);
    }
    // BUGFIX: close the digraph block so the emitted DOT file is valid.
    mf << "}\n";
    mf.close();
}
template<class CFG>
void
SgGraphTraversal<CFG>::
printPathDot(CFG*& g)
{
    // Dump the CFG as "pathnums.dot", annotating each node with its stored
    // property string (from nodeStrings) or "noprop" when none exists.
    const CFG* gc = g;
    std::ofstream mf;
    std::stringstream filenam;
    filenam << "pathnums.dot";
    std::string fn = filenam.str();
    mf.open(fn.c_str());
    mf << "digraph defaultName { \n";
    vertex_iterator v, vend;
    edge_iterator e, eend;
    for (boost::tie(v, vend) = vertices(*gc); v != vend; ++v)
    {
        if (nodeStrings.find(vertintmap[*v]) != nodeStrings.end()) {
            int nn = vertintmap[*v];
            printCFGNodeGeneric(vertintmap[*v], nodeStrings[nn], mf);
        }
        else {
            printCFGNodeGeneric(vertintmap[*v], "noprop", mf);
        }
    }
    for (boost::tie(e, eend) = edges(*gc); e != eend; ++e)
    {
        printCFGEdge(edgeintmap[*e], g, mf);
    }
    // BUGFIX: close the digraph block so the emitted DOT file is valid.
    mf << "}\n";
    mf.close();
}
/**
This is the function that preps the graph for traversal
SgGraphTraversal::prepareGraph
Input:
@param[g] CFG*& g, CFG calculated previously
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
prepareGraph(CFG*& g) {
    // Restart the edge/node id counters, then enumerate and classify the graph.
    nextEdge = 1;
    nextNode = 1;
    findClosuresAndMarkersAndEnumerate(g);
}
/**
DEPRECATED
This is the function that preps the graph for traversal, currently this one isn't used but for many traversals on one visitor
may necessitate
SgGraphTraversal::firstPrepGraph
Input:
@param[g] CFG*& g, CFG calculated previously
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
firstPrepGraph(CFG*& g) {
    // Deprecated alias of prepareGraph: reset id counters and enumerate.
    nextEdge = 1;
    nextNode = 1;
    findClosuresAndMarkersAndEnumerate(g);
}
/**
This calculates nodes with more than one in edge or more than one out edge
SgGraphTraversal::findClosuresAndMarkers
Input:
@param[g] CFG*& g, CFG calculated previously
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
findClosuresAndMarkersAndEnumerate(CFG*& g)
{
    // Pass 1: assign a unique integer id to every edge.
    edge_iterator ei, eiend;
    for (boost::tie(ei, eiend) = edges(*g); ei != eiend; ++ei) {
        edgeintmap[*ei] = nextEdge;
        intedgemap[nextEdge] = *ei;
        ++nextEdge;
    }
    // Pass 2: assign a unique integer id to every vertex.
    vertex_iterator numv, numvend;
    for (boost::tie(numv, numvend) = vertices(*g); numv != numvend; ++numv)
    {
        vertintmap[*numv] = nextNode;
        intvertmap[nextNode] = *numv;
        ++nextNode;
    }
    // Pass 3: classify vertices -- fan-out nodes become markers, fan-in
    // nodes become closures, successor-less nodes are sinks, and
    // predecessor-less nodes are sources.
    vertex_iterator vi, viend;
    for (boost::tie(vi, viend) = vertices(*g); vi != viend; ++vi) {
        const int node = vertintmap[*vi];
        std::vector<int> outs = getOutEdges(node, g);
        std::vector<int> ins = getInEdges(node, g);
        if (outs.size() > 1)
        {
            markers.push_back(node);
            markerIndex[node] = markers.size()-1;
            for (unsigned int k = 0; k < outs.size(); k++) {
                pathsAtMarkers[node].push_back(getTarget(outs[k], g));
            }
        }
        if (ins.size() > 1)
        {
            closures.push_back(node);
        }
        if (outs.size() == 0) {
            sinks.push_back(node);
        }
        if (ins.size() == 0) {
            sources.push_back(node);
        }
    }
    return;
}
/** DEPRECATED
Currently unused but will be necessary for parallelization in progress
SgGraphTraversal::computeOrder
@param[g] CFG* cfg in question
@parm[begin] const int, integer representation of source node
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
computeOrder(CFG*& g, const int& begin) {
    // Frontier expansion from 'begin'; a node with several in-edges is only
    // expanded once all of its in-edges have been reached.
    std::vector<int> currentNodes;
    std::vector<int> newCurrentNodes;
    currentNodes.push_back(begin);
    // Counts how many times each multi-in-edge node has been reached so far.
    std::map<int, int> reverseCurrents;
    orderOfNodes.push_back(begin);
    // Nodes waiting for their remaining in-edges before they may be expanded.
    std::set<int> heldBackNodes;
    while (currentNodes.size() != 0) {
        for (unsigned int j = 0; j < currentNodes.size(); j++) {
            std::vector<int> inEdges = getInEdges(currentNodes[j], g);
            if (inEdges.size() > 1) {
                if (reverseCurrents.find(currentNodes[j]) == reverseCurrents.end()) {
                    reverseCurrents[currentNodes[j]] = 0;
                }
                if ((unsigned int) reverseCurrents[currentNodes[j]] == inEdges.size() - 1) {
                    // Last in-edge seen: release the node and expand its successors.
                    heldBackNodes.erase(currentNodes[j]);
                    reverseCurrents[currentNodes[j]]++;
                    std::vector<int> outEdges = getOutEdges(currentNodes[j], g);
                    for (unsigned int k = 0; k < outEdges.size(); k++) {
                        newCurrentNodes.push_back(getTarget(outEdges[k], g));
                        orderOfNodes.push_back(getTarget(outEdges[k], g));
                    }
                }
                else if (reverseCurrents[currentNodes[j]] < reverseCurrents.size()) {
                    // NOTE(review): comparing the per-node counter against the
                    // MAP size looks suspicious -- inEdges.size() was probably
                    // intended.  Left untouched because this function is
                    // deprecated; confirm before reviving it.
                    reverseCurrents[currentNodes[j]]++;
                    if (heldBackNodes.find(currentNodes[j]) == heldBackNodes.end()) {
                        heldBackNodes.insert(currentNodes[j]);
                    }
                }
            }
            else {
                // Single (or zero) in-edge: expand immediately.
                std::vector<int> outEdges = getOutEdges(currentNodes[j], g);
                for (unsigned int k = 0; k < outEdges.size(); k++) {
                    newCurrentNodes.push_back(getTarget(outEdges[k], g));
                    orderOfNodes.push_back(getTarget(outEdges[k], g));
                }
            }
        }
        if (newCurrentNodes.size() == 0 && heldBackNodes.size() != 0) {
            // The frontier stalled with nodes still held back (e.g. a cycle):
            // force-release every held-back node so progress continues.
            for (std::set<int>::iterator q = heldBackNodes.begin(); q != heldBackNodes.end(); q++) {
                int qint = *q;
                std::vector<int> heldBackOutEdges = getOutEdges(qint, g);
                for (unsigned int p = 0; p < heldBackOutEdges.size(); p++) {
                    newCurrentNodes.push_back(getTarget(heldBackOutEdges[p], g));
                }
            }
            heldBackNodes.clear();
        }
        currentNodes = newCurrentNodes;
        newCurrentNodes.clear();
    }
    return;
}
/**
Converts the path calculated by this algorithm to Vertices so users can
access data
SgGraphTraversal::getVertexPath
@param[path] integer representation of path
@param[g] CFG*, cfg in question
@param[vertexPath] for some reason this can't be a return value so it is changed via pass by reference
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
getVertexPath(std::vector<int> path, CFG*& g, std::vector<Vertex>& vertexPath) {
    // Translate each integer node id back into its graph Vertex descriptor,
    // appending to vertexPath (pass-by-reference output).
    for (std::vector<int>::const_iterator it = path.begin(); it != path.end(); ++it) {
        vertexPath.push_back(intvertmap[*it]);
    }
}
/**
DEPRECATED
Currently unused, may eventually be modified for optimal storage purposes
SgGraphTraversal::storeCompact
@param[compactPath] path to be compactified
*/
template<class CFG>
void
SgGraphTraversal<CFG>::
storeCompact(std::vector<int> compactPath) {
    // Intentional no-op: compact path storage was never implemented; the
    // function is kept for interface compatibility.
    (void) compactPath;
}
|
GB_unaryop__minv_uint8_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_int16
// op(A') function: GB_tran__minv_uint8_int16
// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
// entry type of the input matrix A
#define GB_ATYPE \
    int16_t
// entry type of the output matrix C
#define GB_CTYPE \
    uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int16_t aij = Ax [pA]
// address the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, aij) \
    uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;    \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;    \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_UNSIGNED ((uint8_t) Ax [p], 8) for all p in 0..anz-1,
// computed in parallel with a static OpenMP schedule.
GrB_Info GB_unop__minv_uint8_int16
(
    uint8_t *Cx,        // Cx and Ax may be aliased
    int16_t *Ax,        // input array of int16_t entries
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* controls; caller falls back to the
    // generic (non-hard-coded) kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): the transpose/typecast/apply loop itself lives in the
// shared template GB_unaryop_transpose.c, specialized here via the GB_* macros.
GrB_Info GB_tran__minv_uint8_int16
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, transposed on the fly
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts (phase 1 result)
    GBI_single_iterator Iter,           // iterator over the slices of A
    const int64_t *GB_RESTRICT A_slice, // slice boundaries of A
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
hpgmg-fv.c | /*******************************************************************************
Copyright (c) 2016 Advanced Micro Devices, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
//------------------------------------------------------------------------------------------------------------------------------
// Copyright Notice
//------------------------------------------------------------------------------------------------------------------------------
// HPGMG, Copyright (c) 2014, The Regents of the University of
// California, through Lawrence Berkeley National Laboratory (subject to
// receipt of any required approvals from the U.S. Dept. of Energy). All
// rights reserved.
//
// If you have questions about your rights to use or distribute this
// software, please contact Berkeley Lab's Technology Transfer Department
// at TTD@lbl.gov.
//
// NOTICE. This software is owned by the U.S. Department of Energy. As
// such, the U.S. Government has been granted for itself and others
// acting on its behalf a paid-up, nonexclusive, irrevocable, worldwide
// license in the Software to reproduce, prepare derivative works, and
// perform publicly and display publicly. Beginning five (5) years after
// the date permission to assert copyright is obtained from the U.S.
// Department of Energy, and subject to any subsequent five (5) year
// renewals, the U.S. Government is granted for itself and others acting
// on its behalf a paid-up, nonexclusive, irrevocable, worldwide license
// in the Software to reproduce, prepare derivative works, distribute
// copies to the public, perform publicly and display publicly, and to
// permit others to do so.
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
#ifdef USE_MPI
#include <mpi.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
//------------------------------------------------------------------------------------------------------------------------------
#include "timers.h"
#include "defines.h"
#include "level.h"
#include "mg.h"
#include "operators.h"
#include "solvers.h"
//------------------------------------------------------------------------------------------------------------------------------
// Benchmark (F)MGSolve on one level of the hierarchy: pass 0 warms up and
// (under MPI) measures the average time per solve; pass 1 is the timed run,
// scaled so it executes for at least ~60 seconds when MPI timing is available.
void bench_hpgmg(mg_type *all_grids, int onLevel, double a, double b, double dtol, double rtol){
  int doTiming;
  int minSolves = 10; // do at least minSolves MGSolves
  double timePerSolve = 0;
  for(doTiming=0;doTiming<=1;doTiming++){ // first pass warms up, second pass times
  #ifdef USE_HPM // IBM performance counters for BGQ...
  if( (doTiming==1) && (onLevel==0) )HPM_Start("FMGSolve()");
  #endif
  #ifdef USE_MPI
  double minTime = 60.0; // minimum time in seconds that the benchmark should run
  double startTime = MPI_Wtime();
  if(doTiming==1){
    // scale the solve count so the timed pass runs for at least minTime seconds
    if((minTime/timePerSolve)>minSolves)minSolves=(minTime/timePerSolve); // if one needs to do more than minSolves to run for minTime, change minSolves
  }
  #endif
  if(all_grids->levels[onLevel]->my_rank==0){
    if(doTiming==0){fprintf(stdout,"\n\n===== Warming up by running %d solves ==========================================\n",minSolves);}
    else{fprintf(stdout,"\n\n===== Running %d solves ========================================================\n",minSolves);}
    fflush(stdout);
  }
  int numSolves = 0; // solves completed
  MGResetTimers(all_grids);
  while( (numSolves<minSolves) ){
    // each solve restarts from a zero initial guess
    zero_vector(all_grids->levels[onLevel],VECTOR_U);
    #ifdef USE_FCYCLES
    FMGSolve(all_grids,onLevel,VECTOR_U,VECTOR_F,a,b,dtol,rtol);
    #else
    MGSolve(all_grids,onLevel,VECTOR_U,VECTOR_F,a,b,dtol,rtol);
    #endif
    numSolves++;
  }
  #ifdef USE_MPI
  if(doTiming==0){
    double endTime = MPI_Wtime();
    timePerSolve = (endTime-startTime)/numSolves;
    MPI_Bcast(&timePerSolve,1,MPI_DOUBLE,0,MPI_COMM_WORLD); // after warmup, process 0 broadcasts the average time per solve (consensus)
  }
  #endif
  #ifdef USE_HPM // IBM performance counters for BGQ...
  if( (doTiming==1) && (onLevel==0) )HPM_Stop("FMGSolve()");
  #endif
  }
}
//------------------------------------------------------------------------------------------------------------------------------
// Driver: parses the problem size, builds the fine level and the multigrid
// hierarchy, benchmarks (F)MGSolve on problem sizes h, 2h, and 4h, and runs
// a Richardson error analysis before tearing everything down.
int main(int argc, char **argv){
  int my_rank=0;
  int num_tasks=1;
  int OMP_Threads = 1;
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  #ifdef _OPENMP
  #pragma omp parallel
  {
    #pragma omp master
    {
      OMP_Threads = omp_get_num_threads();
    }
  }
  #endif
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // initialize MPI and HPM
  #ifdef USE_MPI
  int actual_threading_model = -1;
  int requested_threading_model = -1;
  requested_threading_model = MPI_THREAD_SINGLE;
  //requested_threading_model = MPI_THREAD_FUNNELED;
  //requested_threading_model = MPI_THREAD_SERIALIZED;
  //requested_threading_model = MPI_THREAD_MULTIPLE;
  #ifdef _OPENMP
  // with OpenMP enabled, only the master thread makes MPI calls
  requested_threading_model = MPI_THREAD_FUNNELED;
  //requested_threading_model = MPI_THREAD_SERIALIZED;
  //requested_threading_model = MPI_THREAD_MULTIPLE;
  #endif
  MPI_Init_thread(&argc, &argv, requested_threading_model, &actual_threading_model);
  MPI_Comm_size(MPI_COMM_WORLD, &num_tasks);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  #ifdef USE_HPM // IBM HPM counters for BGQ...
  HPM_Init();
  #endif
  #endif // USE_MPI
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // parse the arguments...
  int log2_box_dim = 6; // 64^3
  int target_boxes_per_rank = 1;
  //int64_t target_memory_per_rank = -1; // not specified
  int64_t box_dim = -1;
  int64_t boxes_in_i = -1;
  int64_t target_boxes = -1;
  if(argc==3){
    log2_box_dim=atoi(argv[1]);
    target_boxes_per_rank=atoi(argv[2]);
    if(log2_box_dim>9){
      // NOTE, in order to use 32b int's for array indexing, box volumes must be less than 2^31 doubles
      if(my_rank==0){fprintf(stderr,"log2_box_dim must be less than 10\n");}
      #ifdef USE_MPI
      MPI_Finalize();
      #endif
      exit(0);
    }
    if(log2_box_dim<4){
      if(my_rank==0){fprintf(stderr,"log2_box_dim must be at least 4\n");}
      #ifdef USE_MPI
      MPI_Finalize();
      #endif
      exit(0);
    }
    if(target_boxes_per_rank<1){
      if(my_rank==0){fprintf(stderr,"target_boxes_per_rank must be at least 1\n");}
      #ifdef USE_MPI
      MPI_Finalize();
      #endif
      exit(0);
    }
    #ifndef MAX_COARSE_DIM
    #define MAX_COARSE_DIM 11
    #endif
    box_dim=1<<log2_box_dim;
    target_boxes = (int64_t)target_boxes_per_rank*(int64_t)num_tasks;
    boxes_in_i = -1;
    int64_t bi;
    for(bi=1;bi<1000;bi++){ // search all possible problem sizes to find acceptable boxes_in_i
      int64_t total_boxes = bi*bi*bi;
      if(total_boxes<=target_boxes){
        // the grid is only acceptable if repeated halving of its dimension
        // bottoms out at (or below) MAX_COARSE_DIM
        int64_t coarse_grid_dim = box_dim*bi;
        while( (coarse_grid_dim%2) == 0){coarse_grid_dim=coarse_grid_dim/2;}
        if(coarse_grid_dim<=MAX_COARSE_DIM){
          boxes_in_i = bi;
        }
      }
    }
    if(boxes_in_i<1){
      if(my_rank==0){fprintf(stderr,"failed to find an acceptable problem size\n");}
      #ifdef USE_MPI
      MPI_Finalize();
      #endif
      exit(0);
    }
  } // argc==3
  #if 0
  else if(argc==2){ // interpret argv[1] as target_memory_per_rank
    char *ptr = argv[1];
    char *tmp;
    target_memory_per_rank = strtol(ptr,&ptr,10);
    if(target_memory_per_rank<1){
      if(my_rank==0){fprintf(stderr,"unrecognized target_memory_per_rank... '%s'\n",argv[1]);}
      #ifdef USE_MPI
      MPI_Finalize();
      #endif
      exit(0);
    }
    tmp=strstr(ptr,"TB");if(tmp){ptr=tmp+2;target_memory_per_rank *= (uint64_t)(1<<30)*(1<<10);}
    tmp=strstr(ptr,"GB");if(tmp){ptr=tmp+2;target_memory_per_rank *= (uint64_t)(1<<30);}
    tmp=strstr(ptr,"MB");if(tmp){ptr=tmp+2;target_memory_per_rank *= (uint64_t)(1<<20);}
    tmp=strstr(ptr,"tb");if(tmp){ptr=tmp+2;target_memory_per_rank *= (uint64_t)(1<<30)*(1<<10);}
    tmp=strstr(ptr,"gb");if(tmp){ptr=tmp+2;target_memory_per_rank *= (uint64_t)(1<<30);}
    tmp=strstr(ptr,"mb");if(tmp){ptr=tmp+2;target_memory_per_rank *= (uint64_t)(1<<20);}
    if( (ptr) && (*ptr != '\0') ){
      if(my_rank==0){fprintf(stderr,"unrecognized units... '%s'\n",ptr);}
      #ifdef USE_MPI
      MPI_Finalize();
      #endif
      exit(0);
    }
    // FIX, now search for an 'acceptable' box_dim and boxes_in_i constrained by target_memory_per_rank, num_tasks, and MAX_COARSE_DIM
  } // argc==2
  #endif
  else{
    if(my_rank==0){fprintf(stderr,"usage: ./hpgmg-fv [log2_box_dim] [target_boxes_per_rank]\n");}
    //fprintf(stderr," ./hpgmg-fv [target_memory_per_rank[MB,GB,TB]]\n");}
    #ifdef USE_MPI
    MPI_Finalize();
    #endif
    exit(0);
  }
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  if(my_rank==0){
  fprintf(stdout,"\n\n");
  fprintf(stdout,"********************************************************************************\n");
  fprintf(stdout,"*** HPGMG-FV Benchmark ***\n");
  fprintf(stdout,"********************************************************************************\n");
  #ifdef USE_MPI
  if(requested_threading_model == MPI_THREAD_MULTIPLE )fprintf(stdout,"Requested MPI_THREAD_MULTIPLE, ");
  else if(requested_threading_model == MPI_THREAD_SINGLE )fprintf(stdout,"Requested MPI_THREAD_SINGLE, ");
  else if(requested_threading_model == MPI_THREAD_FUNNELED )fprintf(stdout,"Requested MPI_THREAD_FUNNELED, ");
  else if(requested_threading_model == MPI_THREAD_SERIALIZED)fprintf(stdout,"Requested MPI_THREAD_SERIALIZED, ");
  // NOTE(review): MPI_THREAD_MULTIPLE is tested twice; this branch is dead code.
  else if(requested_threading_model == MPI_THREAD_MULTIPLE )fprintf(stdout,"Requested MPI_THREAD_MULTIPLE, ");
  else fprintf(stdout,"Requested Unknown MPI Threading Model (%d), ",requested_threading_model);
  if(actual_threading_model == MPI_THREAD_MULTIPLE )fprintf(stdout,"got MPI_THREAD_MULTIPLE\n");
  else if(actual_threading_model == MPI_THREAD_SINGLE )fprintf(stdout,"got MPI_THREAD_SINGLE\n");
  else if(actual_threading_model == MPI_THREAD_FUNNELED )fprintf(stdout,"got MPI_THREAD_FUNNELED\n");
  else if(actual_threading_model == MPI_THREAD_SERIALIZED)fprintf(stdout,"got MPI_THREAD_SERIALIZED\n");
  // NOTE(review): MPI_THREAD_MULTIPLE is tested twice; this branch is dead code.
  else if(actual_threading_model == MPI_THREAD_MULTIPLE )fprintf(stdout,"got MPI_THREAD_MULTIPLE\n");
  else fprintf(stdout,"got Unknown MPI Threading Model (%d)\n",actual_threading_model);
  #endif
  fprintf(stdout,"%d MPI Tasks of %d threads\n",num_tasks,OMP_Threads);
  fprintf(stdout,"\n\n===== Benchmark setup ==========================================================\n");
  }
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // create the fine level...
  #ifdef USE_PERIODIC_BC
  int bc = BC_PERIODIC;
  int minCoarseDim = 2; // avoid problems with black box calculation of D^{-1} for poisson with periodic BC's on a 1^3 grid
  #else
  int bc = BC_DIRICHLET;
  int minCoarseDim = 1; // assumes you can drop order on the boundaries
  #endif
  level_type level_h;
  int ghosts=stencil_get_radius();
  create_level(&level_h,boxes_in_i,box_dim,ghosts,VECTORS_RESERVED,bc,my_rank,num_tasks);
  #ifdef USE_HELMHOLTZ
  double a=1.0;double b=1.0; // Helmholtz
  if(my_rank==0)fprintf(stdout," Creating Helmholtz (a=%f, b=%f) test problem\n",a,b);
  #else
  double a=0.0;double b=1.0; // Poisson
  if(my_rank==0)fprintf(stdout," Creating Poisson (a=%f, b=%f) test problem\n",a,b);
  #endif
  double h=1.0/( (double)boxes_in_i*(double)box_dim ); // [0,1]^3 problem
  initialize_problem(&level_h,h,a,b); // initialize VECTOR_ALPHA, VECTOR_BETA*, and VECTOR_F
  rebuild_operator(&level_h,NULL,a,b); // calculate Dinv and lambda_max
  if(level_h.boundary_condition.type == BC_PERIODIC){ // remove any constants from the RHS for periodic problems
    double average_value_of_f = mean(&level_h,VECTOR_F);
    if(average_value_of_f!=0.0){
      if(my_rank==0){fprintf(stderr," WARNING... Periodic boundary conditions, but f does not sum to zero... mean(f)=%e\n",average_value_of_f);}
      shift_vector(&level_h,VECTOR_F,VECTOR_F,-average_value_of_f);
    }
  }
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // create the MG hierarchy...
  mg_type MG_h;
  MGBuild(&MG_h,&level_h,a,b,minCoarseDim); // build the Multigrid Hierarchy
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // HPGMG-500 benchmark proper
  // evaluate performance on problem sizes of h, 2h, and 4h
  // (i.e. examine dynamic range for problem sizes N, N/8, and N/64)
  //double dtol=1e-15;double rtol= 0.0; // converged if ||D^{-1}(b-Ax)|| < dtol
  double dtol= 0.0;double rtol=1e-10; // converged if ||b-Ax|| / ||b|| < rtol
  int l;
  #ifndef TEST_ERROR
  double AverageSolveTime[3];
  for(l=0;l<3;l++){
    // level l>0 needs its RHS restricted down from the finer level
    if(l>0)restriction(MG_h.levels[l],VECTOR_F,MG_h.levels[l-1],VECTOR_F,RESTRICT_CELL);
    bench_hpgmg(&MG_h,l,a,b,dtol,rtol);
    AverageSolveTime[l] = (double)MG_h.timers.MGSolve / (double)MG_h.MGSolves_performed;
    if(my_rank==0){fprintf(stdout,"\n\n===== Timing Breakdown =========================================================\n");}
    MGPrintTiming(&MG_h,l);
  }
  if(my_rank==0){
    #ifdef CALIBRATE_TIMER
    double _timeStart=getTime();sleep(1);double _timeEnd=getTime();
    double SecondsPerCycle = (double)1.0/(double)(_timeEnd-_timeStart);
    #else
    double SecondsPerCycle = 1.0;
    #endif
    fprintf(stdout,"\n\n===== Performance Summary ======================================================\n");
    for(l=0;l<3;l++){
      double DOF = (double)MG_h.levels[l]->dim.i*(double)MG_h.levels[l]->dim.j*(double)MG_h.levels[l]->dim.k;
      double seconds = SecondsPerCycle*(double)AverageSolveTime[l];
      double DOFs = DOF / seconds;
      fprintf(stdout," h=%0.15e DOF=%0.15e time=%0.6f DOF/s=%0.3e MPI=%d OMP=%d\n",MG_h.levels[l]->h,DOF,seconds,DOFs,num_tasks,OMP_Threads);
    }
  }
  #endif
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  if(my_rank==0){fprintf(stdout,"\n\n===== Richardson error analysis ================================================\n");}
  // solve A^h u^h = f^h
  // solve A^2h u^2h = f^2h
  // solve A^4h u^4h = f^4h
  // error analysis...
  MGResetTimers(&MG_h);
  for(l=0;l<3;l++){
    if(l>0)restriction(MG_h.levels[l],VECTOR_F,MG_h.levels[l-1],VECTOR_F,RESTRICT_CELL);
    zero_vector(MG_h.levels[l],VECTOR_U);
    #ifdef USE_FCYCLES
    FMGSolve(&MG_h,l,VECTOR_U,VECTOR_F,a,b,dtol,rtol);
    #else
    MGSolve(&MG_h,l,VECTOR_U,VECTOR_F,a,b,dtol,rtol);
    #endif
  }
  richardson_error(&MG_h,0,VECTOR_U);
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  if(my_rank==0){fprintf(stdout,"\n\n===== Deallocating memory ======================================================\n");}
  MGDestroy(&MG_h);
  destroy_level(&level_h);
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  if(my_rank==0){fprintf(stdout,"\n\n===== Done =====================================================================\n");}
  #ifdef USE_MPI
  #ifdef USE_HPM // IBM performance counters for BGQ...
  HPM_Print();
  #endif
  MPI_Finalize();
  #endif
  return(0);
  //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
}
|
colormap.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR M M AAA PPPP %
% C O O L O O R R MM MM A A P P %
% C O O L O O RRRR M M M AAAAA PPPP %
% C O O L O O R R M M A A P %
% CCCC OOO LLLLL OOO R R M M A A P %
% %
% %
% MagickCore Colormap Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We use linked-lists because splay-trees do not currently support duplicate
% key / value pairs (.e.g X11 green compliance and SVG green compliance).
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/client.h"
#include "magick/configure.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/semaphore.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AcquireImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% The format of the AcquireImageColormap method is:
%
% MagickBooleanType AcquireImageColormap(Image *image,const size_t colors)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
*/
/*
  Returns the larger of two size_t values.
*/
static inline size_t MagickMax(const size_t x,
  const size_t y)
{
  return(x < y ? y : x);
}
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
const size_t colors)
{
register ssize_t
i;
/*
Allocate image colormap.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Clamp to at least 2 entries so QuantumRange/(colors-1) below cannot
divide by zero.
*/
image->colors=MagickMax(colors,2);
if (image->colormap == (PixelPacket *) NULL)
image->colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
sizeof(*image->colormap));
else
image->colormap=(PixelPacket *) ResizeQuantumMemory(image->colormap,
image->colors,sizeof(*image->colormap));
if (image->colormap == (PixelPacket *) NULL)
{
/*
Allocation failed: degrade the image to DirectClass with no colormap
(ThrowBinaryException returns MagickFalse from this function).
*/
image->colors=0;
image->storage_class=DirectClass;
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/*
Initialize the colormap to a linear gray ramp from 0 to QuantumRange.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
size_t
pixel;
pixel=(size_t) (i*(QuantumRange/(image->colors-1)));
image->colormap[i].red=(Quantum) pixel;
image->colormap[i].green=(Quantum) pixel;
image->colormap[i].blue=(Quantum) pixel;
image->colormap[i].opacity=OpaqueOpacity;
}
return(SetImageStorageClass(image,PseudoClass));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C y c l e C o l o r m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CycleColormap() displaces an image's colormap by a given number of
% positions. If you cycle the colormap a number of times you can produce
% a psychedelic effect.
%
% The format of the CycleColormapImage method is:
%
% MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o displace: displace the colormap this amount.
%
*/
MagickExport MagickBooleanType CycleColormapImage(Image *image,
const ssize_t displace)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
A DirectClass image has no colormap to cycle; convert it first.
*/
if (image->storage_class == DirectClass)
(void) SetImageType(image,PaletteType);
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
ssize_t
index;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
/*
Displace the index and wrap it into [0, colors); the extra
correction handles the C '%' operator's negative results.
*/
index=(ssize_t) (GetPixelIndex(indexes+x)+displace) %
image->colors;
if (index < 0)
index+=(ssize_t) image->colors;
SetPixelIndex(indexes+x,index);
/*
Keep the pixel's RGBO channels in sync with its new colormap slot.
*/
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S o r t C o l o r m a p B y I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortColormapByIntensity() sorts the colormap of a PseudoClass image by
% decreasing color intensity.
%
% The format of the SortColormapByIntensity method is:
%
% MagickBooleanType SortColormapByIntensity(Image *image)
%
% A description of each parameter follows:
%
% o image: A pointer to an Image structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator: orders two colormap entries by decreasing intensity
  (negative result when color_1 is the brighter entry).
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelPacket
    *color_1 = (const PixelPacket *) x,
    *color_2 = (const PixelPacket *) y;

  return(PixelPacketIntensity(color_2)-(int) PixelPacketIntensity(color_1));
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport MagickBooleanType SortColormapByIntensity(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
y;
unsigned short
*pixels;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
/*
Only PseudoClass images carry a colormap; nothing to sort otherwise.
*/
if (image->storage_class != PseudoClass)
return(MagickTrue);
/*
Allocate memory for pixel indexes.
*/
pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
sizeof(*pixels));
if (pixels == (unsigned short *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Assign index values to colormap entries.  The opacity channel is
temporarily used to remember each entry's original position.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].opacity=(IndexPacket) i;
/*
Sort image colormap by decreasing color popularity.
*/
qsort((void *) image->colormap,(size_t) image->colors,
sizeof(*image->colormap),IntensityCompare);
/*
Update image colormap indexes to sorted colormap order:
pixels[old position] = new position after the sort.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
pixels[(ssize_t) image->colormap[i].opacity]=(unsigned short) i;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
/*
Remap every pixel's index (and RGBO channels) to the sorted order.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
index;
register ssize_t
x;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(IndexPacket) pixels[(ssize_t) GetPixelIndex(indexes+x)];
SetPixelIndex(indexes+x,index);
SetPixelRGBO(q,image->colormap+(ssize_t) index);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (status == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
pixels=(unsigned short *) RelinquishMagickMemory(pixels);
return(status);
}
|
fib_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Computes the n-th Fibonacci number with OpenMP task parallelism.
   Small subproblems are solved by plain recursion, since spawning a
   task would cost more than computing the value. */
int fib(int n)
{
  int left, right;

  if (n < 2)
    return n;
  /* below this cutoff, task-creation overhead dominates the work */
  if (n < 20)
    return fib(n - 1) + fib(n - 2);
  #pragma omp task default(none) shared(left,n)
  left = fib(n - 1);
  #pragma omp task default(none) shared(right,n)
  right = fib(n - 2);
  #pragma omp taskwait
  return left + right;
}
/* Driver: times fib(44) inside a parallel region; a single thread
   spawns the root call and the team executes the generated tasks. */
int main (int argc, char *argv[])
{
  int n = 44;
  int result;
  double t0, t1;

  t0 = omp_get_wtime();
  #pragma omp parallel default(none) shared(result,n)
  {
    #pragma omp single
    result = fib(n);
  }
  t1 = omp_get_wtime();
  printf("res=%d\n", result);
  printf("Took %g s\n", t1 - t0);
  return EXIT_SUCCESS;
}
|
GB_binop__lt_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_bool)
// A.*B function (eWiseMult): GB (_AemultB_08__lt_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_bool)
// A.*B function (eWiseMult): GB (_AemultB_04__lt_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_bool)
// A*D function (colscale): GB (_AxD__lt_bool)
// D*A function (rowscale): GB (_DxB__lt_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_bool)
// C=scalar+B GB (_bind1st__lt_bool)
// C=scalar+B' GB (_bind1st_tran__lt_bool)
// C=A+scalar GB (_bind2nd__lt_bool)
// C=A'+scalar GB (_bind2nd_tran__lt_bool)
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
bool bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_BOOL || GxB_NO_LT_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LT is none of these, so this C += A+B (dense ewise3 accum) kernel is
// compiled out for this operator; the generator emits it as "(none)".
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lt_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B with C, A, and B all dense; the loop body comes from the
// included template, with GB_BINOP expanding to z = (x < y).
#if GB_DISABLE
// this operator/type combination was compiled out (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lt_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// C += B: accumulate sparse B into dense C, using the precomputed
// slicing of B (B_ek_slicing / B_ntasks / B_nthreads).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lt_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
// C += b: accumulate a scalar into a dense matrix.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the return above always fires; harmless
// artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lt_bool)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// C = A*D: column scale A by the diagonal matrix D.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is the value array of the result C, written by the template
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lt_bool)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
// C = D*B: row scale B by the diagonal matrix D.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is the value array of the result C, written by the template
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lt_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, where "+" is z = (x < y).
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lt_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult: C=A.*B (optionally masked) where C is sparse or hypersparse.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lt_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// eWiseMult where A is sparse/hyper and B is bitmap/full.  GB_BINOP_FLIP
// is defined as 0 for this operator, so only the non-flipped branch below
// is compiled in.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lt_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lt_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult: C=A.*B (optionally masked) where the result C is bitmap.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lt_bool)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
// Cx [p] = (x < Bx [p]) for all bnz entries, with the scalar x bound to
// the operator's first argument.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in B's bitmap Bb
if (!GBB (Bb, p)) continue ;
bool bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lt_bool)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
// Cx [p] = (Ax [p] < y) for all anz entries, with the scalar y bound to
// the operator's second argument.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in A's bitmap Ab
if (!GBB (Ab, p)) continue ;
bool aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// per-entry kernel used by GB_unop_transpose.c: Cx [pC] = (x < A(i,j))
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__lt_bool)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (x, A'): transpose A while applying the bound scalar x.
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// per-entry kernel used by GB_unop_transpose.c: Cx [pC] = (A(i,j) < y)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__lt_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (A', y): transpose A while applying the bound scalar y.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
regularisation.h | /* Incremental diffusion regularisation of parametrised transformation
using (globally optimal) belief-propagation on minimum spanning tree.
Fast distance transform uses squared differences.
Similarity cost for each node and label has to be given as input.
*/
/* One min-sum message pass (squared-distance lower envelope) for MST node
 * `ind`.  The len1^3 block data[ind*len1^3 ..] holds the node's cost over
 * all 3-D displacement labels; it is replaced in place by
 *     min_q  cost(q) + ||p - q - offset||^2
 * computed separably along the y, x and z axes.  indout[ind*len1^3 + p]
 * receives the flattened argmin label q for each output label p.
 * offsetx/offsety/offsetz shift the parabola centres by the displacement
 * difference between the node and its MST parent (in label units). */
void messageDT(int ind, float *data, short *indout, int len1, float offsetx, float offsety, float offsetz)
{
    int len2 = len1 * len1;
    int len3 = len1 * len1 * len1;
    float *z = new float[len1 * 2 + 1]; // squared-difference table, re-filled per axis
    float *val;
    float *valout;
    short *indo;
    float *valb;
    float *valb2;
    float *buffer = new float[len3];
    float *buffer2 = new float[len3];
    int *indb;
    int *indb2;
    int *bufferi = new int[len3];
    int *bufferi2 = new int[len3];
    // pass 1: minimise along y (unit stride), data -> buffer / bufferi
    for (int i = 0; i < len1 * 2 + 1; i++)
    {
        z[i] = (i - len1 + offsety) * (i - len1 + offsety);
    }
    for (int k1 = 0; k1 < len1; k1++)
    {
        for (int j1 = 0; j1 < len1; j1++)
        {
            val = data + ind * len3 + (j1 * len1 + k1 * len1 * len1);
            valb2 = buffer + (j1 * len1 + k1 * len1 * len1);
            indb = bufferi + (j1 * len1 + k1 * len1 * len1);
            int num = (j1 * len1 + k1 * len1 * len1);
            for (int i = 0; i < len1; i++)
            {
                float minval = val[0] + z[i + len1];
                int minind = 0;
                for (int j = 0; j < len1; j++)
                {
                    bool b = (val[j] + z[i - j + len1] < minval);
                    minval = b ? val[j] + z[i - j + len1] : minval;
                    minind = b ? j : minind;
                }
                valb2[i] = minval;
                indb[i] = minind + num; // argmin stored as a flat index into the block
            }
        }
    }
    // pass 2: minimise along x (stride len1), buffer -> buffer2 / bufferi2
    for (int i = 0; i < len1 * 2; i++)
    {
        z[i] = (i - len1 + offsetx) * (i - len1 + offsetx);
    }
    for (int k1 = 0; k1 < len1; k1++)
    {
        for (int i1 = 0; i1 < len1; i1++)
        {
            valb = buffer + (i1 + k1 * len1 * len1);
            valb2 = buffer2 + (i1 + k1 * len1 * len1);
            indb = bufferi + (i1 + k1 * len1 * len1);
            indb2 = bufferi2 + (i1 + k1 * len1 * len1);
            for (int i = 0; i < len1; i++)
            {
                float minval = valb[0] + z[i + len1];
                int minind = 0;
                for (int j = 0; j < len1; j++)
                {
                    bool b = (valb[j * len1] + z[i - j + len1] < minval);
                    minval = b ? valb[j * len1] + z[i - j + len1] : minval;
                    minind = b ? j : minind;
                }
                valb2[i * len1] = minval;
                indb2[i * len1] = indb[minind * len1]; // chain argmin through pass 1
            }
        }
    }
    // pass 3: minimise along z (stride len1^2), buffer2 -> data / indout
    for (int i = 0; i < len1 * 2; i++)
    {
        z[i] = (i - len1 + offsetz) * (i - len1 + offsetz);
    }
    for (int j1 = 0; j1 < len1; j1++)
    {
        for (int i1 = 0; i1 < len1; i1++)
        {
            valb = buffer2 + (i1 + j1 * len1);
            valout = data + ind * len3 + (i1 + j1 * len1);
            indb = bufferi2 + (i1 + j1 * len1);
            indo = indout + ind * len3 + (i1 + j1 * len1);
            for (int i = 0; i < len1; i++)
            {
                float minval = valb[0] + z[i + len1];
                int minind = 0;
                for (int j = 0; j < len1; j++)
                {
                    bool b = (valb[j * len2] + z[i - j + len1] < minval);
                    minval = b ? valb[j * len2] + z[i - j + len1] : minval;
                    minind = b ? j : minind;
                }
                valout[i * len2] = minval;
                indo[i * len2] = indb[minind * len2];
            }
        }
    }
    // BUG FIX: these arrays come from new[]; releasing them with plain
    // `delete` is undefined behaviour, so use delete[].
    delete[] z;
    delete[] buffer;
    delete[] buffer2;
    delete[] bufferi;
    delete[] bufferi2;
}
/* Incremental diffusion regularisation of the displacement field using
 * (globally optimal) min-sum belief propagation on a minimum spanning tree.
 *   costall  : per-node similarity cost for every displacement label
 *              (input); overwritten with aggregated messages.
 *   u0/v0/w0 : previous displacement field on the coarse grid.
 *   u1/v1/w1 : output displacement field (previous field + selected label).
 *   hw,quant : half-width and quantisation of the displacement search window.
 *   step1    : grid sampling step relative to image_m/image_n/image_o.
 *   ordered  : nodes in root-first MST order; parents[i] is i's MST parent.
 *   edgemst  : per-node edge weight used to scale the data cost.
 * Fixes over the previous revision: new[] arrays are now released with
 * delete[] (plain delete was undefined behaviour), the leaked buffers
 * (numlev, startlev, xs/ys/zs) are freed, and unused scratch allocations
 * (vals, inds, processed, ...) were removed. */
void regularisationCL(float *costall, float *u0, float *v0, float *w0, float *u1, float *v1, float *w1, int hw, int step1, float quant, int *ordered, int *parents, float *edgemst)
{
    int m2 = image_m;
    int n2 = image_n;
    int o2 = image_o;
    int m = m2 / step1;
    int n = n2 / step1;
    int o = o2 / step1;
    int sz = m * n * o; // number of MST nodes
    int len = hw * 2 + 1;
    int len1 = len;
    int len2 = len * len * len; // label-space size (stride per node in costall)
    int len3 = len * len * len;
    auto time1 = chrono::steady_clock::now();
    short *allinds = new short[sz * len2]; // per-node argmin tables from messageDT
    float *cost1 = new float[len2];
    // compute each node's tree depth so all nodes of one level can be
    // processed in parallel (a level only depends on deeper levels)
    int *levels = new int[sz];
    for (int i = 0; i < sz; i++)
    {
        levels[i] = 0;
    }
    for (int i = 1; i < sz; i++)
    {
        int ochild = ordered[i];
        int oparent = parents[ordered[i]];
        levels[ochild] = levels[oparent] + 1;
    }
    int maxlev = 1 + *max_element(levels, levels + sz);
    int *numlev = new int[maxlev];
    int *startlev = new int[maxlev];
    for (int i = 0; i < maxlev; i++)
    {
        numlev[i] = 0;
    }
    for (int i = 0; i < sz; i++)
    {
        numlev[levels[i]]++;
    }
    startlev[0] = numlev[0];
    for (int i = 1; i < maxlev; i++)
    { // cumulative sum
        startlev[i] = startlev[i - 1] + numlev[i];
    }
    delete[] levels;
    for (int i = 0; i < len2; i++)
    {
        cost1[i] = 0;
    }
    float timeCopy = 0;
    float timeMessage = 0;
    // bottom-up sweep: push messages from the leaves towards the root
    for (int lev = maxlev - 1; lev > 0; lev--)
    {
        int start = startlev[lev - 1];
        int length = numlev[lev];
        time1 = chrono::steady_clock::now();
        // scale the data cost of every node on this level by its edge weight
        for (int i = start; i < start + length; i++)
        {
            int ochild = ordered[i];
            for (int l = 0; l < len2; l++)
            {
                costall[ochild * len2 + l] *= edgemst[ochild];
            }
        }
#pragma omp parallel for
        for (int i = start; i < start + length; i++)
        {
            int ochild = ordered[i];
            int oparent = parents[ordered[i]];
            // offsets: displacement difference to the parent, in label units
            float offsetx = (u0[oparent] - u0[ochild]) / (float)quant;
            float offsety = (v0[oparent] - v0[ochild]) / (float)quant;
            float offsetz = (w0[oparent] - w0[ochild]) / (float)quant;
            messageDT(ochild, costall, allinds, len1, offsetx, offsety, offsetz);
        }
        auto time2 = chrono::steady_clock::now();
        timeMessage += chrono::duration_cast<chrono::duration<float>>(time2 - time1).count();
        time1 = chrono::steady_clock::now();
        // accumulate normalised child messages into the parents; serial,
        // because several children may share the same parent
        int start0 = startlev[lev - 1];
        int length0 = numlev[lev];
        for (int i = start0; i < start0 + length0; i++)
        {
            int ochild = ordered[i];
            int oparent = parents[ordered[i]];
            float minval = *min_element(costall + ochild * len2, costall + ochild * len2 + len3);
            for (int l = 0; l < len2; l++)
            {
                costall[oparent * len2 + l] += (costall[ochild * len2 + l] - minval);
            }
        }
        time2 = chrono::steady_clock::now();
        timeCopy += chrono::duration_cast<chrono::duration<float>>(time2 - time1).count();
    }
    // dense displacement space: label index -> physical displacement
    float *xs = new float[len * len * len];
    float *ys = new float[len * len * len];
    float *zs = new float[len * len * len];
    for (int i = 0; i < len; i++)
    {
        for (int j = 0; j < len; j++)
        {
            for (int k = 0; k < len; k++)
            {
                xs[i + j * len + k * len * len] = (j - hw) * quant;
                ys[i + j * len + k * len * len] = (i - hw) * quant;
                zs[i + j * len + k * len * len] = (k - hw) * quant;
            }
        }
    }
    int *selected = new int[sz];
    // root: pick the globally best label directly from its aggregated cost
    int i = 0;
    int oroot = ordered[i];
    for (int l = 0; l < len2; l++)
    {
        cost1[l] = costall[oroot * len2 + l];
    }
    float value = cost1[0];
    int index = 0;
    for (int l = 0; l < len2; l++)
    {
        if (cost1[l] < value)
        {
            value = cost1[l];
            index = l;
        }
        allinds[oroot * len2 + l] = l; // the root's argmin table is the identity
    }
    selected[oroot] = index;
    u1[oroot] = xs[index] + u0[oroot];
    v1[oroot] = ys[index] + v0[oroot];
    w1[oroot] = zs[index] + w0[oroot];
    // top-down sweep: each child's label follows from its parent's selection
    for (int i = 1; i < sz; i++)
    {
        int ochild = ordered[i];
        int oparent = parents[ordered[i]];
        index = allinds[ochild * len2 + selected[oparent]];
        selected[ochild] = index;
        u1[ochild] = xs[index] + u0[ochild];
        v1[ochild] = ys[index] + v0[ochild];
        w1[ochild] = zs[index] + w0[ochild];
    }
    // release all scratch memory with delete[]
    delete[] cost1;
    delete[] allinds;
    delete[] selected;
    delete[] numlev;
    delete[] startlev;
    delete[] xs;
    delete[] ys;
    delete[] zs;
}
|
quicksort.h | /*
* CrissCross
* A multi-purpose cross-platform library.
*
* A product of Uplink Laboratories.
*
* (c) 2006-2021 Steven Noonan.
* Licensed under the New BSD License.
*
*/
#ifndef __included_cc_quicksort_h
#define __included_cc_quicksort_h
#include <crisscross/cc_attr.h>
#include <crisscross/sort.h>
#include <crisscross/system.h>
#include <crisscross/heapsort.h>
namespace CrissCross
{
namespace Data
{
/*! \brief A QuickSort implementation for sorting arrays. */
template <class T>
class QuickSort : public Sorter<T>
{
    private:
        /* Small partitions are finished with heapsort, avoiding
         * quicksort's overhead on tiny ranges. */
        HeapSort<T> hs;

        /* Sorts _array[_left.._right] (both inclusive).  The pivot is the
         * median of three randomly probed elements; the two partitions
         * recurse (in parallel sections when OpenMP is enabled). */
        void InternalSort(T *_array, size_t _left, size_t _right)
        {
            int i, j, p, p1, p2, p3;
            if (_right > _left) {
                if (_right - _left < 10)
                    hs.Sort(&_array[_left], _right - _left + 1);
                else {
                    i = _left - 1;
                    j = _right;
                    /* choose the pivot at random (median of three probes) */
                    p1 = _left + (CrissCross::System::RandomNumber() % (_right - _left + 1));
                    p2 = _left + (CrissCross::System::RandomNumber() % (_right - _left + 1));
                    p3 = _left + (CrissCross::System::RandomNumber() % (_right - _left + 1));
                    if (Compare(_array[p1], _array[p2]) < 0)
                        if (Compare(_array[p2], _array[p3]) < 0) {
                            p = p2;
                        } else {
                            if (Compare(_array[p1], _array[p3]) < 0)
                                p = p3;
                            else
                                p = p1;
                        }
                    else
                    if (Compare(_array[p2], _array[p3]) > 0) {
                        p = p2;
                    } else {
                        if (Compare(_array[p1], _array[p3]) > 0)
                            p = p3;
                        else
                            p = p1;
                    }
                    /* park the pivot at the right end, then partition */
                    Sorter<T>::Swap(_array, p, _right);
                    for (;;) {
                        do i++; while (Compare(_array[i], _array[_right]) < 0);
                        do j--; while (Compare(_array[j], _array[_right]) > 0);
                        if (i >= j) break;
                        Sorter<T>::Swap(_array, i, j);
                    }
                    Sorter<T>::Swap(_array, i, _right);
#ifdef _OPENMP
#pragma omp parallel sections
#endif
                    {
#ifdef _OPENMP
#pragma omp section
#endif
                        InternalSort(_array, i + 1, _right);
#ifdef _OPENMP
#pragma omp section
#endif
                        InternalSort(_array, _left, i - 1);
                    }
                }
            }
        }
    public:
        /*! \brief Sorts an array using the QuickSort method. */
        /*!
         * \param _array The array to sort.
         * \param _size The size of the array to sort.
         * \return 0 on success.
         * \sa HeapSort ShellSort CombSort
         */
        int Sort(T *_array, size_t _size)
        {
            /* BUG FIX: with _size == 0 the loop below started at
             * (size_t)-1 and read far out of bounds; arrays of fewer
             * than two elements are already sorted. */
            if (_size < 2)
                return 0;
            /* one bubble pass moves the smallest element to _array[0],
             * so the partition scan in InternalSort (which starts at
             * _left - 1 == 0) never runs off the low end */
            for (size_t i = _size - 1; i > 0; i--) {
                if (Compare(_array[i - 1], _array[i]) > 0)
                    Sorter<T>::Swap(_array, i - 1, i);
            }
            InternalSort(_array, 1, _size - 1);
            return 0;
        }
};
}
}
#endif
|
convolution_sgemm_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_int8_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
#if __ARM_NEON
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
if (inch >= 8)
{
if (size >= 16)
tmp.create(16 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size, 8u, 8, opt.workspace_allocator);
}
else if (inch >= 4)
{
if (size >= 16)
tmp.create(16 * maxk, inch / 4 + inch % 4, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
}
else
{
if (size >= 16)
tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 1, opt.workspace_allocator);
}
#else // __ARM_FEATURE_DOTPROD
if (inch >= 8)
{
if (size >= 4)
tmp.create(4 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size, 8u, 8, opt.workspace_allocator);
}
else if (inch >= 4)
{
if (size >= 4)
tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
}
else
{
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
}
#endif // __ARM_FEATURE_DOTPROD
#else // __aarch64__
if (inch >= 8)
{
if (size >= 2)
tmp.create(2 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch / 8 + (inch % 8) / 4 + inch % 4, size, 8u, 8, opt.workspace_allocator);
}
else if (inch >= 4)
{
if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
}
else
{
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
}
#endif // __aarch64__
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
int nn_size = size >> 4;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 16;
signed char* tmpptr = tmp.channel(i / 16);
int q = 0;
for (; q + 7 < inch; q += 8)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i;
const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i;
const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i;
const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"ld1 {v0.16b}, [%0] \n"
"ld1 {v1.16b}, [%1] \n"
"ld1 {v2.16b}, [%2] \n"
"ld1 {v3.16b}, [%3] \n"
"ld1 {v4.16b}, [%4] \n"
"ld1 {v5.16b}, [%5] \n"
"ld1 {v6.16b}, [%6] \n"
"ld1 {v7.16b}, [%7] \n"
"st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%8], #64 \n"
"st4 {v4.16b, v5.16b, v6.16b, v7.16b}, [%8], #64 \n"
: "=r"(img0), // %0
"=r"(img1),
"=r"(img2),
"=r"(img3),
"=r"(img4),
"=r"(img5),
"=r"(img6),
"=r"(img7),
"=r"(tmpptr) // %8
: "0"(img0),
"1"(img1),
"2"(img2),
"3"(img3),
"4"(img4),
"5"(img5),
"6"(img6),
"7"(img7),
"8"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
img0 += size;
img1 += size;
img2 += size;
img3 += size;
img4 += size;
img5 += size;
img6 += size;
img7 += size;
}
}
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"ld1 {v0.16b}, [%0] \n"
"ld1 {v1.16b}, [%1] \n"
"ld1 {v2.16b}, [%2] \n"
"ld1 {v3.16b}, [%3] \n"
"st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%4], #64 \n"
: "=r"(img0), // %0
"=r"(img1),
"=r"(img2),
"=r"(img3),
"=r"(tmpptr) // %4
: "0"(img0),
"1"(img1),
"2"(img2),
"3"(img3),
"4"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.16b}, [%0] \n"
"st1 {v0.16b}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
img0 += size;
}
}
}
remain_size_start += nn_size << 4;
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
int q = 0;
for (; q + 7 < inch; q += 8)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i;
const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i;
const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i;
const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"ld1 {v0.8b}, [%0] \n"
"ld1 {v1.8b}, [%1] \n"
"ld1 {v2.8b}, [%2] \n"
"ld1 {v3.8b}, [%3] \n"
"ld1 {v4.8b}, [%4] \n"
"ld1 {v5.8b}, [%5] \n"
"ld1 {v6.8b}, [%6] \n"
"ld1 {v7.8b}, [%7] \n"
"st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [%8], #32 \n"
"st4 {v4.8b, v5.8b, v6.8b, v7.8b}, [%8], #32 \n"
: "=r"(img0), // %0
"=r"(img1),
"=r"(img2),
"=r"(img3),
"=r"(img4),
"=r"(img5),
"=r"(img6),
"=r"(img7),
"=r"(tmpptr) // %8
: "0"(img0),
"1"(img1),
"2"(img2),
"3"(img3),
"4"(img4),
"5"(img5),
"6"(img6),
"7"(img7),
"8"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
img0 += size;
img1 += size;
img2 += size;
img3 += size;
img4 += size;
img5 += size;
img6 += size;
img7 += size;
}
}
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"ld1 {v0.8b}, [%0] \n"
"ld1 {v1.8b}, [%1] \n"
"ld1 {v2.8b}, [%2] \n"
"ld1 {v3.8b}, [%3] \n"
"st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [%4], #32 \n"
: "=r"(img0), // %0
"=r"(img1),
"=r"(img2),
"=r"(img3),
"=r"(tmpptr) // %4
: "0"(img0),
"1"(img1),
"2"(img2),
"3"(img3),
"4"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.8b}, [%0] \n"
"st1 {v0.8b}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
img0 += size;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#else // __ARM_FEATURE_DOTPROD
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 2;
#endif // __ARM_FEATURE_DOTPROD
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
#if __ARM_FEATURE_DOTPROD
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4);
#else
signed char* tmpptr = tmp.channel(i / 4);
#endif
int q = 0;
for (; q + 7 < inch; q += 8)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i;
const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i;
const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i;
const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i;
for (int k = 0; k < maxk; k++)
{
#if __ARM_FEATURE_DOTPROD
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
tmpptr[0] = img0[2];
tmpptr[1] = img1[2];
tmpptr[2] = img2[2];
tmpptr[3] = img3[2];
tmpptr[4] = img0[3];
tmpptr[5] = img1[3];
tmpptr[6] = img2[3];
tmpptr[7] = img3[3];
tmpptr += 8;
tmpptr[0] = img4[0];
tmpptr[1] = img5[0];
tmpptr[2] = img6[0];
tmpptr[3] = img7[0];
tmpptr[4] = img4[1];
tmpptr[5] = img5[1];
tmpptr[6] = img6[1];
tmpptr[7] = img7[1];
tmpptr += 8;
tmpptr[0] = img4[2];
tmpptr[1] = img5[2];
tmpptr[2] = img6[2];
tmpptr[3] = img7[2];
tmpptr[4] = img4[3];
tmpptr[5] = img5[3];
tmpptr[6] = img6[3];
tmpptr[7] = img7[3];
tmpptr += 8;
#else
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img4[0];
tmpptr[5] = img5[0];
tmpptr[6] = img6[0];
tmpptr[7] = img7[0];
tmpptr += 8;
tmpptr[0] = img0[1];
tmpptr[1] = img1[1];
tmpptr[2] = img2[1];
tmpptr[3] = img3[1];
tmpptr[4] = img4[1];
tmpptr[5] = img5[1];
tmpptr[6] = img6[1];
tmpptr[7] = img7[1];
tmpptr += 8;
tmpptr[0] = img0[2];
tmpptr[1] = img1[2];
tmpptr[2] = img2[2];
tmpptr[3] = img3[2];
tmpptr[4] = img4[2];
tmpptr[5] = img5[2];
tmpptr[6] = img6[2];
tmpptr[7] = img7[2];
tmpptr += 8;
tmpptr[0] = img0[3];
tmpptr[1] = img1[3];
tmpptr[2] = img2[3];
tmpptr[3] = img3[3];
tmpptr[4] = img4[3];
tmpptr[5] = img5[3];
tmpptr[6] = img6[3];
tmpptr[7] = img7[3];
tmpptr += 8;
#endif // __ARM_FEATURE_DOTPROD
img0 += size;
img1 += size;
img2 += size;
img3 += size;
img4 += size;
img5 += size;
img6 += size;
img7 += size;
}
}
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
tmpptr[0] = img0[2];
tmpptr[1] = img1[2];
tmpptr[2] = img2[2];
tmpptr[3] = img3[2];
tmpptr[4] = img0[3];
tmpptr[5] = img1[3];
tmpptr[6] = img2[3];
tmpptr[7] = img3[3];
tmpptr += 8;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += size;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2);
#else
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#endif
#else
signed char* tmpptr = tmp.channel(i / 2);
#endif
int q = 0;
for (; q + 7 < inch; q += 8)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i;
const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i;
const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i;
const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i;
for (int k = 0; k < maxk; k++)
{
#if __ARM_FEATURE_DOTPROD
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
tmpptr[0] = img4[0];
tmpptr[1] = img5[0];
tmpptr[2] = img6[0];
tmpptr[3] = img7[0];
tmpptr[4] = img4[1];
tmpptr[5] = img5[1];
tmpptr[6] = img6[1];
tmpptr[7] = img7[1];
tmpptr += 8;
#else
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img4[0];
tmpptr[5] = img5[0];
tmpptr[6] = img6[0];
tmpptr[7] = img7[0];
tmpptr += 8;
tmpptr[0] = img0[1];
tmpptr[1] = img1[1];
tmpptr[2] = img2[1];
tmpptr[3] = img3[1];
tmpptr[4] = img4[1];
tmpptr[5] = img5[1];
tmpptr[6] = img6[1];
tmpptr[7] = img7[1];
tmpptr += 8;
#endif // __ARM_FEATURE_DOTPROD
img0 += size;
img1 += size;
img2 += size;
img3 += size;
img4 += size;
img5 += size;
img6 += size;
img7 += size;
}
}
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#else
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#endif
#else
signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
int q = 0;
for (; q + 7 < inch; q += 8)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i;
const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i;
const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i;
const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img4[0];
tmpptr[5] = img5[0];
tmpptr[6] = img6[0];
tmpptr[7] = img7[0];
tmpptr += 8;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
img4 += size;
img5 += size;
img6 += size;
img7 += size;
}
}
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr += 4;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
#else // __ARM_NEON
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < size; i++)
{
signed char* tmpptr = tmp.channel(i);
int q = 0;
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
#endif // __ARM_NEON
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON
nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p + 1);
int* outptr2 = top_blob.channel(p + 2);
int* outptr3 = top_blob.channel(p + 3);
int i = 0;
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
for (; i + 15 < size; i += 16)
{
const signed char* tmpptr = tmp.channel(i / 16);
const signed char* kptr0 = kernel.channel(p / 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"cmp %w4, #0 \n"
"beq 1f \n"
"ld1 {v8.16b}, [%8], #16 \n" // _w0123_l
"ld1 {v0.16b}, [%7], #16 \n" // _val0123_l
"0: \n"
"ld1 {v1.16b}, [%7], #16 \n" // _val4567_l
"sdot v16.4s, v8.16b, v0.4b[0] \n"
"sdot v17.4s, v8.16b, v0.4b[1] \n"
"sdot v18.4s, v8.16b, v0.4b[2] \n"
"sdot v19.4s, v8.16b, v0.4b[3] \n"
"ld1 {v2.16b}, [%7], #16 \n" // _val891011_l
"sdot v20.4s, v8.16b, v1.4b[0] \n"
"sdot v21.4s, v8.16b, v1.4b[1] \n"
"sdot v22.4s, v8.16b, v1.4b[2] \n"
"sdot v23.4s, v8.16b, v1.4b[3] \n"
"ld1 {v3.16b}, [%7], #16 \n" // _val12131415_l
"sdot v24.4s, v8.16b, v2.4b[0] \n"
"sdot v25.4s, v8.16b, v2.4b[1] \n"
"ld1 {v9.16b}, [%8], #16 \n" // _w0123_h
"sdot v26.4s, v8.16b, v2.4b[2] \n"
"sdot v27.4s, v8.16b, v2.4b[3] \n"
"ld1 {v4.16b}, [%7], #16 \n" // _val0123_h
"sdot v28.4s, v8.16b, v3.4b[0] \n"
"sdot v29.4s, v8.16b, v3.4b[1] \n"
"sdot v30.4s, v8.16b, v3.4b[2] \n"
"sdot v31.4s, v8.16b, v3.4b[3] \n"
"ld1 {v5.16b}, [%7], #16 \n" // _val4567_h
"sdot v16.4s, v9.16b, v4.4b[0] \n"
"sdot v17.4s, v9.16b, v4.4b[1] \n"
"sdot v18.4s, v9.16b, v4.4b[2] \n"
"sdot v19.4s, v9.16b, v4.4b[3] \n"
"ld1 {v6.16b}, [%7], #16 \n" // _val891011_h
"sdot v20.4s, v9.16b, v5.4b[0] \n"
"sdot v21.4s, v9.16b, v5.4b[1] \n"
"sdot v22.4s, v9.16b, v5.4b[2] \n"
"sdot v23.4s, v9.16b, v5.4b[3] \n"
"ld1 {v7.16b}, [%7], #16 \n" // _val12131415_h
"sdot v24.4s, v9.16b, v6.4b[0] \n"
"sdot v25.4s, v9.16b, v6.4b[1] \n"
"ld1 {v8.16b}, [%8], #16 \n" // _w0123_l
"sdot v26.4s, v9.16b, v6.4b[2] \n"
"sdot v27.4s, v9.16b, v6.4b[3] \n"
"ld1 {v0.16b}, [%7], #16 \n" // _val0123_l
"sdot v28.4s, v9.16b, v7.4b[0] \n"
"sdot v29.4s, v9.16b, v7.4b[1] \n"
"subs %w4, %w4, #1 \n"
"sdot v30.4s, v9.16b, v7.4b[2] \n"
"sdot v31.4s, v9.16b, v7.4b[3] \n"
"bne 0b \n"
"sub %7, %7, #16 \n"
"sub %8, %8, #16 \n"
"1: \n"
"cmp %w5, #0 \n"
"beq 3f \n"
"2: \n"
"ld1 {v8.16b}, [%8], #16 \n"
"ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%7], #64 \n"
"sdot v16.4s, v8.16b, v0.4b[0] \n"
"sdot v17.4s, v8.16b, v0.4b[1] \n"
"sdot v18.4s, v8.16b, v0.4b[2] \n"
"sdot v19.4s, v8.16b, v0.4b[3] \n"
"sdot v20.4s, v8.16b, v1.4b[0] \n"
"sdot v21.4s, v8.16b, v1.4b[1] \n"
"sdot v22.4s, v8.16b, v1.4b[2] \n"
"sdot v23.4s, v8.16b, v1.4b[3] \n"
"sdot v24.4s, v8.16b, v2.4b[0] \n"
"sdot v25.4s, v8.16b, v2.4b[1] \n"
"sdot v26.4s, v8.16b, v2.4b[2] \n"
"sdot v27.4s, v8.16b, v2.4b[3] \n"
"sdot v28.4s, v8.16b, v3.4b[0] \n"
"sdot v29.4s, v8.16b, v3.4b[1] \n"
"subs %w5, %w5, #1 \n"
"sdot v30.4s, v8.16b, v3.4b[2] \n"
"sdot v31.4s, v8.16b, v3.4b[3] \n"
"bne 2b \n"
"3: \n"
"lsr w4, %w6, #2 \n" // w4 = nn1 >> 2
"cmp w4, #0 \n"
"beq 5f \n"
"4: \n"
"ld1 {v8.8b, v9.8b}, [%8], #16 \n"
"ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%7], #64 \n"
"uzp1 v10.8b, v8.8b, v9.8b \n"
"uzp2 v11.8b, v8.8b, v9.8b \n"
"uzp1 v4.16b, v0.16b, v1.16b \n"
"uzp2 v5.16b, v0.16b, v1.16b \n"
"uzp1 v6.16b, v2.16b, v3.16b \n"
"uzp2 v7.16b, v2.16b, v3.16b \n"
"uzp1 v8.8b, v10.8b, v11.8b \n"
"uzp2 v9.8b, v10.8b, v11.8b \n"
"uzp1 v0.16b, v4.16b, v5.16b \n" // 0 1 4 5
"uzp2 v1.16b, v4.16b, v5.16b \n" // 8 9 c d
"mov v8.d[1], v9.d[0] \n" // _w
"uzp1 v2.16b, v6.16b, v7.16b \n" // 2 3 6 7
"uzp2 v3.16b, v6.16b, v7.16b \n" // a b e f
"sdot v16.4s, v8.16b, v0.4b[0] \n"
"sdot v17.4s, v8.16b, v0.4b[1] \n"
"sdot v18.4s, v8.16b, v2.4b[0] \n"
"sdot v19.4s, v8.16b, v2.4b[1] \n"
"sdot v20.4s, v8.16b, v0.4b[2] \n"
"sdot v21.4s, v8.16b, v0.4b[3] \n"
"sdot v22.4s, v8.16b, v2.4b[2] \n"
"sdot v23.4s, v8.16b, v2.4b[3] \n"
"sdot v24.4s, v8.16b, v1.4b[0] \n"
"sdot v25.4s, v8.16b, v1.4b[1] \n"
"sdot v26.4s, v8.16b, v3.4b[0] \n"
"sdot v27.4s, v8.16b, v3.4b[1] \n"
"sdot v28.4s, v8.16b, v1.4b[2] \n"
"sdot v29.4s, v8.16b, v1.4b[3] \n"
"sdot v30.4s, v8.16b, v3.4b[2] \n"
"sdot v31.4s, v8.16b, v3.4b[3] \n"
"subs w4, w4, #1 \n"
"bne 4b \n"
"5: \n"
"and w4, %w6, #3 \n" // w4 = remain = nn1 & 3
"cmp w4, #0 \n" // w4 > 0
"beq 7f \n"
"6: \n"
"ld1 {v1.8b}, [%8] \n"
"ld1 {v0.16b}, [%7] \n"
"sshll v1.8h, v1.8b, #0 \n"
"sshll v2.8h, v0.8b, #0 \n"
"sshll2 v3.8h, v0.16b, #0 \n"
"smlal v16.4s, v1.4h, v2.h[0] \n"
"smlal v17.4s, v1.4h, v2.h[1] \n"
"smlal v18.4s, v1.4h, v2.h[2] \n"
"smlal v19.4s, v1.4h, v2.h[3] \n"
"smlal v20.4s, v1.4h, v2.h[4] \n"
"smlal v21.4s, v1.4h, v2.h[5] \n"
"smlal v22.4s, v1.4h, v2.h[6] \n"
"smlal v23.4s, v1.4h, v2.h[7] \n"
"smlal v24.4s, v1.4h, v3.h[0] \n"
"smlal v25.4s, v1.4h, v3.h[1] \n"
"smlal v26.4s, v1.4h, v3.h[2] \n"
"smlal v27.4s, v1.4h, v3.h[3] \n"
"smlal v28.4s, v1.4h, v3.h[4] \n"
"smlal v29.4s, v1.4h, v3.h[5] \n"
"smlal v30.4s, v1.4h, v3.h[6] \n"
"smlal v31.4s, v1.4h, v3.h[7] \n"
"add %7, %7, #16 \n"
"add %8, %8, #4 \n"
"subs w4, w4, #1 \n"
"bne 6b \n"
"7: \n"
// transpose 4x16
"trn1 v0.4s, v16.4s, v17.4s \n"
"trn2 v1.4s, v16.4s, v17.4s \n"
"trn1 v2.4s, v18.4s, v19.4s \n"
"trn2 v3.4s, v18.4s, v19.4s \n"
"trn1 v4.4s, v20.4s, v21.4s \n"
"trn2 v5.4s, v20.4s, v21.4s \n"
"trn1 v6.4s, v22.4s, v23.4s \n"
"trn2 v7.4s, v22.4s, v23.4s \n"
"trn1 v8.4s, v24.4s, v25.4s \n"
"trn2 v9.4s, v24.4s, v25.4s \n"
"trn1 v10.4s, v26.4s, v27.4s \n"
"trn2 v11.4s, v26.4s, v27.4s \n"
"trn1 v12.4s, v28.4s, v29.4s \n"
"trn2 v13.4s, v28.4s, v29.4s \n"
"trn1 v14.4s, v30.4s, v31.4s \n"
"trn2 v15.4s, v30.4s, v31.4s \n"
"trn1 v16.2d, v0.2d, v2.2d \n"
"trn2 v24.2d, v0.2d, v2.2d \n"
"trn1 v20.2d, v1.2d, v3.2d \n"
"trn2 v28.2d, v1.2d, v3.2d \n"
"trn1 v17.2d, v4.2d, v6.2d \n"
"trn2 v25.2d, v4.2d, v6.2d \n"
"trn1 v21.2d, v5.2d, v7.2d \n"
"trn2 v29.2d, v5.2d, v7.2d \n"
"trn1 v18.2d, v8.2d, v10.2d \n"
"trn2 v26.2d, v8.2d, v10.2d \n"
"trn1 v22.2d, v9.2d, v11.2d \n"
"trn2 v30.2d, v9.2d, v11.2d \n"
"trn1 v19.2d, v12.2d, v14.2d \n"
"trn2 v27.2d, v12.2d, v14.2d \n"
"trn1 v23.2d, v13.2d, v15.2d \n"
"trn2 v31.2d, v13.2d, v15.2d \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%3], #64 \n"
: "=r"(outptr0),
"=r"(outptr1),
"=r"(outptr2),
"=r"(outptr3),
"=r"(nn),
"=r"(nn4),
"=r"(nn1),
"=r"(tmpptr),
"=r"(kptr0)
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(nn),
"5"(nn4),
"6"(nn1),
"7"(tmpptr),
"8"(kptr0)
: "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
const signed char* kptr0 = kernel.channel(p / 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val4567_l = vld1q_s8(tmpptr + 16);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3);
_sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0);
_sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1);
_sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2);
_sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 32);
int8x16_t _val4567_h = vld1q_s8(tmpptr + 48);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3);
_sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0);
_sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1);
_sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2);
_sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3);
tmpptr += 64;
kptr0 += 32;
}
for (int j = 0; j < nn4; j++)
{
int8x16_t _val0123 = vld1q_s8(tmpptr);
int8x16_t _val4567 = vld1q_s8(tmpptr + 16);
int8x16_t _w0 = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0, _val0123, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0, _val0123, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0, _val0123, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0, _val0123, 3);
_sum4 = vdotq_laneq_s32(_sum4, _w0, _val4567, 0);
_sum5 = vdotq_laneq_s32(_sum5, _w0, _val4567, 1);
_sum6 = vdotq_laneq_s32(_sum6, _w0, _val4567, 2);
_sum7 = vdotq_laneq_s32(_sum7, _w0, _val4567, 3);
tmpptr += 32;
kptr0 += 16;
}
int j = 0;
for (; j + 3 < nn1; j += 4)
{
int8x8x4_t _val4 = vld4_s8(tmpptr);
int8x8x2_t _val0145 = vuzp_s8(_val4.val[0], _val4.val[1]);
int8x8x2_t _val2367 = vuzp_s8(_val4.val[2], _val4.val[3]);
int8x16_t _val0123 = vcombine_s8(_val0145.val[0], _val2367.val[0]);
int8x16_t _val4567 = vcombine_s8(_val0145.val[1], _val2367.val[1]);
int8x16_t _w = vld1q_s8(kptr0);
int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w));
int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]);
int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]);
_sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123, 3);
_sum4 = vdotq_laneq_s32(_sum4, _w0123f, _val4567, 0);
_sum5 = vdotq_laneq_s32(_sum5, _w0123f, _val4567, 1);
_sum6 = vdotq_laneq_s32(_sum6, _w0123f, _val4567, 2);
_sum7 = vdotq_laneq_s32(_sum7, _w0123f, _val4567, 3);
tmpptr += 32;
kptr0 += 16;
}
for (; j < nn1; j++)
{
int16x4_t _val0 = vdup_n_s16(tmpptr[0]);
int16x4_t _val1 = vdup_n_s16(tmpptr[1]);
int16x4_t _val2 = vdup_n_s16(tmpptr[2]);
int16x4_t _val3 = vdup_n_s16(tmpptr[3]);
int16x4_t _val4 = vdup_n_s16(tmpptr[4]);
int16x4_t _val5 = vdup_n_s16(tmpptr[5]);
int16x4_t _val6 = vdup_n_s16(tmpptr[6]);
int16x4_t _val7 = vdup_n_s16(tmpptr[7]);
int16x4_t _w0123;
_w0123 = vset_lane_s16(kptr0[0], _w0123, 0);
_w0123 = vset_lane_s16(kptr0[1], _w0123, 1);
_w0123 = vset_lane_s16(kptr0[2], _w0123, 2);
_w0123 = vset_lane_s16(kptr0[3], _w0123, 3);
_sum0 = vmlal_s16(_sum0, _val0, _w0123);
_sum1 = vmlal_s16(_sum1, _val1, _w0123);
_sum2 = vmlal_s16(_sum2, _val2, _w0123);
_sum3 = vmlal_s16(_sum3, _val3, _w0123);
_sum4 = vmlal_s16(_sum4, _val4, _w0123);
_sum5 = vmlal_s16(_sum5, _val5, _w0123);
_sum6 = vmlal_s16(_sum6, _val6, _w0123);
_sum7 = vmlal_s16(_sum7, _val7, _w0123);
tmpptr += 8;
kptr0 += 4;
}
// transpose 4x8
int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1);
int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3);
int32x4x2_t _s45 = vtrnq_s32(_sum4, _sum5);
int32x4x2_t _s67 = vtrnq_s32(_sum6, _sum7);
_sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0]));
_sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1]));
_sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0]));
_sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1]));
_sum4 = vcombine_s32(vget_low_s32(_s45.val[0]), vget_low_s32(_s67.val[0]));
_sum5 = vcombine_s32(vget_low_s32(_s45.val[1]), vget_low_s32(_s67.val[1]));
_sum6 = vcombine_s32(vget_high_s32(_s45.val[0]), vget_high_s32(_s67.val[0]));
_sum7 = vcombine_s32(vget_high_s32(_s45.val[1]), vget_high_s32(_s67.val[1]));
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr1, _sum1);
vst1q_s32(outptr2, _sum2);
vst1q_s32(outptr3, _sum3);
vst1q_s32(outptr0 + 4, _sum4);
vst1q_s32(outptr1 + 4, _sum5);
vst1q_s32(outptr2 + 4, _sum6);
vst1q_s32(outptr3 + 4, _sum7);
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
}
#endif
// Output tile: 4 spatial positions at a time, 4 output channels (outptr0..3).
// Accumulates inch*maxk int8 products per output into int32 sums, reading the
// repacked input from tmpptr and the repacked kernel from kptr0.
for (; i + 3 < size; i += 4)
{
// tmp was repacked with a tile-width-dependent channel layout, so the channel
// index depends on which unroll widths (16/8/4 with dotprod, 4 without)
// preceded this position.
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4);
#else
const signed char* tmpptr = tmp.channel(i / 4);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
// Input channels are consumed in groups: nn = full groups of 8 channels,
// nn4 = one optional group of 4, nn1 = remaining 1..3 channels; each count
// is multiplied by maxk (kernel spatial positions).
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
#if __ARM_FEATURE_DOTPROD
// _sumN holds the 4 output-channel partial sums for spatial position N.
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
// 8-channel groups: two vdot steps (low 4 channels, high 4 channels) per group.
for (int j = 0; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 16);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3);
tmpptr += 32;
kptr0 += 32;
}
// Optional 4-channel group: one vdot step per group.
for (int j = 0; j < nn4; j++)
{
int8x16_t _val0123 = vld1q_s8(tmpptr);
int8x16_t _w0 = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0, _val0123, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0, _val0123, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0, _val0123, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0, _val0123, 3);
tmpptr += 16;
kptr0 += 16;
}
int j = 0;
// Leftover channels, 4 steps at a time: the data is interleaved per step, so
// de-interleave with vuzp into per-position / per-output-channel groups of 4
// and feed them to vdot like a 4-channel group.
for (; j + 3 < nn1; j += 4)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x8x2_t _val01 = vuzp_s8(vget_low_s8(_val), vget_high_s8(_val));
int8x8x2_t _val0123 = vuzp_s8(_val01.val[0], _val01.val[1]);
int8x16_t _val0123f = vcombine_s8(_val0123.val[0], _val0123.val[1]);
int8x16_t _w = vld1q_s8(kptr0);
int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w));
int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]);
int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]);
_sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123f, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123f, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123f, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123f, 3);
tmpptr += 16;
kptr0 += 16;
}
// Scalar tail: widen to int16 and multiply-accumulate one channel at a time.
for (; j < nn1; j++)
{
int16x4_t _val0 = vdup_n_s16(tmpptr[0]);
int16x4_t _val1 = vdup_n_s16(tmpptr[1]);
int16x4_t _val2 = vdup_n_s16(tmpptr[2]);
int16x4_t _val3 = vdup_n_s16(tmpptr[3]);
// NOTE(review): _w0123 is read-modify-written by vset_lane_s16 before any
// full initialization — formally an indeterminate read; consider
// initializing with vdup_n_s16(0). All 4 lanes are set, so the final value
// is well-defined in practice.
int16x4_t _w0123;
_w0123 = vset_lane_s16(kptr0[0], _w0123, 0);
_w0123 = vset_lane_s16(kptr0[1], _w0123, 1);
_w0123 = vset_lane_s16(kptr0[2], _w0123, 2);
_w0123 = vset_lane_s16(kptr0[3], _w0123, 3);
_sum0 = vmlal_s16(_sum0, _val0, _w0123);
_sum1 = vmlal_s16(_sum1, _val1, _w0123);
_sum2 = vmlal_s16(_sum2, _val2, _w0123);
_sum3 = vmlal_s16(_sum3, _val3, _w0123);
tmpptr += 4;
kptr0 += 4;
}
// transpose 4x4
// Sums are laid out [position][outch]; stores need [outch][position], so
// transpose before writing one vector per output-channel row.
int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1);
int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3);
_sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0]));
_sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1]));
_sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0]));
_sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1]));
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr1, _sum1);
vst1q_s32(outptr2, _sum2);
vst1q_s32(outptr3, _sum3);
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#else // __ARM_FEATURE_DOTPROD
// Hand-scheduled aarch64 path without SDOT. Label map:
//   0: nn main loop (2 groups of 8 channels per iteration), smull/smlal int8
//      widening multiplies pairwise-accumulated (sadalp) into v0-v15
//   1: nn remainder (nn & 1)
//   2: addp tree reduces v0-v15 down to the 4 row sums v0-v3
//   3/4/5/6: same structure for the 4-channel group count nn4 (v8-v15,
//      reduced and added into v0-v3)
//   7/8: nn1 in steps of 4 channels (sshll widen + smlal per lane)
//   9/10: nn1 scalar remainder
//   11: 4x4 transpose (trn1/trn2) and store to the four output rows
// Operands: %0-%3 out rows, %4 nn, %5 nn4, %6 nn1, %7 tmpptr, %8 kptr0.
asm volatile(
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"cmp %w4, #0 \n"
"beq 3f \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"prfm pldl1keep, [%7, #128] \n"
"prfm pldl1keep, [%8, #256] \n"
"lsr w4, %w4, #1 \n" // w4 = nn >> 1
"cmp w4, #0 \n"
"beq 1f \n"
"prfm pldl1keep, [%8, #512] \n"
"add x5, %7, #16 \n"
"prfm pldl1keep, [x5, #128] \n"
"ld1 {v16.16b}, [%7] \n" // val L H
"ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%8], #64 \n"
"add %7, %7, #32 \n"
"ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L
"ld1 {v18.16b}, [%7] \n"
"add %7, %7, #32 \n"
"0: \n"
"smull v24.8h, v16.8b, v20.8b \n"
"prfm pldl1keep, [%8, #256] \n"
"smull2 v25.8h, v17.16b, v20.16b \n"
"prfm pldl1keep, [%8, #512] \n"
"smull v26.8h, v16.8b, v21.8b \n"
"subs w4, w4, #1 \n"
"smull2 v27.8h, v17.16b, v21.16b \n"
"ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L
"smlal v24.8h, v18.8b, v22.8b \n"
"smlal2 v25.8h, v19.16b, v22.16b \n"
"smlal v26.8h, v18.8b, v23.8b \n"
"smlal2 v27.8h, v19.16b, v23.16b \n"
"smull2 v29.8h, v16.16b, v20.16b \n"
"sadalp v0.4s, v24.8h \n"
"smull v28.8h, v17.8b, v20.8b \n"
"sadalp v1.4s, v25.8h \n"
"smull2 v31.8h, v16.16b, v21.16b \n"
"ld1 {v16.16b}, [x5] \n" // val L H
"smull v30.8h, v17.8b, v21.8b \n"
"add x5, x5, #32 \n"
"smlal2 v29.8h, v18.16b, v22.16b \n"
"sadalp v2.4s, v26.8h \n"
"smlal v28.8h, v19.8b, v22.8b \n"
"sadalp v3.4s, v27.8h \n"
"smlal2 v31.8h, v18.16b, v23.16b \n"
"ld1 {v18.16b}, [x5] \n"
"smlal v30.8h, v19.8b, v23.8b \n"
"ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L
"smull v24.8h, v16.8b, v20.8b \n"
"add x5, x5, #32 \n"
"smull2 v25.8h, v17.16b, v20.16b \n"
"prfm pldl1keep, [x5, #128] \n"
"smull v26.8h, v16.8b, v21.8b \n"
"prfm pldl1keep, [x5, #384] \n"
"smull2 v27.8h, v17.16b, v21.16b \n"
"ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L
"smlal v24.8h, v18.8b, v22.8b \n"
"sadalp v5.4s, v29.8h \n"
"smlal2 v25.8h, v19.16b, v22.16b \n"
"sadalp v4.4s, v28.8h \n"
"smlal v26.8h, v18.8b, v23.8b \n"
"sadalp v7.4s, v31.8h \n"
"smlal2 v27.8h, v19.16b, v23.16b \n"
"sadalp v6.4s, v30.8h \n"
"smull2 v29.8h, v16.16b, v20.16b \n"
"sadalp v8.4s, v24.8h \n"
"smull v28.8h, v17.8b, v20.8b \n"
"sadalp v9.4s, v25.8h \n"
"smull2 v31.8h, v16.16b, v21.16b \n"
"ld1 {v16.16b}, [%7] \n" // val L H
"smull v30.8h, v17.8b, v21.8b \n"
"add %7, %7, #32 \n"
"smlal2 v29.8h, v18.16b, v22.16b \n"
"sadalp v10.4s, v26.8h \n"
"smlal v28.8h, v19.8b, v22.8b \n"
"sadalp v11.4s, v27.8h \n"
"smlal2 v31.8h, v18.16b, v23.16b \n"
"ld1 {v18.16b}, [%7] \n"
"smlal v30.8h, v19.8b, v23.8b \n"
"add %7, %7, #32 \n"
"ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%8], #64 \n"
"sadalp v13.4s, v29.8h \n"
"prfm pldl1keep, [%7, #128] \n"
"sadalp v12.4s, v28.8h \n"
"prfm pldl1keep, [%7, #384] \n"
"sadalp v15.4s, v31.8h \n"
"ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L
"sadalp v14.4s, v30.8h \n"
"bne 0b \n"
// The software-pipelined loop pre-loaded one extra val/weight group; back
// the pointers up so the remainder path re-reads from the right place.
"sub %7, %7, #64 \n"
"sub %8, %8, #64 \n"
"1: \n"
"and w4, %w4, #1 \n" // w4 = remain = nn & 1
"cmp w4, #0 \n" // w4 > 0
"beq 2f \n"
"ld1 {v16.8b, v17.8b}, [%7], #16 \n"
"ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%8], #32 \n"
"smull v24.8h, v16.8b, v20.8b \n"
"smull v25.8h, v16.8b, v21.8b \n"
"smull v26.8h, v16.8b, v22.8b \n"
"ld1 {v18.8b, v19.8b}, [%7], #16 \n"
"smull v27.8h, v16.8b, v23.8b \n"
"sadalp v0.4s, v24.8h \n"
"smull v28.8h, v17.8b, v20.8b \n"
"sadalp v1.4s, v25.8h \n"
"smull v29.8h, v17.8b, v21.8b \n"
"sadalp v2.4s, v26.8h \n"
"smull v30.8h, v17.8b, v22.8b \n"
"sadalp v3.4s, v27.8h \n"
"smull v31.8h, v17.8b, v23.8b \n"
"sadalp v4.4s, v28.8h \n"
"smull v24.8h, v18.8b, v20.8b \n"
"sadalp v5.4s, v29.8h \n"
"smull v25.8h, v18.8b, v21.8b \n"
"sadalp v6.4s, v30.8h \n"
"smull v26.8h, v18.8b, v22.8b \n"
"sadalp v7.4s, v31.8h \n"
"smull v27.8h, v18.8b, v23.8b \n"
"sadalp v8.4s, v24.8h \n"
"smull v28.8h, v19.8b, v20.8b \n"
"sadalp v9.4s, v25.8h \n"
"smull v29.8h, v19.8b, v21.8b \n"
"sadalp v10.4s, v26.8h \n"
"smull v30.8h, v19.8b, v22.8b \n"
"sadalp v11.4s, v27.8h \n"
"smull v31.8h, v19.8b, v23.8b \n"
"sadalp v12.4s, v28.8h \n"
"sadalp v13.4s, v29.8h \n"
"sadalp v14.4s, v30.8h \n"
"sadalp v15.4s, v31.8h \n"
"2: \n"
// Pairwise-add tree: collapse the 16 partial accumulators to v0..v3,
// one 4-lane sum vector per spatial position.
"addp v0.4s, v0.4s, v1.4s \n"
"addp v2.4s, v2.4s, v3.4s \n"
"addp v4.4s, v4.4s, v5.4s \n"
"addp v6.4s, v6.4s, v7.4s \n"
"addp v8.4s, v8.4s, v9.4s \n"
"addp v10.4s, v10.4s, v11.4s \n"
"addp v12.4s, v12.4s, v13.4s \n"
"addp v14.4s, v14.4s, v15.4s \n"
"addp v0.4s, v0.4s, v2.4s \n"
"addp v1.4s, v4.4s, v6.4s \n"
"addp v2.4s, v8.4s, v10.4s \n"
"addp v3.4s, v12.4s, v14.4s \n"
"3: \n"
"cmp %w5, #0 \n"
"beq 7f \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"lsr w4, %w5, #1 \n" // w4 = nn4 >> 1
"cmp w4, #0 \n"
"beq 5f \n"
"4: \n"
"ld1 {v16.8b, v17.8b}, [%7], #16 \n"
"ld1 {v22.8b, v23.8b}, [%8], #16 \n"
"zip1 v18.2s, v16.2s, v16.2s \n" // _val00
"zip2 v19.2s, v16.2s, v16.2s \n" // _val11
"smull v24.8h, v18.8b, v22.8b \n"
"smull v25.8h, v18.8b, v23.8b \n"
"zip1 v20.2s, v17.2s, v17.2s \n" // _val22
"smull v26.8h, v19.8b, v22.8b \n"
"smull v27.8h, v19.8b, v23.8b \n"
"zip2 v21.2s, v17.2s, v17.2s \n" // _val33
"smull v28.8h, v20.8b, v22.8b \n"
"smull v29.8h, v20.8b, v23.8b \n"
"ld1 {v16.8b, v17.8b}, [%7], #16 \n"
"smull v30.8h, v21.8b, v22.8b \n"
"smull v31.8h, v21.8b, v23.8b \n"
"ld1 {v22.8b, v23.8b}, [%8], #16 \n"
"zip1 v18.2s, v16.2s, v16.2s \n" // _val44
"zip2 v19.2s, v16.2s, v16.2s \n" // _val55
"smlal v24.8h, v18.8b, v22.8b \n"
"smlal v25.8h, v18.8b, v23.8b \n"
"zip1 v20.2s, v17.2s, v17.2s \n" // _val66
"smlal v26.8h, v19.8b, v22.8b \n"
"smlal v27.8h, v19.8b, v23.8b \n"
"zip2 v21.2s, v17.2s, v17.2s \n" // _val77
"sadalp v8.4s, v24.8h \n"
"smlal v28.8h, v20.8b, v22.8b \n"
"sadalp v9.4s, v25.8h \n"
"smlal v29.8h, v20.8b, v23.8b \n"
"sadalp v10.4s, v26.8h \n"
"smlal v30.8h, v21.8b, v22.8b \n"
"sadalp v11.4s, v27.8h \n"
"smlal v31.8h, v21.8b, v23.8b \n"
"sadalp v12.4s, v28.8h \n"
"sadalp v13.4s, v29.8h \n"
"subs w4, w4, #1 \n"
"sadalp v14.4s, v30.8h \n"
"sadalp v15.4s, v31.8h \n"
"bne 4b \n"
"5: \n"
"and w4, %w5, #1 \n" // w4 = remain = nn4 & 1
"cmp w4, #0 \n" // w4 > 0
"beq 6f \n"
"ld1 {v16.8b, v17.8b}, [%7], #16 \n"
"ld1 {v22.8b, v23.8b}, [%8], #16 \n"
"zip1 v18.2s, v16.2s, v16.2s \n" // _val00
"zip2 v19.2s, v16.2s, v16.2s \n" // _val11
"smull v24.8h, v18.8b, v22.8b \n"
"smull v25.8h, v18.8b, v23.8b \n"
"zip1 v20.2s, v17.2s, v17.2s \n" // _val22
"smull v26.8h, v19.8b, v22.8b \n"
"smull v27.8h, v19.8b, v23.8b \n"
"zip2 v21.2s, v17.2s, v17.2s \n" // _val33
"sadalp v8.4s, v24.8h \n"
"smull v28.8h, v20.8b, v22.8b \n"
"sadalp v9.4s, v25.8h \n"
"smull v29.8h, v20.8b, v23.8b \n"
"sadalp v10.4s, v26.8h \n"
"smull v30.8h, v21.8b, v22.8b \n"
"sadalp v11.4s, v27.8h \n"
"smull v31.8h, v21.8b, v23.8b \n"
"sadalp v12.4s, v28.8h \n"
"sadalp v13.4s, v29.8h \n"
"sadalp v14.4s, v30.8h \n"
"sadalp v15.4s, v31.8h \n"
"6: \n"
// Reduce the nn4 accumulators and fold them into the running sums v0-v3.
"addp v8.4s, v8.4s, v9.4s \n"
"addp v10.4s, v10.4s, v11.4s \n"
"addp v12.4s, v12.4s, v13.4s \n"
"addp v14.4s, v14.4s, v15.4s \n"
"add v0.4s, v0.4s, v8.4s \n"
"add v1.4s, v1.4s, v10.4s \n"
"add v2.4s, v2.4s, v12.4s \n"
"add v3.4s, v3.4s, v14.4s \n"
"7: \n"
"lsr w4, %w6, #2 \n" // w4 = nn1 >> 2
"cmp w4, #0 \n"
"beq 9f \n"
"8: \n"
"ld1 {v8.16b}, [%7], #16 \n"
"ld1 {v9.16b}, [%8], #16 \n"
"sshll v4.8h, v8.8b, #0 \n"
"sshll2 v5.8h, v8.16b, #0 \n"
"sshll v6.8h, v9.8b, #0 \n"
"sshll2 v7.8h, v9.16b, #0 \n"
"smlal v0.4s, v6.4h, v4.h[0] \n"
"smlal v1.4s, v6.4h, v4.h[1] \n"
"smlal v2.4s, v6.4h, v4.h[2] \n"
"smlal v3.4s, v6.4h, v4.h[3] \n"
"smlal2 v0.4s, v6.8h, v4.h[4] \n"
"smlal2 v1.4s, v6.8h, v4.h[5] \n"
"smlal2 v2.4s, v6.8h, v4.h[6] \n"
"smlal2 v3.4s, v6.8h, v4.h[7] \n"
"smlal v0.4s, v7.4h, v5.h[0] \n"
"smlal v1.4s, v7.4h, v5.h[1] \n"
"smlal v2.4s, v7.4h, v5.h[2] \n"
"smlal v3.4s, v7.4h, v5.h[3] \n"
"smlal2 v0.4s, v7.8h, v5.h[4] \n"
"smlal2 v1.4s, v7.8h, v5.h[5] \n"
"smlal2 v2.4s, v7.8h, v5.h[6] \n"
"smlal2 v3.4s, v7.8h, v5.h[7] \n"
"subs w4, w4, #1 \n"
"bne 8b \n"
"9: \n"
"and w4, %w6, #3 \n" // w4 = nn1 & 3
"cmp w4, #0 \n" // w4 > 0
"beq 11f 
 \n"
"10: \n"
// NOTE(review): loads 8 bytes from each pointer but only consumes 4 per
// iteration — presumably relies on the repacked buffers being padded;
// TODO confirm against the packing code.
"ld1 {v4.8b}, [%7] \n"
"ld1 {v6.8b}, [%8] \n"
"sshll v4.8h, v4.8b, #0 \n"
"sshll v6.8h, v6.8b, #0 \n"
"smlal v0.4s, v6.4h, v4.h[0] \n"
"smlal v1.4s, v6.4h, v4.h[1] \n"
"smlal v2.4s, v6.4h, v4.h[2] \n"
"smlal v3.4s, v6.4h, v4.h[3] \n"
"add %7, %7, #4 \n"
"add %8, %8, #4 \n"
"subs w4, w4, #1 \n"
"bne 10b \n"
"11: \n"
// transpose 4x4
"trn1 v4.4s, v0.4s, v1.4s \n"
"trn2 v5.4s, v0.4s, v1.4s \n"
"trn1 v6.4s, v2.4s, v3.4s \n"
"trn2 v7.4s, v2.4s, v3.4s \n"
"trn1 v0.2d, v4.2d, v6.2d \n"
"trn2 v2.2d, v4.2d, v6.2d \n"
"trn1 v1.2d, v5.2d, v7.2d \n"
"trn2 v3.2d, v5.2d, v7.2d \n"
"st1 {v0.4s}, [%0], #16 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%2], #16 \n"
"st1 {v3.4s}, [%3], #16 \n"
: "=r"(outptr0),
"=r"(outptr1),
"=r"(outptr2),
"=r"(outptr3),
"=r"(nn),
"=r"(nn4),
"=r"(nn1),
"=r"(tmpptr),
"=r"(kptr0)
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(nn),
"5"(nn4),
"6"(nn1),
"7"(tmpptr),
"8"(kptr0)
: "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#endif // __ARM_FEATURE_DOTPROD
}
#endif // __aarch64__
// Output tile: 2 spatial positions at a time, 4 output channels.
// Same nn/nn4/nn1 channel-group decomposition as the 4-wide tile above,
// with a half-width input slice per step.
for (; i + 1 < size; i += 2)
{
// Channel index into the repacked tmp depends on which unroll widths
// preceded this position (16/8/4/2 with dotprod, 4/2 on plain aarch64,
// 2 on armv7).
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
// nn = full groups of 8 input channels, nn4 = optional group of 4,
// nn1 = remaining 1..3 channels; each times maxk kernel positions.
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
#if __aarch64__
// _sum00 / _sum10: 4 output-channel sums for spatial position 0 / 1.
int32x4_t _sum00 = vdupq_n_s32(0);
int32x4_t _sum10 = vdupq_n_s32(0);
#if __ARM_FEATURE_DOTPROD
// 8-channel groups: low/high halves of the 16-byte input slice feed two
// vdot steps against the two 16-byte weight groups.
for (int j = 0; j < nn; j++)
{
int8x16_t _val01_l_h = vld1q_s8(tmpptr);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum00 = vdotq_laneq_s32(_sum00, _w0123_l, _val01_l_h, 0);
_sum10 = vdotq_laneq_s32(_sum10, _w0123_l, _val01_l_h, 1);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum00 = vdotq_laneq_s32(_sum00, _w0123_h, _val01_l_h, 2);
_sum10 = vdotq_laneq_s32(_sum10, _w0123_h, _val01_l_h, 3);
tmpptr += 16;
kptr0 += 32;
}
if (nn4 > 0)
{
int j = 0;
// Two 4-channel groups per iteration while possible.
for (; j + 1 < nn4; j += 2)
{
int8x16_t _val0123 = vld1q_s8(tmpptr);
int8x16_t _w0 = vld1q_s8(kptr0);
_sum00 = vdotq_laneq_s32(_sum00, _w0, _val0123, 0);
_sum10 = vdotq_laneq_s32(_sum10, _w0, _val0123, 1);
int8x16_t _w1 = vld1q_s8(kptr0 + 16);
_sum00 = vdotq_laneq_s32(_sum00, _w1, _val0123, 2);
_sum10 = vdotq_laneq_s32(_sum10, _w1, _val0123, 3);
tmpptr += 16;
kptr0 += 32;
}
for (; j < nn4; j++)
{
int8x8_t _val01 = vld1_s8(tmpptr);
int8x16_t _w0 = vld1q_s8(kptr0);
_sum00 = vdotq_lane_s32(_sum00, _w0, _val01, 0);
_sum10 = vdotq_lane_s32(_sum10, _w0, _val01, 1);
tmpptr += 8;
kptr0 += 16;
}
}
#else // __ARM_FEATURE_DOTPROD
// No-SDOT path: int8 widening multiplies (vmull/vmlal) pairwise-accumulated
// into 8 partial int32 vectors, reduced by vpaddq at the end.
if (nn > 0)
{
int32x4_t _sum01 = vdupq_n_s32(0);
int32x4_t _sum02 = vdupq_n_s32(0);
int32x4_t _sum03 = vdupq_n_s32(0);
int32x4_t _sum11 = vdupq_n_s32(0);
int32x4_t _sum12 = vdupq_n_s32(0);
int32x4_t _sum13 = vdupq_n_s32(0);
int j = 0;
// Two 8-channel groups per iteration.
for (; j + 1 < nn; j += 2)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01));
int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01));
int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23));
int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23));
int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01));
int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01));
int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23));
int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23));
int8x16_t _w45 = vld1q_s8(kptr0 + 32);
int8x16_t _w67 = vld1q_s8(kptr0 + 48);
_wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45));
_wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45));
_wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67));
_wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67));
_wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45));
_wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45));
_wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), vget_low_s8(_w67));
_wv13 = vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67));
_sum00 = vpadalq_s16(_sum00, _wv00);
_sum01 = vpadalq_s16(_sum01, _wv01);
_sum02 = vpadalq_s16(_sum02, _wv02);
_sum03 = vpadalq_s16(_sum03, _wv03);
_sum10 = vpadalq_s16(_sum10, _wv10);
_sum11 = vpadalq_s16(_sum11, _wv11);
_sum12 = vpadalq_s16(_sum12, _wv12);
_sum13 = vpadalq_s16(_sum13, _wv13);
tmpptr += 32;
kptr0 += 64;
}
for (; j < nn; j++)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01));
int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01));
int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23));
int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23));
int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01));
int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01));
int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23));
int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23));
_sum00 = vpadalq_s16(_sum00, _wv00);
_sum01 = vpadalq_s16(_sum01, _wv01);
_sum02 = vpadalq_s16(_sum02, _wv02);
_sum03 = vpadalq_s16(_sum03, _wv03);
_sum10 = vpadalq_s16(_sum10, _wv10);
_sum11 = vpadalq_s16(_sum11, _wv11);
_sum12 = vpadalq_s16(_sum12, _wv12);
_sum13 = vpadalq_s16(_sum13, _wv13);
tmpptr += 16;
kptr0 += 32;
}
// Pairwise-add tree: collapse the 8 partial vectors into one 4-lane sum
// per spatial position.
int32x4_t _s001 = vpaddq_s32(_sum00, _sum01);
int32x4_t _s023 = vpaddq_s32(_sum02, _sum03);
int32x4_t _s101 = vpaddq_s32(_sum10, _sum11);
int32x4_t _s123 = vpaddq_s32(_sum12, _sum13);
_sum00 = vpaddq_s32(_s001, _s023);
_sum10 = vpaddq_s32(_s101, _s123);
}
if (nn4 > 0)
{
int32x4_t _sum100 = vdupq_n_s32(0);
int32x4_t _sum101 = vdupq_n_s32(0);
int32x4_t _sum110 = vdupq_n_s32(0);
int32x4_t _sum111 = vdupq_n_s32(0);
int j = 0;
// Duplicate each position's 4 channels across a full 8-byte vector
// (vzip on the 32-bit views) so one vmull covers two weight columns.
for (; j + 1 < nn4; j += 2)
{
int8x16_t _val0123 = vld1q_s8(tmpptr);
int32x4x2_t _val00221133 = vzipq_s32(vreinterpretq_s32_s8(_val0123), vreinterpretq_s32_s8(_val0123));
int8x8_t _val00 = vreinterpret_s8_s32(vget_low_s32(_val00221133.val[0]));
int8x8_t _val11 = vreinterpret_s8_s32(vget_high_s32(_val00221133.val[0]));
int8x8_t _val22 = vreinterpret_s8_s32(vget_low_s32(_val00221133.val[1]));
int8x8_t _val33 = vreinterpret_s8_s32(vget_high_s32(_val00221133.val[1]));
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv00 = vmull_s8(_val00, vget_low_s8(_w01));
int16x8_t _wv01 = vmull_s8(_val00, vget_high_s8(_w01));
int16x8_t _wv10 = vmull_s8(_val11, vget_low_s8(_w01));
int16x8_t _wv11 = vmull_s8(_val11, vget_high_s8(_w01));
_wv00 = vmlal_s8(_wv00, _val22, vget_low_s8(_w23));
_wv01 = vmlal_s8(_wv01, _val22, vget_high_s8(_w23));
_wv10 = vmlal_s8(_wv10, _val33, vget_low_s8(_w23));
_wv11 = vmlal_s8(_wv11, _val33, vget_high_s8(_w23));
_sum100 = vpadalq_s16(_sum100, _wv00);
_sum101 = vpadalq_s16(_sum101, _wv01);
_sum110 = vpadalq_s16(_sum110, _wv10);
_sum111 = vpadalq_s16(_sum111, _wv11);
tmpptr += 16;
kptr0 += 32;
}
for (; j < nn4; j++)
{
int8x8_t _val01 = vld1_s8(tmpptr);
int32x2x2_t _val0011 = vzip_s32(vreinterpret_s32_s8(_val01), vreinterpret_s32_s8(_val01));
int8x8_t _val00 = vreinterpret_s8_s32(_val0011.val[0]);
int8x8_t _val11 = vreinterpret_s8_s32(_val0011.val[1]);
int8x16_t _w01 = vld1q_s8(kptr0);
int16x8_t _wv00 = vmull_s8(_val00, vget_low_s8(_w01));
int16x8_t _wv01 = vmull_s8(_val00, vget_high_s8(_w01));
int16x8_t _wv10 = vmull_s8(_val11, vget_low_s8(_w01));
int16x8_t _wv11 = vmull_s8(_val11, vget_high_s8(_w01));
_sum100 = vpadalq_s16(_sum100, _wv00);
_sum101 = vpadalq_s16(_sum101, _wv01);
_sum110 = vpadalq_s16(_sum110, _wv10);
_sum111 = vpadalq_s16(_sum111, _wv11);
tmpptr += 8;
kptr0 += 16;
}
// Reduce the nn4 accumulators and fold them into the running sums.
int32x4_t _s001 = vpaddq_s32(_sum100, _sum101);
int32x4_t _s101 = vpaddq_s32(_sum110, _sum111);
_sum00 = vaddq_s32(_sum00, _s001);
_sum10 = vaddq_s32(_sum10, _s101);
}
#endif // __ARM_FEATURE_DOTPROD
int j = 0;
// Leftover channels, 4 steps at a time: widen input and weights to int16
// and multiply-accumulate lane-by-lane (lanes 0/2/4/6 -> position 0,
// 1/3/5/7 -> position 1).
for (; j + 3 < nn1; j += 4)
{
int16x8_t _val01234567 = vmovl_s8(vld1_s8(tmpptr));
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w));
int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w));
int16x4_t _w0123 = vget_low_s16(_w01234567);
int16x4_t _w4567 = vget_high_s16(_w01234567);
int16x4_t _w89ab = vget_low_s16(_w89abcdef);
int16x4_t _wcdef = vget_high_s16(_w89abcdef);
_sum00 = vmlal_laneq_s16(_sum00, _w0123, _val01234567, 0);
_sum10 = vmlal_laneq_s16(_sum10, _w0123, _val01234567, 1);
_sum00 = vmlal_laneq_s16(_sum00, _w4567, _val01234567, 2);
_sum10 = vmlal_laneq_s16(_sum10, _w4567, _val01234567, 3);
_sum00 = vmlal_laneq_s16(_sum00, _w89ab, _val01234567, 4);
_sum10 = vmlal_laneq_s16(_sum10, _w89ab, _val01234567, 5);
_sum00 = vmlal_laneq_s16(_sum00, _wcdef, _val01234567, 6);
_sum10 = vmlal_laneq_s16(_sum10, _wcdef, _val01234567, 7);
tmpptr += 8;
kptr0 += 16;
}
// Scalar tail, one channel at a time.
for (; j < nn1; j++)
{
int16x4_t _val0 = vdup_n_s16(tmpptr[0]);
int16x4_t _val1 = vdup_n_s16(tmpptr[1]);
// NOTE(review): _w0123 is lane-set before any full initialization —
// formally an indeterminate read; consider vdup_n_s16(0). All 4 lanes
// are set, so the final value is well-defined in practice.
int16x4_t _w0123;
_w0123 = vset_lane_s16(kptr0[0], _w0123, 0);
_w0123 = vset_lane_s16(kptr0[1], _w0123, 1);
_w0123 = vset_lane_s16(kptr0[2], _w0123, 2);
_w0123 = vset_lane_s16(kptr0[3], _w0123, 3);
_sum00 = vmlal_s16(_sum00, _val0, _w0123);
_sum10 = vmlal_s16(_sum10, _val1, _w0123);
tmpptr += 2;
kptr0 += 4;
}
// Scatter-store: lane k of each sum goes to output-channel row k.
vst1q_lane_s32(outptr0, _sum00, 0);
vst1q_lane_s32(outptr1, _sum00, 1);
vst1q_lane_s32(outptr2, _sum00, 2);
vst1q_lane_s32(outptr3, _sum00, 3);
vst1q_lane_s32(outptr0 + 1, _sum10, 0);
vst1q_lane_s32(outptr1 + 1, _sum10, 1);
vst1q_lane_s32(outptr2 + 1, _sum10, 2);
vst1q_lane_s32(outptr3 + 1, _sum10, 3);
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
outptr3 += 2;
#else // __aarch64__
// Hand-scheduled armv7 NEON path. Label map mirrors the aarch64 asm above:
//   0: nn main loop (2 groups per iteration), vmull/vmlal.s8 + vpadal.s16
//      into q0-q7; 1: nn remainder; 2: vpadd reduction into q0 (position 0)
//      and q1 (position 1); 3/4/5/6: nn4 loop/remainder/reduce-and-add;
//   7/8: nn1 in steps of 4; 9/10: nn1 scalar remainder; 11: lane stores.
// Operands: %0-%3 out rows, %4 nn, %5 nn4, %6 nn1, %7 tmpptr, %8 kptr0.
asm volatile(
"veor q0, q0 \n"
"veor q1, q1 \n"
"veor q2, q2 \n"
"veor q3, q3 \n"
"veor q4, q4 \n"
"veor q5, q5 \n"
"veor q6, q6 \n"
"veor q7, q7 \n"
"cmp %4, #0 \n"
"beq 3f \n"
"pld [%7, #256] \n"
"lsr r4, %4, #1 \n" // r4 = nn = size >> 1
"cmp r4, #0 \n"
"beq 1f \n"
"add r5, %8, #16 \n"
"pld [%8, #128] \n"
"mov r6, #32 \n"
"pld [%8, #384] \n"
"vld1.s8 {d20-d21}, [%8 :128], r6 \n" // _w01
"vld1.s8 {d16-d19}, [%7 :128]! \n" // _val0 _val1
"vld1.s8 {d22-d23}, [%8 :128], r6 \n" // _w45
"0: \n"
"vmull.s8 q12, d16, d20 \n"
"pld [%7, #256] \n"
"vmull.s8 q13, d16, d21 \n"
"pld [%8, #384] \n"
"vmull.s8 q14, d17, d20 \n"
"vmull.s8 q15, d17, d21 \n"
"vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23
"vmlal.s8 q12, d18, d22 \n"
"vmlal.s8 q13, d18, d23 \n"
"subs r4, r4, #1 \n"
"vmlal.s8 q14, d19, d22 \n"
"vmlal.s8 q15, d19, d23 \n"
"vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67
"vpadal.s16 q0, q12 \n"
"vmull.s8 q12, d16, d20 \n"
"vpadal.s16 q1, q13 \n"
"vmull.s8 q13, d16, d21 \n"
"vpadal.s16 q4, q14 \n"
"vmull.s8 q14, d17, d20 \n"
"vpadal.s16 q5, q15 \n"
"vmull.s8 q15, d17, d21 \n"
"vld1.s8 {d16-d17}, [%7 :128]! \n" // _val0
"vmlal.s8 q12, d18, d22 \n"
"vld1.s8 {d20-d21}, [%8 :128], r6 \n" // _w01
"vmlal.s8 q13, d18, d23 \n"
"pld [r5, #128] \n"
"vmlal.s8 q14, d19, d22 \n"
"pld [r5, #384] \n"
"vmlal.s8 q15, d19, d23 \n"
"vld1.s8 {d18-d19}, [%7 :128]! \n" // _val1
"vpadal.s16 q2, q12 \n"
"vld1.s8 {d22-d23}, [%8 :128], r6 \n" // _w45
"vpadal.s16 q3, q13 \n"
"pld [%7, #128] \n"
"vpadal.s16 q6, q14 \n"
"pld [%8, #128] \n"
"vpadal.s16 q7, q15 \n"
"bne 0b \n"
// The pipelined loop pre-loaded one extra val/weight group; rewind so the
// remainder path re-reads from the right place.
"sub %7, %7, #32 \n"
"sub %8, %8, #64 \n"
"1: \n"
"and r4, %4, #1 \n" // r4 = remain = size & 1
"cmp r4, #0 \n" // r4 > 0
"beq 2f \n"
"vld1.s8 {d16-d17}, [%7 :128]! \n" // _val
"vld1.s8 {d20-d21}, [%8 :128]! \n" // _w01
"vmull.s8 q12, d16, d20 \n"
"vld1.s8 {d22-d23}, [%8 :128]! \n" // _w23
"vmull.s8 q13, d16, d21 \n"
"vmull.s8 q14, d17, d20 \n"
"vmull.s8 q15, d17, d21 \n"
"vpadal.s16 q0, q12 \n"
"vmull.s8 q12, d16, d22 \n"
"vpadal.s16 q1, q13 \n"
"vmull.s8 q13, d16, d23 \n"
"vpadal.s16 q4, q14 \n"
"vmull.s8 q14, d17, d22 \n"
"vpadal.s16 q5, q15 \n"
"vmull.s8 q15, d17, d23 \n"
"vpadal.s16 q2, q12 \n"
"vpadal.s16 q3, q13 \n"
"vpadal.s16 q6, q14 \n"
"vpadal.s16 q7, q15 \n"
"2: \n"
// Pairwise-add tree: q0-q7 collapse to q0 (position 0) and q1 (position 1),
// one int32 lane per output channel.
"vpadd.s32 d16, d0, d1 \n"
"vpadd.s32 d17, d2, d3 \n"
"vpadd.s32 d18, d4, d5 \n"
"vpadd.s32 d19, d6, d7 \n"
"vpadd.s32 d20, d8, d9 \n"
"vpadd.s32 d21, d10, d11 \n"
"vpadd.s32 d22, d12, d13 \n"
"vpadd.s32 d23, d14, d15 \n"
"vpadd.s32 d0, d16, d17 \n"
"vpadd.s32 d1, d18, d19 \n"
"vpadd.s32 d2, d20, d21 \n"
"vpadd.s32 d3, d22, d23 \n"
"3: \n"
"cmp %5, #0 \n"
"beq 7f \n"
"veor q2, q2 \n"
"veor q3, q3 \n"
"veor q4, q4 \n"
"veor q5, q5 \n"
"lsr r4, %5, #1 \n" // r4 = nn4 >> 1
"cmp r4, #0 \n"
"beq 5f \n"
"4: \n"
"vld1.s8 {d16-d17}, [%7]! \n" // _val0123
"vld1.s8 {d20-d23}, [%8]! \n" // _w01 _w23
"vmov.s8 q9, q8 \n"
"vtrn.s32 q8, q9 \n" // _val00 _val22 _val11 _val33
"vmull.s8 q12, d16, d20 \n"
"vmull.s8 q13, d16, d21 \n"
"vmull.s8 q14, d18, d20 \n"
"vmull.s8 q15, d18, d21 \n"
"vmlal.s8 q12, d17, d22 \n"
"vmlal.s8 q13, d17, d23 \n"
"vmlal.s8 q14, d19, d22 \n"
"vmlal.s8 q15, d19, d23 \n"
"vpadal.s16 q2, q12 \n"
"vpadal.s16 q3, q13 \n"
"vpadal.s16 q4, q14 \n"
"vpadal.s16 q5, q15 \n"
"subs r4, r4, #1 \n"
"bne 4b \n"
"5: \n"
"and r4, %5, #1 \n" // r4 = nn4 & 1
"cmp r4, #0 \n" // r4 > 0
"beq 6f \n"
"vld1.s8 {d16}, [%7]! \n" // _val01
"vld1.s8 {d18-d19}, [%8]! \n" // _w01
"vmov.s8 d17, d16 \n"
"vtrn.s32 d16, d17 \n" // _val00 _val11
"vmull.s8 q12, d16, d18 \n"
"vmull.s8 q13, d16, d19 \n"
"vmull.s8 q14, d17, d18 \n"
"vmull.s8 q15, d17, d19 \n"
"vpadal.s16 q2, q12 \n"
"vpadal.s16 q3, q13 \n"
"vpadal.s16 q4, q14 \n"
"vpadal.s16 q5, q15 \n"
"6: \n"
// Reduce the nn4 accumulators and fold them into q0/q1.
"vpadd.s32 d16, d4, d5 \n"
"vpadd.s32 d17, d6, d7 \n"
"vpadd.s32 d18, d8, d9 \n"
"vpadd.s32 d19, d10, d11 \n"
"vadd.s32 q0, q0, q8 \n"
"vadd.s32 q1, q1, q9 \n"
"7: \n"
"lsr r4, %6, #2 \n" // r4 = nn1 >> 2
"cmp r4, #0 \n"
"beq 9f \n"
"8: \n"
"vld1.s8 {d4}, [%7]! \n"
"vmovl.s8 q2, d4 \n"
"vld1.s8 {d10-d11}, [%8]! \n"
"vmovl.s8 q3, d10 \n"
"vmovl.s8 q4, d11 \n"
"vmlal.s16 q0, d6, d4[0] \n"
"vmlal.s16 q1, d6, d4[1] \n"
"vmlal.s16 q0, d7, d4[2] \n"
"vmlal.s16 q1, d7, d4[3] \n"
"vmlal.s16 q0, d8, d5[0] \n"
"vmlal.s16 q1, d8, d5[1] \n"
"vmlal.s16 q0, d9, d5[2] \n"
"vmlal.s16 q1, d9, d5[3] \n"
"subs r4, r4, #1 \n"
"bne 8b \n"
"9: \n"
"and r4, %6, #3 \n" // r4 = nn1 & 3
"cmp r4, #0 \n" // w4 > 0
"beq 11f \n"
"10: \n"
// NOTE(review): loads 8 bytes from %8 but only consumes 4 per iteration —
// presumably relies on padded packing; TODO confirm against the packer.
"vld1.s8 {d4[]}, [%7]! \n"
"vld1.s8 {d6[]}, [%7]! \n"
"vmovl.s8 q2, d4 \n"
"vmovl.s8 q3, d6 \n"
"vld1.s8 {d8}, [%8] \n"
"vmovl.s8 q4, d8 \n"
"vmlal.s16 q0, d4, d8 \n"
"vmlal.s16 q1, d6, d8 \n"
"add %8, %8, #4 \n"
"subs r4, r4, #1 \n"
"bne 10b \n"
"11: \n"
// Scatter-store: lane k of q0/q1 goes to output-channel row k, positions
// 0 and 1 respectively.
"vst1.s32 {d0[0]}, [%0]! \n"
"vst1.s32 {d0[1]}, [%1]! \n"
"vst1.s32 {d1[0]}, [%2]! \n"
"vst1.s32 {d1[1]}, [%3]! \n"
"vst1.s32 {d2[0]}, [%0]! \n"
"vst1.s32 {d2[1]}, [%1]! \n"
"vst1.s32 {d3[0]}, [%2]! \n"
"vst1.s32 {d3[1]}, [%3]! \n"
: "=r"(outptr0),
"=r"(outptr1),
"=r"(outptr2),
"=r"(outptr3),
"=r"(nn),
"=r"(nn4),
"=r"(nn1),
"=r"(tmpptr),
"=r"(kptr0)
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(nn),
"5"(nn4),
"6"(nn1),
"7"(tmpptr),
"8"(kptr0)
: "memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i < size; i++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int32x4_t _sum0 = vdupq_n_s32(0);
#if __ARM_FEATURE_DOTPROD
for (int j = 0; j < nn; j++)
{
int8x8_t _val0_l_h = vld1_s8(tmpptr);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1);
tmpptr += 8;
kptr0 += 32;
}
if (nn4 > 0)
{
int j = 0;
for (; j + 1 < nn4; j += 2)
{
int8x8_t _val01 = vld1_s8(tmpptr);
int8x16_t _w0 = vld1q_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _w0, _val01, 0);
int8x16_t _w1 = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_lane_s32(_sum0, _w1, _val01, 1);
tmpptr += 8;
kptr0 += 32;
}
for (; j < nn4; j++)
{
int8x8_t _val_xxx = vld1_s8(tmpptr);
int8x16_t _w0 = vld1q_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _w0, _val_xxx, 0);
tmpptr += 4;
kptr0 += 16;
}
}
#else // __ARM_FEATURE_DOTPROD
if (nn > 0)
{
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01));
int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01));
int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23));
int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23));
int8x16_t _w45 = vld1q_s8(kptr0 + 32);
int8x16_t _w67 = vld1q_s8(kptr0 + 48);
_wv0 = vmlal_s8(_wv0, vget_high_s8(_val), vget_low_s8(_w45));
_wv1 = vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45));
_wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67));
_wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67));
_sum0 = vpadalq_s16(_sum0, _wv0);
_sum1 = vpadalq_s16(_sum1, _wv1);
_sum2 = vpadalq_s16(_sum2, _wv2);
_sum3 = vpadalq_s16(_sum3, _wv3);
tmpptr += 16;
kptr0 += 64;
}
for (; j < nn; j++)
{
int8x8_t _val = vld1_s8(tmpptr);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01));
int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01));
int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23));
int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23));
_sum0 = vpadalq_s16(_sum0, _wv0);
_sum1 = vpadalq_s16(_sum1, _wv1);
_sum2 = vpadalq_s16(_sum2, _wv2);
_sum3 = vpadalq_s16(_sum3, _wv3);
tmpptr += 8;
kptr0 += 32;
}
#if __aarch64__
int32x4_t _s01 = vpaddq_s32(_sum0, _sum1);
int32x4_t _s23 = vpaddq_s32(_sum2, _sum3);
_sum0 = vpaddq_s32(_s01, _s23);
#else
int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0));
int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1));
int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2));
int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3));
_sum0 = vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high));
#endif
}
if (nn4 > 0)
{
int32x4_t _sum10 = vdupq_n_s32(0);
int32x4_t _sum11 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn4; j += 2)
{
int8x8_t _val01 = vld1_s8(tmpptr);
int32x2x2_t _val0011 = vzip_s32(vreinterpret_s32_s8(_val01), vreinterpret_s32_s8(_val01));
int8x8_t _val00 = vreinterpret_s8_s32(_val0011.val[0]);
int8x8_t _val11 = vreinterpret_s8_s32(_val0011.val[1]);
int8x16_t _w0 = vld1q_s8(kptr0);
int8x16_t _w1 = vld1q_s8(kptr0 + 16);
int16x8_t _wv0 = vmull_s8(_val00, vget_low_s8(_w0));
int16x8_t _wv1 = vmull_s8(_val00, vget_high_s8(_w0));
_wv0 = vmlal_s8(_wv0, _val11, vget_low_s8(_w1));
_wv1 = vmlal_s8(_wv1, _val11, vget_high_s8(_w1));
_sum10 = vpadalq_s16(_sum10, _wv0);
_sum11 = vpadalq_s16(_sum11, _wv1);
tmpptr += 8;
kptr0 += 32;
}
for (; j < nn4; j++)
{
int8x8_t _val_xxx = vld1_s8(tmpptr);
int8x8_t _val_val = vreinterpret_s8_s32(vzip_s32(vreinterpret_s32_s8(_val_xxx), vreinterpret_s32_s8(_val_xxx)).val[0]);
int8x16_t _w0 = vld1q_s8(kptr0);
int16x8_t _wv0 = vmull_s8(_val_val, vget_low_s8(_w0));
int16x8_t _wv1 = vmull_s8(_val_val, vget_high_s8(_w0));
_sum10 = vpadalq_s16(_sum10, _wv0);
_sum11 = vpadalq_s16(_sum11, _wv1);
tmpptr += 4;
kptr0 += 16;
}
#if __aarch64__
int32x4_t _s01 = vpaddq_s32(_sum10, _sum11);
#else
int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum10), vget_high_s32(_sum10));
int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum11), vget_high_s32(_sum11));
int32x4_t _s01 = vcombine_s32(_s01_low, _s01_high);
#endif
_sum0 = vaddq_s32(_sum0, _s01);
}
#endif // __ARM_FEATURE_DOTPROD
int32x4_t _sum1 = vdupq_n_s32(0);
int j = 0;
for (; j + 3 < nn1; j += 4)
{
int16x4_t _val0123 = vget_low_s16(vmovl_s8(vld1_s8(tmpptr)));
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w));
int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w));
int16x4_t _w0123 = vget_low_s16(_w01234567);
int16x4_t _w4567 = vget_high_s16(_w01234567);
int16x4_t _w89ab = vget_low_s16(_w89abcdef);
int16x4_t _wcdef = vget_high_s16(_w89abcdef);
_sum0 = vmlal_lane_s16(_sum0, _w0123, _val0123, 0);
_sum1 = vmlal_lane_s16(_sum1, _w4567, _val0123, 1);
_sum0 = vmlal_lane_s16(_sum0, _w89ab, _val0123, 2);
_sum1 = vmlal_lane_s16(_sum1, _wcdef, _val0123, 3);
tmpptr += 4;
kptr0 += 16;
}
for (; j < nn1; j++)
{
int16x4_t _val = vdup_n_s16(tmpptr[0]);
int16x4_t _w0123;
_w0123 = vset_lane_s16(kptr0[0], _w0123, 0);
_w0123 = vset_lane_s16(kptr0[1], _w0123, 1);
_w0123 = vset_lane_s16(kptr0[2], _w0123, 2);
_w0123 = vset_lane_s16(kptr0[3], _w0123, 3);
_sum0 = vmlal_s16(_sum0, _val, _w0123);
tmpptr += 1;
kptr0 += 4;
}
_sum0 = vaddq_s32(_sum0, _sum1);
vst1q_lane_s32(outptr0, _sum0, 0);
vst1q_lane_s32(outptr1, _sum0, 1);
vst1q_lane_s32(outptr2, _sum0, 2);
vst1q_lane_s32(outptr3, _sum0, 3);
outptr0 += 1;
outptr1 += 1;
outptr2 += 1;
outptr3 += 1;
}
}
remain_outch_start += nn_outch << 2;
#endif // __ARM_NEON
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __ARM_NEON
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
for (; i + 15 < size; i += 16)
{
const signed char* tmpptr = tmp.channel(i / 16);
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val4567_l = vld1q_s8(tmpptr + 16);
int8x16_t _val89ab_l = vld1q_s8(tmpptr + 32);
int8x16_t _valcdef_l = vld1q_s8(tmpptr + 48);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 64);
int8x16_t _val4567_h = vld1q_s8(tmpptr + 80);
int8x16_t _val89ab_h = vld1q_s8(tmpptr + 96);
int8x16_t _valcdef_h = vld1q_s8(tmpptr + 112);
int8x8_t _w_lh = vld1_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0);
_sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0);
_sum2 = vdotq_lane_s32(_sum2, _val89ab_l, _w_lh, 0);
_sum3 = vdotq_lane_s32(_sum3, _valcdef_l, _w_lh, 0);
_sum0 = vdotq_lane_s32(_sum0, _val0123_h, _w_lh, 1);
_sum1 = vdotq_lane_s32(_sum1, _val4567_h, _w_lh, 1);
_sum2 = vdotq_lane_s32(_sum2, _val89ab_h, _w_lh, 1);
_sum3 = vdotq_lane_s32(_sum3, _valcdef_h, _w_lh, 1);
tmpptr += 128;
kptr0 += 8;
}
if (nn4 > 0)
{
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
for (int j = 0; j < nn4; j++)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x16_t _val2 = vld1q_s8(tmpptr + 32);
int8x16_t _val3 = vld1q_s8(tmpptr + 48);
int8x8_t _w_0123_xxxx = vld1_s8(kptr0);
_sum4 = vdotq_lane_s32(_sum4, _val0, _w_0123_xxxx, 0);
_sum5 = vdotq_lane_s32(_sum5, _val1, _w_0123_xxxx, 0);
_sum6 = vdotq_lane_s32(_sum6, _val2, _w_0123_xxxx, 0);
_sum7 = vdotq_lane_s32(_sum7, _val3, _w_0123_xxxx, 0);
tmpptr += 64;
kptr0 += 4;
}
_sum0 = vaddq_s32(_sum0, _sum4);
_sum1 = vaddq_s32(_sum1, _sum5);
_sum2 = vaddq_s32(_sum2, _sum6);
_sum3 = vaddq_s32(_sum3, _sum7);
}
int j = 0;
for (; j < nn1; j++)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x8_t _w = vld1_dup_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w);
int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
tmpptr += 16;
kptr0 += 1;
}
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr0 + 4, _sum1);
vst1q_s32(outptr0 + 8, _sum2);
vst1q_s32(outptr0 + 12, _sum3);
outptr0 += 16;
}
for (; i + 7 < size; i += 8)
{
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
if (nn > 0)
{
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val4567_l = vld1q_s8(tmpptr + 16);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 32);
int8x16_t _val4567_h = vld1q_s8(tmpptr + 48);
int8x8_t _w_lh = vld1_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0);
_sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0);
_sum2 = vdotq_lane_s32(_sum2, _val0123_h, _w_lh, 1);
_sum3 = vdotq_lane_s32(_sum3, _val4567_h, _w_lh, 1);
tmpptr += 64;
kptr0 += 8;
}
_sum0 = vaddq_s32(_sum0, _sum2);
_sum1 = vaddq_s32(_sum1, _sum3);
}
if (nn4 > 0)
{
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int j = 0; j < nn4; j++)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x8_t _w_0123_xxxx = vld1_s8(kptr0);
_sum2 = vdotq_lane_s32(_sum2, _val0, _w_0123_xxxx, 0);
_sum3 = vdotq_lane_s32(_sum3, _val1, _w_0123_xxxx, 0);
tmpptr += 32;
kptr0 += 4;
}
_sum0 = vaddq_s32(_sum0, _sum2);
_sum1 = vaddq_s32(_sum1, _sum3);
}
int j = 0;
for (; j < nn1; j++)
{
int8x8_t _val = vld1_s8(tmpptr);
int8x8_t _w = vld1_dup_s8(kptr0);
int16x8_t _s = vmull_s8(_val, _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s));
tmpptr += 8;
kptr0 += 1;
}
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr0 + 4, _sum1);
outptr0 += 8;
}
#endif // __ARM_FEATURE_DOTPROD
for (; i + 3 < size; i += 4)
{
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4);
#else
const signed char* tmpptr = tmp.channel(i / 4);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int32x4_t _sum0 = vdupq_n_s32(0);
if (nn > 0)
{
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum1 = vdupq_n_s32(0);
int j = 0;
for (; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 16);
int8x8_t _w_lh = vld1_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0);
_sum1 = vdotq_lane_s32(_sum1, _val0123_h, _w_lh, 1);
tmpptr += 32;
kptr0 += 8;
}
_sum0 = vaddq_s32(_sum0, _sum1);
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x16_t _val2 = vld1q_s8(tmpptr + 32);
int8x16_t _val3 = vld1q_s8(tmpptr + 48);
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w));
int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w));
int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), vget_low_s8(_w));
int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), vget_low_s8(_w));
_s0 = vmlal_s8(_s0, vget_low_s8(_val2), vget_high_s8(_w));
_s1 = vmlal_s8(_s1, vget_high_s8(_val2), vget_high_s8(_w));
_s2 = vmlal_s8(_s2, vget_low_s8(_val3), vget_high_s8(_w));
_s3 = vmlal_s8(_s3, vget_high_s8(_val3), vget_high_s8(_w));
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
_sum4 = vaddw_s16(_sum4, vget_low_s16(_s2));
_sum5 = vaddw_s16(_sum5, vget_high_s16(_s2));
_sum6 = vaddw_s16(_sum6, vget_low_s16(_s3));
_sum7 = vaddw_s16(_sum7, vget_high_s16(_s3));
tmpptr += 64;
kptr0 += 16;
}
for (; j < nn; j++)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x8_t _w = vld1_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), _w);
int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), _w);
int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), _w);
int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
_sum4 = vaddw_s16(_sum4, vget_low_s16(_s2));
_sum5 = vaddw_s16(_sum5, vget_high_s16(_s2));
_sum6 = vaddw_s16(_sum6, vget_low_s16(_s3));
_sum7 = vaddw_s16(_sum7, vget_high_s16(_s3));
tmpptr += 32;
kptr0 += 8;
}
_sum0 = vaddq_s32(_sum0, _sum1);
_sum2 = vaddq_s32(_sum2, _sum3);
_sum4 = vaddq_s32(_sum4, _sum5);
_sum6 = vaddq_s32(_sum6, _sum7);
int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0));
int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2));
int32x2_t _s4 = vadd_s32(vget_low_s32(_sum4), vget_high_s32(_sum4));
int32x2_t _s6 = vadd_s32(vget_low_s32(_sum6), vget_high_s32(_sum6));
int32x2_t _ss0 = vpadd_s32(_s0, _s2);
int32x2_t _ss1 = vpadd_s32(_s4, _s6);
_sum0 = vcombine_s32(_ss0, _ss1);
#endif // __ARM_FEATURE_DOTPROD
}
int sum0123[4] = {0, 0, 0, 0};
if (nn4 > 0)
{
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum1 = vdupq_n_s32(0);
int j = 0;
for (; j < nn4; j++)
{
int8x16_t _val0123_lh = vld1q_s8(tmpptr);
int8x8_t _w_lh_xx = vld1_s8(kptr0);
_sum1 = vdotq_lane_s32(_sum1, _val0123_lh, _w_lh_xx, 0);
tmpptr += 16;
kptr0 += 4;
}
_sum0 = vaddq_s32(_sum0, _sum1);
#else // __ARM_FEATURE_DOTPROD
int j = 0;
for (; j < nn4; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char val2 = tmpptr[2];
signed char val3 = tmpptr[3];
signed char val4 = tmpptr[4];
signed char val5 = tmpptr[5];
signed char val6 = tmpptr[6];
signed char val7 = tmpptr[7];
signed char val8 = tmpptr[8];
signed char val9 = tmpptr[9];
signed char val10 = tmpptr[10];
signed char val11 = tmpptr[11];
signed char val12 = tmpptr[12];
signed char val13 = tmpptr[13];
signed char val14 = tmpptr[14];
signed char val15 = tmpptr[15];
signed char w0 = kptr0[0];
signed char w1 = kptr0[1];
signed char w2 = kptr0[2];
signed char w3 = kptr0[3];
sum0123[0] += val0 * w0;
sum0123[0] += val1 * w1;
sum0123[0] += val2 * w2;
sum0123[0] += val3 * w3;
sum0123[1] += val4 * w0;
sum0123[1] += val5 * w1;
sum0123[1] += val6 * w2;
sum0123[1] += val7 * w3;
sum0123[2] += val8 * w0;
sum0123[2] += val9 * w1;
sum0123[2] += val10 * w2;
sum0123[2] += val11 * w3;
sum0123[3] += val12 * w0;
sum0123[3] += val13 * w1;
sum0123[3] += val14 * w2;
sum0123[3] += val15 * w3;
tmpptr += 16;
kptr0 += 4;
}
#endif // __ARM_FEATURE_DOTPROD
}
int j = 0;
for (; j < nn1; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char val2 = tmpptr[2];
signed char val3 = tmpptr[3];
signed char w = kptr0[0];
sum0123[0] += val0 * w;
sum0123[1] += val1 * w;
sum0123[2] += val2 * w;
sum0123[3] += val3 * w;
tmpptr += 4;
kptr0 += 1;
}
_sum0 = vaddq_s32(_sum0, vld1q_s32(sum0123));
vst1q_s32(outptr0, _sum0);
outptr0 += 4;
}
#endif // __aarch64__
for (; i + 1 < size; i += 2)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int32x2_t _sum = vdup_n_s32(0);
if (nn > 0)
{
#if __ARM_FEATURE_DOTPROD
int32x2_t _sum0 = vdup_n_s32(0);
int32x2_t _sum1 = vdup_n_s32(0);
int j = 0;
for (; j < nn; j++)
{
int8x16_t _val01_lh = vld1q_s8(tmpptr);
int8x8_t _w_lh = vld1_s8(kptr0);
_sum0 = vdot_lane_s32(_sum0, vget_low_s8(_val01_lh), _w_lh, 0);
_sum1 = vdot_lane_s32(_sum1, vget_high_s8(_val01_lh), _w_lh, 1);
tmpptr += 16;
kptr0 += 8;
}
_sum = vadd_s32(_sum0, _sum1);
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w));
int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w));
_s0 = vmlal_s8(_s0, vget_low_s8(_val1), vget_high_s8(_w));
_s1 = vmlal_s8(_s1, vget_high_s8(_val1), vget_high_s8(_w));
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
tmpptr += 32;
kptr0 += 16;
}
for (; j < nn; j++)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x8_t _w = vld1_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w);
int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
tmpptr += 16;
kptr0 += 8;
}
_sum0 = vaddq_s32(_sum0, _sum1);
_sum2 = vaddq_s32(_sum2, _sum3);
int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0));
int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2));
_sum = vpadd_s32(_s0, _s2);
#endif // __ARM_FEATURE_DOTPROD
}
int sum01[2] = {0, 0};
if (nn4 > 0)
{
int j = 0;
for (; j < nn4; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char val2 = tmpptr[2];
signed char val3 = tmpptr[3];
signed char val4 = tmpptr[4];
signed char val5 = tmpptr[5];
signed char val6 = tmpptr[6];
signed char val7 = tmpptr[7];
signed char w0 = kptr0[0];
signed char w1 = kptr0[1];
signed char w2 = kptr0[2];
signed char w3 = kptr0[3];
sum01[0] += val0 * w0;
sum01[0] += val1 * w1;
sum01[0] += val2 * w2;
sum01[0] += val3 * w3;
sum01[1] += val4 * w0;
sum01[1] += val5 * w1;
sum01[1] += val6 * w2;
sum01[1] += val7 * w3;
tmpptr += 8;
kptr0 += 4;
}
}
int j = 0;
for (; j < nn1; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char w = kptr0[0];
sum01[0] += val0 * w;
sum01[1] += val1 * w;
tmpptr += 2;
kptr0 += 1;
}
_sum = vadd_s32(_sum, vld1_s32(sum01));
vst1_s32(outptr0, _sum);
outptr0 += 2;
}
for (; i < size; i++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = (inch / 8) * maxk;
int nn4 = ((inch % 8) / 4) * maxk;
int nn1 = (inch % 4) * maxk;
int sum = 0;
if (nn > 0)
{
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x2_t _sum1 = vdup_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w = vld1q_s8(kptr0);
_sum0 = vdotq_s32(_sum0, _val, _w);
tmpptr += 16;
kptr0 += 16;
}
for (; j < nn; j++)
{
int8x8_t _val = vld1_s8(tmpptr);
int8x8_t _w = vld1_s8(kptr0);
_sum1 = vdot_s32(_sum1, _val, _w);
tmpptr += 8;
kptr0 += 8;
}
sum = vaddvq_s32(_sum0) + vaddv_s32(_sum1);
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _s8 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w));
_s8 = vmlal_s8(_s8, vget_high_s8(_val), vget_high_s8(_w));
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s8));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s8));
tmpptr += 16;
kptr0 += 16;
}
for (; j < nn; j++)
{
int8x8_t _val = vld1_s8(tmpptr);
int8x8_t _w = vld1_s8(kptr0);
int16x8_t _s8 = vmull_s8(_val, _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s8));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s8));
tmpptr += 8;
kptr0 += 8;
}
int32x4_t _sum = vaddq_s32(_sum0, _sum1);
#if __aarch64__
sum = vaddvq_s32(_sum); // dot
#else
int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum));
_ss = vpadd_s32(_ss, _ss);
sum = vget_lane_s32(_ss, 0);
#endif
#endif // __ARM_FEATURE_DOTPROD
}
if (nn4 > 0)
{
int j = 0;
for (; j < nn4; j++)
{
signed char val0 = tmpptr[0];
signed char val1 = tmpptr[1];
signed char val2 = tmpptr[2];
signed char val3 = tmpptr[3];
signed char w0 = kptr0[0];
signed char w1 = kptr0[1];
signed char w2 = kptr0[2];
signed char w3 = kptr0[3];
sum += val0 * w0;
sum += val1 * w1;
sum += val2 * w2;
sum += val3 * w3;
tmpptr += 4;
kptr0 += 4;
}
}
int j = 0;
for (; j < nn1; j++)
{
signed char val = tmpptr[0];
signed char w = kptr0[0];
sum += val * w;
tmpptr += 1;
kptr0 += 1;
}
outptr0[0] = sum;
outptr0 += 1;
}
#else // __ARM_NEON
for (; i < size; i++)
{
const signed char* tmpptr = tmp.channel(i);
const signed char* kptr0 = kernel.channel(p);
int nn1 = inch * maxk;
int sum = 0;
int j = 0;
for (; j < nn1; j++)
{
signed char val = tmpptr[0];
signed char w = kptr0[0];
sum += val * w;
tmpptr += 1;
kptr0 += 1;
}
outptr0[0] = sum;
outptr0 += 1;
}
#endif // __ARM_NEON
}
}
// Repack the convolution weights into the interleaved layout expected by the
// int8 sgemm kernels (im2col_sgemm_int8_neon).
//
// Parameters:
//   _kernel   - raw weights, flattened; reshaped here as maxk x inch x outch
//   kernel_tm - output: transformed/interleaved weight blob (1 byte per element)
//   inch      - number of input channels
//   outch     - number of output channels
//   kernel_w/kernel_h - spatial kernel size
//
// The exact byte ordering written here is a hard contract with the consuming
// GEMM micro-kernels: do not reorder any of the copy loops below.
static void convolution_im2col_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

#if __ARM_NEON
    // interleave
    // src = maxk-inch-outch
    // dst = 8a-4b-maxk-inch/8a-outch/4b
    // dst = 4a-4b-2-maxk-inch/8a-outch/4b (arm82, i.e. dotprod-capable cores)
    Mat kernel = _kernel.reshape(maxk, inch, outch);

    // Allocate the destination: channels are grouped 4 output channels at a
    // time (remainder handled one at a time), input channels grouped 8 / 4 / 1.
    if (outch >= 4)
    {
        if (inch >= 8)
            kernel_tm.create(32 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, outch / 4 + outch % 4, 1u);
        else if (inch >= 4)
            kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4 + outch % 4, 1u);
        else
            kernel_tm.create(4 * maxk, inch, outch / 4 + outch % 4, 1u);
    }
    else
    {
        if (inch >= 8)
            kernel_tm.create(8 * maxk, inch / 8 + (inch % 8) / 4 + inch % 4, outch, 1u);
        else if (inch >= 4)
            kernel_tm.create(4 * maxk, inch / 4 + inch % 4, outch, 1u);
        else
            kernel_tm.create(1 * maxk, inch, outch, 1u);
    }

    // Blocks of 4 output channels.
    int q = 0;
    for (; q + 3 < outch; q += 4)
    {
        signed char* g00 = kernel_tm.channel(q / 4);

        int p = 0;
        // 8 input channels at a time.
        for (; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
#if __ARM_FEATURE_DOTPROD
                // dotprod layout: 4 outch x inch 0..3, then 4 outch x inch 4..7
                // (matches the two vdotq_lane halves in the compute kernels).
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 4; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
#else
                // non-dotprod layout: 4 outch x all 8 input channels in order.
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
#endif
            }
        }
        // 4 input channels at a time.
        for (; p + 3 < inch; p += 4)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
            }
        }
        // Remaining single input channels.
        for (; p < inch; p++)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 4; i++)
                {
                    const signed char* k00 = kernel.channel(q + i).row<const signed char>(p);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
    // TODO unroll 2
    // Remaining single output channels; note the channel index q / 4 + q % 4
    // places them after the 4-wide groups.
    for (; q < outch; q++)
    {
        signed char* g00 = kernel_tm.channel(q / 4 + q % 4);

        int p = 0;
        for (; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 8; j++)
                {
                    const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
        for (; p + 3 < inch; p += 4)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 4; j++)
                {
                    const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
        for (; p < inch; p++)
        {
            for (int k = 0; k < maxk; k++)
            {
                const signed char* k00 = kernel.channel(q).row<const signed char>(p);
                g00[0] = k00[k];
                g00++;
            }
        }
    }
#else  // __ARM_NEON
    // Scalar fallback: no interleave needed, just reshape.
    kernel_tm = _kernel.reshape(maxk, inch, outch);
#endif // __ARM_NEON
}
// int8 convolution via im2col + sgemm:
// 1) unfold the input into a (size x maxk x inch) matrix,
// 2) hand it to the packed NEON GEMM (im2col_sgemm_int8_neon).
static void convolution_im2col_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    const int bottom_w = bottom_blob.w;
    const int channels = bottom_blob.c;
    const int outw = top_blob.w;
    const int outh = top_blob.h;

    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, channels, 1u, 1, opt.workspace_allocator);
    {
        // elements to skip from the end of one output row to the start of the next
        const int row_gap = bottom_w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < channels; p++)
        {
            const Mat img = bottom_blob.channel(p);
            signed char* dst = bottom_im2col.channel(p);

            for (int ky = 0; ky < kernel_h; ky++)
            {
                for (int kx = 0; kx < kernel_w; kx++)
                {
                    // top-left sample position for this kernel tap
                    const signed char* src = img.row<const signed char>(dilation_h * ky) + dilation_w * kx;

                    for (int oy = 0; oy < outh; oy++)
                    {
                        int ox = 0;
                        // copy 4 output columns per iteration
                        while (ox + 3 < outw)
                        {
                            dst[0] = src[0];
                            dst[1] = src[stride_w];
                            dst[2] = src[stride_w * 2];
                            dst[3] = src[stride_w * 3];
                            src += stride_w * 4;
                            dst += 4;
                            ox += 4;
                        }
                        // then 2 at a time
                        while (ox + 1 < outw)
                        {
                            dst[0] = src[0];
                            dst[1] = src[stride_w];
                            src += stride_w * 2;
                            dst += 2;
                            ox += 2;
                        }
                        // and the tail, one at a time
                        while (ox < outw)
                        {
                            dst[0] = src[0];
                            src += stride_w;
                            dst += 1;
                            ox++;
                        }
                        src += row_gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
|
bt_single.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - BT
This benchmark is an OpenMP C version of the NPB BT code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: R. Van der Wijngaart
T. Harris
M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
//#include "npb-C.h"
/*
NAS Parallel Benchmarks 2.3 OpenMP C Versions
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
typedef int boolean;
typedef struct { double real; double imag; } dcomplex;
#define TRUE 1
#define FALSE 0
#define max(a,b) (((a) > (b)) ? (a) : (b))
#define min(a,b) (((a) < (b)) ? (a) : (b))
#define pow2(a) ((a)*(a))
#define get_real(c) c.real
#define get_imag(c) c.imag
#define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag)
#define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag)
#define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \
c.imag = a.real * b.imag + a.imag * b.real)
#define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b)
extern double randlc(double *, double);
extern void vranlc(int, double *, double, double *);
extern void timer_clear(int);
extern void timer_start(int);
extern void timer_stop(int);
extern double timer_read(int);
extern void c_print_results(char *name, char cclass, int n1, int n2,
int n3, int niter, int nthreads, double t,
double mops, char *optype, int passed_verification,
char *npbversion, char *compiletime, char *cc,
char *clink, char *c_lib, char *c_inc,
char *cflags, char *clinkflags, char *rand);
/* global variables */
//#include "header.h"
/*--------------------------------------------------------------------
c---------------------------------------------------------------------
c
c header.h
c
c---------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c The following include file is generated automatically by the
c "setparams" utility. It defines
c maxcells: the square root of the maximum number of processors
c problem_size: 12, 64, 102, 162 (for class T, A, B, C)
c dt_default: default time step for this problem size if no
c config file
c niter_default: default number of iterations for this problem size
--------------------------------------------------------------------*/
//#include "npbparams.h"
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'W'
#endif
#if CLASS == 'S'
#define PROBLEM_SIZE 12
#define NITER_DEFAULT 60
#define DT_DEFAULT 0.010
#endif
#if CLASS == 'W'
#define PROBLEM_SIZE 24
#define NITER_DEFAULT 200
#define DT_DEFAULT 0.0008
#endif
#if CLASS == 'A'
#define PROBLEM_SIZE 64
#define NITER_DEFAULT 200
#define DT_DEFAULT 0.0008
#endif
#if CLASS == 'B'
#define PROBLEM_SIZE 102
#define NITER_DEFAULT 200
#define DT_DEFAULT 0.0003
#endif
#if CLASS == 'C'
#define PROBLEM_SIZE 162
#define NITER_DEFAULT 200
#define DT_DEFAULT 0.0001
#endif
#define CONVERTDOUBLE FALSE
#define COMPILETIME "27 Oct 2014"
#define NPBVERSION "2.3"
#define CS1 "gcc"
#define CS2 "$(CC)"
#define CS3 "(none)"
#define CS4 "-I../common"
#define CS5 "-fopenmp -O2"
#define CS6 "-lm -fopenmp"
#define CS7 "randdp"
//--------end class definition -----------
#define AA 0
#define BB 1
#define CC 2
#define BLOCK_SIZE 5
/* COMMON block: global */
static int grid_points[3]; /* grid_ponts(1:3) */
/* COMMON block: constants */
static double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3;
static double dx1, dx2, dx3, dx4, dx5;
static double dy1, dy2, dy3, dy4, dy5;
static double dz1, dz2, dz3, dz4, dz5;
static double dssp, dt;
static double ce[5][13]; /* ce(5,13) */
static double dxmax, dymax, dzmax;
static double xxcon1, xxcon2, xxcon3, xxcon4, xxcon5;
static double dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1;
static double yycon1, yycon2, yycon3, yycon4, yycon5;
static double dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1;
static double zzcon1, zzcon2, zzcon3, zzcon4, zzcon5;
static double dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1;
static double dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345;
static double conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp;
static double dttx1, dttx2, dtty1, dtty2, dttz1, dttz2;
static double c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6;
static double c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;
#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE
/*
c to improve cache performance, grid dimensions padded by 1
c for even number sizes only.
*/
/* COMMON block: fields */
static double us[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double vs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double ws[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double qs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double rho_i[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double square[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double forcing[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5+1];
static double u[(IMAX+1)/2*2+1][(JMAX+1)/2*2+1][(KMAX+1)/2*2+1][5];
static double rhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5];
static double lhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][3][5][5];
/* COMMON block: work_1d */
static double cuf[PROBLEM_SIZE];
static double q[PROBLEM_SIZE];
static double ue[PROBLEM_SIZE][5];
static double buf[PROBLEM_SIZE][5];
// NOTE(Liao): marking cuf/q/ue/buf threadprivate may be incorrect here --
// verify that the parallel results still match the serial version.
#pragma omp threadprivate(cuf, q, ue, buf)
/*
c to improve cache performance, grid dimensions (first two for these
c two arrays) padded by 1 for even number sizes only.
*/
/* COMMON block: work_lhs */
static double fjac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* fjac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double njac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5];
/* njac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double tmp1, tmp2, tmp3;
/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void compute_rhs(void);
static void set_constants(void);
static void verify(int no_time_steps, char *cclass, boolean *verified);
static void x_solve(void);
static void x_backsubstitute(void);
static void x_solve_cell(void);
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
static void matmul_sub(double ablock[5][5], double bblock[5][5],
double cblock[5][5]);
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
static void binvrhs(double lhs[5][5], double r[5]);
static void y_solve(void);
static void y_backsubstitute(void);
static void y_solve_cell(void);
static void z_solve(void);
static void z_backsubstitute(void);
static void z_solve_cell(void);
/*--------------------------------------------------------------------
program BT
c-------------------------------------------------------------------*/
/* Driver for the BT benchmark: read the problem configuration, run a warm-up
   time step, time `niter` ADI iterations, verify, and report MFLOPS. */
int main(int argc, char **argv) {
  int niter, step, n3;
  int nthreads = 1;
  double navg, mflops;
  double tmax;
  boolean verified;
  char cclass;
  FILE *fp;

/*--------------------------------------------------------------------
c      Root node reads input file (if it exists) else takes
c      defaults from parameters
c-------------------------------------------------------------------*/
  printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
         " - BT Benchmark\n\n");

  fp = fopen("inputbt.data", "r");
  if (fp != NULL) {
    printf(" Reading from input file inputbt.data");
    /* NOTE(review): fscanf return values are unchecked, and the fgetc skip
       loops never test for EOF -- a truncated or malformed input file leaves
       niter/dt/grid_points unset or spins forever. TODO confirm acceptable
       for benchmark usage. */
    fscanf(fp, "%d", &niter);
    while (fgetc(fp) != '\n');
    fscanf(fp, "%lg", &dt);
    while (fgetc(fp) != '\n');
    fscanf(fp, "%d%d%d",
           &grid_points[0], &grid_points[1], &grid_points[2]);
    fclose(fp);
  } else {
    /* No config file: fall back to the compiled-in class defaults. */
    printf(" No input file inputbt.data. Using compiled defaults\n");
    niter = NITER_DEFAULT;
    dt = DT_DEFAULT;
    grid_points[0] = PROBLEM_SIZE;
    grid_points[1] = PROBLEM_SIZE;
    grid_points[2] = PROBLEM_SIZE;
  }

  printf(" Size: %3dx%3dx%3d\n",
         grid_points[0], grid_points[1], grid_points[2]);
  printf(" Iterations: %3d dt: %10.6f\n", niter, dt);

  /* The static arrays are dimensioned for the compiled class; refuse to run
     a larger configured grid. */
  if (grid_points[0] > IMAX ||
      grid_points[1] > JMAX ||
      grid_points[2] > KMAX) {
    printf(" %dx%dx%d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Problem size too big for compiled array sizes\n");
    exit(1);
  }

  set_constants();

#pragma omp parallel
  {
    initialize();
    lhsinit();
    exact_rhs();

/*--------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
    adi();
    initialize();
  } /* end parallel */

  /* Timed region: niter ADI steps inside a single parallel region. */
  timer_clear(1);
  timer_start(1);

#pragma omp parallel firstprivate(niter) private(step)
  {
    for (step = 1; step <= niter; step++) {
      if (step%20 == 0 || step == 1) {
#pragma omp master
        printf(" Time step %4d\n", step);
      }
      adi();
    }

#if defined(_OPENMP)
    /* Only the master thread records the team size; the join at the end of
       the parallel region makes the write visible below. */
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
  } /* end parallel */

  timer_stop(1);
  tmax = timer_read(1);

  verify(niter, &cclass, &verified);

  /* MFLOPS from the NPB operation-count model; guard against tmax == 0. */
  n3 = grid_points[0]*grid_points[1]*grid_points[2];
  navg = (grid_points[0]+grid_points[1]+grid_points[2])/3.0;
  if ( fabs(tmax-0.0)>1.0e-5 ) {
    mflops = 1.0e-6*(double)niter*
      (3478.8*(double)n3-17655.7*pow2(navg)+28023.7*navg) / tmax;
  } else {
    mflops = 0.0;
  }
  c_print_results("BT", cclass, grid_points[0],
                  grid_points[1], grid_points[2], niter, nthreads,
                  tmax, mflops, " floating point",
                  verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5,
                  CS6, "(none)");
  return 0;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void add(void) {

/*--------------------------------------------------------------------
c addition of update to the vector u: u += rhs at every interior
c grid point (iterations are independent, i-loop shared by threads)
c-------------------------------------------------------------------*/

  int ii, jj, kk, mm;

#pragma omp for private(jj,kk,mm)
  for (ii = 1; ii < grid_points[0]-1; ii++)
    for (jj = 1; jj < grid_points[1]-1; jj++)
      for (kk = 1; kk < grid_points[2]-1; kk++)
	for (mm = 0; mm < 5; mm++)
	  u[ii][jj][kk][mm] += rhs[ii][jj][kk][mm];
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void adi(void) {
  /* One ADI (alternating direction implicit) time step: build the
     right hand side, then apply the three approximately-factored
     block-tridiagonal line solves in x, y and z, and finally add the
     resulting update into u.  Order matters: each stage consumes the
     rhs produced by the previous one. */
  compute_rhs();
  x_solve();
  y_solve();
  z_solve();
  add();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error_norm(double rms[5]) {

/*--------------------------------------------------------------------
c this function computes the norm of the difference between the
c computed solution and the exact solution: for each of the 5 flow
c variables, rms[m] is the root-mean-square of u - u_exact over the
c whole grid, normalized by the interior extent in each direction.
c-------------------------------------------------------------------*/

  int ii, jj, kk, mm, dir;
  double x, y, z, u_ex[5], diff;

  for (mm = 0; mm < 5; mm++) rms[mm] = 0.0;

  for (ii = 0; ii < grid_points[0]; ii++) {
    x = (double)ii * dnxm1;
    for (jj = 0; jj < grid_points[1]; jj++) {
      y = (double)jj * dnym1;
      for (kk = 0; kk < grid_points[2]; kk++) {
	z = (double)kk * dnzm1;
	exact_solution(x, y, z, u_ex);
	for (mm = 0; mm < 5; mm++) {
	  diff = u[ii][jj][kk][mm] - u_ex[mm];
	  rms[mm] += diff * diff;
	}
      }
    }
  }

  /* normalize by the number of interior points per direction, then
     take the square root */
  for (mm = 0; mm < 5; mm++) {
    for (dir = 0; dir <= 2; dir++)
      rms[mm] /= (double)(grid_points[dir]-2);
    rms[mm] = sqrt(rms[mm]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs_norm(double rms[5]) {

/*--------------------------------------------------------------------
c root-mean-square norm of the right hand side, per flow variable,
c over the interior grid points, normalized by the interior extent
c in each direction
c-------------------------------------------------------------------*/

  int ii, jj, kk, dir, mm;
  double r;

  for (mm = 0; mm < 5; mm++) rms[mm] = 0.0;

  for (ii = 1; ii < grid_points[0]-1; ii++) {
    for (jj = 1; jj < grid_points[1]-1; jj++) {
      for (kk = 1; kk < grid_points[2]-1; kk++) {
	for (mm = 0; mm < 5; mm++) {
	  r = rhs[ii][jj][kk][mm];
	  rms[mm] += r * r;
	}
      }
    }
  }

  /* same normalization as error_norm */
  for (mm = 0; mm < 5; mm++) {
    for (dir = 0; dir <= 2; dir++)
      rms[mm] /= (double)(grid_points[dir]-2);
    rms[mm] = sqrt(rms[mm]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_rhs(void) {

/*--------------------------------------------------------------------
c compute the right hand side based on exact solution
c-------------------------------------------------------------------*/
/* Builds the steady forcing term: the exact solution is evaluated
   along grid lines and the same second-order flux differences and
   fourth-order dissipation applied to u are applied to it, one
   coordinate direction at a time; the sign is flipped at the end.
   ue/buf/cuf/q are file-scope line buffers (presumably threadprivate
   scratch, since each thread fills a full line before differencing
   -- verify against their declarations). */

  double dtemp[5], xi, eta, zeta, dtpp;
  int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;

/*--------------------------------------------------------------------
c initialize the forcing array to zero
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
	for (m = 0; m < 5; m++) {
	  forcing[i][j][k][m] = 0.0;
	}
      }
    }
  }

/*--------------------------------------------------------------------
c xi-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for private(k,i,m)
  for (j = 1; j < grid_points[1]-1; j++) {
    eta = (double)j * dnym1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;

      /* cache the exact solution (ue), velocities (buf[1..3]),
	 kinetic-energy terms (cuf, buf[0]) and q along the i-line */
      for (i = 0; i < grid_points[0]; i++) {
	xi = (double)i * dnxm1;

	exact_solution(xi, eta, zeta, dtemp);
	for (m = 0; m < 5; m++) {
	  ue[i][m] = dtemp[m];
	}

	dtpp = 1.0 / dtemp[0];

	for (m = 1; m <= 4; m++) {
	  buf[i][m] = dtpp * dtemp[m];
	}

	cuf[i] = buf[i][1] * buf[i][1];
	buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] +
	  buf[i][3] * buf[i][3];
	q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
		    buf[i][3]*ue[i][3]);
      }

      /* second-order flux differences at interior i */
      for (i = 1; i < grid_points[0]-1; i++) {
	im1 = i-1;
	ip1 = i+1;

	forcing[i][j][k][0] = forcing[i][j][k][0] -
	  tx2*(ue[ip1][1]-ue[im1][1])+
	  dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);

	forcing[i][j][k][1] = forcing[i][j][k][1] -
	  tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
		 (ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
	  xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
	  dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);

	forcing[i][j][k][2] = forcing[i][j][k][2] -
	  tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
	  xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
	  dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);

	forcing[i][j][k][3] = forcing[i][j][k][3] -
	  tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
	  xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
	  dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);

	forcing[i][j][k][4] = forcing[i][j][k][4] -
	  tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
	       buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
	  0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
	  xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
	  xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
	  dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
      }

/*--------------------------------------------------------------------
c Fourth-order dissipation: one-sided stencils at the two points
c nearest each boundary, centered 5-point stencil in between
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	i = 1;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
	i = 2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (-4.0*ue[i-1][m] + 6.0*ue[i][m] -
	   4.0*ue[i+1][m] + ue[i+2][m]);
      }

      for (m = 0; m < 5; m++) {
	for (i = 1*3; i <= grid_points[0]-3*1-1; i++) {
	  forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
	    (ue[i-2][m] - 4.0*ue[i-1][m] +
	     6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
	}
      }

      for (m = 0; m < 5; m++) {
	i = grid_points[0]-3;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[i-2][m] - 4.0*ue[i-1][m] +
	   6.0*ue[i][m] - 4.0*ue[i+1][m]);
	i = grid_points[0]-2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
      }
    }
  }

/*--------------------------------------------------------------------
c eta-direction flux differences (same structure, lines run in j)
c-------------------------------------------------------------------*/
#pragma omp for private(k,j,m)
  for (i = 1; i < grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (k = 1; k < grid_points[2]-1; k++) {
      zeta = (double)k * dnzm1;

      for (j = 0; j < grid_points[1]; j++) {
	eta = (double)j * dnym1;

	exact_solution(xi, eta, zeta, dtemp);
	for (m = 0; m < 5; m++) {
	  ue[j][m] = dtemp[m];
	}

	dtpp = 1.0/dtemp[0];

	for (m = 1; m <= 4; m++) {
	  buf[j][m] = dtpp * dtemp[m];
	}

	cuf[j] = buf[j][2] * buf[j][2];
	buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] +
	  buf[j][3] * buf[j][3];
	q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
		    buf[j][3]*ue[j][3]);
      }

      for (j = 1; j < grid_points[1]-1; j++) {
	jm1 = j-1;
	jp1 = j+1;

	forcing[i][j][k][0] = forcing[i][j][k][0] -
	  ty2*( ue[jp1][2]-ue[jm1][2] )+
	  dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);

	forcing[i][j][k][1] = forcing[i][j][k][1] -
	  ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
	  yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
	  dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);

	forcing[i][j][k][2] = forcing[i][j][k][2] -
	  ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
	       (ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
	  yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
	  dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);

	forcing[i][j][k][3] = forcing[i][j][k][3] -
	  ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
	  yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
	  dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);

	forcing[i][j][k][4] = forcing[i][j][k][4] -
	  ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
	       buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
	  0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+
		      buf[jm1][0])+
	  yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
	  yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
	  dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
      }

/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	j = 1;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
	j = 2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (-4.0*ue[j-1][m] + 6.0*ue[j][m] -
	   4.0*ue[j+1][m] + ue[j+2][m]);
      }

      for (m = 0; m < 5; m++) {
	for (j = 1*3; j <= grid_points[1]-3*1-1; j++) {
	  forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
	    (ue[j-2][m] - 4.0*ue[j-1][m] +
	     6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
	}
      }

      for (m = 0; m < 5; m++) {
	j = grid_points[1]-3;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[j-2][m] - 4.0*ue[j-1][m] +
	   6.0*ue[j][m] - 4.0*ue[j+1][m]);
	j = grid_points[1]-2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
      }
    }
  }

/*--------------------------------------------------------------------
c zeta-direction flux differences (lines run in k)
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 1; i < grid_points[0]-1; i++) {
    xi = (double)i * dnxm1;
    for (j = 1; j < grid_points[1]-1; j++) {
      eta = (double)j * dnym1;

      for (k = 0; k < grid_points[2]; k++) {
	zeta = (double)k * dnzm1;

	exact_solution(xi, eta, zeta, dtemp);
	for (m = 0; m < 5; m++) {
	  ue[k][m] = dtemp[m];
	}

	dtpp = 1.0/dtemp[0];

	for (m = 1; m <= 4; m++) {
	  buf[k][m] = dtpp * dtemp[m];
	}

	cuf[k] = buf[k][3] * buf[k][3];
	buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] +
	  buf[k][2] * buf[k][2];
	q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
		    buf[k][3]*ue[k][3]);
      }

      for (k = 1; k < grid_points[2]-1; k++) {
	km1 = k-1;
	kp1 = k+1;

	forcing[i][j][k][0] = forcing[i][j][k][0] -
	  tz2*( ue[kp1][3]-ue[km1][3] )+
	  dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);

	forcing[i][j][k][1] = forcing[i][j][k][1] -
	  tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
	  zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
	  dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);

	forcing[i][j][k][2] = forcing[i][j][k][2] -
	  tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
	  zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
	  dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);

	forcing[i][j][k][3] = forcing[i][j][k][3] -
	  tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
		 (ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
	  zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
	  dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);

	forcing[i][j][k][4] = forcing[i][j][k][4] -
	  tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
		 buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
	  0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]
		      +buf[km1][0])+
	  zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
	  zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
	  dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
      }

/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
	k = 1;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
	k = 2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (-4.0*ue[k-1][m] + 6.0*ue[k][m] -
	   4.0*ue[k+1][m] + ue[k+2][m]);
      }

      for (m = 0; m < 5; m++) {
	for (k = 1*3; k <= grid_points[2]-3*1-1; k++) {
	  forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
	    (ue[k-2][m] - 4.0*ue[k-1][m] +
	     6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
	}
      }

      for (m = 0; m < 5; m++) {
	k = grid_points[2]-3;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[k-2][m] - 4.0*ue[k-1][m] +
	   6.0*ue[k][m] - 4.0*ue[k+1][m]);
	k = grid_points[2]-2;
	forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
	  (ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
      }
    }
  }

/*--------------------------------------------------------------------
c now change the sign of the forcing function,
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
	for (m = 0; m < 5; m++) {
	  forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];
	}
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_solution(double xi, double eta, double zeta,
			   double dtemp[5]) {

/*--------------------------------------------------------------------
c this function returns the exact solution at point xi, eta, zeta:
c for each of the 5 flow variables, a cubic polynomial in each of
c the three coordinates (Horner form), with coefficients taken from
c the global table ce.
c-------------------------------------------------------------------*/

  int m;
  double x_part, e_part, z_part;

  for (m = 0; m < 5; m++) {
    x_part = ce[m][1] + xi*(ce[m][4] + xi*(ce[m][7] + xi*ce[m][10]));
    e_part = ce[m][2] + eta*(ce[m][5] + eta*(ce[m][8] + eta*ce[m][11]));
    z_part = ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] + zeta*ce[m][12]));
    dtemp[m] = ce[m][0] + xi*x_part + eta*e_part + zeta*z_part;
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void initialize(void) {

/*--------------------------------------------------------------------
c This subroutine initializes the field variable u using
c tri-linear transfinite interpolation of the boundary values
c-------------------------------------------------------------------*/

  int i, j, k, m, ix, iy, iz;
  double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];

/*--------------------------------------------------------------------
c Later (in compute_rhs) we compute 1/u for every element. A few of
c the corner elements are not used, but it convenient (and faster)
c to compute the whole thing with a simple loop. Make sure those
c values are nonzero by initializing the whole thing here.
c-------------------------------------------------------------------*/
/* NOTE(review): all three loop bounds use IMAX; this is only correct
   while IMAX == JMAX == KMAX -- confirm against the header. */
#pragma omp for private(j,k,m)
  for (i = 0; i < IMAX; i++) {
    for (j = 0; j < IMAX; j++) {
      for (k = 0; k < IMAX; k++) {
	for (m = 0; m < 5; m++) {
	  u[i][j][k][m] = 1.0;
	}
      }
    }
  }

/*--------------------------------------------------------------------
c first store the "interpolated" values everywhere on the grid:
c evaluate the exact solution on the two opposite faces in each
c direction, then blend the three face pairs tri-linearly
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,ix,iy,iz,m)
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
	zeta = (double)k * dnzm1;

	for (ix = 0; ix < 2; ix++) {
	  exact_solution((double)ix, eta, zeta,
			 &(Pface[ix][0][0]));
	}
	for (iy = 0; iy < 2; iy++) {
	  exact_solution(xi, (double)iy , zeta,
			 &Pface[iy][1][0]);
	}
	for (iz = 0; iz < 2; iz++) {
	  exact_solution(xi, eta, (double)iz,
			 &Pface[iz][2][0]);
	}

	for (m = 0; m < 5; m++) {
	  Pxi = xi * Pface[1][0][m] +
	    (1.0-xi) * Pface[0][0][m];
	  Peta = eta * Pface[1][1][m] +
	    (1.0-eta) * Pface[0][1][m];
	  Pzeta = zeta * Pface[1][2][m] +
	    (1.0-zeta) * Pface[0][2][m];

	  /* transfinite interpolation: inclusion-exclusion of the
	     three one-dimensional blends */
	  u[i][j][k][m] = Pxi + Peta + Pzeta -
	    Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
	    Pxi*Peta*Pzeta;
	}
      }
    }
  }

/*--------------------------------------------------------------------
c now store the exact values on the boundaries (overwrites the
c interpolated values on all six faces; 'nowait' pairs each face
c with its opposite, which touches disjoint data)
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c west face
c-------------------------------------------------------------------*/
  i = 0;
  xi = 0.0;
#pragma omp for private(k,m) nowait
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c east face
c-------------------------------------------------------------------*/
  i = grid_points[0]-1;
  xi = 1.0;
#pragma omp for private(k,m)
  for (j = 0; j < grid_points[1]; j++) {
    eta = (double)j * dnym1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c south face
c-------------------------------------------------------------------*/
  j = 0;
  eta = 0.0;
#pragma omp for private(k,m) nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c north face
c-------------------------------------------------------------------*/
  j = grid_points[1]-1;
  eta = 1.0;
#pragma omp for private(k,m)
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (k = 0; k < grid_points[2]; k++) {
      zeta = (double)k * dnzm1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c bottom face
c-------------------------------------------------------------------*/
  k = 0;
  zeta = 0.0;
#pragma omp for private(j,m) nowait
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i *dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }

/*--------------------------------------------------------------------
c top face
c-------------------------------------------------------------------*/
  k = grid_points[2]-1;
  zeta = 1.0;
#pragma omp for private(j,m)
  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      exact_solution(xi, eta, zeta, temp);
      for (m = 0; m < 5; m++) {
	u[i][j][k][m] = temp[m];
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsinit(void) {
  int i, j, k, m, n, b;

/*--------------------------------------------------------------------
c zero the whole left hand side for starters: all three 5x5 blocks
c (sub-, main- and super-diagonal) at every grid point
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,b,m,n)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
	for (b = 0; b < 3; b++) {
	  for (m = 0; m < 5; m++) {
	    for (n = 0; n < 5; n++) {
	      lhs[i][j][k][b][m][n] = 0.0;
	    }
	  }
	}
      }
    }
  }

/*--------------------------------------------------------------------
c next, set all diagonal values to 1. This is overkill, but
c convenient: boundary systems become the identity
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
  for (i = 0; i < grid_points[0]; i++) {
    for (j = 0; j < grid_points[1]; j++) {
      for (k = 0; k < grid_points[2]; k++) {
	for (m = 0; m < 5; m++) {
	  lhs[i][j][k][1][m][m] = 1.0;
	}
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsx(void) {

/*--------------------------------------------------------------------
c This function computes the left hand side in the xi-direction
c-------------------------------------------------------------------*/
/* For every i-line it first fills fjac (flux jacobian dF/dU) and
   njac (viscous jacobian) at every point, then assembles the three
   block-tridiagonal bands lhs[..][AA/BB/CC] used by x_solve.
   tmp1/tmp2/tmp3 are file-scope scratch (presumably threadprivate
   -- verify against their declarations). */

  int i, j, k;

/*--------------------------------------------------------------------
c determine a (labeled f) and n jacobians
c-------------------------------------------------------------------*/
#pragma omp for private(k,i)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {
      for (i = 0; i < grid_points[0]; i++) {

	/* powers of 1/density at this point */
	tmp1 = 1.0 / u[i][j][k][0];
	tmp2 = tmp1 * tmp1;
	tmp3 = tmp1 * tmp2;

/*--------------------------------------------------------------------
c flux jacobian fjac = dF/dU for the xi-direction flux
c-------------------------------------------------------------------*/
	fjac[ i][ j][ k][0][0] = 0.0;
	fjac[ i][ j][ k][0][1] = 1.0;
	fjac[ i][ j][ k][0][2] = 0.0;
	fjac[ i][ j][ k][0][3] = 0.0;
	fjac[ i][ j][ k][0][4] = 0.0;

	fjac[ i][ j][ k][1][0] = -(u[i][j][k][1] * tmp2 *
				   u[i][j][k][1])
	  + c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1]
			 + u[i][j][k][2] * u[i][j][k][2]
			 + u[i][j][k][3] * u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][1][1] = ( 2.0 - c2 )
	  * ( u[i][j][k][1] / u[i][j][k][0] );
	fjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 );
	fjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 );
	fjac[i][j][k][1][4] = c2;

	fjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
	fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;
	fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;
	fjac[i][j][k][2][3] = 0.0;
	fjac[i][j][k][2][4] = 0.0;

	fjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;
	fjac[i][j][k][3][2] = 0.0;
	fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;
	fjac[i][j][k][3][4] = 0.0;

	fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
				       + u[i][j][k][2] * u[i][j][k][2]
				       + u[i][j][k][3] * u[i][j][k][3] ) * tmp2
				- c1 * ( u[i][j][k][4] * tmp1 ) )
	  * ( u[i][j][k][1] * tmp1 );
	fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1
	  - 0.50 * c2
	  * ( 3.0*u[i][j][k][1]*u[i][j][k][1]
	      + u[i][j][k][2]*u[i][j][k][2]
	      + u[i][j][k][3]*u[i][j][k][3] ) * tmp2;
	fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] )
	  * tmp2;
	fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] )
	  * tmp2;
	fjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 );

/*--------------------------------------------------------------------
c viscous jacobian njac
c-------------------------------------------------------------------*/
	njac[i][j][k][0][0] = 0.0;
	njac[i][j][k][0][1] = 0.0;
	njac[i][j][k][0][2] = 0.0;
	njac[i][j][k][0][3] = 0.0;
	njac[i][j][k][0][4] = 0.0;

	njac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1];
	njac[i][j][k][1][1] = con43 * c3c4 * tmp1;
	njac[i][j][k][1][2] = 0.0;
	njac[i][j][k][1][3] = 0.0;
	njac[i][j][k][1][4] = 0.0;

	njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
	njac[i][j][k][2][1] = 0.0;
	njac[i][j][k][2][2] = c3c4 * tmp1;
	njac[i][j][k][2][3] = 0.0;
	njac[i][j][k][2][4] = 0.0;

	njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
	njac[i][j][k][3][1] = 0.0;
	njac[i][j][k][3][2] = 0.0;
	njac[i][j][k][3][3] = c3c4 * tmp1;
	njac[i][j][k][3][4] = 0.0;

	njac[i][j][k][4][0] = - ( con43 * c3c4
				  - c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
	  - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
	  - ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
	  - c1345 * tmp2 * u[i][j][k][4];
	njac[i][j][k][4][1] = ( con43 * c3c4
				- c1345 ) * tmp2 * u[i][j][k][1];
	njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
	njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
	njac[i][j][k][4][4] = ( c1345 ) * tmp1;
      }

/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in x direction:
c AA couples to i-1, BB is the diagonal block, CC couples to i+1
c (tmp1/tmp2 are reused here as dt-scaled factors)
c-------------------------------------------------------------------*/
      for (i = 1; i < grid_points[0]-1; i++) {

	tmp1 = dt * tx1;
	tmp2 = dt * tx2;

	lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0]
	  - tmp1 * njac[i-1][j][k][0][0]
	  - tmp1 * dx1;
	lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1]
	  - tmp1 * njac[i-1][j][k][0][1];
	lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2]
	  - tmp1 * njac[i-1][j][k][0][2];
	lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3]
	  - tmp1 * njac[i-1][j][k][0][3];
	lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4]
	  - tmp1 * njac[i-1][j][k][0][4];

	lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0]
	  - tmp1 * njac[i-1][j][k][1][0];
	lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1]
	  - tmp1 * njac[i-1][j][k][1][1]
	  - tmp1 * dx2;
	lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2]
	  - tmp1 * njac[i-1][j][k][1][2];
	lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3]
	  - tmp1 * njac[i-1][j][k][1][3];
	lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4]
	  - tmp1 * njac[i-1][j][k][1][4];

	lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0]
	  - tmp1 * njac[i-1][j][k][2][0];
	lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1]
	  - tmp1 * njac[i-1][j][k][2][1];
	lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2]
	  - tmp1 * njac[i-1][j][k][2][2]
	  - tmp1 * dx3;
	lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3]
	  - tmp1 * njac[i-1][j][k][2][3];
	lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4]
	  - tmp1 * njac[i-1][j][k][2][4];

	lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0]
	  - tmp1 * njac[i-1][j][k][3][0];
	lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1]
	  - tmp1 * njac[i-1][j][k][3][1];
	lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2]
	  - tmp1 * njac[i-1][j][k][3][2];
	lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3]
	  - tmp1 * njac[i-1][j][k][3][3]
	  - tmp1 * dx4;
	lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4]
	  - tmp1 * njac[i-1][j][k][3][4];

	lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0]
	  - tmp1 * njac[i-1][j][k][4][0];
	lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1]
	  - tmp1 * njac[i-1][j][k][4][1];
	lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2]
	  - tmp1 * njac[i-1][j][k][4][2];
	lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3]
	  - tmp1 * njac[i-1][j][k][4][3];
	lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4]
	  - tmp1 * njac[i-1][j][k][4][4]
	  - tmp1 * dx5;

	/* diagonal block: identity plus twice the viscous jacobian
	   and dissipation at the point itself */
	lhs[i][j][k][BB][0][0] = 1.0
	  + tmp1 * 2.0 * njac[i][j][k][0][0]
	  + tmp1 * 2.0 * dx1;
	lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
	lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
	lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
	lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];

	lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
	lhs[i][j][k][BB][1][1] = 1.0
	  + tmp1 * 2.0 * njac[i][j][k][1][1]
	  + tmp1 * 2.0 * dx2;
	lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
	lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
	lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];

	lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
	lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
	lhs[i][j][k][BB][2][2] = 1.0
	  + tmp1 * 2.0 * njac[i][j][k][2][2]
	  + tmp1 * 2.0 * dx3;
	lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
	lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];

	lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
	lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
	lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
	lhs[i][j][k][BB][3][3] = 1.0
	  + tmp1 * 2.0 * njac[i][j][k][3][3]
	  + tmp1 * 2.0 * dx4;
	lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];

	lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
	lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
	lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
	lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
	lhs[i][j][k][BB][4][4] = 1.0
	  + tmp1 * 2.0 * njac[i][j][k][4][4]
	  + tmp1 * 2.0 * dx5;

	lhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0]
	  - tmp1 * njac[i+1][j][k][0][0]
	  - tmp1 * dx1;
	lhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1]
	  - tmp1 * njac[i+1][j][k][0][1];
	lhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2]
	  - tmp1 * njac[i+1][j][k][0][2];
	lhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3]
	  - tmp1 * njac[i+1][j][k][0][3];
	lhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4]
	  - tmp1 * njac[i+1][j][k][0][4];

	lhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0]
	  - tmp1 * njac[i+1][j][k][1][0];
	lhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1]
	  - tmp1 * njac[i+1][j][k][1][1]
	  - tmp1 * dx2;
	lhs[i][j][k][CC][1][2] = tmp2 * fjac[i+1][j][k][1][2]
	  - tmp1 * njac[i+1][j][k][1][2];
	lhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3]
	  - tmp1 * njac[i+1][j][k][1][3];
	lhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4]
	  - tmp1 * njac[i+1][j][k][1][4];

	lhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0]
	  - tmp1 * njac[i+1][j][k][2][0];
	lhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1]
	  - tmp1 * njac[i+1][j][k][2][1];
	lhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2]
	  - tmp1 * njac[i+1][j][k][2][2]
	  - tmp1 * dx3;
	lhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3]
	  - tmp1 * njac[i+1][j][k][2][3];
	lhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4]
	  - tmp1 * njac[i+1][j][k][2][4];

	lhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0]
	  - tmp1 * njac[i+1][j][k][3][0];
	lhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1]
	  - tmp1 * njac[i+1][j][k][3][1];
	lhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2]
	  - tmp1 * njac[i+1][j][k][3][2];
	lhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3]
	  - tmp1 * njac[i+1][j][k][3][3]
	  - tmp1 * dx4;
	lhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4]
	  - tmp1 * njac[i+1][j][k][3][4];

	lhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0]
	  - tmp1 * njac[i+1][j][k][4][0];
	lhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1]
	  - tmp1 * njac[i+1][j][k][4][1];
	lhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2]
	  - tmp1 * njac[i+1][j][k][4][2];
	lhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3]
	  - tmp1 * njac[i+1][j][k][4][3];
	lhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4]
	  - tmp1 * njac[i+1][j][k][4][4]
	  - tmp1 * dx5;
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsy(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors.
c
c Pass 1: for every interior (i,k) line and ALL j (including j=0 and
c j=grid_points[1]-1, whose Jacobians are needed by the j-1/j+1
c stencil in pass 2), fill the 5x5 inviscid flux Jacobian
c fjac = dF/dU and the viscous Jacobian njac, both evaluated at
c u[i][j][k].
c
c Pass 2: for interior j only, assemble the three 5x5 blocks of the
c block-tridiagonal system in lhs:
c   AA couples to j-1, BB is the diagonal block, CC couples to j+1,
c using tmp1 = dt*ty1 (viscous terms) and tmp2 = dt*ty2 (convective
c terms); dy1..dy5 add artificial-dissipation contributions on the
c diagonal entries of each block.
c
c NOTE(review): the bare "#pragma omp for" directives imply this runs
c inside an enclosing parallel region, and tmp1/tmp2/tmp3 appear to be
c file-scope scratch -- presumably threadprivate or otherwise
c per-thread; confirm against the declarations earlier in the file.
c-------------------------------------------------------------------*/
int i, j, k;
/*--------------------------------------------------------------------
c Compute the indices for storing the tri-diagonal matrix;
c determine a (labeled f) and n jacobians for cell c
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* tmp1..tmp3 = 1/rho, 1/rho^2, 1/rho^3 at this cell */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
/* inviscid flux Jacobian dF/dU in the eta direction */
fjac[ i][ j][ k][0][0] = 0.0;
fjac[ i][ j][ k][0][1] = 0.0;
fjac[ i][ j][ k][0][2] = 1.0;
fjac[ i][ j][ k][0][3] = 0.0;
fjac[ i][ j][ k][0][4] = 0.0;
fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] )
* tmp2;
fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;
fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][3] = 0.0;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)
+ 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][2][1] = - c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][2][2] = ( 2.0 - c2 )
* u[i][j][k][2] * tmp1;
fjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1;
fjac[i][j][k][2][4] = c2;
fjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][3][1] = 0.0;
fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][3][4] = 0.0;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2
- c1 * u[i][j][k][4] * tmp1 )
* u[i][j][k][2] * tmp1;
fjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2]
* tmp2;
fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1
- 0.50 * c2
* ( ( u[i][j][k][1]*u[i][j][k][1]
+ 3.0 * u[i][j][k][2]*u[i][j][k][2]
+ u[i][j][k][3]*u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;
/* viscous Jacobian; con43 multiplies the eta-momentum (row 2)
entries because that is the normal direction of this sweep */
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = con43 * c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 ) * tmp1;
}
}
}
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in y direction
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* tmp1 scales viscous/dissipation terms, tmp2 convective terms */
tmp1 = dt * ty1;
tmp2 = dt * ty2;
/* AA: sub-diagonal block, built from the Jacobians at j-1 */
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0]
- tmp1 * njac[i][j-1][k][0][0]
- tmp1 * dy1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1]
- tmp1 * njac[i][j-1][k][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2]
- tmp1 * njac[i][j-1][k][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3]
- tmp1 * njac[i][j-1][k][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4]
- tmp1 * njac[i][j-1][k][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0]
- tmp1 * njac[i][j-1][k][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1]
- tmp1 * njac[i][j-1][k][1][1]
- tmp1 * dy2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2]
- tmp1 * njac[i][j-1][k][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3]
- tmp1 * njac[i][j-1][k][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4]
- tmp1 * njac[i][j-1][k][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0]
- tmp1 * njac[i][j-1][k][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1]
- tmp1 * njac[i][j-1][k][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2]
- tmp1 * njac[i][j-1][k][2][2]
- tmp1 * dy3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3]
- tmp1 * njac[i][j-1][k][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4]
- tmp1 * njac[i][j-1][k][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0]
- tmp1 * njac[i][j-1][k][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1]
- tmp1 * njac[i][j-1][k][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2]
- tmp1 * njac[i][j-1][k][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3]
- tmp1 * njac[i][j-1][k][3][3]
- tmp1 * dy4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4]
- tmp1 * njac[i][j-1][k][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0]
- tmp1 * njac[i][j-1][k][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1]
- tmp1 * njac[i][j-1][k][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2]
- tmp1 * njac[i][j-1][k][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3]
- tmp1 * njac[i][j-1][k][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4]
- tmp1 * njac[i][j-1][k][4][4]
- tmp1 * dy5;
/* BB: diagonal block = I + 2*tmp1*njac(j) + 2*tmp1*dy on diag
(fjac contributions at j cancel in the central difference) */
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dy1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dy2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dy3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dy4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dy5;
/* CC: super-diagonal block, built from the Jacobians at j+1 */
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0]
- tmp1 * njac[i][j+1][k][0][0]
- tmp1 * dy1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1]
- tmp1 * njac[i][j+1][k][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2]
- tmp1 * njac[i][j+1][k][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3]
- tmp1 * njac[i][j+1][k][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4]
- tmp1 * njac[i][j+1][k][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0]
- tmp1 * njac[i][j+1][k][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1]
- tmp1 * njac[i][j+1][k][1][1]
- tmp1 * dy2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2]
- tmp1 * njac[i][j+1][k][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3]
- tmp1 * njac[i][j+1][k][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4]
- tmp1 * njac[i][j+1][k][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0]
- tmp1 * njac[i][j+1][k][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1]
- tmp1 * njac[i][j+1][k][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2]
- tmp1 * njac[i][j+1][k][2][2]
- tmp1 * dy3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3]
- tmp1 * njac[i][j+1][k][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4]
- tmp1 * njac[i][j+1][k][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0]
- tmp1 * njac[i][j+1][k][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1]
- tmp1 * njac[i][j+1][k][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2]
- tmp1 * njac[i][j+1][k][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3]
- tmp1 * njac[i][j+1][k][3][3]
- tmp1 * dy4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j+1][k][3][4]
- tmp1 * njac[i][j+1][k][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0]
- tmp1 * njac[i][j+1][k][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1]
- tmp1 * njac[i][j+1][k][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2]
- tmp1 * njac[i][j+1][k][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3]
- tmp1 * njac[i][j+1][k][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4]
- tmp1 * njac[i][j+1][k][4][4]
- tmp1 * dy5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsz(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors.
c
c Structure mirrors lhsy, but for the zeta (z) sweep:
c Pass 1 fills, for every interior (i,j) line and ALL k, the 5x5
c inviscid flux Jacobian fjac = dF/dU and the viscous Jacobian njac
c at u[i][j][k].  Pass 2 assembles the block-tridiagonal system in
c lhs -- AA couples to k-1, BB is the diagonal block, CC couples to
c k+1 -- with tmp1 = dt*tz1 (viscous), tmp2 = dt*tz2 (convective),
c and dz1..dz5 dissipation shifts on the diagonal entries.
c-------------------------------------------------------------------*/
int i, j, k;
/*--------------------------------------------------------------------
c Compute the indices for storing the block-diagonal matrix;
c determine c (labeled f) and s jacobians
c---------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 0; k < grid_points[2]; k++) {
/* tmp1..tmp3 = 1/rho, 1/rho^2, 1/rho^3 at this cell */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
/* inviscid flux Jacobian dF/dU in the zeta direction */
fjac[i][j][k][0][0] = 0.0;
fjac[i][j][k][0][1] = 0.0;
fjac[i][j][k][0][2] = 0.0;
fjac[i][j][k][0][3] = 1.0;
fjac[i][j][k][0][4] = 0.0;
fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][1][2] = 0.0;
fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][2][1] = 0.0;
fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 )
+ 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
fjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;
fjac[i][j][k][3][3] = ( 2.0 - c2 )
* u[i][j][k][3] * tmp1;
fjac[i][j][k][3][4] = c2;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2
- c1 * ( u[i][j][k][4] * tmp1 ) )
* ( u[i][j][k][3] * tmp1 );
fjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )
- 0.50 * c2
* ( ( u[i][j][k][1]*u[i][j][k][1]
+ u[i][j][k][2]*u[i][j][k][2]
+ 3.0*u[i][j][k][3]*u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;
/* viscous Jacobian; con43 multiplies the zeta-momentum (row 3)
entries because that is the normal direction of this sweep */
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
/* NOTE(review): spelled "c3 * c4" here but the precomputed c3c4
(= c3*c4, see set_constants) is used everywhere else; the
product is the same, though the association order differs,
so it is left untouched to preserve bitwise behavior. */
njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 )* tmp1;
}
}
}
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in z direction
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* tmp1 scales viscous/dissipation terms, tmp2 convective terms */
tmp1 = dt * tz1;
tmp2 = dt * tz2;
/* AA: sub-diagonal block, built from the Jacobians at k-1 */
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0]
- tmp1 * njac[i][j][k-1][0][0]
- tmp1 * dz1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1]
- tmp1 * njac[i][j][k-1][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2]
- tmp1 * njac[i][j][k-1][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3]
- tmp1 * njac[i][j][k-1][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4]
- tmp1 * njac[i][j][k-1][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0]
- tmp1 * njac[i][j][k-1][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1]
- tmp1 * njac[i][j][k-1][1][1]
- tmp1 * dz2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2]
- tmp1 * njac[i][j][k-1][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3]
- tmp1 * njac[i][j][k-1][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4]
- tmp1 * njac[i][j][k-1][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0]
- tmp1 * njac[i][j][k-1][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1]
- tmp1 * njac[i][j][k-1][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2]
- tmp1 * njac[i][j][k-1][2][2]
- tmp1 * dz3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3]
- tmp1 * njac[i][j][k-1][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4]
- tmp1 * njac[i][j][k-1][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0]
- tmp1 * njac[i][j][k-1][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1]
- tmp1 * njac[i][j][k-1][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2]
- tmp1 * njac[i][j][k-1][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3]
- tmp1 * njac[i][j][k-1][3][3]
- tmp1 * dz4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4]
- tmp1 * njac[i][j][k-1][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0]
- tmp1 * njac[i][j][k-1][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1]
- tmp1 * njac[i][j][k-1][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2]
- tmp1 * njac[i][j][k-1][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3]
- tmp1 * njac[i][j][k-1][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4]
- tmp1 * njac[i][j][k-1][4][4]
- tmp1 * dz5;
/* BB: diagonal block = I + 2*tmp1*njac(k) + 2*tmp1*dz on diag */
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dz1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dz2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dz3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dz4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dz5;
/* CC: super-diagonal block, built from the Jacobians at k+1 */
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0]
- tmp1 * njac[i][j][k+1][0][0]
- tmp1 * dz1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1]
- tmp1 * njac[i][j][k+1][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2]
- tmp1 * njac[i][j][k+1][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3]
- tmp1 * njac[i][j][k+1][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4]
- tmp1 * njac[i][j][k+1][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0]
- tmp1 * njac[i][j][k+1][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1]
- tmp1 * njac[i][j][k+1][1][1]
- tmp1 * dz2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2]
- tmp1 * njac[i][j][k+1][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3]
- tmp1 * njac[i][j][k+1][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4]
- tmp1 * njac[i][j][k+1][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0]
- tmp1 * njac[i][j][k+1][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1]
- tmp1 * njac[i][j][k+1][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2]
- tmp1 * njac[i][j][k+1][2][2]
- tmp1 * dz3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3]
- tmp1 * njac[i][j][k+1][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4]
- tmp1 * njac[i][j][k+1][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0]
- tmp1 * njac[i][j][k+1][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1]
- tmp1 * njac[i][j][k+1][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2]
- tmp1 * njac[i][j][k+1][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3]
- tmp1 * njac[i][j][k+1][3][3]
- tmp1 * dz4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4]
- tmp1 * njac[i][j][k+1][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0]
- tmp1 * njac[i][j][k+1][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1]
- tmp1 * njac[i][j][k+1][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2]
- tmp1 * njac[i][j][k+1][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3]
- tmp1 * njac[i][j][k+1][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4]
- tmp1 * njac[i][j][k+1][4][4]
- tmp1 * dz5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void compute_rhs(void) {
/*--------------------------------------------------------------------
c compute_rhs: evaluate the right-hand side of the discrete system
c into rhs.  Phases (each an "omp for" inside the enclosing parallel
c region implied by the bare worksharing pragmas):
c   1. precompute 1/rho (rho_i), velocities us/vs/ws,
c      square = 0.5*rho*|v|^2 and qs on the whole grid;
c   2. initialise rhs from the precomputed forcing term;
c   3-5. add second-order central-difference fluxes in the xi, eta
c      and zeta directions; after each direction, add fourth-order
c      artificial dissipation, with one-sided stencils at the two
c      points nearest each boundary (the fixed i/j/k = 1, 2,
c      grid_points-3, grid_points-2 loops);
c   6. scale rhs by the time step dt.
c The "nowait" on phase 1 is safe because phase 2 only copies
c forcing into rhs and touches none of the arrays phase 1 writes;
c phase 2's implicit barrier then orders those writes before the
c flux loops.  The dissipation loops for a given direction carry
c nowait because each writes a disjoint slab of rhs; the last loop
c of each group omits nowait to close the direction with a barrier.
c-------------------------------------------------------------------*/
int i, j, k, m;
double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;
/*--------------------------------------------------------------------
c compute the reciprocal of density, and the kinetic energy,
c and the speed of sound.
c-------------------------------------------------------------------*/
#pragma omp for private(j,k) nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
rho_inv = 1.0/u[i][j][k][0];
rho_i[i][j][k] = rho_inv;
us[i][j][k] = u[i][j][k][1] * rho_inv;
vs[i][j][k] = u[i][j][k][2] * rho_inv;
ws[i][j][k] = u[i][j][k][3] * rho_inv;
square[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] +
u[i][j][k][2]*u[i][j][k][2] +
u[i][j][k][3]*u[i][j][k][3] ) * rho_inv;
qs[i][j][k] = square[i][j][k] * rho_inv;
}
}
}
/*--------------------------------------------------------------------
c copy the exact forcing term to the right hand side; because
c this forcing term is known, we can store it on the whole grid
c including the boundary
c-------------------------------------------------------------------*/
#pragma omp for private(j,k,m)
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = forcing[i][j][k][m];
}
}
}
}
/*--------------------------------------------------------------------
c compute xi-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* u-velocity at i and its two xi-neighbours */
uijk = us[i][j][k];
up1 = us[i+1][j][k];
um1 = us[i-1][j][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 *
(u[i+1][j][k][0] - 2.0*u[i][j][k][0] +
u[i-1][j][k][0]) -
tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 *
(u[i+1][j][k][1] - 2.0*u[i][j][k][1] +
u[i-1][j][k][1]) +
xxcon2*con43 * (up1 - 2.0*uijk + um1) -
tx2 * (u[i+1][j][k][1]*up1 -
u[i-1][j][k][1]*um1 +
(u[i+1][j][k][4]- square[i+1][j][k]-
u[i-1][j][k][4]+ square[i-1][j][k])*
c2);
rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 *
(u[i+1][j][k][2] - 2.0*u[i][j][k][2] +
u[i-1][j][k][2]) +
xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +
vs[i-1][j][k]) -
tx2 * (u[i+1][j][k][2]*up1 -
u[i-1][j][k][2]*um1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 *
(u[i+1][j][k][3] - 2.0*u[i][j][k][3] +
u[i-1][j][k][3]) +
xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +
ws[i-1][j][k]) -
tx2 * (u[i+1][j][k][3]*up1 -
u[i-1][j][k][3]*um1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 *
(u[i+1][j][k][4] - 2.0*u[i][j][k][4] +
u[i-1][j][k][4]) +
xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +
qs[i-1][j][k]) +
xxcon4 * (up1*up1 - 2.0*uijk*uijk +
um1*um1) +
xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i-1][j][k][4]*rho_i[i-1][j][k]) -
tx2 * ( (c1*u[i+1][j][k][4] -
c2*square[i+1][j][k])*up1 -
(c1*u[i-1][j][k][4] -
c2*square[i-1][j][k])*um1 );
}
}
}
/*--------------------------------------------------------------------
c add fourth order xi-direction dissipation
c-------------------------------------------------------------------*/
/* i = 1: one-sided stencil (no i-2 point available) */
i = 1;
#pragma omp for private(k,m) nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
u[i+2][j][k][m]);
}
}
}
/* i = 2: stencil shortened on the low side */
i = 2;
#pragma omp for private(k,m) nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] -
4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);
}
}
}
/* interior points: full five-point dissipation stencil */
#pragma omp for private(j,k,m) nowait
for (i = 3; i < grid_points[0]-3; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
u[i+2][j][k][m] );
}
}
}
}
/* i = grid_points[0]-3: stencil shortened on the high side */
i = grid_points[0]-3;
#pragma omp for private(k,m) nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );
}
}
}
/* i = grid_points[0]-2: one-sided stencil; no nowait closes xi */
i = grid_points[0]-2;
#pragma omp for private(k,m)
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] +
5.0*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c compute eta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* v-velocity at j and its two eta-neighbours */
vijk = vs[i][j][k];
vp1 = vs[i][j+1][k];
vm1 = vs[i][j-1][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 *
(u[i][j+1][k][0] - 2.0*u[i][j][k][0] +
u[i][j-1][k][0]) -
ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 *
(u[i][j+1][k][1] - 2.0*u[i][j][k][1] +
u[i][j-1][k][1]) +
yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] +
us[i][j-1][k]) -
ty2 * (u[i][j+1][k][1]*vp1 -
u[i][j-1][k][1]*vm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 *
(u[i][j+1][k][2] - 2.0*u[i][j][k][2] +
u[i][j-1][k][2]) +
yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
ty2 * (u[i][j+1][k][2]*vp1 -
u[i][j-1][k][2]*vm1 +
(u[i][j+1][k][4] - square[i][j+1][k] -
u[i][j-1][k][4] + square[i][j-1][k])
*c2);
rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 *
(u[i][j+1][k][3] - 2.0*u[i][j][k][3] +
u[i][j-1][k][3]) +
yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] +
ws[i][j-1][k]) -
ty2 * (u[i][j+1][k][3]*vp1 -
u[i][j-1][k][3]*vm1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 *
(u[i][j+1][k][4] - 2.0*u[i][j][k][4] +
u[i][j-1][k][4]) +
yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] +
qs[i][j-1][k]) +
yycon4 * (vp1*vp1 - 2.0*vijk*vijk +
vm1*vm1) +
yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i][j-1][k][4]*rho_i[i][j-1][k]) -
ty2 * ((c1*u[i][j+1][k][4] -
c2*square[i][j+1][k]) * vp1 -
(c1*u[i][j-1][k][4] -
c2*square[i][j-1][k]) * vm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order eta-direction dissipation
c (same boundary treatment as the xi direction, in j)
c-------------------------------------------------------------------*/
j = 1;
#pragma omp for private(k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
u[i][j+2][k][m]);
}
}
}
j = 2;
#pragma omp for private(k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] -
4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);
}
}
}
#pragma omp for private(j,k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 3; j < grid_points[1]-3; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
u[i][j+2][k][m] );
}
}
}
}
j = grid_points[1]-3;
#pragma omp for private(k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );
}
}
}
j = grid_points[1]-2;
#pragma omp for private(k,m)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] +
5.*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c compute zeta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for private(j,k)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* w-velocity at k and its two zeta-neighbours */
wijk = ws[i][j][k];
wp1 = ws[i][j][k+1];
wm1 = ws[i][j][k-1];
rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 *
(u[i][j][k+1][0] - 2.0*u[i][j][k][0] +
u[i][j][k-1][0]) -
tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 *
(u[i][j][k+1][1] - 2.0*u[i][j][k][1] +
u[i][j][k-1][1]) +
zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] +
us[i][j][k-1]) -
tz2 * (u[i][j][k+1][1]*wp1 -
u[i][j][k-1][1]*wm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 *
(u[i][j][k+1][2] - 2.0*u[i][j][k][2] +
u[i][j][k-1][2]) +
zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] +
vs[i][j][k-1]) -
tz2 * (u[i][j][k+1][2]*wp1 -
u[i][j][k-1][2]*wm1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 *
(u[i][j][k+1][3] - 2.0*u[i][j][k][3] +
u[i][j][k-1][3]) +
zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
tz2 * (u[i][j][k+1][3]*wp1 -
u[i][j][k-1][3]*wm1 +
(u[i][j][k+1][4] - square[i][j][k+1] -
u[i][j][k-1][4] + square[i][j][k-1])
*c2);
rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 *
(u[i][j][k+1][4] - 2.0*u[i][j][k][4] +
u[i][j][k-1][4]) +
zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] +
qs[i][j][k-1]) +
zzcon4 * (wp1*wp1 - 2.0*wijk*wijk +
wm1*wm1) +
zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i][j][k-1][4]*rho_i[i][j][k-1]) -
tz2 * ( (c1*u[i][j][k+1][4] -
c2*square[i][j][k+1])*wp1 -
(c1*u[i][j][k-1][4] -
c2*square[i][j][k-1])*wm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order zeta-direction dissipation
c (same boundary treatment as the xi direction, in k)
c-------------------------------------------------------------------*/
k = 1;
#pragma omp for private(j,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
u[i][j][k+2][m]);
}
}
}
k = 2;
#pragma omp for private(j,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] -
4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);
}
}
}
#pragma omp for private(j,k,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 3; k < grid_points[2]-3; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
u[i][j][k+2][m] );
}
}
}
}
k = grid_points[2]-3;
#pragma omp for private(j,m) nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );
}
}
}
k = grid_points[2]-2;
#pragma omp for private(j,m)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
5.0*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c finally, scale the whole right hand side by the time step dt
c-------------------------------------------------------------------*/
#pragma omp for private(k,m,i)
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
for (i = 1; i < grid_points[0]-1; i++) {
rhs[i][j][k][m] = rhs[i][j][k][m] * dt;
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void set_constants(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c initialize all global constant tables and derived coefficients used
c by the solver.  Everything is a fixed benchmark constant except the
c values derived from grid_points[] and dt.
c-------------------------------------------------------------------*/
  /* ce[m][n]: 5 x 13 coefficient table, one row per equation m.
     NOTE(review): presumably the exact-solution polynomial
     coefficients consumed by error_norm/exact_solution -- the
     consumers are outside this chunk, confirm before relying on it. */
  ce[0][0]  = 2.0;
  ce[0][1]  = 0.0;
  ce[0][2]  = 0.0;
  ce[0][3]  = 4.0;
  ce[0][4]  = 5.0;
  ce[0][5]  = 3.0;
  ce[0][6]  = 0.5;
  ce[0][7]  = 0.02;
  ce[0][8]  = 0.01;
  ce[0][9]  = 0.03;
  ce[0][10] = 0.5;
  ce[0][11] = 0.4;
  ce[0][12] = 0.3;
  ce[1][0]  = 1.0;
  ce[1][1]  = 0.0;
  ce[1][2]  = 0.0;
  ce[1][3]  = 0.0;
  ce[1][4]  = 1.0;
  ce[1][5]  = 2.0;
  ce[1][6]  = 3.0;
  ce[1][7]  = 0.01;
  ce[1][8]  = 0.03;
  ce[1][9]  = 0.02;
  ce[1][10] = 0.4;
  ce[1][11] = 0.3;
  ce[1][12] = 0.5;
  ce[2][0]  = 2.0;
  ce[2][1]  = 2.0;
  ce[2][2]  = 0.0;
  ce[2][3]  = 0.0;
  ce[2][4]  = 0.0;
  ce[2][5]  = 2.0;
  ce[2][6]  = 3.0;
  ce[2][7]  = 0.04;
  ce[2][8]  = 0.03;
  ce[2][9]  = 0.05;
  ce[2][10] = 0.3;
  ce[2][11] = 0.5;
  ce[2][12] = 0.4;
  ce[3][0]  = 2.0;
  ce[3][1]  = 2.0;
  ce[3][2]  = 0.0;
  ce[3][3]  = 0.0;
  ce[3][4]  = 0.0;
  ce[3][5]  = 2.0;
  ce[3][6]  = 3.0;
  ce[3][7]  = 0.03;
  ce[3][8]  = 0.05;
  ce[3][9]  = 0.04;
  ce[3][10] = 0.2;
  ce[3][11] = 0.1;
  ce[3][12] = 0.3;
  ce[4][0]  = 5.0;
  ce[4][1]  = 4.0;
  ce[4][2]  = 3.0;
  ce[4][3]  = 2.0;
  ce[4][4]  = 0.1;
  ce[4][5]  = 0.4;
  ce[4][6]  = 0.3;
  ce[4][7]  = 0.05;
  ce[4][8]  = 0.04;
  ce[4][9]  = 0.03;
  ce[4][10] = 0.1;
  ce[4][11] = 0.3;
  ce[4][12] = 0.2;

  /* scalar flow constants fixed by the benchmark specification
     (c1 = 1.4 is presumably the ratio of specific heats -- confirm) */
  c1 = 1.4;
  c2 = 0.4;
  c3 = 0.1;
  c4 = 1.0;
  c5 = 1.4;

  /* uniform mesh spacing in each direction: h = 1/(N-1) */
  dnxm1 = 1.0 / (double)(grid_points[0]-1);
  dnym1 = 1.0 / (double)(grid_points[1]-1);
  dnzm1 = 1.0 / (double)(grid_points[2]-1);

  /* precomputed products of the scalar constants */
  c1c2 = c1 * c2;
  c1c5 = c1 * c5;
  c3c4 = c3 * c4;
  c1345 = c1c5 * c3c4;
  conz1 = (1.0-c1c5);

  /* stencil factors per direction: 1/h^2, 1/(2h), and 1/h */
  tx1 = 1.0 / (dnxm1 * dnxm1);
  tx2 = 1.0 / (2.0 * dnxm1);
  tx3 = 1.0 / dnxm1;
  ty1 = 1.0 / (dnym1 * dnym1);
  ty2 = 1.0 / (2.0 * dnym1);
  ty3 = 1.0 / dnym1;
  tz1 = 1.0 / (dnzm1 * dnzm1);
  tz2 = 1.0 / (2.0 * dnzm1);
  tz3 = 1.0 / dnzm1;

  /* d{x,y,z}{1..5}: per-equation diffusion coefficients */
  dx1 = 0.75;
  dx2 = 0.75;
  dx3 = 0.75;
  dx4 = 0.75;
  dx5 = 0.75;
  dy1 = 0.75;
  dy2 = 0.75;
  dy3 = 0.75;
  dy4 = 0.75;
  dy5 = 0.75;
  dz1 = 1.0;
  dz2 = 1.0;
  dz3 = 1.0;
  dz4 = 1.0;
  dz5 = 1.0;
  dxmax = max(dx3, dx4);
  dymax = max(dy2, dy4);
  dzmax = max(dz2, dz3);

  /* dssp: fourth-order artificial dissipation coefficient, used by
     the dissipation sweeps in compute_rhs */
  dssp = 0.25 * max(dx1, max(dy1, dz1) );
  c4dssp = 4.0 * dssp;
  c5dssp = 5.0 * dssp;

  /* dt-scaled stencil factors */
  dttx1 = dt*tx1;
  dttx2 = dt*tx2;
  dtty1 = dt*ty1;
  dtty2 = dt*ty2;
  dttz1 = dt*tz1;
  dttz2 = dt*tz2;
  c2dttx1 = 2.0*dttx1;
  c2dtty1 = 2.0*dtty1;
  c2dttz1 = 2.0*dttz1;

  /* dt-scaled dissipation weights (stencil weights 1,4,5,6) */
  dtdssp = dt*dssp;
  comz1 = dtdssp;
  comz4 = 4.0*dtdssp;
  comz5 = 5.0*dtdssp;
  comz6 = 6.0*dtdssp;

  /* viscous coefficient groups per direction */
  c3c4tx3 = c3c4*tx3;
  c3c4ty3 = c3c4*ty3;
  c3c4tz3 = c3c4*tz3;
  dx1tx1 = dx1*tx1;
  dx2tx1 = dx2*tx1;
  dx3tx1 = dx3*tx1;
  dx4tx1 = dx4*tx1;
  dx5tx1 = dx5*tx1;
  dy1ty1 = dy1*ty1;
  dy2ty1 = dy2*ty1;
  dy3ty1 = dy3*ty1;
  dy4ty1 = dy4*ty1;
  dy5ty1 = dy5*ty1;
  dz1tz1 = dz1*tz1;
  dz2tz1 = dz2*tz1;
  dz3tz1 = dz3*tz1;
  dz4tz1 = dz4*tz1;
  dz5tz1 = dz5*tz1;
  c2iv  = 2.5;
  con43 = 4.0/3.0;
  con16 = 1.0/6.0;

  /* second-difference viscous term coefficients, one set per axis */
  xxcon1 = c3c4tx3*con43*tx3;
  xxcon2 = c3c4tx3*tx3;
  xxcon3 = c3c4tx3*conz1*tx3;
  xxcon4 = c3c4tx3*con16*tx3;
  xxcon5 = c3c4tx3*c1c5*tx3;
  yycon1 = c3c4ty3*con43*ty3;
  yycon2 = c3c4ty3*ty3;
  yycon3 = c3c4ty3*conz1*ty3;
  yycon4 = c3c4ty3*con16*ty3;
  yycon5 = c3c4ty3*c1c5*ty3;
  zzcon1 = c3c4tz3*con43*tz3;
  zzcon2 = c3c4tz3*tz3;
  zzcon3 = c3c4tz3*conz1*tz3;
  zzcon4 = c3c4tz3*con16*tz3;
  zzcon5 = c3c4tz3*c1c5*tz3;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void verify(int no_time_steps, char *cclass, boolean *verified) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c verification routine
c
c Compares the computed residual norms (xcr) and solution-error norms
c (xce) against stored reference values for the recognized benchmark
c classes, selected by grid size and time-step count.
c
c no_time_steps : number of time steps that were run
c cclass (out)  : class letter S/W/A/B/C, or 'U' if the configuration
c                 matches no known class
c verified (out): TRUE iff dt and every norm are within epsilon of
c                 the reference values
c-------------------------------------------------------------------*/
  double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
    epsilon, xce[5], xcr[5], dtref;
  int m;

/*--------------------------------------------------------------------
c tolerance level
c-------------------------------------------------------------------*/
  epsilon = 1.0e-08;

/*--------------------------------------------------------------------
c compute the error norm and the residual norm, and exit if not printing
c-------------------------------------------------------------------*/
  error_norm(xce);
  compute_rhs();
  rhs_norm(xcr);

  /* compute_rhs leaves rhs scaled by dt; divide it back out so the
     residual norms are comparable with the reference values */
  for (m = 0; m < 5; m++) {
    xcr[m] = xcr[m] / dt;
  }

  /* default: unknown class; assume success until a comparison fails */
  *cclass = 'U';
  *verified = TRUE;
  for (m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }

/*--------------------------------------------------------------------
c reference data for 12X12X12 grids after 60 time steps, with DT = 1.0d-02
c-------------------------------------------------------------------*/
  if (grid_points[0] == 12 &&
      grid_points[1] == 12 &&
      grid_points[2] == 12 &&
      no_time_steps == 60) {
    *cclass = 'S';
    dtref = 1.0e-2;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 1.7034283709541311e-01;
    xcrref[1] = 1.2975252070034097e-02;
    xcrref[2] = 3.2527926989486055e-02;
    xcrref[3] = 2.6436421275166801e-02;
    xcrref[4] = 1.9211784131744430e-01;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 4.9976913345811579e-04;
    xceref[1] = 4.5195666782961927e-05;
    xceref[2] = 7.3973765172921357e-05;
    xceref[3] = 7.3821238632439731e-05;
    xceref[4] = 8.9269630987491446e-04;

/*--------------------------------------------------------------------
c reference data for 24X24X24 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 24 &&
             grid_points[1] == 24 &&
             grid_points[2] == 24 &&
             no_time_steps == 200) {
    *cclass = 'W';
    dtref = 0.8e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 0.1125590409344e+03;
    xcrref[1] = 0.1180007595731e+02;
    xcrref[2] = 0.2710329767846e+02;
    xcrref[3] = 0.2469174937669e+02;
    xcrref[4] = 0.2638427874317e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 0.4419655736008e+01;
    xceref[1] = 0.4638531260002e+00;
    xceref[2] = 0.1011551749967e+01;
    xceref[3] = 0.9235878729944e+00;
    xceref[4] = 0.1018045837718e+02;

/*--------------------------------------------------------------------
c reference data for 64X64X64 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 64 &&
             grid_points[1] == 64 &&
             grid_points[2] == 64 &&
             no_time_steps == 200) {
    *cclass = 'A';
    dtref = 0.8e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 1.0806346714637264e+02;
    xcrref[1] = 1.1319730901220813e+01;
    xcrref[2] = 2.5974354511582465e+01;
    xcrref[3] = 2.3665622544678910e+01;
    xcrref[4] = 2.5278963211748344e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 4.2348416040525025e+00;
    xceref[1] = 4.4390282496995698e-01;
    xceref[2] = 9.6692480136345650e-01;
    xceref[3] = 8.8302063039765474e-01;
    xceref[4] = 9.7379901770829278e+00;

/*--------------------------------------------------------------------
c reference data for 102X102X102 grids after 200 time steps,
c with DT = 3.0d-04
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 102 &&
             grid_points[1] == 102 &&
             grid_points[2] == 102 &&
             no_time_steps == 200) {
    *cclass = 'B';
    dtref = 3.0e-4;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 1.4233597229287254e+03;
    xcrref[1] = 9.9330522590150238e+01;
    xcrref[2] = 3.5646025644535285e+02;
    xcrref[3] = 3.2485447959084092e+02;
    xcrref[4] = 3.2707541254659363e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 5.2969847140936856e+01;
    xceref[1] = 4.4632896115670668e+00;
    xceref[2] = 1.3122573342210174e+01;
    xceref[3] = 1.2006925323559144e+01;
    xceref[4] = 1.2459576151035986e+02;

/*--------------------------------------------------------------------
c reference data for 162X162X162 grids after 200 time steps,
c with DT = 1.0d-04
c-------------------------------------------------------------------*/
  } else if (grid_points[0] == 162 &&
             grid_points[1] == 162 &&
             grid_points[2] == 162 &&
             no_time_steps == 200) {
    *cclass = 'C';
    dtref = 1.0e-4;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
    xcrref[0] = 0.62398116551764615e+04;
    xcrref[1] = 0.50793239190423964e+03;
    xcrref[2] = 0.15423530093013596e+04;
    xcrref[3] = 0.13302387929291190e+04;
    xcrref[4] = 0.11604087428436455e+05;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
    xceref[0] = 0.16462008369091265e+03;
    xceref[1] = 0.11497107903824313e+02;
    xceref[2] = 0.41207446207461508e+02;
    xceref[3] = 0.37087651059694167e+02;
    xceref[4] = 0.36211053051841265e+03;
  } else {
    *verified = FALSE;
  }

/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12 or
c 24X24X24 or 64X64X64 or 102X102X102 or 162X162X162
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c Compute the difference of solution values and the known reference values.
c-------------------------------------------------------------------*/
  for (m = 0; m < 5; m++) {
    xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
    xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
  }

/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
c-------------------------------------------------------------------*/
  if (*cclass != 'U') {
    printf(" Verification being performed for class %1c\n", *cclass);
    printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
    /* a mismatched dt invalidates the whole comparison */
    if (fabs(dt-dtref) > epsilon) {
      *verified = FALSE;
      *cclass = 'U';
      printf(" DT does not match the reference value of %15.8e\n", dtref);
    }
  } else {
    printf(" Unknown class\n");
  }

  if (*cclass != 'U') {
    printf(" Comparison of RMS-norms of residual\n");
  } else {
    printf(" RMS-norms of residual\n");
  }
  for (m = 0; m < 5; m++) {
    if (*cclass == 'U') {
      printf("          %2d%20.13e\n", m, xcr[m]);
    } else if (xcrdif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
             m, xcr[m], xcrref[m], xcrdif[m]);
    } else {
      printf("          %2d%20.13e%20.13e%20.13e\n",
             m, xcr[m], xcrref[m], xcrdif[m]);
    }
  }

  if (*cclass != 'U') {
    printf(" Comparison of RMS-norms of solution error\n");
  } else {
    printf(" RMS-norms of solution error\n");
  }
  for (m = 0; m < 5; m++) {
    if (*cclass == 'U') {
      printf("          %2d%20.13e\n", m, xce[m]);
    } else if (xcedif[m] > epsilon) {
      *verified = FALSE;
      printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
             m, xce[m], xceref[m], xcedif[m]);
    } else {
      printf("          %2d%20.13e%20.13e%20.13e\n",
             m, xce[m], xceref[m], xcedif[m]);
    }
  }

  if (*cclass == 'U') {
    printf(" No reference values provided\n");
    printf(" No verification performed\n");
  } else if (*verified == TRUE) {
    printf(" Verification Successful\n");
  } else {
    printf(" Verification failed\n");
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c
c Performs line solves in X direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c
c-------------------------------------------------------------------*/
  lhsx();               /* assemble the block-tridiagonal lhs for the x sweeps */
  x_solve_cell();       /* forward (Gaussian) elimination along each x line */
  x_backsubstitute();   /* back substitution to recover the solution */
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_backsubstitute(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(isize)=rhs(isize)
c else assume U(isize) is loaded in un pack backsub_info
c so just use it; after call u(istart) will be sent to next cell.
c Starting from the last i-plane, each plane removes its coupling to
c plane i+1 via the C blocks left by the forward elimination:
c   rhs(i) := rhs(i) - C(i)*rhs(i+1)
c-------------------------------------------------------------------*/
  int i, j, k, m, n;

  for (i = grid_points[0]-2; i >= 0; i--) {
#pragma omp for private(k,m,n)
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          /* accumulate the row update, then store once */
          double acc = rhs[i][j][k][m];
          for (n = 0; n < BLOCK_SIZE; n++) {
            acc = acc - lhs[i][j][k][CC][m][n]*rhs[i+1][j][k][n];
          }
          rhs[i][j][k][m] = acc;
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve_cell(void) {
/*--------------------------------------------------------------------
c performs gaussian elimination on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(IMAX) and rhs'(IMAX) will be sent to next cell
c-------------------------------------------------------------------*/
  int i,j,k,isize;

  isize = grid_points[0]-1;

/*--------------------------------------------------------------------
c outer most do loops - sweeping in i direction
c-------------------------------------------------------------------*/
#pragma omp for private(k)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {

/*--------------------------------------------------------------------
c multiply c(0,j,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
      binvcrhs( lhs[0][j][k][BB],
                lhs[0][j][k][CC],
                rhs[0][j][k] );
    }
  }

/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c-------------------------------------------------------------------*/
  for (i = 1; i < isize; i++) {
#pragma omp for private(k)
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {

/*--------------------------------------------------------------------
c rhs(i) = rhs(i) - A*rhs(i-1)
c-------------------------------------------------------------------*/
        matvec_sub(lhs[i][j][k][AA],
                   rhs[i-1][j][k], rhs[i][j][k]);

/*--------------------------------------------------------------------
c B(i) = B(i) - C(i-1)*A(i)
c-------------------------------------------------------------------*/
        matmul_sub(lhs[i][j][k][AA],
                   lhs[i-1][j][k][CC],
                   lhs[i][j][k][BB]);

/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(1,j,k) by b_inverse(1,j,k) and copy to rhs
c-------------------------------------------------------------------*/
        binvcrhs( lhs[i][j][k][BB],
                  lhs[i][j][k][CC],
                  rhs[i][j][k] );
      }
    }
  }

/* final plane i == isize: eliminate coupling to isize-1 and invert the
   last diagonal block */
#pragma omp for private(k)
  for (j = 1; j < grid_points[1]-1; j++) {
    for (k = 1; k < grid_points[2]-1; k++) {

/*--------------------------------------------------------------------
c rhs(isize) = rhs(isize) - A*rhs(isize-1)
c-------------------------------------------------------------------*/
      matvec_sub(lhs[isize][j][k][AA],
                 rhs[isize-1][j][k], rhs[isize][j][k]);

/*--------------------------------------------------------------------
c B(isize) = B(isize) - C(isize-1)*A(isize)
c-------------------------------------------------------------------*/
      matmul_sub(lhs[isize][j][k][AA],
                 lhs[isize-1][j][k][CC],
                 lhs[isize][j][k][BB]);

/*--------------------------------------------------------------------
c multiply rhs() by b_inverse() and copy to rhs
c FIX: index with isize explicitly.  The original used the loop
c variable i here, which only happened to equal isize because the
c preceding for-loop had just terminated -- a latent bug if the loops
c are ever reordered.
c-------------------------------------------------------------------*/
      binvrhs( lhs[isize][j][k][BB],
               rhs[isize][j][k] );
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c computes bvec := bvec - ablock*avec for a 5x5 block and 5-vectors.
c The subtraction is accumulated left-to-right per row, matching the
c evaluation order of the unrolled original exactly.
c-------------------------------------------------------------------*/
  int row, col;

  for (row = 0; row < 5; row++) {
    double acc = bvec[row];
    for (col = 0; col < 5; col++) {
      acc = acc - ablock[row][col]*avec[col];
    }
    bvec[row] = acc;
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matmul_sub(double ablock[5][5], double bblock[5][5],
		       double cblock[5][5]) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c computes cblock := cblock - ablock*bblock for 5x5 blocks.
c Loops run column-outer / row-inner with a left-to-right accumulated
c subtraction, reproducing the unrolled original's operation order.
c-------------------------------------------------------------------*/
  int row, col, t;

  for (col = 0; col < 5; col++) {
    for (row = 0; row < 5; row++) {
      double acc = cblock[row][col];
      for (t = 0; t < 5; t++) {
        acc = acc - ablock[row][t]*bblock[t][col];
      }
      cblock[row][col] = acc;
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Gauss-Jordan elimination (no pivoting) on the 5x5 block lhs, applied
c simultaneously to the 5x5 block c and the right-hand side r.
c
c For each pivot p: scale row p of lhs (columns p+1..4), c and r by
c 1/lhs[p][p]; then for every other row q, in ascending order,
c subtract lhs[q][p] times the scaled pivot row from lhs (columns
c p+1..4), c and r.  This is the exact operation sequence of the
c fully unrolled original, so results are bit-identical.
c-------------------------------------------------------------------*/
  double pivot, coeff;
  int p, q, n;

  for (p = 0; p < 5; p++) {
    pivot = 1.00/lhs[p][p];
    /* normalize pivot row: lhs columns right of the diagonal, then c, then r */
    for (n = p+1; n < 5; n++) {
      lhs[p][n] = lhs[p][n]*pivot;
    }
    for (n = 0; n < 5; n++) {
      c[p][n] = c[p][n]*pivot;
    }
    r[p] = r[p]*pivot;

    /* eliminate the p-th column from every other row */
    for (q = 0; q < 5; q++) {
      if (q == p) continue;
      coeff = lhs[q][p];
      for (n = p+1; n < 5; n++) {
        lhs[q][n] = lhs[q][n] - coeff*lhs[p][n];
      }
      for (n = 0; n < 5; n++) {
        c[q][n] = c[q][n] - coeff*c[p][n];
      }
      r[q] = r[q] - coeff*r[p];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvrhs( double lhs[5][5], double r[5] ) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Gauss-Jordan elimination (no pivoting) on the 5x5 block lhs applied
c to the right-hand side r only; on return r holds lhs^{-1} * r.
c Same operation ordering as binvcrhs minus the c-block updates, and
c identical to the unrolled original, so results are bit-identical.
c-------------------------------------------------------------------*/
  double pivot, coeff;
  int p, q, n;

  for (p = 0; p < 5; p++) {
    pivot = 1.00/lhs[p][p];
    /* normalize pivot row (columns right of the diagonal) and r */
    for (n = p+1; n < 5; n++) {
      lhs[p][n] = lhs[p][n]*pivot;
    }
    r[p] = r[p]*pivot;

    /* eliminate the p-th column from every other row */
    for (q = 0; q < 5; q++) {
      if (q == p) continue;
      coeff = lhs[q][p];
      for (n = p+1; n < 5; n++) {
        lhs[q][n] = lhs[q][n] - coeff*lhs[p][n];
      }
      r[q] = r[q] - coeff*r[p];
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Performs line solves in Y direction by first factoring
c the block-tridiagonal matrix into an upper triangular matrix,
c and then performing back substitution to solve for the unknown
c vectors of each line.
c
c Make sure we treat elements zero to cell_size in the direction
c of the sweep.
c-------------------------------------------------------------------*/
  lhsy();               /* assemble the block-tridiagonal lhs for the y sweeps */
  y_solve_cell();       /* forward (Gaussian) elimination along each y line */
  y_backsubstitute();   /* back substitution to recover the solution */
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_backsubstitute(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c back solve: if last cell, then generate U(jsize)=rhs(jsize)
c else assume U(jsize) is loaded in un pack backsub_info
c so just use it; after call u(jstart) will be sent to next cell.
c Starting from the last j-plane, each plane removes its coupling to
c plane j+1 via the C blocks left by the forward elimination:
c   rhs(j) := rhs(j) - C(j)*rhs(j+1)
c-------------------------------------------------------------------*/
  int i, j, k, m, n;

  for (j = grid_points[1]-2; j >= 0; j--) {
#pragma omp for private(k,m,n)
    for (i = 1; i < grid_points[0]-1; i++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          /* accumulate the row update, then store once */
          double acc = rhs[i][j][k][m];
          for (n = 0; n < BLOCK_SIZE; n++) {
            acc = acc - lhs[i][j][k][CC][m][n]*rhs[i][j+1][k][n];
          }
          rhs[i][j][k][m] = acc;
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve_cell(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Performs Gaussian elimination (forward sweep in j) on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(JMAX) and rhs'(JMAX) will be sent to next cell
c-------------------------------------------------------------------*/
int i, j, k, jsize;
jsize = grid_points[1]-1;
/* j == 0: only the diagonal block needs inverting (no sub-diagonal). */
#pragma omp for private(k)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c multiply c(i,0,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][0][k][BB],
lhs[i][0][k][CC],
rhs[i][0][k] );
}
}
/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c NOTE: the j loop must stay sequential - plane j consumes the
c factored CC/rhs of plane j-1 produced in the previous iteration.
c-------------------------------------------------------------------*/
for (j = 1; j < jsize; j++) {
#pragma omp for private(k)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c subtract A*lhs_vector(j-1) from lhs_vector(j)
c
c rhs(j) = rhs(j) - A*rhs(j-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][j][k][AA],
rhs[i][j-1][k], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(j) = B(j) - C(j-1)*A(j)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][j][k][AA],
lhs[i][j-1][k][CC],
lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(i,1,k) by b_inverse(i,1,k) and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][j][k][BB],
lhs[i][j][k][CC],
rhs[i][j][k] );
}
}
}
/* j == jsize: last plane; no CC factorization needed, only the rhs. */
#pragma omp for private(k)
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][jsize][k][AA],
rhs[i][jsize-1][k], rhs[i][jsize][k]);
/*--------------------------------------------------------------------
c B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
c call matmul_sub(aa,i,jsize,k,c,
c $ cc,i,jsize-1,k,c,BB,i,jsize,k)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][jsize][k][AA],
lhs[i][jsize-1][k][CC],
lhs[i][jsize][k][BB]);
/*--------------------------------------------------------------------
c multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
c-------------------------------------------------------------------*/
binvrhs( lhs[i][jsize][k][BB],
rhs[i][jsize][k] );
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Line solve in the Z direction. lhsz() assembles the block matrices,
c z_solve_cell() factors the block-tridiagonal matrix into upper
c triangular form, and z_backsubstitute() recovers the unknown
c vectors of each line. Elements zero to cell_size in the sweep
c direction are all treated.
c-------------------------------------------------------------------*/
static void z_solve(void) {
  lhsz();
  z_solve_cell();
  z_backsubstitute();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Back substitution for the Z sweep: starting from the already-solved
c plane k = grid_points[2]-1, remove the super-diagonal (CC) block
c contribution plane by plane while moving towards k = 0.
c The i loop is a worksharing loop inside an enclosing parallel
c region; the backwards k sweep stays inside each (i,j) line.
c-------------------------------------------------------------------*/
static void z_backsubstitute(void) {
  int i, j, k, m, n;

#pragma omp for private(j,k,m,n)
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      /* rhs[i][j][k+1] already holds the solved unknowns */
      for (k = grid_points[2]-2; k >= 0; k--) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          for (n = 0; n < BLOCK_SIZE; n++) {
            rhs[i][j][k][m] -= lhs[i][j][k][CC][m][n]*rhs[i][j][k+1][n];
          }
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve_cell(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Performs Gaussian elimination (forward sweep in k) on this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(KMAX) and rhs'(KMAX) will be sent to next cell.
c-------------------------------------------------------------------*/
int i,j,k,ksize;
ksize = grid_points[2]-1;
/*--------------------------------------------------------------------
c outer most do loops - sweeping in i direction
c k == 0: only the diagonal block needs inverting (no sub-diagonal).
c-------------------------------------------------------------------*/
#pragma omp for private(j)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c multiply c(i,j,0) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][j][0][BB],
lhs[i][j][0][CC],
rhs[i][j][0] );
}
}
/*--------------------------------------------------------------------
c begin inner most do loop
c do all the elements of the cell unless last
c NOTE: the k loop must stay sequential - plane k consumes the
c factored CC/rhs of plane k-1 produced in the previous iteration.
c-------------------------------------------------------------------*/
for (k = 1; k < ksize; k++) {
#pragma omp for private(j)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c subtract A*lhs_vector(k-1) from lhs_vector(k)
c
c rhs(k) = rhs(k) - A*rhs(k-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][j][k][AA],
rhs[i][j][k-1], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(k) = B(k) - C(k-1)*A(k)
c call matmul_sub(aa,i,j,k,c,cc,i,j,k-1,c,BB,i,j,k)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][j][k][AA],
lhs[i][j][k-1][CC],
lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(i,j,1) by b_inverse(i,j,1) and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][j][k][BB],
lhs[i][j][k][CC],
rhs[i][j][k] );
}
}
}
/*--------------------------------------------------------------------
c Now finish up special cases for last cell
c k == ksize: no CC factorization needed, only the rhs.
c-------------------------------------------------------------------*/
#pragma omp for private(j)
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][j][ksize][AA],
rhs[i][j][ksize-1], rhs[i][j][ksize]);
/*--------------------------------------------------------------------
c B(ksize) = B(ksize) - C(ksize-1)*A(ksize)
c call matmul_sub(aa,i,j,ksize,c,
c $ cc,i,j,ksize-1,c,BB,i,j,ksize)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][j][ksize][AA],
lhs[i][j][ksize-1][CC],
lhs[i][j][ksize][BB]);
/*--------------------------------------------------------------------
c multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
c-------------------------------------------------------------------*/
binvrhs( lhs[i][j][ksize][BB],
rhs[i][j][ksize] );
}
}
}
/* cat ./common/c_print_results.c */
/*****************************************************************/
/****** C _ P R I N T _ R E S U L T S ******/
/*****************************************************************/
/*
 * Prints the standard NPB result summary (problem size, iterations,
 * timing, Mop/s, verification status and build configuration) to
 * stdout. Purely informational; never fails.
 *
 * Fixes over the original:
 *  - getenv("MP_SET_NUMTHREADS") may return NULL; printing NULL via
 *    %s is undefined behavior, so fall back to the "1000" default.
 *  - when SMP is not defined, `evalue` was an unused variable.
 */
void c_print_results( char *name,
                      char cclass,
                      int n1,
                      int n2,
                      int n3,
                      int niter,
                      int nthreads,
                      double t,
                      double mops,
                      char *optype,
                      int passed_verification,
                      char *npbversion,
                      char *compiletime,
                      char *cc,
                      char *clink,
                      char *c_lib,
                      char *c_inc,
                      char *cflags,
                      char *clinkflags,
                      char *rand)
{
    /* default shown for MULTICPUS when MP_SET_NUMTHREADS is unset */
    char *evalue="1000";

    printf( "\n\n %s Benchmark Completed\n", name );
    printf( " Class = %c\n", cclass );
    if( n2 == 0 && n3 == 0 )
        printf( " Size = %12d\n", n1 ); /* as in IS */
    else
        printf( " Size = %3dx%3dx%3d\n", n1,n2,n3 );
    printf( " Iterations = %12d\n", niter );
    printf( " Threads = %12d\n", nthreads );
    printf( " Time in seconds = %12.2f\n", t );
    printf( " Mop/s total = %12.2f\n", mops );
    printf( " Operation type = %24s\n", optype);
    if( passed_verification )
        printf( " Verification = SUCCESSFUL\n" );
    else
        printf( " Verification = UNSUCCESSFUL\n" );
    printf( " Version = %12s\n", npbversion );
    printf( " Compile date = %12s\n", compiletime );
    printf( "\n Compile options:\n" );
    printf( " CC = %s\n", cc );
    printf( " CLINK = %s\n", clink );
    printf( " C_LIB = %s\n", c_lib );
    printf( " C_INC = %s\n", c_inc );
    printf( " CFLAGS = %s\n", cflags );
    printf( " CLINKFLAGS = %s\n", clinkflags );
    printf( " RAND = %s\n", rand );
#ifdef SMP
    {
        /* NULL-check the environment: %s with NULL is UB */
        char *mp = getenv("MP_SET_NUMTHREADS");
        if( mp != NULL ) evalue = mp;
        printf( " MULTICPUS = %s\n", evalue );
    }
#else
    (void) evalue; /* silence unused-variable warning when SMP is off */
#endif
}
/*
cat ./common/c_timers.c
*/
/*
#include "wtime.h"
#if defined(IBM)
#define wtime wtime
#elif defined(CRAY)
#define wtime WTIME
#else
#define wtime wtime_
#endif
*/
/* Prototype */
void wtime( double * );
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
/* Returns the current wall-clock reading (seconds) from wtime(). */
double elapsed_time( void )
{
    double t;

    wtime( &t );
    return t;
}
/* Timer state, indexed by timer id 0..63: start[n] holds the last
   timer_start(n) timestamp, elapsed[n] the accumulated seconds. */
double start[64], elapsed[64];
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
/* Resets the accumulated time of timer n to zero
   (start[n] is deliberately left untouched). */
void timer_clear( int n )
{
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
/* Records the current wall-clock time as the start point of timer n. */
void timer_start( int n )
{
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
/* Stops timer n: adds the interval since the matching timer_start(n)
   to the timer's accumulated total. */
void timer_stop( int n )
{
    elapsed[n] += elapsed_time() - start[n];
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
/* Returns the total accumulated seconds of timer n. */
double timer_read( int n )
{
return( elapsed[n] );
}
/* Stores into *t the wall-clock time in seconds, with microsecond
 * resolution, relative to the second of the first call (so the first
 * call yields a value in [0, 1)).
 *
 * Fix: the baseline was `static int`, which narrows time_t on
 * platforms with 64-bit time_t; use time_t itself.
 *
 * NOTE(review): the static baseline is written without
 * synchronization, so the very first call is not thread-safe —
 * presumably all callers time from a single thread; confirm.
 */
void wtime(double *t)
{
    static time_t sec = -1;
    struct timeval tv;
    gettimeofday(&tv, (struct timezone *)0);
    if (sec < 0) sec = tv.tv_sec;
    *t = (double)(tv.tv_sec - sec) + 1.0e-6*(double)tv.tv_usec;
}
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 20
// Intentional data race: the innermost k-loop is parallelized even
// though iteration k reads A[i][j][k - 1], which iteration k - 1
// writes (a loop-carried dependence). This file is a positive test
// input for the LLOV race detector — do not "fix" the race; the
// CHECK line below expects it to be reported.
int main() {
int A[N][N][N];
for (int i = 1; i < N; i++)
for (int j = 1; j < N; j++)
// parallelizing across k races by design (k depends on k-1)
#pragma omp parallel for
for (int k = 1; k < N; k++)
A[i][j][k] = A[i][j][k - 1];
}
// CHECK: Data Race detected
// END
/**
* @file
*
* @brief The Order Preserving Minimal Perfect Hash Map C benchmark.
*
* @copyright BSD License (see LICENSE.md or https://www.libelektra.org)
*/
// ==== DEFINE SECTION ====
#define _GNU_SOURCE
#define KDBRAND_BENCHMARK // allows the seed injection into Elektra
// uncomment to use OPENMP and set USE_OPENMP in CMakeLists.txt
//~ #define USE_OPENMP
#ifdef USE_OPENMP
// set here you number of threads
#define NUMBEROFTHREADS 8
#else
#define NUMBEROFTHREADS 1
#endif
// ==== INCLUDE SECTION ====
#include "benchmarks.h"
#ifdef HAVE_HSEARCHR
#include <search.h>
#endif
#ifdef USE_OPENMP
#include <omp.h>
#endif
#include "../src/libs/elektra/opmphm.c"
#include "../src/libs/elektra/opmphmpredictor.c"
#include "../src/libs/elektra/rand.c"
#include <sys/time.h>
int32_t elektraRandBenchmarkInitSeed;
// benchmarks helpers
static int32_t * getRandomSeed (int32_t * seed);
static FILE * openOutFileWithRPartitePostfix (const char * name, uint8_t r);
static const char * getString (void * data);
static size_t getPower (size_t p, size_t q);
static int cmpInteger (const void * a, const void * b);
// generate KeySets
static KeySetShape * getKeySetShapes (void);
// Number of distinct KeySet shapes returned by getKeySetShapes ();
// every benchmark below iterates its shape index s in [0, numberOfShapes).
const size_t numberOfShapes = 8;
/**
* General structure of a benchmark
*
* `name` is a unique name of the benchmark and `benchmarkF` is the independent function executing the benchmark.
* Execute a benchmark with benchmark_opmphm `name`.
*/
typedef struct
{
char * name; // unique benchmark name, used to select it on the command line
size_t numberOfSeedsNeeded; // how many seeds the benchmark consumes from the seed feed
void (*benchmarkF) (char *); // independent benchmark entry point, receives `name`
} Benchmark;
/**
* START ======================================= Measures the Opmphm Hash Function time ============================================== START
*
* This benchmark measures the time for hashing a whole KeySet, variegating in the size. Executed multiple times.
*
* The output has the following header: n;n;n;n;... (for each KeySetShape)
*
* This benchmark takes numberOfShapes * nCount seeds
*/
/**
 * @brief Measures the time to hash every key name of a whole KeySet.
 *
 * For each KeySet size in n[] and each KeySet shape, one KeySet is
 * generated and hashed `runs` times with opmphmHashfunction (); each
 * run's duration in microseconds is recorded and finally written as
 * CSV (one column per n/shape combination, one row per run).
 *
 * @param name benchmark name, printed and used for progress output
 */
static void benchmarkHashFunctionTime (char * name)
{
const size_t nCount = 4;
const size_t n[] = { 10, 100, 1000, 10000 };
const size_t runs = 11;
// init results
size_t * results = elektraMalloc (nCount * numberOfShapes * runs * sizeof (size_t));
if (!results)
{
printExit ("malloc");
}
// benchmark
printf ("Run Benchmark %s:\n", name);
KeySetShape * keySetShapes = getKeySetShapes ();
for (size_t i = 0; i < nCount; ++i)
{
for (size_t s = 0; s < numberOfShapes; ++s)
{
printf ("now at n: %zu/%zu shape: %zu/%zu\r", i, nCount, s, numberOfShapes);
fflush (stdout);
int32_t seed;
if (getRandomSeed (&seed) != &seed) printExit ("Seed Parsing Error or feed me more seeds");
KeySet * ks = generateKeySet (n[i], &seed, &keySetShapes[s]);
for (size_t r = 0; r < runs; ++r)
{
Key * key;
ksRewind (ks);
struct timeval start;
struct timeval end;
// the empty asm statements around the timed region discourage the
// compiler from reordering or eliding the measured work
__asm__("");
gettimeofday (&start, 0);
__asm__("");
// measure
while ((key = ksNext (ks)))
{
__asm__("");
opmphmHashfunction (keyName (key), strlen (keyName (key)), 1337);
__asm__("");
}
__asm__("");
gettimeofday (&end, 0);
__asm__("");
// elapsed microseconds for this run
results[i * (numberOfShapes * runs) + s * runs + r] =
(end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
}
ksDel (ks);
}
}
elektraFree (keySetShapes);
// write out results
FILE * out = openOutFileWithRPartitePostfix ("benchmark_opmphm_hashfunctiontime", 0);
if (!out)
{
printExit ("open out file");
}
// print header
for (size_t i = 0; i < nCount; ++i)
{
for (size_t s = 0; s < numberOfShapes; ++s)
{
if (!s && !i)
{
fprintf (out, "%zu", n[i]);
}
else
{
fprintf (out, ";%zu", n[i]);
}
}
}
fprintf (out, "\n");
// print data
for (size_t r = 0; r < runs; ++r)
{
for (size_t i = 0; i < nCount; ++i)
{
for (size_t s = 0; s < numberOfShapes; ++s)
{
if (!s && !i)
{
fprintf (out, "%zu", results[i * (numberOfShapes * runs) + s * runs + r]);
}
else
{
fprintf (out, ";%zu", results[i * (numberOfShapes * runs) + s * runs + r]);
}
}
}
fprintf (out, "\n");
}
fclose (out);
elektraFree (results);
}
/**
* END ========================================= Measures the Opmphm Hash Function time ================================================ END
*/
/**
* START ======================================================= Mapping ============================================================= START
*
* This benchmark counts the opmphmMapping (...) invocations until success, for each KeySet size (n) and space influencing parameter (c).
* First the KeySets are build, for every KeySet size (n) there are numberOfShapes * keySetsPerShape KeySets.
* Then the benchmarking for every KeySet size (n) and space influencing parameter (c) takes place, with a fixed set of seeds for
* the opmphmMapping (...) invocations.
* At the end the results are written out in the following format:
*
* trials;n_%zuc_%f;... (each n and c are unique)
*
* The number of needed seeds for this benchmarks is: nCount * numberOfShapes * keySetsPerShape (KeySets generation) + numberOfSeeds (tested
* seeds)
*/
/**
 * @brief Sanity-checks a successfully mapped OPMPHM: runs the
 * assignment step and then looks up every element, verifying the
 * order preserving property (element i must map to index i).
 * Clears the opmphm afterwards so it can be reused.
 *
 * The check is skipped when mappings == maxMappings, because in that
 * case the mapping may never have succeeded.
 *
 * NOTE(review): `n` is the KeySet size and every caller in this file
 * passes n >= 10, so the `n < 5` guard makes this check dead code.
 * benchmarkMappingOpt () inlines the same check guarded by the size
 * index (`nI < 5`) instead — confirm which threshold was intended.
 *
 * @param opmphm the OPMPHM to check
 * @param graph the OpmphmGraph holding the mapping
 * @param n number of elements
 * @param init the OpmphmInit used for the mapping
 * @param mappings number of opmphmMapping () invocations performed
 * @param maxMappings maximum number of mapping invocations allowed
 */
static void benchmarkMappingCheckOpmphm (Opmphm * opmphm, OpmphmGraph * graph, size_t n, OpmphmInit * init, size_t mappings,
size_t maxMappings)
{
if (n < 5 && mappings != maxMappings)
{
// assign
if (opmphmAssignment (opmphm, graph, n, 1))
{
printExit ("check assignment failed");
}
for (size_t i = 0; i < n; ++i)
{
// order preserving: lookup of element i must yield i
if (i != opmphmLookup (opmphm, n, init->getName (init->data[i])))
{
printExit ("check assignment failed");
}
}
opmphmClear (opmphm);
}
}
/**
 * @brief Counts opmphmMapping () invocations until success for every
 * combination of KeySet size n[] and space parameter c[].
 *
 * Results are histogram counts: results[nI][cI][mappings-1] is the
 * number of (KeySet, seed) pairs that needed exactly `mappings`
 * invocations. With USE_OPENMP the seed range is split across
 * NUMBEROFTHREADS threads; each thread accumulates into its own
 * localResults buffer and merges into `results` under `writeLock`.
 *
 * @param name benchmark name, printed in the progress output
 */
static void benchmarkMapping (char * name)
{
size_t rUniPar = 3;
const size_t nCount = 15;
const size_t n[] = { 10, 15, 20, 30, 40, 60, 80, 120, 160, 240, 320, 480, 640, 960, 1280 }; // 15
const size_t cCount = 15;
const double c[] = { 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5 }; // 15
const size_t keySetsPerShape = 20;
const size_t numberOfKeySets = nCount * numberOfShapes * keySetsPerShape;
const size_t numberOfSeeds = 10000;
const size_t maxMappings = 10; // the maximum trials for one opmphmMapping (...) invocation series.
// init seed population, used for opmphmMapping (...) invocation.
int32_t * seeds = elektraMalloc (numberOfSeeds * sizeof (int32_t));
if (!seeds)
{
printExit ("malloc");
}
// get seeds
for (size_t i = 0; i < numberOfSeeds; ++i)
{
if (getRandomSeed (&seeds[i]) != &seeds[i]) printExit ("Seed Parsing Error or feed me more seeds");
}
// init results
size_t * results = elektraMalloc (nCount * cCount * maxMappings * sizeof (size_t));
if (!results)
{
printExit ("malloc");
}
memset (results, 0, nCount * cCount * maxMappings * sizeof (size_t));
// Generate all KeySets
KeySetShape * keySetShapes = getKeySetShapes ();
KeySet ** keySetsCache = elektraMalloc (numberOfKeySets * sizeof (KeySet *));
if (!keySetsCache)
{
printExit ("malloc");
}
printf ("KeySet Cache Build:\n");
for (size_t nI = 0; nI < nCount; ++nI)
{
printf ("now at: %zu/%zu\r", nI + 1, nCount);
fflush (stdout);
for (size_t shapeI = 0; shapeI < numberOfShapes; ++shapeI)
{
for (size_t ksPshapeI = 0; ksPshapeI < keySetsPerShape; ++ksPshapeI)
{
int32_t genSeed;
if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
keySetsCache[nI * (numberOfShapes * keySetsPerShape) + shapeI * keySetsPerShape + ksPshapeI] =
generateKeySet (n[nI], &genSeed, &keySetShapes[shapeI]);
}
}
}
printf ("\nRun Benchmark %s:\n", name);
#ifdef USE_OPENMP
omp_set_num_threads (NUMBEROFTHREADS);
// lock
omp_lock_t writeLock;
omp_init_lock (&writeLock);
#endif
// split the seed population evenly across the threads
if (numberOfSeeds % NUMBEROFTHREADS != 0) printExit ("seeds % NUMBEROFTHREADS != 0");
size_t partSize = numberOfSeeds / NUMBEROFTHREADS;
// init threads local results
size_t * localResults[NUMBEROFTHREADS];
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
localResults[i] = elektraMalloc (nCount * cCount * maxMappings * sizeof (size_t));
if (!localResults[i])
{
printExit ("malloc");
}
}
// for all nCount
for (size_t nI = 0; nI < nCount; ++nI)
{
// and cCount
for (size_t cI = 0; cI < cCount; ++cI)
{
printf ("now at: n = %zu/%zu c = %zu/%zu\r", nI + 1, nCount, cI + 1, cCount);
fflush (stdout);
// one OPMPHM + graph per thread, reused across all KeySets of this (n, c)
Opmphm * opmphms[NUMBEROFTHREADS];
OpmphmGraph * graphs[NUMBEROFTHREADS];
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
opmphms[i] = opmphmNew ();
if (!opmphms[i]) printExit ("opmphm");
graphs[i] = opmphmGraphNew (opmphms[i], rUniPar, n[nI], opmphmMinC (rUniPar) + c[cI]);
if (!graphs[i]) printExit ("graph");
}
// OPMPHM
// go through all KeySets from n
for (size_t ksCacheI = 0; ksCacheI < numberOfShapes * keySetsPerShape; ++ksCacheI)
{
KeySet * ks = keySetsCache[nI * (numberOfShapes * keySetsPerShape) + ksCacheI];
#ifdef USE_OPENMP
#pragma omp parallel
#endif
{
size_t threadI = 0;
// OPMPHM
OpmphmInit init;
init.getName = getString;
init.data = (void **) (ks->array);
// OPMPHM
#ifdef USE_OPENMP
threadI = omp_get_thread_num ();
#endif
// reset local result; only the (nI, cI) slots get written below,
// clearing the whole buffer keeps the merge loop simple
memset (localResults[threadI], 0, nCount * cCount * maxMappings * sizeof (size_t));
// try each seed part
for (size_t seedI = threadI * partSize; seedI < (threadI + 1) * partSize; ++seedI)
{
size_t mappings = 0; // counts mapping invocations
// OPMPHM
init.initSeed = seeds[seedI];
// fresh OpmphmGraph
opmphmGraphClear (opmphms[threadI], graphs[threadI]);
// do benchmark: retry until the mapping succeeds or maxMappings is hit
int ret;
do
{
ret = opmphmMapping (opmphms[threadI], graphs[threadI], &init, n[nI]);
++mappings;
} while (ret && mappings < maxMappings);
// OPMPHM
if (mappings < 1 || mappings > maxMappings)
{
printExit ("benchmarkSeedRangeMappingCount: mappings out of range");
}
// check opmphm
benchmarkMappingCheckOpmphm (opmphms[threadI], graphs[threadI], n[nI], &init, mappings,
maxMappings);
// save result
// shift, because 0 not used
--mappings;
++localResults[threadI][nI * (cCount * maxMappings) + cI * maxMappings + mappings];
}
#ifdef USE_OPENMP
// write local to global
omp_set_lock (&writeLock);
#endif
for (size_t i = 0; i < nCount * cCount * maxMappings; ++i)
{
results[i] += localResults[threadI][i];
}
#ifdef USE_OPENMP
omp_unset_lock (&writeLock);
#endif
}
}
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
// OPMPHM
opmphmDel (opmphms[i]);
opmphmGraphDel (graphs[i]);
// OPMPHM
}
}
// end for all nCount
}
#ifdef USE_OPENMP
omp_destroy_lock (&writeLock);
#endif
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
// NOTE(review): allocated with elektraMalloc () but released with
// free () — works only if elektraMalloc wraps malloc; prefer
// elektraFree () for consistency with the rest of this function.
free (localResults[i]);
}
printf ("\n");
/*
 * results sanity check
 *
 * each n and c should have in sum (numberOfShapes * keySetsPerShape) for each KeySet times (numberOfSeeds) seeds trials
 */
for (size_t nI = 0; nI < nCount; ++nI)
{
for (size_t cI = 0; cI < cCount; ++cI)
{
size_t sum = 0;
for (size_t mappingI = 0; mappingI < maxMappings; ++mappingI)
{
sum += results[nI * (cCount * maxMappings) + cI * maxMappings + mappingI];
}
if (sum != numberOfShapes * keySetsPerShape * numberOfSeeds)
{
printExit ("benchmarkSeedRangeMappingCount: results sanity check failed");
}
}
}
// write out
FILE * out = openOutFileWithRPartitePostfix ("benchmark_opmphm_mapping", rUniPar);
if (!out)
{
printExit ("open out file");
}
// print header
fprintf (out, "trials");
for (size_t nI = 0; nI < nCount; ++nI)
{
for (size_t cI = 0; cI < cCount; ++cI)
{
fprintf (out, ";n_%zuc_%f", n[nI], opmphmMinC (rUniPar) + c[cI]);
}
}
fprintf (out, "\n");
// print data
for (size_t mappingI = 0; mappingI < maxMappings; ++mappingI)
{
fprintf (out, "%zu", mappingI + 1); // unshift, because 0 is not a result
for (size_t nI = 0; nI < nCount; ++nI)
{
for (size_t cI = 0; cI < cCount; ++cI)
{
fprintf (out, ";%zu", results[nI * (cCount * maxMappings) + cI * maxMappings + mappingI]);
}
}
fprintf (out, "\n");
}
// cleanup
for (size_t i = 0; i < numberOfKeySets; ++i)
{
ksDel (keySetsCache[i]);
}
elektraFree (keySetsCache);
fclose (out);
elektraFree (keySetShapes);
elektraFree (seeds);
elektraFree (results);
}
/**
* END ========================================================= Mapping =============================================================== END
*/
/**
* START ============================================== Mapping with Optimization ==================================================== START
*
* This benchmark counts the opmphmMapping (...) invocations until success, for each KeySet size.
* First the KeySets are build, for every KeySet size (n) there are numberOfShapes * keySetsPerShape KeySets.
* Then the benchmarking for every KeySet size (n) takes place, with a fixed set of seeds for the opmphmMapping (...) invocations.
* At the end the results are written out in the following format:
*
* trials;n_%zur_%uc_%f;... (each n is unique)
*
* The number of needed seeds for this benchmarks is: nCount * numberOfShapes * keySetsPerShape (KeySets generation) + numberOfSeeds (tested
* seeds)
*/
/**
 * @brief Counts opmphmMapping () invocations until success for every
 * KeySet size n[], using the optimized per-n parameters
 * opmphmOptR (n) and opmphmOptC (n) instead of a fixed (r, c) grid.
 *
 * Results are histogram counts: results[nI][mappings-1] is the number
 * of (KeySet, seed) pairs that needed exactly `mappings` invocations.
 * With USE_OPENMP the seed range is split across NUMBEROFTHREADS
 * threads; each thread accumulates into its own localResults buffer
 * and merges into `results` under `writeLock`.
 *
 * @param name benchmark name, printed in the progress output
 */
static void benchmarkMappingOpt (char * name)
{
// create the n array: dense for small n, progressively sparser above
const size_t nCount = 132;
size_t * n = elektraMalloc (nCount * sizeof (size_t));
if (!n)
{
printExit ("malloc");
}
size_t controlCount = 0;
for (size_t i = 2; i <= 38; ++i)
{
n[controlCount] = i;
++controlCount;
}
for (size_t i = 39; i <= 239; i = i + 5)
{
n[controlCount] = i;
++controlCount;
}
n[controlCount] = 240;
++controlCount;
for (size_t i = 259; i <= 1279; i = i + 20)
{
n[controlCount] = i;
++controlCount;
}
n[controlCount] = 1280;
++controlCount;
// guard against the hand-maintained nCount drifting from the fill loops
if (controlCount != nCount)
{
printExit ("controlCount != nCount");
}
const size_t keySetsPerShape = 70;
const size_t numberOfKeySets = nCount * numberOfShapes * keySetsPerShape;
const size_t numberOfSeeds = 20000;
const size_t maxMappings = 10; // the maximum trials for one opmphmMapping (...) invocation series.
// init seed population, used for opmphmMapping (...) invocation.
int32_t * seeds = elektraMalloc (numberOfSeeds * sizeof (int32_t));
if (!seeds)
{
printExit ("malloc");
}
// get seeds
for (size_t i = 0; i < numberOfSeeds; ++i)
{
if (getRandomSeed (&seeds[i]) != &seeds[i]) printExit ("Seed Parsing Error or feed me more seeds");
}
// init results
size_t * results = elektraMalloc (nCount * maxMappings * sizeof (size_t));
if (!results)
{
printExit ("malloc");
}
memset (results, 0, nCount * maxMappings * sizeof (size_t));
// Generate all KeySets
KeySetShape * keySetShapes = getKeySetShapes ();
KeySet ** keySetsCache = elektraMalloc (numberOfKeySets * sizeof (KeySet *));
if (!keySetsCache)
{
printExit ("malloc");
}
printf ("KeySet Cache Build:\n");
for (size_t nI = 0; nI < nCount; ++nI)
{
printf ("now at: %zu/%zu\r", nI + 1, nCount);
fflush (stdout);
for (size_t shapeI = 0; shapeI < numberOfShapes; ++shapeI)
{
for (size_t ksPshapeI = 0; ksPshapeI < keySetsPerShape; ++ksPshapeI)
{
int32_t genSeed;
if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
keySetsCache[nI * (numberOfShapes * keySetsPerShape) + shapeI * keySetsPerShape + ksPshapeI] =
generateKeySet (n[nI], &genSeed, &keySetShapes[shapeI]);
}
}
}
printf ("\nRun Benchmark %s:\n", name);
#ifdef USE_OPENMP
omp_set_num_threads (NUMBEROFTHREADS);
// lock
omp_lock_t writeLock;
omp_init_lock (&writeLock);
#endif
// split the seed population evenly across the threads
if (numberOfSeeds % NUMBEROFTHREADS != 0) printExit ("seeds % NUMBEROFTHREADS != 0");
size_t partSize = numberOfSeeds / NUMBEROFTHREADS;
// init threads local results
size_t * localResults[NUMBEROFTHREADS];
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
localResults[i] = elektraMalloc (nCount * maxMappings * sizeof (size_t));
if (!localResults[i])
{
printExit ("malloc");
}
}
// for all nCount
for (size_t nI = 0; nI < nCount; ++nI)
{
printf ("now at: n = %zu/%zu\r", nI + 1, nCount);
fflush (stdout);
// one OPMPHM + graph per thread, using the optimized r and c for this n
Opmphm * opmphms[NUMBEROFTHREADS];
OpmphmGraph * graphs[NUMBEROFTHREADS];
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
opmphms[i] = opmphmNew ();
if (!opmphms[i]) printExit ("opmphm");
uint8_t r = opmphmOptR (n[nI]);
graphs[i] = opmphmGraphNew (opmphms[i], r, n[nI], opmphmMinC (r) + opmphmOptC (n[nI]));
if (!graphs[i]) printExit ("graph");
}
// OPMPHM
// go through all KeySets from n
for (size_t ksCacheI = 0; ksCacheI < numberOfShapes * keySetsPerShape; ++ksCacheI)
{
KeySet * ks = keySetsCache[nI * (numberOfShapes * keySetsPerShape) + ksCacheI];
#ifdef USE_OPENMP
#pragma omp parallel
#endif
{
size_t threadI = 0;
// OPMPHM
OpmphmInit init;
init.getName = getString;
init.data = (void **) (ks->array);
// OPMPHM
#ifdef USE_OPENMP
threadI = omp_get_thread_num ();
#endif
// reset local result; only the nI slots get written below,
// clearing the whole buffer keeps the merge loop simple
memset (localResults[threadI], 0, nCount * maxMappings * sizeof (size_t));
// try each seed part
for (size_t seedI = threadI * partSize; seedI < (threadI + 1) * partSize; ++seedI)
{
size_t mappings = 0; // counts mapping invocations
// OPMPHM
init.initSeed = seeds[seedI];
// fresh OpmphmGraph
opmphmGraphClear (opmphms[threadI], graphs[threadI]);
// do benchmark: retry until the mapping succeeds or maxMappings is hit
int ret;
do
{
ret = opmphmMapping (opmphms[threadI], graphs[threadI], &init, n[nI]);
++mappings;
} while (ret && mappings < maxMappings);
// OPMPHM
if (mappings < 1 || mappings > maxMappings)
{
printExit ("benchmarkSeedRangeMappingCount: mappings out of range");
}
// check assignment for the five smallest sizes only (keeps cost low);
// verifies the order preserving property: lookup of element i yields i
if (nI < 5 && mappings != maxMappings)
{
// assign
if (opmphmAssignment (opmphms[threadI], graphs[threadI], n[nI], 1))
{
printExit ("check assignment failed");
}
for (size_t i = 0; i < n[nI]; ++i)
{
if (i != opmphmLookup (opmphms[threadI], n[nI], init.getName (init.data[i])))
{
printExit ("check assignment failed");
}
}
opmphmClear (opmphms[threadI]);
}
// save result
// shift, because 0 not used
--mappings;
++localResults[threadI][nI * maxMappings + mappings];
}
#ifdef USE_OPENMP
// write local to global
omp_set_lock (&writeLock);
#endif
for (size_t i = 0; i < nCount * maxMappings; ++i)
{
results[i] += localResults[threadI][i];
}
#ifdef USE_OPENMP
omp_unset_lock (&writeLock);
#endif
}
}
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
// OPMPHM
opmphmDel (opmphms[i]);
opmphmGraphDel (graphs[i]);
// OPMPHM
}
// end for all nCount
}
#ifdef USE_OPENMP
omp_destroy_lock (&writeLock);
#endif
for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
{
// NOTE(review): allocated with elektraMalloc () but released with
// free () — works only if elektraMalloc wraps malloc; prefer
// elektraFree () for consistency with the rest of this function.
free (localResults[i]);
}
printf ("\n");
/*
 * results sanity check
 *
 * each n should have in sum (numberOfShapes * keySetsPerShape) for each KeySet times (numberOfSeeds) seeds trials
 */
for (size_t nI = 0; nI < nCount; ++nI)
{
size_t sum = 0;
for (size_t mappingI = 0; mappingI < maxMappings; ++mappingI)
{
sum += results[nI * maxMappings + mappingI];
}
if (sum != numberOfShapes * keySetsPerShape * numberOfSeeds)
{
printExit ("benchmarkSeedRangeMappingCount: results sanity check failed");
}
}
// write out
FILE * out = fopen ("benchmark_opmphm_mapping_opt.csv", "w");
if (!out)
{
printExit ("open out file");
}
// print header
fprintf (out, "trials");
for (size_t nI = 0; nI < nCount; ++nI)
{
fprintf (out, ";n_%zur_%uc_%f", n[nI], opmphmOptR (n[nI]), opmphmMinC (opmphmOptR (n[nI])) + opmphmOptC (n[nI]));
}
fprintf (out, "\n");
// print data
for (size_t mappingI = 0; mappingI < maxMappings; ++mappingI)
{
fprintf (out, "%zu", mappingI + 1); // unshift, because 0 is not a result
for (size_t nI = 0; nI < nCount; ++nI)
{
fprintf (out, ";%zu", results[nI * maxMappings + mappingI]);
}
fprintf (out, "\n");
}
// cleanup
for (size_t i = 0; i < numberOfKeySets; ++i)
{
ksDel (keySetsCache[i]);
}
elektraFree (n);
elektraFree (keySetsCache);
fclose (out);
elektraFree (keySetShapes);
elektraFree (seeds);
elektraFree (results);
}
/**
* END ================================================ Mapping with Optimization ====================================================== END
*/
/**
* START ================================================== Mapping All Seeds ======================================================== START
*
* This benchmark counts the opmphmMapping (...) invocations until success, for each KeySet size and all seeds.
* First the KeySets are build, for every KeySet size (n). Then the benchmarking for every KeySet size (n) takes place,
* the seeds start at 1 and go to ELEKTRARANDMAX - 1 = 2147483646.
* At the end the results are written out in the following format:
*
* trials;n_%zur_%uc_%f;... (each n is unique)
*
* The number of needed seeds for this benchmarks is: nCount (KeySets generation)
*/
/**
 * @brief Counts opmphmMapping (...) invocations until success, for each KeySet size and every seed.
 *
 * One KeySet is generated per n. The seed range [startSeed, endSeed] is split evenly over
 * NUMBEROFTHREADS threads; each thread records per n a histogram bucket 1..maxMappings of how many
 * mapping invocations a seed needed, merged under a lock into the global results. The histogram is
 * written to benchmark_opmphm_mapping_allSeeds.csv.
 *
 * @param name benchmark name, only used for progress output
 */
static void benchmarkMappingAllSeeds (char * name)
{
	// create the n array
	const size_t nCount = 7;
	size_t * n = elektraMalloc (nCount * sizeof (size_t));
	if (!n)
	{
		printExit ("malloc");
	}
	n[0] = 9;
	n[1] = 29;
	n[2] = 49;
	n[3] = 69;
	n[4] = 89;
	n[5] = 109;
	n[6] = 129;
	// seeds limits
	const int32_t startSeed = 1;
	const int32_t endSeed = ELEKTRARANDMAX - 1;
	const size_t maxMappings = 10; // the maximum trials for one opmphmMapping (...) invocation series.
	// init results
	size_t * results = elektraMalloc (nCount * maxMappings * sizeof (size_t));
	if (!results)
	{
		printExit ("malloc");
	}
	memset (results, 0, nCount * maxMappings * sizeof (size_t));
	// Generate all KeySets
	KeySetShape * keySetShapes = getKeySetShapes ();
	KeySet ** keySetsCache = elektraMalloc (nCount * sizeof (KeySet *));
	if (!keySetsCache)
	{
		printExit ("malloc");
	}
	for (size_t nI = 0; nI < nCount; ++nI)
	{
		int32_t genSeed;
		if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
		keySetsCache[nI] = generateKeySet (n[nI], &genSeed, &keySetShapes[0]); // shape 0 is shapefConstBinary with 0 parents
	}
	printf ("\nRun Benchmark %s:\n", name);
#ifdef USE_OPENMP
	omp_set_num_threads (NUMBEROFTHREADS);
	// lock protecting the global results accumulation
	omp_lock_t writeLock;
	omp_init_lock (&writeLock);
#endif
	// split the job: thread i handles the inclusive seed range [partIntervals[2i], partIntervals[2i + 1]]
	int32_t partIntervals[NUMBEROFTHREADS * 2];
	int32_t onePart = (endSeed - startSeed) / NUMBEROFTHREADS;
	int32_t iterateIntervals = startSeed;
	for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
	{
		if (i == NUMBEROFTHREADS - 1)
		{
			// give last thread the remaining seeds
			partIntervals[i * 2] = iterateIntervals;
			partIntervals[(i * 2) + 1] = endSeed;
		}
		else
		{
			partIntervals[i * 2] = iterateIntervals;
			partIntervals[(i * 2) + 1] = iterateIntervals + onePart - 1;
			iterateIntervals += onePart;
		}
	}
	// init threads local results
	size_t * localResults[NUMBEROFTHREADS];
	for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
	{
		localResults[i] = elektraMalloc (nCount * maxMappings * sizeof (size_t));
		if (!localResults[i])
		{
			printExit ("malloc");
		}
	}
	// for all nCount
	for (size_t nI = 0; nI < nCount; ++nI)
	{
		// OPMPHM for all threads
		Opmphm * opmphms[NUMBEROFTHREADS];
		OpmphmGraph * graphs[NUMBEROFTHREADS];
		for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
		{
			opmphms[i] = opmphmNew ();
			if (!opmphms[i]) printExit ("opmphm");
			uint8_t r = opmphmOptR (n[nI]);
			graphs[i] = opmphmGraphNew (opmphms[i], r, n[nI], opmphmMinC (r) + opmphmOptC (n[nI]));
			if (!graphs[i]) printExit ("graph");
		}
		// OPMPHM
		KeySet * ks = keySetsCache[nI];
#ifdef USE_OPENMP
#pragma omp parallel
#endif
		{
			size_t threadI = 0;
			// OPMPHM
			OpmphmInit init;
			init.getName = getString;
			init.data = (void **) (ks->array);
			// OPMPHM
#ifdef USE_OPENMP
			threadI = omp_get_thread_num ();
#endif
			// reset local result
			memset (localResults[threadI], 0, nCount * maxMappings * sizeof (size_t));
			// try each seed part; the left condition guards against int32_t wrap around
			for (int32_t seed = partIntervals[threadI * 2];
			     partIntervals[threadI * 2] <= seed && seed <= partIntervals[(threadI * 2) + 1]; ++seed)
			{
				if (threadI == 0 && (seed % 1000) == 0)
				{
					printf ("now at: n = %zu/%zu and seed %i from %i\r", nI + 1, nCount, seed, partIntervals[1]);
					fflush (stdout);
				}
				size_t mappings = 0; // counts mapping invocations
				// OPMPHM
				init.initSeed = seed;
				// fresh OpmphmGraph
				opmphmGraphClear (opmphms[threadI], graphs[threadI]);
				// do benchmark
				int ret;
				do
				{
					ret = opmphmMapping (opmphms[threadI], graphs[threadI], &init, n[nI]);
					++mappings;
				} while (ret && mappings < maxMappings);
				// OPMPHM
				if (mappings < 1 || mappings > maxMappings)
				{
					printExit ("benchmarkMappingAllSeeds: mappings out of range");
				}
				// save result
				// shift, because 0 not used
				--mappings;
				++localResults[threadI][nI * maxMappings + mappings];
			}
#ifdef USE_OPENMP
			// write local to global
			omp_set_lock (&writeLock);
#endif
			for (size_t i = 0; i < nCount * maxMappings; ++i)
			{
				results[i] += localResults[threadI][i];
			}
#ifdef USE_OPENMP
			omp_unset_lock (&writeLock);
#endif
		}
		for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
		{
			// OPMPHM
			opmphmDel (opmphms[i]);
			opmphmGraphDel (graphs[i]);
			// OPMPHM
		}
		// end for all nCount
	}
#ifdef USE_OPENMP
	omp_destroy_lock (&writeLock);
#endif
	for (size_t i = 0; i < NUMBEROFTHREADS; ++i)
	{
		elektraFree (localResults[i]);
	}
	printf ("\n");
	/*
	 * results sanity check
	 *
	 * each n should have in sum endSeed - startSeed + 1 trials
	 */
	for (size_t nI = 0; nI < nCount; ++nI)
	{
		size_t sum = 0;
		for (size_t mappingI = 0; mappingI < maxMappings; ++mappingI)
		{
			sum += results[nI * maxMappings + mappingI];
		}
		if (sum != (size_t) endSeed - startSeed + 1)
		{
			printExit ("benchmarkMappingAllSeeds: results sanity check failed");
		}
	}
	// write out
	FILE * out = fopen ("benchmark_opmphm_mapping_allSeeds.csv", "w");
	if (!out)
	{
		printExit ("open out file");
	}
	// print header
	fprintf (out, "trials");
	for (size_t nI = 0; nI < nCount; ++nI)
	{
		fprintf (out, ";n_%zur_%uc_%f", n[nI], opmphmOptR (n[nI]), opmphmMinC (opmphmOptR (n[nI])) + opmphmOptC (n[nI]));
	}
	fprintf (out, "\n");
	// print data
	for (size_t mappingI = 0; mappingI < maxMappings; ++mappingI)
	{
		fprintf (out, "%zu", mappingI + 1); // unshift, because 0 is not a result
		for (size_t nI = 0; nI < nCount; ++nI)
		{
			fprintf (out, ";%zu", results[nI * maxMappings + mappingI]);
		}
		fprintf (out, "\n");
	}
	// cleanup
	for (size_t i = 0; i < nCount; ++i)
	{
		ksDel (keySetsCache[i]);
	}
	elektraFree (n);
	elektraFree (keySetsCache);
	fclose (out);
	elektraFree (keySetShapes);
	elektraFree (results);
}
/**
* END ==================================================== Mapping All Seeds ========================================================== END
*/
/**
* START ================================================== OPMPHM Build Time ======================================================== START
*
* This benchmark measures the time of the OPMPHM build.
* Uses all KeySet shapes except 6, for all n (KeySet size) a fixed set of seeds is used to build the OPMPHM.
 * The keyset shape 6 is excluded, because previous evaluation had shown that the results with that keyset shape
 * were unusable, due to the unnaturally long key names.
* For one n (KeySet size) ksPerN KeySets are used.
* The results are written out in the following format:
*
* n;ks;time
*
* The number of needed seeds for this benchmarks is: (numberOfShapes - 1) * ( numberOfSeeds + nCount * ksPerN )
*/
/**
 * @brief Measures the OPMPHM build numberOfRepeats times and returns the median
*
* @param ks the KeySet
* @param repeats array to store repeated measurements
* @param numberOfRepeats fields in repeats
*
* @retval median time
*/
static size_t benchmarkOPMPHMBuildTimeMeasure (KeySet * ks, size_t * repeats, size_t numberOfRepeats)
{
	size_t run = 0;
	while (run < numberOfRepeats)
	{
		// preparation for measurement
		struct timeval before;
		struct timeval after;
		Key * probe = ks->array[0]; // just some key
		Key * hit;
		// throw away any existing OPMPHM, so the lookup below has to build a fresh one
		if (ks->opmphm)
		{
			opmphmDel (ks->opmphm);
			ks->opmphm = NULL;
		}
		// START MEASUREMENT
		__asm__("");
		gettimeofday (&before, 0);
		__asm__("");
		hit = ksLookup (ks, probe, KDB_O_OPMPHM | KDB_O_NOCASCADING);
		__asm__("");
		gettimeofday (&after, 0);
		__asm__("");
		// END MEASUREMENT
		// store elapsed time in microseconds
		repeats[run] = (after.tv_sec - before.tv_sec) * 1000000 + (after.tv_usec - before.tv_usec);
		// sanity check
		if (!opmphmIsBuild (ks->opmphm))
		{
			printExit ("Sanity Check Failed: OPMPHM not used");
		}
		if (hit != probe)
		{
			printExit ("Sanity Check Failed: found wrong Key");
		}
		++run;
	}
	// sort repeats
	qsort (repeats, numberOfRepeats, sizeof (size_t), cmpInteger);
	return repeats[numberOfRepeats / 2]; // take median
}
/**
 * @brief Driver of the OPMPHM build time benchmark, writes one CSV file per KeySet shape.
 *
 * For every KeySet shape (except 6) and every n in [startN, endN] with step stepN, ksPerN KeySets
 * are generated; each is measured with numberOfSeeds hash seeds via benchmarkOPMPHMBuildTimeMeasure (...).
 * Output format per shape file: n;ks;time
 *
 * @param name benchmark name, only used for progress output
 */
void benchmarkOPMPHMBuildTime (char * name)
{
	const size_t startN = 50;
	const size_t stepN = 500;
	const size_t endN = 20000;
	const size_t ksPerN = 5;
	const size_t numberOfSeeds = 51;
	const size_t numberOfRepeats = 7;
	// check config
	if (startN >= endN || startN == 0)
	{
		printExit ("startN >= endN || startN == 0");
	}
	// odd counts keep the median element well defined
	if (numberOfRepeats % 2 == 0)
	{
		printExit ("numberOfRepeats is even");
	}
	if (numberOfSeeds % 2 == 0)
	{
		printExit ("numberOfSeeds is even");
	}
	if (ksPerN % 2 == 0)
	{
		printExit ("ksPerN is even");
	}
	// calculate counts
	size_t nCount = 0;
	for (size_t nI = startN; nI <= endN; nI += stepN)
	{
		++nCount;
	}
	// memory allocation and initialization
	// init seeds for mapping step in ksLookup (...)
	int32_t * seeds = elektraMalloc (numberOfSeeds * sizeof (int32_t));
	if (!seeds)
	{
		printExit ("malloc");
	}
	// init results, indexed [n][keySet][seed]
	size_t * results = elektraMalloc (nCount * ksPerN * numberOfSeeds * sizeof (size_t));
	if (!results)
	{
		printExit ("malloc");
	}
	// init repeats (scratch buffer for the measurement function)
	size_t * repeats = elektraMalloc (numberOfRepeats * sizeof (size_t));
	if (!repeats)
	{
		printExit ("malloc");
	}
	// init KeySetStorage
	KeySet ** keySetStorage = elektraMalloc (ksPerN * sizeof (KeySet *));
	if (!keySetStorage)
	{
		printExit ("malloc");
	}
	// get KeySet shapes
	KeySetShape * keySetShapes = getKeySetShapes ();
	printf ("Run Benchmark %s:\n", name);
	// for all KeySet shapes except 6
	for (size_t shapeI = 0; shapeI < numberOfShapes; ++shapeI)
	{
		if (shapeI == 6)
		{
			continue;
		}
		// get seeds for mapping step in ksLookup (...)
		for (size_t i = 0; i < numberOfSeeds; ++i)
		{
			if (getRandomSeed (&seeds[i]) != &seeds[i]) printExit ("Seed Parsing Error or feed me more seeds");
		}
		KeySetShape * usedKeySetShape = &keySetShapes[shapeI];
		// for all Ns
		for (size_t nI = startN; nI <= endN; nI += stepN)
		{
			printf ("now at: shape = %zu/%zu n = %zu/%zu\r", shapeI + 1, numberOfShapes, nI, endN);
			fflush (stdout);
			// generate KeySets
			int32_t genSeed;
			for (size_t ksI = 0; ksI < ksPerN; ++ksI)
			{
				if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
				keySetStorage[ksI] = generateKeySet (nI, &genSeed, usedKeySetShape);
			}
			// for all seeds
			for (size_t seedI = 0; seedI < numberOfSeeds; ++seedI)
			{
				// set seed to return by elektraRandGetInitSeed () in the lookup
				elektraRandBenchmarkInitSeed = seeds[seedI];
				// for all KeySets in the storage
				for (size_t ksI = 0; ksI < ksPerN; ++ksI)
				{
					// measure
					size_t res = benchmarkOPMPHMBuildTimeMeasure (keySetStorage[ksI], repeats, numberOfRepeats);
					// store res
					results[((nI - startN) / stepN) * ksPerN * numberOfSeeds + ksI * numberOfSeeds + seedI] = res;
				}
			}
			// free ks
			for (size_t ksI = 0; ksI < ksPerN; ++ksI)
			{
				ksDel (keySetStorage[ksI]);
			}
		}
		// write out, one file per KeySet shape
		FILE * out = openOutFileWithRPartitePostfix ("benchmark_opmphm_build_time", shapeI);
		if (!out)
		{
			printExit ("open out file");
		}
		// print header
		fprintf (out, "n;ks;time\n");
		// print data
		for (size_t nI = startN; nI <= endN; nI += stepN)
		{
			for (size_t ksI = 0; ksI < ksPerN; ++ksI)
			{
				for (size_t seedI = 0; seedI < numberOfSeeds; ++seedI)
				{
					fprintf (out, "%zu;%zu;%zu\n", nI, ksI,
						 results[((nI - startN) / stepN) * ksPerN * numberOfSeeds + ksI * numberOfSeeds + seedI]);
				}
			}
		}
		fclose (out);
	}
	printf ("\n");
	elektraFree (repeats);
	elektraFree (keySetStorage);
	elektraFree (keySetShapes);
	elektraFree (results);
	elektraFree (seeds);
}
/**
* END ==================================================== OPMPHM Build Time ========================================================== END
*/
/**
* START ================================================== OPMPHM Search Time ======================================================= START
*
* This benchmark measures the time of the OPMPHM search.
* Uses all KeySet shapes except 6, for one n (KeySet size) ksPerN KeySets are used.
 * The keyset shape 6 is excluded, because previous evaluation had shown that the results with that keyset shape
 * were unusable, due to the unnaturally long key names.
* Each measurement done with one KeySet is repeated numberOfRepeats time and summarized with the median.
* For one n (KeySet size) the ksPerN results are also summarized with the median.
* The results are written out in the following format:
*
* n;search_1;search_2;...;search_(numberOfSearches)
*
* The number of needed seeds for this benchmarks is: (numberOfShapes - 1) * nCount * ksPerN * (1 + searchesCount )
*/
/**
 * @brief Measures the OPMPHM search time, for searches random Keys, repeats the measurement numberOfRepeats times and returns the median.
 *
 * The OPMPHM build will be triggered if KDB_O_OPMPHM is set!
*
* @param ks the KeySet
* @param searches the number of searches to make
* @param searchSeed the random seed used to determine the Keys to search
* @param option the options passed to the ksLookup (...)
* @param repeats array to store repeated measurements
* @param numberOfRepeats fields in repeats
*
* @retval median time
*/
static size_t benchmarkSearchTimeMeasure (KeySet * ks, size_t searches, int32_t searchSeed, elektraLookupFlags option, size_t * repeats,
					  size_t numberOfRepeats)
{
	// the OPMPHM build happens outside the measured section, so only the searches are timed
	if (option & KDB_O_OPMPHM)
	{
		// trigger OPMPHM build if not build
		if (!opmphmIsBuild (ks->opmphm))
		{
			// set seed to return by elektraRandGetInitSeed () in the lookup
			elektraRandBenchmarkInitSeed = searchSeed;
			(void) ksLookup (ks, ks->array[0], KDB_O_OPMPHM | KDB_O_NOCASCADING);
			if (!opmphmIsBuild (ks->opmphm))
			{
				printExit ("trigger OPMPHM build");
			}
		}
	}
	for (size_t repeatsI = 0; repeatsI < numberOfRepeats; ++repeatsI)
	{
		// sanity checks: the OPMPHM must be present exactly when KDB_O_OPMPHM is requested
		if (option & KDB_O_OPMPHM)
		{
			if (!opmphmIsBuild (ks->opmphm))
			{
				printExit ("Sanity Check Failed: OPMPHM not here");
			}
		}
		else
		{
			if (ks->opmphm)
			{
				printExit ("Sanity Check Failed: OPMPHM here");
			}
		}
		// preparation for measurement
		struct timeval start;
		struct timeval end;
		Key * keyFound;
		// every repeat replays the same pseudo random key sequence, derived from searchSeed
		int32_t actualSearchSeed = searchSeed;
		// START MEASUREMENT
		__asm__("");
		gettimeofday (&start, 0);
		__asm__("");
		for (size_t s = 1; s <= searches; ++s)
		{
			keyFound = ksLookup (ks, ks->array[actualSearchSeed % ks->size], option);
			if (!keyFound || keyFound != ks->array[actualSearchSeed % ks->size])
			{
				printExit ("Sanity Check Failed: found wrong Key");
			}
			// advance the pseudo random sequence to pick the next key
			elektraRand (&actualSearchSeed);
		}
		__asm__("");
		gettimeofday (&end, 0);
		__asm__("");
		// END MEASUREMENT
		// sanity checks: the lookups must not have changed the OPMPHM state
		if (option & KDB_O_OPMPHM)
		{
			if (!opmphmIsBuild (ks->opmphm))
			{
				printExit ("Sanity Check Failed: OPMPHM not here");
			}
		}
		else
		{
			if (ks->opmphm)
			{
				printExit ("Sanity Check Failed: OPMPHM here");
			}
		}
		// save result, elapsed time in microseconds
		repeats[repeatsI] = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
	}
	// sort repeats
	qsort (repeats, numberOfRepeats, sizeof (size_t), cmpInteger);
	return repeats[numberOfRepeats / 2]; // take median
}
/**
* @brief Common part of search time benchmarks, used by benchmarkOPMPHMSearchTime and benchmarkBinarySearchTime.
*
* @param outFileName the output file name
* @param option the option to pass to the ksLookup (...)
*/
static void benchmarkSearchTime (char * name, char * outFileName, elektraLookupFlags option)
{
	const size_t startN = 50;
	const size_t stepN = 500;
	const size_t endN = 20000;
	const size_t ksPerN = 3;
	const size_t numberOfRepeats = 7;
	const size_t startSearches = 500;
	const size_t stepSearches = 500;
	const size_t endSearches = 32000;
	// check config
	if (startN >= endN || startN == 0)
	{
		printExit ("startN >= endN || startN == 0");
	}
	// odd counts keep the median element well defined
	if (numberOfRepeats % 2 == 0)
	{
		printExit ("numberOfRepeats is even");
	}
	if (ksPerN % 2 == 0)
	{
		printExit ("ksPerN is even");
	}
	// calculate counts
	size_t nCount = 0;
	for (size_t nI = startN; nI <= endN; nI += stepN)
	{
		++nCount;
	}
	size_t searchesCount = 0;
	for (size_t searchesI = startSearches; searchesI <= endSearches; searchesI += stepSearches)
	{
		++searchesCount;
	}
	// memory allocation and initialization
	// init results, indexed [n][searches], holds the median over ksPerN KeySets
	size_t * results = elektraMalloc (nCount * searchesCount * sizeof (size_t));
	if (!results)
	{
		printExit ("malloc");
	}
	// init repeats (scratch buffer for the measurement function)
	size_t * repeats = elektraMalloc (numberOfRepeats * sizeof (size_t));
	if (!repeats)
	{
		printExit ("malloc");
	}
	// init partialResult, indexed [searches][keySet], summarized per n with the median
	size_t * partialResult = elektraMalloc (ksPerN * searchesCount * sizeof (size_t));
	if (!partialResult)
	{
		printExit ("malloc");
	}
	// init KeySetStorage
	KeySet ** keySetStorage = elektraMalloc (ksPerN * sizeof (KeySet *));
	if (!keySetStorage)
	{
		printExit ("malloc");
	}
	// get KeySet shapes
	KeySetShape * keySetShapes = getKeySetShapes ();
	printf ("Run Benchmark %s:\n", name);
	// for all KeySet shapes except 6
	for (size_t shapeI = 0; shapeI < numberOfShapes; ++shapeI)
	{
		if (shapeI == 6)
		{
			continue;
		}
		KeySetShape * usedKeySetShape = &keySetShapes[shapeI];
		// for all Ns
		for (size_t nI = startN; nI <= endN; nI += stepN)
		{
			printf ("now at: shape = %zu/%zu n = %zu/%zu\r", shapeI + 1, numberOfShapes, nI, endN);
			fflush (stdout);
			// generate KeySets
			int32_t genSeed;
			for (size_t ksI = 0; ksI < ksPerN; ++ksI)
			{
				if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
				keySetStorage[ksI] = generateKeySet (nI, &genSeed, usedKeySetShape);
			}
			// for all number of searches
			for (size_t searchesI = startSearches; searchesI <= endSearches; searchesI += stepSearches)
			{
				// one seed per searches count, shared by all KeySets of this n
				int32_t searchSeed = 1;
				if (getRandomSeed (&searchSeed) != &searchSeed) printExit ("Seed Parsing Error or feed me more seeds");
				// for all KeySets in the storage
				for (size_t ksI = 0; ksI < ksPerN; ++ksI)
				{
					// measure
					size_t res = benchmarkSearchTimeMeasure (keySetStorage[ksI], searchesI, searchSeed, option, repeats,
										 numberOfRepeats);
					// save partial result to summarize it with median
					partialResult[((searchesI - startSearches) / stepSearches) * ksPerN + ksI] = res;
				}
			}
			// sort partialResult and take median as final result
			for (size_t searchesI = 0; searchesI < searchesCount; ++searchesI)
			{
				qsort (&partialResult[searchesI * ksPerN], ksPerN, sizeof (size_t), cmpInteger);
				results[((nI - startN) / stepN) * searchesCount + searchesI] =
					partialResult[searchesI * ksPerN + (ksPerN / 2)];
			}
			// free ks
			for (size_t ksI = 0; ksI < ksPerN; ++ksI)
			{
				ksDel (keySetStorage[ksI]);
			}
		}
		// write out, one file per KeySet shape
		FILE * out = openOutFileWithRPartitePostfix (outFileName, shapeI);
		if (!out)
		{
			printExit ("open out file");
		}
		// print header
		fprintf (out, "n");
		for (size_t searchesI = startSearches; searchesI <= endSearches; searchesI += stepSearches)
		{
			fprintf (out, ";search_%zu", searchesI);
		}
		fprintf (out, "\n");
		// print data
		for (size_t nI = startN; nI <= endN; nI += stepN)
		{
			fprintf (out, "%zu", nI);
			for (size_t searchesI = startSearches; searchesI <= endSearches; searchesI += stepSearches)
			{
				fprintf (out, ";%zu",
					 results[((nI - startN) / stepN) * searchesCount + ((searchesI - startSearches) / stepSearches)]);
			}
			fprintf (out, "\n");
		}
		fclose (out);
	}
	printf ("\n");
	elektraFree (repeats);
	elektraFree (partialResult);
	elektraFree (keySetStorage);
	elektraFree (keySetShapes);
	elektraFree (results);
}
/**
 * @brief Benchmarks the search time of ksLookup (...) using the OPMPHM.
 *
 * Thin wrapper around benchmarkSearchTime (...), see the common part for details.
 *
 * @param name benchmark name, only used for progress output
 */
void benchmarkOPMPHMSearchTime (char * name)
{
	benchmarkSearchTime (name, "benchmark_opmphm_search_time", KDB_O_OPMPHM | KDB_O_NOCASCADING);
}
/**
* END ==================================================== OPMPHM Search Time ========================================================= END
*/
/**
* START ================================================= Binary search Time ======================================================== START
*
* This benchmark measures the time of the binary search.
* Uses all KeySet shapes except 6, for one n (KeySet size) ksPerN KeySets are used.
 * The keyset shape 6 is excluded, because previous evaluation had shown that the results with that keyset shape
 * were unusable, due to the unnaturally long key names.
* Each measurement done with one KeySet is repeated numberOfRepeats time and summarized with the median.
* For one n (KeySet size) the ksPerN results are also summarized with the median.
* The results are written out in the following format:
*
* n;search_1;search_2;...;search_(numberOfSearches)
*
* The number of needed seeds for this benchmarks is: (numberOfShapes - 1) * nCount * ksPerN * (1 + searchesCount )
*/
/**
 * @brief Benchmarks the search time of ksLookup (...) using the plain binary search.
 *
 * Thin wrapper around benchmarkSearchTime (...), see the common part for details.
 *
 * @param name benchmark name, only used for progress output
 */
static void benchmarkBinarySearchTime (char * name)
{
	benchmarkSearchTime (name, "benchmark_binary_search_time", KDB_O_NOCASCADING);
}
/**
* END =================================================== Binary search Time ========================================================== END
*/
/**
* START ================================================= hsearch Build Time ======================================================== START
*
* This benchmark measures the time of the hsearch build.
* For one n (KeySet size) ksPerN KeySets are used, with different loads.
* This benchmark has a 10 strike policy, when 10 time the measured time is over 10000 the next KeySet shape is handled.
* The results are written out in the following format:
*
* n;ks;load;time
*
* The number of needed seeds for this benchmarks is: (numberOfShapes - 1) * nCount * ksPerN
*/
// clang-format off
// format bug
#ifdef HAVE_HSEARCHR
// clang-format on
/**
 * @brief Measures the hsearch build numberOfRepeats times and returns the median
*
* @param ks the KeySet
* @param nI the KeySet size
* @param load the load
* @param repeats array to store repeated measurements
* @param numberOfRepeats fields in repeats
*
* @retval median time
*/
static size_t benchmarkHsearchBuildTimeMeasure (KeySet * ks, size_t nI, double load, size_t * repeats, size_t numberOfRepeats)
{
	for (size_t repeatsI = 0; repeatsI < numberOfRepeats; ++repeatsI)
	{
		// preparation for measurement
		struct timeval start;
		struct timeval end;
		Key * key;
		ksRewind (ks);
		ENTRY e;
		ENTRY * ep;
		// fresh htab, hcreate_r requires zeroed storage
		struct hsearch_data * htab = elektraCalloc (sizeof (struct hsearch_data));
		if (!htab)
		{
			printExit ("calloc");
		}
		// START MEASUREMENT: table creation plus insertion of all keys
		__asm__("");
		gettimeofday (&start, 0);
		__asm__("");
		// table size nI / load realizes the requested load factor
		if (!hcreate_r (nI / load, htab))
		{
			printExit ("hcreate_r");
		}
		while ((key = ksNext (ks)))
		{
			e.key = (char *) keyName (key);
			e.data = key;
			if (!hsearch_r (e, ENTER, &ep, htab))
			{
				printExit ("hsearch_r");
			}
		}
		__asm__("");
		gettimeofday (&end, 0);
		__asm__("");
		// END MEASUREMENT
		// save result, elapsed time in microseconds
		repeats[repeatsI] = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
		// sanity check: every inserted key must be findable
		ksRewind (ks);
		while ((key = ksNext (ks)))
		{
			e.key = (char *) keyName (key);
			if (!hsearch_r (e, FIND, &ep, htab))
			{
				printExit ("Sanity Check Failed: hsearch can not find element");
			}
		}
		hdestroy_r (htab);
		elektraFree (htab);
	}
	// sort repeats
	qsort (repeats, numberOfRepeats, sizeof (size_t), cmpInteger);
	return repeats[numberOfRepeats / 2]; // take median
}
/**
 * @brief Driver of the hsearch build time benchmark, writes one CSV file per KeySet shape.
 *
 * For every KeySet shape (except 6), every n in [startN, endN] with step stepN and every load
 * factor, ksPerN KeySets are measured with benchmarkHsearchBuildTimeMeasure (...). A shape is
 * abandoned after maxStrikes consecutive measurements above strikeLimit microseconds.
 * Output format per shape file: n;ks;load;time
 *
 * @param name benchmark name, only used for progress output
 */
void benchmarkHsearchBuildTime (char * name)
{
	const size_t startN = 50;
	const size_t stepN = 500;
	const size_t endN = 20000;
	const size_t ksPerN = 5;
	const size_t numberOfRepeats = 7;
	const size_t maxStrikes = 10;
	const size_t strikeLimit = 10000;
	const size_t numberOfLoads = 4;
	// use the elektra allocator, consistent with the matching elektraFree (loads) below
	double * loads = elektraMalloc (numberOfLoads * sizeof (double));
	if (!loads)
	{
		printExit ("malloc");
	}
	loads[0] = 1;
	loads[1] = 0.75;
	loads[2] = 0.5;
	loads[3] = 0.25;
	// check config
	if (startN >= endN || startN == 0)
	{
		printExit ("startN >= endN || startN == 0");
	}
	// odd counts keep the median element well defined
	if (numberOfRepeats % 2 == 0)
	{
		printExit ("numberOfRepeats is even");
	}
	if (ksPerN % 2 == 0)
	{
		printExit ("ksPerN is even");
	}
	// calculate counts
	size_t nCount = 0;
	for (size_t nI = startN; nI <= endN; nI += stepN)
	{
		++nCount;
	}
	// memory allocation and initialization
	// init results, indexed [n][keySet][load]
	size_t * results = elektraMalloc (nCount * ksPerN * numberOfLoads * sizeof (size_t));
	if (!results)
	{
		printExit ("malloc");
	}
	// init repeats (scratch buffer for the measurement function)
	size_t * repeats = elektraMalloc (numberOfRepeats * sizeof (size_t));
	if (!repeats)
	{
		printExit ("malloc");
	}
	// init KeySetStorage
	KeySet ** keySetStorage = elektraMalloc (ksPerN * sizeof (KeySet *));
	if (!keySetStorage)
	{
		printExit ("malloc");
	}
	// get KeySet shapes
	KeySetShape * keySetShapes = getKeySetShapes ();
	printf ("Run Benchmark %s:\n", name);
	// for all KeySet shapes except 6
	for (size_t shapeI = 0; shapeI < numberOfShapes; ++shapeI)
	{
		if (shapeI == 6)
		{
			continue;
		}
		KeySetShape * usedKeySetShape = &keySetShapes[shapeI];
		size_t strikes = 0;
		// for all Ns
		for (size_t nI = startN; nI <= endN; nI += stepN)
		{
			printf ("now at: shape = %zu/%zu n = %zu/%zu\r", shapeI + 1, numberOfShapes, nI, endN);
			fflush (stdout);
			// generate KeySets
			int32_t genSeed;
			for (size_t ksI = 0; ksI < ksPerN; ++ksI)
			{
				if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
				keySetStorage[ksI] = generateKeySet (nI, &genSeed, usedKeySetShape);
			}
			// for all loads
			for (size_t loadI = 0; loadI < numberOfLoads; ++loadI)
			{
				// for all KeySets in the storage
				for (size_t ksI = 0; ksI < ksPerN; ++ksI)
				{
					// measure
					size_t res = benchmarkHsearchBuildTimeMeasure (keySetStorage[ksI], nI, loads[loadI], repeats,
										       numberOfRepeats);
					// strike policy
					if (res > strikeLimit)
					{
						++strikes;
						if (strikes >= maxStrikes)
						{
							// abort all three loops, the free ks loop below still runs
							ksI = ksPerN;
							loadI = numberOfLoads;
							nI = endN + 1;
							printf ("shape %zu is out!\n", shapeI);
						}
					}
					else
					{
						strikes = 0;
						// save only non strike values
						results[((nI - startN) / stepN) * ksPerN * numberOfLoads + ksI * numberOfLoads + loadI] =
							res;
					}
				}
			}
			// free ks
			for (size_t ksI = 0; ksI < ksPerN; ++ksI)
			{
				ksDel (keySetStorage[ksI]);
			}
		}
		// write out, one file per KeySet shape
		FILE * out = openOutFileWithRPartitePostfix ("benchmark_hsearch_build_time", shapeI);
		if (!out)
		{
			printExit ("open out file");
		}
		// print header
		fprintf (out, "n;ks;load;time\n");
		// print data
		for (size_t nI = startN; nI <= endN; nI += stepN)
		{
			for (size_t ksI = 0; ksI < ksPerN; ++ksI)
			{
				for (size_t loadI = 0; loadI < numberOfLoads; ++loadI)
				{
					fprintf (out, "%zu;%zu;%f;%zu\n", nI, ksI, loads[loadI],
						 results[((nI - startN) / stepN) * ksPerN * numberOfLoads + ksI * numberOfLoads + loadI]);
				}
			}
		}
		fclose (out);
	}
	printf ("\n");
	elektraFree (repeats);
	elektraFree (keySetStorage);
	elektraFree (loads);
	elektraFree (keySetShapes);
	elektraFree (results);
}
#endif
/**
* END =================================================== hsearch Build Time ========================================================== END
*/
/**
* START =================================================== Prediction Time ========================================================= START
*
* This benchmark measures the time from `numberOfSequences` lookup sequences, with the modified branch predictor and the binary search.
* All KeySet shapes except 6 where used.
 * The keyset shape 6 is excluded, because previous evaluation had shown that the results with that keyset shape
 * were unusable, due to the unnaturally long key names.
* For all `n` `patternsPerN` lookup patterns are created. With a length of `numberOfSequences`.
* The KeySet shapes rotate through the lookup patterns.
* Two entries of the pattern entries use one seed (31 bit), this works because max n is 10000.
* log_2 (opmphmPredictorWorthOpmphm(10000)) * 2) < 15 bit
* log_2 (15000) * 2) < 15 bit
*
* n;predictiontime;binarysearchtime
*
* The number of needed seeds for this benchmarks is: nCount * patternsPerN * ( numberOfSequences/2 + 1 + numberOfSequences )
*/
/**
 * @brief Compares lookup sequence times with the branch predictor against plain binary search.
 *
 * For every n and every lookup pattern the same sequence of lookups is timed twice: once with the
 * default ksLookup (...) (predictor may switch to the OPMPHM) and once forced to binary search
 * (KDB_O_BINSEARCH). Results go to a CSV file with the format: n;predictiontime;binarysearchtime
 *
 * Fix: the KeySet shape rotation must depend on the pattern index pI; the previous
 * patternsPerN % (numberOfShapes - 1) was constant, so only one shape was ever used.
 *
 * @param name benchmark name, only used for progress output
 */
static void benchmarkPredictionTime (char * name)
{
	const size_t numberOfRepeats = 5;
	const size_t numberOfSequences = 66;
	const size_t patternsPerN = 999;
	// create the n array
	const size_t nCount = 35;
	size_t * n = elektraMalloc (nCount * sizeof (size_t));
	if (!n)
	{
		printExit ("malloc");
	}
	size_t controlCount = 0;
	for (size_t i = 100; i < 1000; i += 100)
	{
		n[controlCount] = i;
		++controlCount;
	}
	for (size_t i = 1000; i < 5000; i += 200)
	{
		n[controlCount] = i;
		++controlCount;
	}
	for (size_t i = 5000; i <= 10000; i += 1000)
	{
		n[controlCount] = i;
		++controlCount;
	}
	// check config
	if (controlCount != nCount)
	{
		printExit ("controlCount != nCount");
	}
	// odd count keeps the median element well defined
	if (numberOfRepeats % 2 == 0)
	{
		printExit ("numberOfRepeats is even");
	}
	if (patternsPerN % (numberOfShapes - 1) == 0)
	{
		printExit ("not all shapes used equally");
	}
	// memory allocation and initialization
	// init results
	size_t * results = elektraMalloc (nCount * patternsPerN * 2 * sizeof (size_t)); // 2 prediction and binary search
	if (!results)
	{
		printExit ("malloc");
	}
	// init repeats (scratch buffer for the median)
	size_t * repeats = elektraMalloc (numberOfRepeats * sizeof (size_t));
	if (!repeats)
	{
		printExit ("malloc");
	}
	// init seeds
	int32_t * seeds = elektraMalloc (numberOfSequences * sizeof (int32_t));
	if (!seeds)
	{
		printExit ("malloc");
	}
	// init pattern
	size_t * pattern = elektraMalloc (numberOfSequences * sizeof (size_t));
	if (!pattern)
	{
		printExit ("malloc");
	}
	// get KeySet shapes
	KeySetShape * keySetShapes = getKeySetShapes ();
	printf ("Run Benchmark %s:\n", name);
	// for all n
	for (size_t nI = 0; nI < nCount; ++nI)
	{
		// for all pattern per n
		for (size_t pI = 0; pI < patternsPerN; ++pI)
		{
			printf ("now at: n = %zu/%zu pattern = %zu/%zu \r", nI + 1, nCount, pI + 1, patternsPerN);
			fflush (stdout);
			// create pattern, always two entries with one seed
			for (size_t s = 0; s < numberOfSequences; s += 2)
			{
				int32_t genSeed = 0;
				if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
				// 15 bit each of the 31 bit seed
				size_t sequenceLength1 = (genSeed >> 15) & 0x7FFF;
				size_t sequenceLength0 = genSeed & 0x7FFF;
				sequenceLength1 = sequenceLength1 % (opmphmPredictorWorthOpmphm (n[nI]) * 2 - 1);
				sequenceLength0 = sequenceLength0 % (opmphmPredictorWorthOpmphm (n[nI]) * 2 - 1);
				pattern[s + 1] = sequenceLength1 + 1;
				pattern[s] = sequenceLength0 + 1;
			}
			// rotate through all KeySet shapes, except 6
			size_t shapeI = pI % (numberOfShapes - 1);
			if (shapeI == 6)
			{
				++shapeI;
			}
			KeySetShape * usedKeySetShape = &keySetShapes[shapeI];
			// generate KeySet
			int32_t genSeed = 0;
			if (getRandomSeed (&genSeed) != &genSeed) printExit ("Seed Parsing Error or feed me more seeds");
			KeySet * ks = generateKeySet (n[nI], &genSeed, usedKeySetShape);
			// get seeds for OPMPHM
			for (size_t s = 0; s < numberOfSequences; ++s)
			{
				if (getRandomSeed (&seeds[s]) != &seeds[s]) printExit ("Seed Parsing Error or feed me more seeds");
			}
			size_t resultPrediction;
			size_t resultBinarySearch;
			// benchmark prediction
			// repeat measurement numberOfRepeats time
			for (size_t repeatsI = 0; repeatsI < numberOfRepeats; ++repeatsI)
			{
				// preparation for measurement
				struct timeval start;
				struct timeval end;
				Key * keyFound;
				// START MEASUREMENT
				__asm__("");
				gettimeofday (&start, 0);
				__asm__("");
				// for all sequences
				for (size_t s = 0; s < numberOfSequences; ++s)
				{
					// seed used for key to lookup and OPMPHM
					int32_t searchHashSeed = seeds[s];
					// set seed to return by elektraRandGetInitSeed () in the lookup, in case of hashing
					elektraRandBenchmarkInitSeed = searchHashSeed;
					// do the lookups
					for (size_t lookups = 0; lookups < pattern[s]; ++lookups)
					{
						keyFound = ksLookup (ks, ks->array[searchHashSeed % ks->size], KDB_O_NOCASCADING);
						if (!keyFound || keyFound != ks->array[searchHashSeed % ks->size])
						{
							printExit ("Sanity Check Failed: found wrong Key");
						}
						elektraRand (&searchHashSeed);
					}
					if (!ks->opmphmPredictor)
					{
						printExit ("Sanity Check Failed: no predictor used");
					}
					// simulate data change
					ks->flags |= KS_FLAG_NAME_CHANGE;
					if (ks->opmphm) opmphmClear (ks->opmphm);
				}
				__asm__("");
				gettimeofday (&end, 0);
				__asm__("");
				// END MEASUREMENT
				// save result
				repeats[repeatsI] = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
			}
			// sort repeats
			qsort (repeats, numberOfRepeats, sizeof (size_t), cmpInteger);
			resultPrediction = repeats[numberOfRepeats / 2];
			// benchmark binary search
			// repeat measurement numberOfRepeats time
			for (size_t repeatsI = 0; repeatsI < numberOfRepeats; ++repeatsI)
			{
				// preparation for measurement
				struct timeval start;
				struct timeval end;
				Key * keyFound;
				// START MEASUREMENT
				__asm__("");
				gettimeofday (&start, 0);
				__asm__("");
				// for all sequences
				for (size_t s = 0; s < numberOfSequences; ++s)
				{
					// seed used for key to lookup and OPMPHM
					int32_t searchHashSeed = seeds[s];
					// do the lookups
					for (size_t lookups = 0; lookups < pattern[s]; ++lookups)
					{
						keyFound = ksLookup (ks, ks->array[searchHashSeed % ks->size],
								     KDB_O_NOCASCADING | KDB_O_BINSEARCH);
						if (!keyFound || keyFound != ks->array[searchHashSeed % ks->size])
						{
							printExit ("Sanity Check Failed: found wrong Key");
						}
						elektraRand (&searchHashSeed);
					}
				}
				__asm__("");
				gettimeofday (&end, 0);
				__asm__("");
				// END MEASUREMENT
				// save result
				repeats[repeatsI] = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
			}
			// sort repeats
			qsort (repeats, numberOfRepeats, sizeof (size_t), cmpInteger);
			resultBinarySearch = repeats[numberOfRepeats / 2];
			results[nI * patternsPerN * 2 + pI * 2] = resultPrediction;
			results[nI * patternsPerN * 2 + pI * 2 + 1] = resultBinarySearch;
			ksDel (ks);
		}
	}
	printf ("\n");
	// write out
	FILE * out = openOutFileWithRPartitePostfix ("benchmark_prediction_time", opmphmPredictorHistoryMask >> 4); // shift 16 to 8 bit
	if (!out)
	{
		printExit ("open out file");
	}
	// print header
	fprintf (out, "n;predictiontime;binarysearchtime\n");
	for (size_t nI = 0; nI < nCount; ++nI)
	{
		for (size_t pI = 0; pI < patternsPerN; ++pI)
		{
			size_t predictiontime = results[nI * patternsPerN * 2 + pI * 2];
			size_t binarysearchtime = results[nI * patternsPerN * 2 + pI * 2 + 1];
			fprintf (out, "%zu;%zu;%zu\n", n[nI], predictiontime, binarysearchtime);
		}
	}
	fclose (out);
	elektraFree (n);
	elektraFree (keySetShapes);
	elektraFree (results);
	elektraFree (repeats);
	elektraFree (pattern);
	elektraFree (seeds);
}
/**
* END ===================================================== Prediction Time =========================================================== END
*/
/**
* START ================================================= Prints all KeySetShapes =================================================== START
*/
/**
 * @brief Generates one KeySet per registered shape and dumps all key names.
 *
 * Diagnostic benchmark: every shape is fed the same seed, so the printed
 * KeySets are comparable between runs.
 *
 * @param name name of the benchmark, printed as a header
 */
static void benchmarkPrintAllKeySetShapes (char * name)
{
	printf ("%s\n", name);
	const size_t keySetSize = 30;
	const int32_t initialSeed = 47658589;
	KeySetShape * shapes = getKeySetShapes ();
	for (size_t id = 0; id < numberOfShapes; ++id)
	{
		// every shape starts from the same seed
		int32_t seed = initialSeed;
		KeySet * ks = generateKeySet (keySetSize, &seed, &shapes[id]);
		printf (" ======================= shapeId %zu =======================\n\n", id);
		ksRewind (ks);
		for (Key * key = ksNext (ks); key; key = ksNext (ks))
		{
			printf ("%s\n", keyName (key));
		}
		printf ("\n ======================== size %zd ========================\n\n", ksGetSize (ks));
		ksDel (ks);
	}
	elektraFree (shapes);
}
/**
* END =================================================== Prints all KeySetShapes ===================================================== END
*/
/**
 * @brief Benchmark driver: registers all benchmarks and runs the one named in argv[1].
 *
 * Seeds are read from stdin; the usage message lists how many seeds each
 * benchmark consumes (use the generate-seeds script).
 *
 * @retval EXIT_SUCCESS when the selected benchmark ran
 * @retval EXIT_FAILURE on missing or unknown benchmark name
 */
int main (int argc, char ** argv)
{
	// define all benchmarks
	size_t benchmarksCount = 9;
#ifdef HAVE_HSEARCHR
	// hsearchbuildtime
	++benchmarksCount;
#endif
	Benchmark * benchmarks = elektraMalloc (benchmarksCount * sizeof (Benchmark));
	if (!benchmarks)
	{
		printExit ("malloc");
	}
	// hashfunctiontime
	char * benchmarkNameHashFunctionTime = "hashfunctiontime";
	benchmarks[0].name = benchmarkNameHashFunctionTime;
	benchmarks[0].benchmarkF = benchmarkHashFunctionTime;
	benchmarks[0].numberOfSeedsNeeded = 32;
	// mapping
	char * benchmarkNameMapping = "mapping";
	benchmarks[1].name = benchmarkNameMapping;
	benchmarks[1].benchmarkF = benchmarkMapping;
	benchmarks[1].numberOfSeedsNeeded = 12400;
	// mapping_opt
	char * benchmarkNameMappingOpt = "mapping_opt";
	benchmarks[2].name = benchmarkNameMappingOpt;
	benchmarks[2].benchmarkF = benchmarkMappingOpt;
	benchmarks[2].numberOfSeedsNeeded = 93920;
	// mapping_allseeds
	char * benchmarkNameMappingAllSeeds = "mapping_allseeds";
	benchmarks[3].name = benchmarkNameMappingAllSeeds;
	benchmarks[3].benchmarkF = benchmarkMappingAllSeeds;
	benchmarks[3].numberOfSeedsNeeded = 7;
	// printallkeysetshapes
	char * benchmarkNamePrintAllKeySetShapes = "printallkeysetshapes";
	benchmarks[4].name = benchmarkNamePrintAllKeySetShapes;
	benchmarks[4].benchmarkF = benchmarkPrintAllKeySetShapes;
	benchmarks[4].numberOfSeedsNeeded = 0;
	// opmphmbuildtime
	char * benchmarkNameOpmphmBuildTime = "opmphmbuildtime";
	benchmarks[5].name = benchmarkNameOpmphmBuildTime;
	benchmarks[5].benchmarkF = benchmarkOPMPHMBuildTime;
	benchmarks[5].numberOfSeedsNeeded = 1757;
	// opmphmsearchtime
	char * benchmarkNameOpmphmSearchTime = "opmphmsearchtime";
	benchmarks[6].name = benchmarkNameOpmphmSearchTime;
	benchmarks[6].benchmarkF = benchmarkOPMPHMSearchTime;
	benchmarks[6].numberOfSeedsNeeded = 54600;
	// binarysearchtime
	char * benchmarkNameBinarySearchTime = "binarysearchtime";
	benchmarks[7].name = benchmarkNameBinarySearchTime;
	benchmarks[7].benchmarkF = benchmarkBinarySearchTime;
	benchmarks[7].numberOfSeedsNeeded = 54600;
	// predictiontime
	char * benchmarkNamePredictionTime = "predictiontime";
	benchmarks[8].name = benchmarkNamePredictionTime;
	benchmarks[8].benchmarkF = benchmarkPredictionTime;
	benchmarks[8].numberOfSeedsNeeded = 3496500;
#ifdef HAVE_HSEARCHR
	// hsearchbuildtime
	char * benchmarkNameHsearchBuildTime = "hsearchbuildtime";
	benchmarks[benchmarksCount - 1].name = benchmarkNameHsearchBuildTime;
	benchmarks[benchmarksCount - 1].benchmarkF = benchmarkHsearchBuildTime;
	benchmarks[benchmarksCount - 1].numberOfSeedsNeeded = 1400;
#endif
	// run benchmark
	if (argc == 1)
	{
		fprintf (stderr, "Usage: cat <fileWithSeeds> | %s <benchmark>\n", argv[0]);
		fprintf (stderr, "\nUse the generate-seeds script to generate <fileWithSeeds>, number of seeds according to:\n\n");
		fprintf (stderr, "%-20s %10s\n", "<benchmark>", "seeds");
		for (size_t i = 0; i < benchmarksCount; ++i)
		{
			fprintf (stderr, "%-20s %10zu\n", benchmarks[i].name, benchmarks[i].numberOfSeedsNeeded);
		}
		elektraFree (benchmarks);
		return EXIT_FAILURE;
	}
	for (size_t i = 0; i < benchmarksCount; ++i)
	{
		// exact name match; the former prefix match (strncmp with strlen (argv[1]))
		// silently ran e.g. "mapping" when only "map" was given
		if (!strcmp (benchmarks[i].name, argv[1]))
		{
			benchmarks[i].benchmarkF (benchmarks[i].name);
			elektraFree (benchmarks);
			return EXIT_SUCCESS;
		}
	}
	fprintf (stderr, "Error: %s is not a benchmark\n", argv[1]);
	fprintf (stderr, "Available benchmarks:\n");
	for (size_t i = 0; i < benchmarksCount; ++i)
	{
		fprintf (stderr, "* %s\n", benchmarks[i].name);
	}
	elektraFree (benchmarks);
	return EXIT_FAILURE;
}
/**
* Benchmark helpers
*/
/**
* @brief Read a seed from STDIN.
*
* @param seed storage for the read in seed
*
* @retval int32_t * on success
* @retval NULL on read or parse error
*/
static int32_t * getRandomSeed (int32_t * seed)
{
	// read from stdin
	char data[10 + 2]; // min = 0, max = 2^32 - 1, len(2^32 - 1) = 10 + '\n' + '\0'
	if (fgets (data, 12, stdin) != data)
	{
		return NULL;
	}
	// eliminate newline; also stop at the terminator, so a line without a
	// trailing newline (e.g. the last line of the input) cannot make the
	// scan run past the buffer
	char * c;
	for (c = data; *c != '\n' && *c != '\0'; ++c)
		;
	*c = '\0';
	// prevent empty lines
	if (strlen (data) == 0)
	{
		return NULL;
	}
	// convert to int
	char * pEnd;
	*seed = strtol (data, &pEnd, 10);
	if (*pEnd != '\0')
	{
		return NULL;
	}
	return seed;
}
/**
 * @brief Opens file with OPMPHMR_PARTITE postfix.
 *
 * The postfix is the decimal value of r followed by ".csv", e.g. name
 * "bench_" and r = 8 opens "bench_8.csv". Works for the full uint8_t
 * range of r (the former fixed-size buffer overflowed for r >= 100).
 *
 * @param name name of the file
 * @param r postfix number
 *
 * @retval FILE * on success
 * @retval NULL on error
 */
static FILE * openOutFileWithRPartitePostfix (const char * name, uint8_t r)
{
	// "255" is the longest expansion of r; sizeof ("255.csv") also counts the '\0'
	char filename[strlen (name) + sizeof ("255.csv")];
	sprintf (filename, "%s%u.csv", name, (unsigned int) r);
	return fopen (filename, "w");
}
static const char * getString (void * data)
{
return keyName ((Key *) data);
}
/**
* @brief Power function.
*
* @param p basis
* @param q exponent
*
* @retval size_t p^q
*/
static size_t getPower (size_t p, size_t q)
{
	// repeated multiplication; q == 0 yields 1
	size_t power = 1;
	while (q--)
	{
		power *= p;
	}
	return power;
}
/**
* @brief comparison between integers suitable as qsort callback.
*
* @param a first integer
* @param b second integer
*
*/
static int cmpInteger (const void * a, const void * b)
{
	// three-way compare of two size_t values (qsort contract)
	const size_t lhs = *(const size_t *) a;
	const size_t rhs = *(const size_t *) b;
	if (lhs < rhs) return -1;
	if (lhs > rhs) return 1;
	return 0;
}
/**
* The Key Set shapes
*/
/**
* every key name is unique and goes 1 level deep
*/
// Shape callback: no label and no children, so every generated key
// terminates immediately — unique names, exactly 1 level deep.
static void shapefConstBinary (const size_t initSize ELEKTRA_UNUSED, size_t size ELEKTRA_UNUSED, size_t level ELEKTRA_UNUSED,
			       int32_t * seed ELEKTRA_UNUSED, KsShapeFunctionReturn * ret, void * data ELEKTRA_UNUSED)
{
	ret->label = 0;
	ret->subKeys = 0;
}
/**
* binary tree
*/
// Shape callback: binary tree — two children per key until the tree
// would exceed initSize keys on the current level.
static void shapefBinaryBranch (const size_t initSize, size_t size ELEKTRA_UNUSED, size_t level, int32_t * seed ELEKTRA_UNUSED,
				KsShapeFunctionReturn * ret, void * data ELEKTRA_UNUSED)
{
	const size_t branching = 2;
	ret->label = 0;
	// stop branching once 2^level outgrows the requested size
	ret->subKeys = (getPower (branching, level) > initSize) ? 0 : branching;
}
/**
* every parent has n/branchfactor children
*/
// Shape callback: each parent gets initSize / branchRatio children
// (at least 2), until the tree would exceed initSize.
static void shapefDynamicBranch (const size_t initSize, size_t size ELEKTRA_UNUSED, size_t level, int32_t * seed ELEKTRA_UNUSED,
				 KsShapeFunctionReturn * ret, void * data ELEKTRA_UNUSED)
{
	const size_t branchRatio = 9;
	ret->label = 0;
	size_t children = initSize / branchRatio;
	if (children < 2)
	{
		children = 2;
	}
	// stop branching once children^level outgrows the requested size
	ret->subKeys = (getPower (children, level) > initSize) ? 0 : children;
}
/**
* all key names have a common start, startLevel length
*/
// Shape callback: like shapefDynamicBranch, but the first startLevel
// levels form one common prefix shared by all key names.
static void shapefLateDynamicBranch (const size_t initSize, size_t size ELEKTRA_UNUSED, size_t level, int32_t * seed ELEKTRA_UNUSED,
				     KsShapeFunctionReturn * ret, void * data ELEKTRA_UNUSED)
{
	const size_t startLevel = 5;
	const size_t branchRatio = 9;
	ret->label = 0;
	// single-child chain builds the common start of every key name
	if (level < startLevel)
	{
		ret->subKeys = 1;
		return;
	}
	const size_t effectiveLevel = level - startLevel;
	size_t children = initSize / branchRatio;
	if (children < 2)
	{
		children = 2;
	}
	ret->subKeys = (getPower (children, effectiveLevel) > initSize) ? 0 : children;
}
/**
* all key names have a common start and end
*/
// Allocates the state for shapefCommonStartEnd: a single flag that
// records whether the common-end label was already created.
static void * shapeCommonStartEndInit (void)
{
	uint8_t * isLabelSet = elektraMalloc (sizeof (uint8_t));
	if (!isLabelSet)
	{
		return NULL;
	}
	*isLabelSet = 0;
	return isLabelSet;
}
// Releases the flag allocated by shapeCommonStartEndInit ().
static void shapeCommonStartEndDel (void * data)
{
	elektraFree (data);
}
// Shape callback: all key names share a common start (levels < 4) and a
// common end. The first deep key built after the fan-out level records
// its tail under label 1 (data flags when that happened); every later
// key reuses that tail via subKeys = -1.
static void shapefCommonStartEnd (const size_t initSize ELEKTRA_UNUSED, size_t size, size_t level, int32_t * seed ELEKTRA_UNUSED,
				  KsShapeFunctionReturn * ret, void * data)
{
	size_t notCommonLevel = 4;
	size_t maxLevel = 10;
	if (level < notCommonLevel)
	{
		// creates common start
		ret->subKeys = 1;
		ret->label = 0;
	}
	else if (notCommonLevel == level)
	{
		// creates level with different names
		ret->subKeys = size + 1;
		ret->label = 0;
	}
	else if (level > notCommonLevel)
	{
		// data is the flag from shapeCommonStartEndInit ()
		uint8_t * isLabelSet = data;
		if (!*isLabelSet)
		{
			// creates common end (only once, for the first key)
			if (level == notCommonLevel + 1)
			{
				// set label, so the tail can be referenced later
				ret->label = 1;
				ret->subKeys = 1;
			}
			else if (level == maxLevel)
			{
				// end of deep key; from now on the tail is reused
				ret->label = 0;
				ret->subKeys = 0;
				*isLabelSet = 1;
			}
			else
			{
				// create deep key
				ret->label = 0;
				ret->subKeys = 1;
			}
		}
		else
		{
			// use common end (subKeys = -1 means "reference label")
			ret->subKeys = -1;
			ret->label = 1;
		}
	}
}
/**
* modules, level 1 keys same, one level 2 key stores the modules. Like system:/elektra.
*/
// Allocates the state for shapefModules: three flags recording whether
// the respective label was set, plus a 1..3 counter for label assignment.
static void * shapeModulesInit (void)
{
	uint8_t * d = elektraMalloc (4 * sizeof (uint8_t));
	if (!d)
	{
		return NULL;
	}
	d[0] = 0; // label 1 not yet set
	d[1] = 0; // label 2 not yet set
	d[2] = 0; // label 3 not yet set
	d[3] = 1; // next label to assign
	return d;
}
// Releases the flag array allocated by shapeModulesInit ().
static void shapeModulesDel (void * data)
{
	elektraFree (data);
}
// Shape callback: imitates system:/elektra/modules — one common level-1
// key, a level-2 key holding the modules, and module subtrees of three
// recurring sizes that are built once (labels 1-3) and then referenced.
static void shapefModules (const size_t initSize, size_t size ELEKTRA_UNUSED, size_t level, int32_t * seed ELEKTRA_UNUSED,
			   KsShapeFunctionReturn * ret, void * data)
{
	// total keys per module = (level 3 subKeys) * (level 4 subKeys = 5):
	// label 1 5 subKeys
	// label 2 10 subKeys
	// label 3 15 subKeys
	ssize_t modulesKeys[3] = { 5, 10, 15 };
	uint8_t * d = data;
	uint8_t * firstSet = &d[0];
	uint8_t * secondSet = &d[1];
	uint8_t * thirdSet = &d[2];
	uint8_t * assign = &d[3];
	if (level == 1)
	{
		// common start, simulates elektra in system:/elektra
		ret->subKeys = 1;
		ret->label = 0;
	}
	else if (level == 2)
	{
		// common name, simulates modules in system:/elektra/modules
		// calculates how many modules have space
		ret->subKeys = 0;
		ssize_t remainingSize = initSize;
		uint8_t isSpace = 1;
		uint8_t l = 0;
		// greedily pack modules of size 5, 10, 15, 5, 10, ... into initSize
		while (isSpace)
		{
			if (remainingSize - modulesKeys[l] < 0)
			{
				isSpace = 0;
			}
			else
			{
				remainingSize -= modulesKeys[l];
				l = (l + 1) % 3;
				++ret->subKeys;
			}
		}
		// add solo keys (leftover space becomes childless module keys)
		ret->subKeys += remainingSize;
		ret->label = 0;
	}
	else if (level == 3)
	{
		// give each modules ret->subKeys * 5 subKeys
		// the first three modules define the labels, all later ones
		// reuse them round-robin via *assign
		if (!*firstSet)
		{
			ret->subKeys = 1;
			ret->label = 1;
			*firstSet = 1;
		}
		else if (!*secondSet)
		{
			ret->subKeys = 2;
			ret->label = 2;
			*secondSet = 1;
		}
		else if (!*thirdSet)
		{
			ret->subKeys = 3;
			ret->label = 3;
			*thirdSet = 1;
		}
		else
		{
			// assign
			ret->subKeys = -1;
			ret->label = *assign;
			*assign = (*assign % 3) + 1;
		}
	}
	else if (level == 4)
	{
		// the 5 in ret->subKeys * 5
		ret->subKeys = 5;
		ret->label = 0;
	}
	else
	{
		// terminate keys
		ret->subKeys = 0;
		ret->label = 0;
	}
}
/**
* always wider, subKeys are incremented by one every level
*/
// Shape callback: ever wider — subKeys grow by one per level, down to
// the deepest level whose cumulative key count still fits initSize.
static void shapefWide (const size_t initSize, size_t size ELEKTRA_UNUSED, size_t level, int32_t * seed ELEKTRA_UNUSED,
			KsShapeFunctionReturn * ret, void * data ELEKTRA_UNUSED)
{
	const size_t startSubKeys = 2;
	ret->label = 0;
	// determine to which level it is possible to go (level 0 has 2 subs)
	size_t maxLevel = 0;
	size_t keysOnLevel = startSubKeys;
	while (keysOnLevel <= initSize)
	{
		++maxLevel;
		keysOnLevel *= startSubKeys + maxLevel;
	}
	ret->subKeys = (level < maxLevel) ? startSubKeys + level : 0;
}
/**
* always tighter, subKeys are decrementing by one every level till two is reached
*/
// Shape callback: ever tighter — subKeys shrink by one per level until
// two is reached, bounded by the same depth computation as shapefWide.
static void shapefTight (const size_t initSize, size_t size ELEKTRA_UNUSED, size_t level, int32_t * seed ELEKTRA_UNUSED,
			 KsShapeFunctionReturn * ret, void * data ELEKTRA_UNUSED)
{
	const size_t startSubKeys = 2;
	ret->label = 0;
	// determine to which level it is possible to go (level 0 has 2 subs)
	size_t maxLevel = 0;
	size_t keysOnLevel = startSubKeys;
	while (keysOnLevel <= initSize)
	{
		++maxLevel;
		keysOnLevel *= startSubKeys + maxLevel;
	}
	ret->subKeys = (level < maxLevel) ? startSubKeys + maxLevel - level - 1 : 0;
}
/**
* @brief Set the shape functions and parameters together to get the KeySetShape population.
*
* @retval KeySetShape * on success
*/
static KeySetShape * getKeySetShapes (void)
{
	KeySetShape * out = elektraMalloc (sizeof (KeySetShape) * numberOfShapes);
	if (!out) printExit ("malloc KeySetShapes");
	// cursor walks through the array, one entry per shape
	KeySetShape * s = out;
	// shapefConstBinary
	s->minWordLength = 1;
	s->maxWordLength = 21;
	s->special = 127;
	s->parent = 0;
	s->shapeInit = NULL;
	s->shapef = shapefConstBinary;
	s->shapeDel = NULL;
	++s;
	// shapefBinaryBranch
	s->minWordLength = 1;
	s->maxWordLength = 1;
	s->special = 50;
	s->parent = 7;
	s->shapeInit = NULL;
	s->shapef = shapefBinaryBranch;
	s->shapeDel = NULL;
	++s;
	// shapefDynamicBranch
	s->minWordLength = 1;
	s->maxWordLength = 11;
	s->special = 50;
	s->parent = 7;
	s->shapeInit = NULL;
	s->shapef = shapefDynamicBranch;
	s->shapeDel = NULL;
	++s;
	// shapefLateDynamicBranch
	s->minWordLength = 1;
	s->maxWordLength = 11;
	s->special = 50;
	s->parent = 7;
	s->shapeInit = NULL;
	s->shapef = shapefLateDynamicBranch;
	s->shapeDel = NULL;
	++s;
	// shapefWide
	s->minWordLength = 1;
	s->maxWordLength = 11;
	s->special = 50;
	s->parent = 7;
	s->shapeInit = NULL;
	s->shapef = shapefWide;
	s->shapeDel = NULL;
	++s;
	// shapefTight
	s->minWordLength = 1;
	s->maxWordLength = 11;
	s->special = 50;
	s->parent = 7;
	s->shapeInit = NULL;
	s->shapef = shapefTight;
	s->shapeDel = NULL;
	++s;
	// shapefCommonStartEnd
	s->minWordLength = 1;
	s->maxWordLength = 21;
	s->special = 50;
	s->parent = 0;
	s->shapeInit = shapeCommonStartEndInit;
	s->shapef = shapefCommonStartEnd;
	s->shapeDel = shapeCommonStartEndDel;
	++s;
	// shapefModules
	s->minWordLength = 1;
	s->maxWordLength = 11;
	s->special = 50;
	s->parent = 7;
	s->shapeInit = shapeModulesInit;
	s->shapef = shapefModules;
	s->shapeDel = shapeModulesDel;
	++s;
	// sanity check: exactly numberOfShapes entries were filled in
	if ((size_t) (s - out) != numberOfShapes) printExit ("shapeCount != numberOfShapes");
	return out;
}
|
utils.c | #define _GNU_SOURCE
#include "utils.h"
#include <math.h>
#include <signal.h>
#include <stdlib.h>
#ifdef HAVE_GETTIMEOFDAY
#include <sys/time.h>
#else
#include <time.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_FENV_H
#include <fenv.h>
#endif
#ifdef HAVE_LIBPNG
#include <png.h>
#endif
/* Random number generator state
*/
prng_t prng_state_data;
prng_t *prng_state;
/*----------------------------------------------------------------------------*\
* CRC-32 version 2.0.0 by Craig Bruce, 2006-04-29.
*
* This program generates the CRC-32 values for the files named in the
* command-line arguments. These are the same CRC-32 values used by GZIP,
* PKZIP, and ZMODEM. The Crc32_ComputeBuf () can also be detached and
* used independently.
*
* THIS PROGRAM IS PUBLIC-DOMAIN SOFTWARE.
*
* Based on the byte-oriented implementation "File Verification Using CRC"
* by Mark R. Nelson in Dr. Dobb's Journal, May 1992, pp. 64-67.
*
* v1.0.0: original release.
* v1.0.1: fixed printf formats.
* v1.0.2: fixed something else.
* v1.0.3: replaced CRC constant table by generator function.
* v1.0.4: reformatted code, made ANSI C. 1994-12-05.
* v2.0.0: rewrote to use memory buffer & static table, 2006-04-29.
\*----------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------*\
* NAME:
* Crc32_ComputeBuf () - computes the CRC-32 value of a memory buffer
* DESCRIPTION:
* Computes or accumulates the CRC-32 value for a memory buffer.
* The 'inCrc32' gives a previously accumulated CRC-32 value to allow
* a CRC to be generated for multiple sequential buffer-fuls of data.
* The 'inCrc32' for the first buffer must be zero.
* ARGUMENTS:
* inCrc32 - accumulated CRC-32 value, must be 0 on first call
* buf - buffer to compute CRC-32 value for
* bufLen - number of bytes in buffer
* RETURNS:
* crc32 - computed CRC-32 value
* ERRORS:
* (no errors are possible)
\*----------------------------------------------------------------------------*/
uint32_t
compute_crc32 (uint32_t in_crc32,
	       const void *buf,
	       size_t buf_len)
{
    static const uint32_t crc_table[256] = {
	0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F,
	0xE963A535, 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
	0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2,
	0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
	0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9,
	0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
	0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C,
	0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
	0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423,
	0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
	0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, 0x76DC4190, 0x01DB7106,
	0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
	0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D,
	0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
	0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950,
	0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
	0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7,
	0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
	0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA,
	0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
	0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81,
	0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
	0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84,
	0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
	0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB,
	0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
	0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E,
	0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
	0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55,
	0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
	0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28,
	0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
	0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F,
	0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
	0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242,
	0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
	0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69,
	0x616BFFD3, 0x166CCF45, 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
	0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC,
	0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
	0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693,
	0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
	0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
    };
    /* byte-at-a-time CRC-32 (the reflected polynomial used by GZIP,
     * PKZIP and ZMODEM); pre- and post-condition with ~0 so callers
     * pass 0 for the first buffer and feed the result back for
     * subsequent buffers */
    const unsigned char *p = (const unsigned char *) buf;
    const unsigned char *end = p + buf_len;
    uint32_t crc = in_crc32 ^ 0xFFFFFFFF;

    while (p < end)
	crc = (crc >> 8) ^ crc_table[(crc ^ *p++) & 0xFF];

    return crc ^ 0xFFFFFFFF;
}
/* Computes/accumulates a CRC-32 over an image's pixel data, after
 * normalizing it: unused 'x' bits, and optionally the alpha or the RGB
 * channels, are masked to zero, and bytes are swapped to little-endian
 * order so the result is platform independent.
 * NOTE: this mutates the image's pixel data (masking + byte swap).
 */
static uint32_t
compute_crc32_for_image_internal (uint32_t crc32,
				  pixman_image_t *img,
				  pixman_bool_t remove_alpha,
				  pixman_bool_t remove_rgb)
{
    pixman_format_code_t fmt = pixman_image_get_format (img);
    uint32_t *data = pixman_image_get_data (img);
    int stride = pixman_image_get_stride (img);
    int height = pixman_image_get_height (img);
    uint32_t mask = 0xffffffff;
    int i;

    /* a negative stride means the rows are stored bottom-up; point at
     * the first row in memory and continue with a positive stride */
    if (stride < 0)
    {
	data += (stride / 4) * (height - 1);
	stride = - stride;
    }

    /* mask unused 'x' part */
    if (PIXMAN_FORMAT_BPP (fmt) - PIXMAN_FORMAT_DEPTH (fmt) &&
	PIXMAN_FORMAT_DEPTH (fmt) != 0)
    {
	uint32_t m = (1 << PIXMAN_FORMAT_DEPTH (fmt)) - 1;

	if (PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_BGRA ||
	    PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_RGBA)
	{
	    /* for *A formats the padding sits at the bottom of the pixel */
	    m <<= (PIXMAN_FORMAT_BPP (fmt) - PIXMAN_FORMAT_DEPTH (fmt));
	}

	mask &= m;
    }

    /* mask alpha channel */
    if (remove_alpha && PIXMAN_FORMAT_A (fmt))
    {
	uint32_t m;

	if (PIXMAN_FORMAT_BPP (fmt) == 32)
	    m = 0xffffffff;
	else
	    m = (1 << PIXMAN_FORMAT_BPP (fmt)) - 1;

	m >>= PIXMAN_FORMAT_A (fmt);

	if (PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_BGRA ||
	    PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_RGBA ||
	    PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_A)
	{
	    /* Alpha is at the bottom of the pixel */
	    m <<= PIXMAN_FORMAT_A (fmt);
	}

	mask &= m;
    }

    /* mask rgb channels */
    if (remove_rgb && PIXMAN_FORMAT_RGB (fmt))
    {
	uint32_t m = ((uint32_t)~0) >> (32 - PIXMAN_FORMAT_BPP (fmt));
	uint32_t size = PIXMAN_FORMAT_R (fmt) + PIXMAN_FORMAT_G (fmt) + PIXMAN_FORMAT_B (fmt);

	m &= ~((1 << size) - 1);

	if (PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_BGRA ||
	    PIXMAN_FORMAT_TYPE (fmt) == PIXMAN_TYPE_RGBA)
	{
	    /* RGB channels are at the top of the pixel */
	    m >>= size;
	}

	mask &= m;
    }

    /* replicate the per-pixel mask across a full 32 bit word, so it can
     * be applied word-wise for sub-32bpp formats */
    for (i = 0; i * PIXMAN_FORMAT_BPP (fmt) < 32; i++)
	mask |= mask << (i * PIXMAN_FORMAT_BPP (fmt));

    for (i = 0; i < stride * height / 4; i++)
	data[i] &= mask;

    /* swap endianness in order to provide identical results on both big
     * and little endian systems
     */
    image_endian_swap (img);

    return compute_crc32 (crc32, data, stride * height);
}
/* Accumulates a normalized CRC-32 over an image, folding in the alpha
 * map (if any): with an alpha map, the image contributes its RGB bits
 * and the map contributes its alpha bits.
 */
uint32_t
compute_crc32_for_image (uint32_t crc32,
			 pixman_image_t *img)
{
    pixman_image_t *alpha = (pixman_image_t *)img->common.alpha_map;

    if (!alpha)
	return compute_crc32_for_image_internal (crc32, img, FALSE, FALSE);

    crc32 = compute_crc32_for_image_internal (crc32, img, TRUE, FALSE);
    return compute_crc32_for_image_internal (crc32, alpha, FALSE, TRUE);
}
/* Hex-dumps an image's raw bytes row by row; a "| " separator marks
 * where the visible pixels end and the stride padding begins.
 */
void
print_image (pixman_image_t *image)
{
    int width = pixman_image_get_width (image);
    int height = pixman_image_get_height (image);
    int stride = pixman_image_get_stride (image);
    pixman_format_code_t format = pixman_image_get_format (image);
    uint8_t *buffer = (uint8_t *)pixman_image_get_data (image);
    int abs_stride = (stride >= 0)? stride : - stride;
    int row_bytes = (width * PIXMAN_FORMAT_BPP (format) + 7) / 8;
    int i, j;

    printf ("---\n");
    for (i = 0; i < height; i++)
    {
	for (j = 0; j < abs_stride; j++)
	{
	    if (j == row_bytes)
		printf ("| ");

	    printf ("%02X ", buffer[i * stride + j]);
	}
	printf ("\n");
    }
    printf ("---\n");
}
/* perform endian conversion of pixel data
*/
void
image_endian_swap (pixman_image_t *img)
{
    int stride = pixman_image_get_stride (img);
    uint32_t *data = pixman_image_get_data (img);
    int height = pixman_image_get_height (img);
    int bpp = PIXMAN_FORMAT_BPP (pixman_image_get_format (img));
    int i, j;

    /* swap bytes only on big endian systems */
    if (is_little_endian())
	return;

    /* single bytes need no swapping */
    if (bpp == 8)
	return;

    for (i = 0; i < height; i++)
    {
	uint8_t *line_data = (uint8_t *)data + stride * i;
	/* iterate over the absolute row length; a negative stride only
	 * changes where line_data points, not the byte count */
	int s = (stride >= 0)? stride : - stride;

	switch (bpp)
	{
	case 1:
	    /* reverse the bit order within each byte */
	    for (j = 0; j < s; j++)
	    {
		line_data[j] =
		    ((line_data[j] & 0x80) >> 7) |
		    ((line_data[j] & 0x40) >> 5) |
		    ((line_data[j] & 0x20) >> 3) |
		    ((line_data[j] & 0x10) >> 1) |
		    ((line_data[j] & 0x08) << 1) |
		    ((line_data[j] & 0x04) << 3) |
		    ((line_data[j] & 0x02) << 5) |
		    ((line_data[j] & 0x01) << 7);
	    }
	    break;
	case 4:
	    /* swap the two nibbles of each byte */
	    for (j = 0; j < s; j++)
	    {
		line_data[j] = (line_data[j] >> 4) | (line_data[j] << 4);
	    }
	    break;
	case 16:
	    /* swap the two bytes of each 16 bit pixel */
	    for (j = 0; j + 2 <= s; j += 2)
	    {
		char t1 = line_data[j + 0];
		char t2 = line_data[j + 1];

		line_data[j + 1] = t1;
		line_data[j + 0] = t2;
	    }
	    break;
	case 24:
	    /* reverse the three bytes of each 24 bit pixel */
	    for (j = 0; j + 3 <= s; j += 3)
	    {
		char t1 = line_data[j + 0];
		char t2 = line_data[j + 1];
		char t3 = line_data[j + 2];

		line_data[j + 2] = t1;
		line_data[j + 1] = t2;
		line_data[j + 0] = t3;
	    }
	    break;
	case 32:
	    /* reverse the four bytes of each 32 bit pixel */
	    for (j = 0; j + 4 <= s; j += 4)
	    {
		char t1 = line_data[j + 0];
		char t2 = line_data[j + 1];
		char t3 = line_data[j + 2];
		char t4 = line_data[j + 3];

		line_data[j + 3] = t1;
		line_data[j + 2] = t2;
		line_data[j + 1] = t3;
		line_data[j + 0] = t4;
	    }
	    break;
	default:
	    assert (FALSE);
	    break;
	}
    }
}
/* number of PROT_NONE guard pages placed before/after each fence_malloc
 * payload, so stray reads/writes fault immediately */
#define N_LEADING_PROTECTED 10
#define N_TRAILING_PROTECTED 10

/* bookkeeping stored in the first (unprotected) page of a fence_malloc
 * allocation; fence_free reads it back to unmap the whole region */
typedef struct
{
    void *addr;        /* start of the whole mmap'ed region */
    uint32_t len;      /* payload length requested by the caller */
    uint8_t *trailing; /* first byte of the trailing protected area */
    int n_bytes;       /* total size of the mmap'ed region */
} info_t;
#if defined(HAVE_MPROTECT) && defined(HAVE_GETPAGESIZE) && defined(HAVE_SYS_MMAN_H) && defined(HAVE_MMAP)
/* This is apparently necessary on at least OS X */
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
/* Allocates len bytes surrounded by PROT_NONE guard pages, so buffer
 * overruns crash instead of silently corrupting memory. Layout:
 * [info page][leading guards][payload][trailing guards].
 * Returns the payload pointer, or NULL on mmap/mprotect failure.
 */
void *
fence_malloc (int64_t len)
{
    unsigned long page_size = getpagesize();
    unsigned long page_mask = page_size - 1;
    /* payload rounded up to whole pages */
    uint32_t n_payload_bytes = (len + page_mask) & ~page_mask;
    /* + 2: one info page and one page of alignment slack */
    uint32_t n_bytes =
	(page_size * (N_LEADING_PROTECTED + N_TRAILING_PROTECTED + 2) +
	 n_payload_bytes) & ~page_mask;
    uint8_t *initial_page;
    uint8_t *leading_protected;
    uint8_t *trailing_protected;
    uint8_t *payload;
    uint8_t *addr;

    if (len < 0)
	abort();
    
    addr = mmap (NULL, n_bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
		 -1, 0);

    if (addr == MAP_FAILED)
    {
	printf ("mmap failed on %lld %u\n", (long long int)len, n_bytes);
	return NULL;
    }

    /* carve the region into its pieces, page aligned */
    initial_page = (uint8_t *)(((uintptr_t)addr + page_mask) & ~page_mask);
    leading_protected = initial_page + page_size;
    payload = leading_protected + N_LEADING_PROTECTED * page_size;
    trailing_protected = payload + n_payload_bytes;

    /* stash the unmap parameters where fence_free can find them */
    ((info_t *)initial_page)->addr = addr;
    ((info_t *)initial_page)->len = len;
    ((info_t *)initial_page)->trailing = trailing_protected;
    ((info_t *)initial_page)->n_bytes = n_bytes;

    if ((mprotect (leading_protected, N_LEADING_PROTECTED * page_size,
		  PROT_NONE) == -1) ||
	(mprotect (trailing_protected, N_TRAILING_PROTECTED * page_size,
		  PROT_NONE) == -1))
    {
	munmap (addr, n_bytes);

	return NULL;
    }

    return payload;
}
/* Frees a fence_malloc allocation: walks back from the payload to the
 * info page and unmaps the whole region (guards included).
 */
void
fence_free (void *data)
{
    uint32_t page_size = getpagesize();
    uint8_t *payload = data;
    uint8_t *leading_protected = payload - N_LEADING_PROTECTED * page_size;
    uint8_t *initial_page = leading_protected - page_size;
    info_t *info = (info_t *)initial_page;

    munmap (info->addr, info->n_bytes);
}
#else
/* fallback without mmap/mprotect support: plain malloc, no guard pages */
void *
fence_malloc (int64_t len)
{
    return malloc (len);
}
/* fallback counterpart to the malloc-based fence_malloc */
void
fence_free (void *data)
{
    free (data);
}
#endif
/* Returns a fence-guarded buffer of n_bytes pseudorandom bytes
 * (deterministic: prng seed 0), or NULL if the allocation failed.
 */
uint8_t *
make_random_bytes (int n_bytes)
{
    uint8_t *bytes = fence_malloc (n_bytes);

    if (bytes)
	prng_randmemset (bytes, n_bytes, 0);

    return bytes;
}
/* Converts premultiplied a8r8g8b8 pixels to non-premultiplied RGBA
 * bytes (r, g, b, a byte order). Channels are un-premultiplied by
 * dividing by alpha and clamping to [0, 255]; pixels with alpha == 0
 * keep their raw channel values. In-place use (dst == src) is safe:
 * each pixel is read before its bytes are written.
 */
void
a8r8g8b8_to_rgba_np (uint32_t *dst, uint32_t *src, int n_pixels)
{
    uint8_t *dst8 = (uint8_t *)dst;
    int i;

    for (i = 0; i < n_pixels; ++i)
    {
	uint32_t p = src[i];
	uint8_t a, r, g, b;

	a = (p & 0xff000000) >> 24;
	r = (p & 0x00ff0000) >> 16;
	g = (p & 0x0000ff00) >> 8;
	b = (p & 0x000000ff) >> 0;

	if (a != 0)
	{
	    /* both macro parameters parenthesized so expression
	     * arguments expand correctly */
#define DIVIDE(c, a)							\
	    do								\
	    {								\
		int t = ((c) * 255) / (a);				\
		(c) = t < 0? 0 : t > 255? 255 : t;			\
	    } while (0)

	    DIVIDE (r, a);
	    DIVIDE (g, a);
	    DIVIDE (b, a);
	}

	*dst8++ = r;
	*dst8++ = g;
	*dst8++ = b;
	*dst8++ = a;
    }
}
#ifdef HAVE_LIBPNG
/* Writes an image to filename as an RGBA PNG (converted through
 * a8r8g8b8 and un-premultiplied). Returns TRUE on success.
 * Fixes over the previous version: the pixel buffer is no longer
 * leaked when fopen fails, and malloc results are checked.
 */
pixman_bool_t
write_png (pixman_image_t *image, const char *filename)
{
    int width = pixman_image_get_width (image);
    int height = pixman_image_get_height (image);
    int stride = width * 4;
    uint32_t *data = malloc (height * stride);
    png_bytep *row_pointers = malloc (height * sizeof (png_bytep));
    pixman_image_t *copy = NULL;
    png_struct *write_struct;
    png_info *info_struct;
    pixman_bool_t result = FALSE;
    FILE *f = NULL;
    int i;

    if (!data || !row_pointers)
	goto out1;

    f = fopen (filename, "wb");
    if (!f)
	goto out1;

    /* render into a straight a8r8g8b8 copy we own */
    copy = pixman_image_create_bits (
	PIXMAN_a8r8g8b8, width, height, data, stride);
    if (!copy)
	goto out1;

    pixman_image_composite32 (
	PIXMAN_OP_SRC, image, NULL, copy, 0, 0, 0, 0, 0, 0, width, height);

    a8r8g8b8_to_rgba_np (data, data, height * width);

    for (i = 0; i < height; ++i)
	row_pointers[i] = (png_bytep)(data + i * width);

    if (!(write_struct = png_create_write_struct (
	      PNG_LIBPNG_VER_STRING, NULL, NULL, NULL)))
	goto out1;

    if (!(info_struct = png_create_info_struct (write_struct)))
	goto out2;

    png_init_io (write_struct, f);

    png_set_IHDR (write_struct, info_struct, width, height,
		  8, PNG_COLOR_TYPE_RGB_ALPHA,
		  PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE,
		  PNG_FILTER_TYPE_BASE);

    png_write_info (write_struct, info_struct);
    png_write_image (write_struct, row_pointers);
    png_write_end (write_struct, NULL);

    result = TRUE;

out2:
    png_destroy_write_struct (&write_struct, &info_struct);

out1:
    /* a failed flush on close means the file is incomplete */
    if (f && fclose (f) != 0)
	result = FALSE;

    if (copy)
	pixman_image_unref (copy);
    free (row_pointers);
    free (data);
    return result;
}
#else /* no libpng */
/* stub: pixman was built without libpng, writing PNGs always fails */
pixman_bool_t
write_png (pixman_image_t *image, const char *filename)
{
    return FALSE;
}
#endif
/* Expands a packed 8-bit-per-channel ARGB color into pixman's 16 bit
 * channels by replicating each byte into both halves (0xff -> 0xffff).
 */
static void
color8_to_color16 (uint32_t color8, pixman_color_t *color16)
{
    uint16_t a = (color8 >> 24) & 0xff;
    uint16_t r = (color8 >> 16) & 0xff;
    uint16_t g = (color8 >> 8) & 0xff;
    uint16_t b = color8 & 0xff;

    color16->alpha = (a << 8) | a;
    color16->red = (r << 8) | r;
    color16->green = (g << 8) | g;
    color16->blue = (b << 8) | b;
}
/* Fills the whole image with a checkerboard of check_size squares,
 * alternating between color1 and color2 (packed 8-bit ARGB).
 * Fix: the two solid-fill source images are now unref'ed — they were
 * previously leaked on every call.
 */
void
draw_checkerboard (pixman_image_t *image,
		   int check_size,
		   uint32_t color1, uint32_t color2)
{
    pixman_color_t check1, check2;
    pixman_image_t *c1, *c2;
    int n_checks_x, n_checks_y;
    int i, j;

    color8_to_color16 (color1, &check1);
    color8_to_color16 (color2, &check2);
    
    c1 = pixman_image_create_solid_fill (&check1);
    c2 = pixman_image_create_solid_fill (&check2);

    /* round up so partial checks at the right/bottom edges are drawn */
    n_checks_x = (
	pixman_image_get_width (image) + check_size - 1) / check_size;
    n_checks_y = (
	pixman_image_get_height (image) + check_size - 1) / check_size;

    for (j = 0; j < n_checks_y; j++)
    {
	for (i = 0; i < n_checks_x; i++)
	{
	    pixman_image_t *src;

	    if (((i ^ j) & 1))
		src = c1;
	    else
		src = c2;

	    pixman_image_composite32 (PIXMAN_OP_SRC, src, NULL, image,
				      0, 0, 0, 0,
				      i * check_size, j * check_size,
				      check_size, check_size);
	}
    }

    pixman_image_unref (c1);
    pixman_image_unref (c2);
}
/* Invoke test_function (testnum, verbose) and return its result.
 *
 * On 32-bit x86 MinGW the call is made through inline assembly that
 * pushes the two arguments by hand WITHOUT re-aligning the stack, to
 * verify that pixman entry points do not silently depend on the
 * 16-byte stack alignment some compilers assume.  The return value is
 * read from %eax ("=a"); edx/ecx are listed as clobbered because they
 * are caller-saved in the cdecl convention.
 */
static uint32_t
call_test_function (uint32_t (*test_function)(int testnum, int verbose),
                    int testnum,
                    int verbose)
{
    uint32_t retval;

#if defined (__GNUC__) && defined (_WIN32) && (defined (__i386) || defined (__i386__))
    __asm__ (
        /* Deliberately avoid aligning the stack to 16 bytes */
        "pushl %1\n\t"
        "pushl %2\n\t"
        "call *%3\n\t"
        "addl $8, %%esp\n\t"
        : "=a" (retval)
        : "r" (verbose),
          "r" (testnum),
          "r" (test_function)
        : "edx", "ecx"); /* caller save registers */
#else
    /* Portable path: a plain indirect call. */
    retval = test_function (testnum, verbose);
#endif

    return retval;
}
/*
* A function, which can be used as a core part of the test programs,
* intended to detect various problems with the help of fuzzing input
* to pixman API (according to some templates, aka "smart" fuzzing).
* Some general information about such testing can be found here:
* http://en.wikipedia.org/wiki/Fuzz_testing
*
 * It may help detect:
* - crashes on bad handling of valid or reasonably invalid input to
* pixman API.
* - deviations from the behavior of older pixman releases.
* - deviations from the behavior of the same pixman release, but
* configured in a different way (for example with SIMD optimizations
* disabled), or running on a different OS or hardware.
*
* The test is performed by calling a callback function a huge number
* of times. The callback function is expected to run some snippet of
 * pixman code with pseudorandom variations to the data fed to
* pixman API. A result of running each callback function should be
* some deterministic value which depends on test number (test number
* can be used as a seed for PRNG). When 'verbose' argument is nonzero,
* callback function is expected to print to stdout some information
* about what it does.
*
* Return values from many small tests are accumulated together and
* used as final checksum, which can be compared to some expected
* value. Running the tests not individually, but in a batch helps
 * to reduce process start overhead and also makes it possible to
 * parallelize testing and utilize multiple CPU cores.
*
* The resulting executable can be run without any arguments. In
* this case it runs a batch of tests starting from 1 and up to
* 'default_number_of_iterations'. The resulting checksum is
* compared with 'expected_checksum' and FAIL or PASS verdict
* depends on the result of this comparison.
*
* If the executable is run with 2 numbers provided as command line
* arguments, they specify the starting and ending numbers for a test
* batch.
*
* If the executable is run with only one number provided as a command
* line argument, then this number is used to call the callback function
* once, and also with verbose flag set.
*/
/* Driver for the fuzzing harness described above.  Runs tests n1..n2
 * (from argv, or 1..default_number_of_iterations), accumulates their
 * return values into a checksum, and compares the full default batch
 * against expected_checksum.  Returns 0 on success, 1 on failure.
 */
int
fuzzer_test_main (const char *test_name,
                  int default_number_of_iterations,
                  uint32_t expected_checksum,
                  uint32_t (*test_function)(int testnum, int verbose),
                  int argc,
                  const char *argv[])
{
    int i, n1 = 1, n2 = 0;
    uint32_t checksum = 0;
    int verbose = getenv ("VERBOSE") != NULL;

    /* Work out which batch of test numbers to run. */
    if (argc >= 3)
    {
        n1 = atoi (argv[1]);
        n2 = atoi (argv[2]);

        if (n2 < n1)
        {
            printf ("invalid test range\n");
            return 1;
        }
    }
    else if (argc >= 2)
    {
        /* A single argument selects exactly one test, run verbosely. */
        n2 = atoi (argv[1]);
        checksum = call_test_function (test_function, n2, 1);
        printf ("%d: checksum=%08X\n", n2, checksum);
        return 0;
    }
    else
    {
        n1 = 1;
        n2 = default_number_of_iterations;
    }

#ifdef USE_OPENMP
#pragma omp parallel for reduction(+:checksum) default(none) \
    shared(n1, n2, test_function, verbose)
#endif
    for (i = n1; i <= n2; i++)
    {
        uint32_t crc = call_test_function (test_function, i, 0);

        if (verbose)
            printf ("%d: %08X\n", i, crc);

        checksum += crc;
    }

    /* Only the full default batch has a known expected checksum; any
     * other range just reports what it computed.
     */
    if (n1 != 1 || n2 != default_number_of_iterations)
    {
        printf ("%d-%d: checksum=%08X\n", n1, n2, checksum);
        return 0;
    }

    if (checksum != expected_checksum)
    {
        printf ("%s test failed! (checksum=%08X, expected %08X)\n",
                test_name, checksum, expected_checksum);
        return 1;
    }

    printf ("%s test passed (checksum=%08X)\n",
            test_name, checksum);
    return 0;
}
/* Try to obtain current time in seconds */
double
gettime (void)
{
#ifdef HAVE_GETTIMEOFDAY
    struct timeval tv;

    gettimeofday (&tv, NULL);
    /* Combine seconds and microseconds in 64-bit integer arithmetic
     * before the single division, avoiding precision loss for large
     * time_t values.
     */
    return (double)((int64_t)tv.tv_sec * 1000000 + tv.tv_usec) / 1000000.;
#else
    /* Fallback: CPU time, not wall-clock time. */
    return (double)clock() / (double)CLOCKS_PER_SEC;
#endif
}
/* Derive a PRNG seed from the current time by reinterpreting the low
 * 32 bits of the double's representation, then return a first random
 * value from the freshly seeded generator.
 */
uint32_t
get_random_seed (void)
{
    union { double d; uint32_t u32; } bits;

    bits.d = gettime ();
    prng_srand (bits.u32);

    return prng_rand ();
}
#ifdef HAVE_SIGACTION
#ifdef HAVE_ALARM
/* Message printed by on_alarm() when the watchdog fires; set by
 * fail_after().
 */
static const char *global_msg;

/* SIGALRM handler: report the timeout message and terminate the test.
 * NOTE(review): printf()/exit() are not async-signal-safe; this is
 * tolerated here because the process is about to die anyway, but
 * write()/_exit() would be strictly correct.
 */
static void
on_alarm (int signo)
{
    printf ("%s\n", global_msg);
    exit (1);
}
#endif
#endif
/* Arm a watchdog: if the calling process is still alive after
 * @seconds, print @msg and exit(1).  No-op on platforms without
 * sigaction()/alarm().
 */
void
fail_after (int seconds, const char *msg)
{
#ifdef HAVE_SIGACTION
#ifdef HAVE_ALARM
    struct sigaction action;

    global_msg = msg;

    memset (&action, 0, sizeof (action));
    action.sa_handler = on_alarm;

    /* Install the handler before arming the timer; the original code
     * called alarm() first, leaving a window in which SIGALRM would
     * have hit the default disposition (terminate without printing
     * the message).
     */
    sigaction (SIGALRM, &action, NULL);
    alarm (seconds);
#endif
#endif
}
/* Make integer/floating divide-by-zero raise SIGFPE (glibc
 * feenableexcept extension) so such bugs crash loudly in the test
 * suite instead of propagating infinities.  No-op elsewhere.
 */
void
enable_divbyzero_exceptions (void)
{
#ifdef HAVE_FENV_H
#ifdef HAVE_FEENABLEEXCEPT
    feenableexcept (FE_DIVBYZERO);
#endif
#endif
}
/* Allocate @size bytes aligned to @align, or NULL on failure.  Free
 * the result with free().
 */
void *
aligned_malloc (size_t align, size_t size)
{
#ifdef HAVE_POSIX_MEMALIGN
    void *result;

    if (posix_memalign (&result, align, size) != 0)
        result = NULL;

    return result;
#else
    /* NOTE(review): this fallback ignores the requested alignment and
     * only guarantees malloc()'s natural alignment.
     */
    return malloc (size);
#endif
}
/* Reduce an a8r8g8b8 pixel to a 15-bit key: r5g5b5 when is_rgb, or a
 * weighted gray value otherwise.  The gray weights sum to 512
 * (153 + 301 + 58), so (255 * 512) >> 2 = 32640 stays below 2^15.
 */
#define CONVERT_15(c, is_rgb)                                           \
    (is_rgb?                                                            \
     ((((c) >> 3) & 0x001f) |                                           \
      (((c) >> 6) & 0x03e0) |                                           \
      (((c) >> 9) & 0x7c00)) :                                          \
     (((((c) >> 16) & 0xff) * 153 +                                     \
       (((c) >> 8) & 0xff) * 301 +                                      \
       (((c) ) & 0xff) * 58) >> 2))
double
convert_srgb_to_linear (double c)
{
if (c <= 0.04045)
return c / 12.92;
else
return pow ((c + 0.055) / 1.055, 2.4);
}
double
convert_linear_to_srgb (double c)
{
if (c <= 0.0031308)
return c * 12.92;
else
return 1.055 * pow (c, 1.0/2.4) - 0.055;
}
/* Fill @palette with 2^depth random colors such that every entry
 * round-trips: expanding index i to its argb24 color and reducing that
 * color back to 15 bits must yield an ent[] slot that maps to i again.
 * is_rgb selects the 15-bit reduction used (see CONVERT_15).
 */
void
initialize_palette (pixman_indexed_t *palette, uint32_t depth, int is_rgb)
{
    int i;
    uint32_t mask = (1 << depth) - 1;

    /* Seed the whole 15-bit -> index table with random indices. */
    for (i = 0; i < 32768; ++i)
        palette->ent[i] = prng_rand() & mask;

    memset (palette->rgba, 0, sizeof (palette->rgba));

    for (i = 0; i < mask + 1; ++i)
    {
        uint32_t rgba24;
        pixman_bool_t retry;
        uint32_t i15;

        /* We filled the rgb->index map with random numbers, but we
         * do need the ability to round trip, that is if some indexed
         * color expands to an argb24, then the 15 bit version of that
         * color must map back to the index. Anything else, we don't
         * care about too much.
         */
        do
        {
            uint32_t old_idx;

            rgba24 = prng_rand();
            i15 = CONVERT_15 (rgba24, is_rgb);

            /* Reject this color if its 15-bit key is already claimed
             * by an earlier palette entry; stealing the slot would
             * break that entry's round trip.
             */
            old_idx = palette->ent[i15];
            if (CONVERT_15 (palette->rgba[old_idx], is_rgb) == i15)
                retry = 1;
            else
                retry = 0;
        } while (retry);

        palette->rgba[i] = rgba24;
        palette->ent[i15] = i;
    }

    /* Verify the round-trip invariant for every entry. */
    for (i = 0; i < mask + 1; ++i)
    {
        assert (palette->ent[CONVERT_15 (palette->rgba[i], is_rgb)] == i);
    }
}
/* Return the symbolic name of a pixman composite operator, for test
 * diagnostics.  Unrecognized values yield "<unknown operator>".
 */
const char *
operator_name (pixman_op_t op)
{
    switch (op)
    {
    case PIXMAN_OP_CLEAR: return "PIXMAN_OP_CLEAR";
    case PIXMAN_OP_SRC: return "PIXMAN_OP_SRC";
    case PIXMAN_OP_DST: return "PIXMAN_OP_DST";
    case PIXMAN_OP_OVER: return "PIXMAN_OP_OVER";
    case PIXMAN_OP_OVER_REVERSE: return "PIXMAN_OP_OVER_REVERSE";
    case PIXMAN_OP_IN: return "PIXMAN_OP_IN";
    case PIXMAN_OP_IN_REVERSE: return "PIXMAN_OP_IN_REVERSE";
    case PIXMAN_OP_OUT: return "PIXMAN_OP_OUT";
    case PIXMAN_OP_OUT_REVERSE: return "PIXMAN_OP_OUT_REVERSE";
    case PIXMAN_OP_ATOP: return "PIXMAN_OP_ATOP";
    case PIXMAN_OP_ATOP_REVERSE: return "PIXMAN_OP_ATOP_REVERSE";
    case PIXMAN_OP_XOR: return "PIXMAN_OP_XOR";
    case PIXMAN_OP_ADD: return "PIXMAN_OP_ADD";
    case PIXMAN_OP_SATURATE: return "PIXMAN_OP_SATURATE";
    case PIXMAN_OP_DISJOINT_CLEAR: return "PIXMAN_OP_DISJOINT_CLEAR";
    case PIXMAN_OP_DISJOINT_SRC: return "PIXMAN_OP_DISJOINT_SRC";
    case PIXMAN_OP_DISJOINT_DST: return "PIXMAN_OP_DISJOINT_DST";
    case PIXMAN_OP_DISJOINT_OVER: return "PIXMAN_OP_DISJOINT_OVER";
    case PIXMAN_OP_DISJOINT_OVER_REVERSE: return "PIXMAN_OP_DISJOINT_OVER_REVERSE";
    case PIXMAN_OP_DISJOINT_IN: return "PIXMAN_OP_DISJOINT_IN";
    case PIXMAN_OP_DISJOINT_IN_REVERSE: return "PIXMAN_OP_DISJOINT_IN_REVERSE";
    case PIXMAN_OP_DISJOINT_OUT: return "PIXMAN_OP_DISJOINT_OUT";
    case PIXMAN_OP_DISJOINT_OUT_REVERSE: return "PIXMAN_OP_DISJOINT_OUT_REVERSE";
    case PIXMAN_OP_DISJOINT_ATOP: return "PIXMAN_OP_DISJOINT_ATOP";
    case PIXMAN_OP_DISJOINT_ATOP_REVERSE: return "PIXMAN_OP_DISJOINT_ATOP_REVERSE";
    case PIXMAN_OP_DISJOINT_XOR: return "PIXMAN_OP_DISJOINT_XOR";
    case PIXMAN_OP_CONJOINT_CLEAR: return "PIXMAN_OP_CONJOINT_CLEAR";
    case PIXMAN_OP_CONJOINT_SRC: return "PIXMAN_OP_CONJOINT_SRC";
    case PIXMAN_OP_CONJOINT_DST: return "PIXMAN_OP_CONJOINT_DST";
    case PIXMAN_OP_CONJOINT_OVER: return "PIXMAN_OP_CONJOINT_OVER";
    case PIXMAN_OP_CONJOINT_OVER_REVERSE: return "PIXMAN_OP_CONJOINT_OVER_REVERSE";
    case PIXMAN_OP_CONJOINT_IN: return "PIXMAN_OP_CONJOINT_IN";
    case PIXMAN_OP_CONJOINT_IN_REVERSE: return "PIXMAN_OP_CONJOINT_IN_REVERSE";
    case PIXMAN_OP_CONJOINT_OUT: return "PIXMAN_OP_CONJOINT_OUT";
    case PIXMAN_OP_CONJOINT_OUT_REVERSE: return "PIXMAN_OP_CONJOINT_OUT_REVERSE";
    case PIXMAN_OP_CONJOINT_ATOP: return "PIXMAN_OP_CONJOINT_ATOP";
    case PIXMAN_OP_CONJOINT_ATOP_REVERSE: return "PIXMAN_OP_CONJOINT_ATOP_REVERSE";
    case PIXMAN_OP_CONJOINT_XOR: return "PIXMAN_OP_CONJOINT_XOR";
    case PIXMAN_OP_MULTIPLY: return "PIXMAN_OP_MULTIPLY";
    case PIXMAN_OP_SCREEN: return "PIXMAN_OP_SCREEN";
    case PIXMAN_OP_OVERLAY: return "PIXMAN_OP_OVERLAY";
    case PIXMAN_OP_DARKEN: return "PIXMAN_OP_DARKEN";
    case PIXMAN_OP_LIGHTEN: return "PIXMAN_OP_LIGHTEN";
    case PIXMAN_OP_COLOR_DODGE: return "PIXMAN_OP_COLOR_DODGE";
    case PIXMAN_OP_COLOR_BURN: return "PIXMAN_OP_COLOR_BURN";
    case PIXMAN_OP_HARD_LIGHT: return "PIXMAN_OP_HARD_LIGHT";
    case PIXMAN_OP_SOFT_LIGHT: return "PIXMAN_OP_SOFT_LIGHT";
    case PIXMAN_OP_DIFFERENCE: return "PIXMAN_OP_DIFFERENCE";
    case PIXMAN_OP_EXCLUSION: return "PIXMAN_OP_EXCLUSION";
    case PIXMAN_OP_HSL_HUE: return "PIXMAN_OP_HSL_HUE";
    case PIXMAN_OP_HSL_SATURATION: return "PIXMAN_OP_HSL_SATURATION";
    case PIXMAN_OP_HSL_COLOR: return "PIXMAN_OP_HSL_COLOR";
    case PIXMAN_OP_HSL_LUMINOSITY: return "PIXMAN_OP_HSL_LUMINOSITY";

    case PIXMAN_OP_NONE:
        return "<invalid operator 'none'>";
    };

    return "<unknown operator>";
}
/* Return a human-readable name for a pixman format code.  A second
 * switch handles the "fake" formats (null/solid/pixbuf/...) which are
 * not members of the pixman_format_code_t enum; unknown values yield
 * "<unknown format>".
 */
const char *
format_name (pixman_format_code_t format)
{
    switch (format)
    {
    /* 32bpp formats */
    case PIXMAN_a8r8g8b8: return "a8r8g8b8";
    case PIXMAN_x8r8g8b8: return "x8r8g8b8";
    case PIXMAN_a8b8g8r8: return "a8b8g8r8";
    case PIXMAN_x8b8g8r8: return "x8b8g8r8";
    case PIXMAN_b8g8r8a8: return "b8g8r8a8";
    case PIXMAN_b8g8r8x8: return "b8g8r8x8";
    case PIXMAN_r8g8b8a8: return "r8g8b8a8";
    case PIXMAN_r8g8b8x8: return "r8g8b8x8";
    case PIXMAN_x14r6g6b6: return "x14r6g6b6";
    case PIXMAN_x2r10g10b10: return "x2r10g10b10";
    case PIXMAN_a2r10g10b10: return "a2r10g10b10";
    case PIXMAN_x2b10g10r10: return "x2b10g10r10";
    case PIXMAN_a2b10g10r10: return "a2b10g10r10";

    /* sRGB formats */
    case PIXMAN_a8r8g8b8_sRGB: return "a8r8g8b8_sRGB";

    /* 24bpp formats */
    case PIXMAN_r8g8b8: return "r8g8b8";
    case PIXMAN_b8g8r8: return "b8g8r8";

    /* 16bpp formats */
    case PIXMAN_r5g6b5: return "r5g6b5";
    case PIXMAN_b5g6r5: return "b5g6r5";
    case PIXMAN_a1r5g5b5: return "a1r5g5b5";
    case PIXMAN_x1r5g5b5: return "x1r5g5b5";
    case PIXMAN_a1b5g5r5: return "a1b5g5r5";
    case PIXMAN_x1b5g5r5: return "x1b5g5r5";
    case PIXMAN_a4r4g4b4: return "a4r4g4b4";
    case PIXMAN_x4r4g4b4: return "x4r4g4b4";
    case PIXMAN_a4b4g4r4: return "a4b4g4r4";
    case PIXMAN_x4b4g4r4: return "x4b4g4r4";

    /* 8bpp formats */
    case PIXMAN_a8: return "a8";
    case PIXMAN_r3g3b2: return "r3g3b2";
    case PIXMAN_b2g3r3: return "b2g3r3";
    case PIXMAN_a2r2g2b2: return "a2r2g2b2";
    case PIXMAN_a2b2g2r2: return "a2b2g2r2";

    /* x4c4 and c8 (likewise g8 and x4g4) share numeric values, so only
     * one case label can exist for each pair.
     */
#if 0
    case PIXMAN_x4c4: return "x4c4";
    case PIXMAN_g8: return "g8";
#endif
    case PIXMAN_c8: return "x4c4 / c8";
    case PIXMAN_x4g4: return "x4g4 / g8";

    case PIXMAN_x4a4: return "x4a4";

    /* 4bpp formats */
    case PIXMAN_a4: return "a4";
    case PIXMAN_r1g2b1: return "r1g2b1";
    case PIXMAN_b1g2r1: return "b1g2r1";
    case PIXMAN_a1r1g1b1: return "a1r1g1b1";
    case PIXMAN_a1b1g1r1: return "a1b1g1r1";
    case PIXMAN_c4: return "c4";
    case PIXMAN_g4: return "g4";

    /* 1bpp formats */
    case PIXMAN_a1: return "a1";
    case PIXMAN_g1: return "g1";

    /* YUV formats */
    case PIXMAN_yuy2: return "yuy2";
    case PIXMAN_yv12: return "yv12";
    };

    /* Fake formats.
     *
     * This is separate switch to prevent GCC from complaining
     * that the values are not in the pixman_format_code_t enum.
     */
    switch ((uint32_t)format)
    {
    case PIXMAN_null: return "null";
    case PIXMAN_solid: return "solid";
    case PIXMAN_pixbuf: return "pixbuf";
    case PIXMAN_rpixbuf: return "rpixbuf";
    case PIXMAN_unknown: return "unknown";
    };

    return "<unknown format>";
};
/* Reference implementation of one channel of a composite:
 * returns op(src, dst), where srca/dsta are the corresponding alpha
 * values.  Each operator is expressed as src*Fa + dst*Fb, clamped to
 * 1.0 by mult_chan.  The separable blend modes (MULTIPLY..HSL_*) are
 * not implemented here and abort().
 */
static double
calc_op (pixman_op_t op, double src, double dst, double srca, double dsta)
{
#define mult_chan(src, dst, Fa, Fb) MIN ((src) * (Fa) + (dst) * (Fb), 1.0)

    double Fa, Fb;

    switch (op)
    {
    /* Plain Porter-Duff operators: constant Fa/Fb factors. */
    case PIXMAN_OP_CLEAR:
    case PIXMAN_OP_DISJOINT_CLEAR:
    case PIXMAN_OP_CONJOINT_CLEAR:
        return mult_chan (src, dst, 0.0, 0.0);

    case PIXMAN_OP_SRC:
    case PIXMAN_OP_DISJOINT_SRC:
    case PIXMAN_OP_CONJOINT_SRC:
        return mult_chan (src, dst, 1.0, 0.0);

    case PIXMAN_OP_DST:
    case PIXMAN_OP_DISJOINT_DST:
    case PIXMAN_OP_CONJOINT_DST:
        return mult_chan (src, dst, 0.0, 1.0);

    case PIXMAN_OP_OVER:
        return mult_chan (src, dst, 1.0, 1.0 - srca);

    case PIXMAN_OP_OVER_REVERSE:
        return mult_chan (src, dst, 1.0 - dsta, 1.0);

    case PIXMAN_OP_IN:
        return mult_chan (src, dst, dsta, 0.0);

    case PIXMAN_OP_IN_REVERSE:
        return mult_chan (src, dst, 0.0, srca);

    case PIXMAN_OP_OUT:
        return mult_chan (src, dst, 1.0 - dsta, 0.0);

    case PIXMAN_OP_OUT_REVERSE:
        return mult_chan (src, dst, 0.0, 1.0 - srca);

    case PIXMAN_OP_ATOP:
        return mult_chan (src, dst, dsta, 1.0 - srca);

    case PIXMAN_OP_ATOP_REVERSE:
        return mult_chan (src, dst, 1.0 - dsta, srca);

    case PIXMAN_OP_XOR:
        return mult_chan (src, dst, 1.0 - dsta, 1.0 - srca);

    case PIXMAN_OP_ADD:
        return mult_chan (src, dst, 1.0, 1.0);

    /* Disjoint operators (PDF): factors depend on srca/dsta, with the
     * alpha==0 cases handled explicitly to avoid dividing by zero.
     */
    case PIXMAN_OP_SATURATE:
    case PIXMAN_OP_DISJOINT_OVER_REVERSE:
        if (srca == 0.0)
            Fa = 1.0;
        else
            Fa = MIN (1.0, (1.0 - dsta) / srca);
        return mult_chan (src, dst, Fa, 1.0);

    case PIXMAN_OP_DISJOINT_OVER:
        if (dsta == 0.0)
            Fb = 1.0;
        else
            Fb = MIN (1.0, (1.0 - srca) / dsta);
        return mult_chan (src, dst, 1.0, Fb);

    case PIXMAN_OP_DISJOINT_IN:
        if (srca == 0.0)
            Fa = 0.0;
        else
            Fa = MAX (0.0, 1.0 - (1.0 - dsta) / srca);
        return mult_chan (src, dst, Fa, 0.0);

    case PIXMAN_OP_DISJOINT_IN_REVERSE:
        if (dsta == 0.0)
            Fb = 0.0;
        else
            Fb = MAX (0.0, 1.0 - (1.0 - srca) / dsta);
        return mult_chan (src, dst, 0.0, Fb);

    case PIXMAN_OP_DISJOINT_OUT:
        if (srca == 0.0)
            Fa = 1.0;
        else
            Fa = MIN (1.0, (1.0 - dsta) / srca);
        return mult_chan (src, dst, Fa, 0.0);

    case PIXMAN_OP_DISJOINT_OUT_REVERSE:
        if (dsta == 0.0)
            Fb = 1.0;
        else
            Fb = MIN (1.0, (1.0 - srca) / dsta);
        return mult_chan (src, dst, 0.0, Fb);

    case PIXMAN_OP_DISJOINT_ATOP:
        if (srca == 0.0)
            Fa = 0.0;
        else
            Fa = MAX (0.0, 1.0 - (1.0 - dsta) / srca);
        if (dsta == 0.0)
            Fb = 1.0;
        else
            Fb = MIN (1.0, (1.0 - srca) / dsta);
        return mult_chan (src, dst, Fa, Fb);

    case PIXMAN_OP_DISJOINT_ATOP_REVERSE:
        if (srca == 0.0)
            Fa = 1.0;
        else
            Fa = MIN (1.0, (1.0 - dsta) / srca);
        if (dsta == 0.0)
            Fb = 0.0;
        else
            Fb = MAX (0.0, 1.0 - (1.0 - srca) / dsta);
        return mult_chan (src, dst, Fa, Fb);

    case PIXMAN_OP_DISJOINT_XOR:
        if (srca == 0.0)
            Fa = 1.0;
        else
            Fa = MIN (1.0, (1.0 - dsta) / srca);
        if (dsta == 0.0)
            Fb = 1.0;
        else
            Fb = MIN (1.0, (1.0 - srca) / dsta);
        return mult_chan (src, dst, Fa, Fb);

    /* Conjoint operators, same structure with the complementary
     * factor formulas.
     */
    case PIXMAN_OP_CONJOINT_OVER:
        if (dsta == 0.0)
            Fb = 0.0;
        else
            Fb = MAX (0.0, 1.0 - srca / dsta);
        return mult_chan (src, dst, 1.0, Fb);

    case PIXMAN_OP_CONJOINT_OVER_REVERSE:
        if (srca == 0.0)
            Fa = 0.0;
        else
            Fa = MAX (0.0, 1.0 - dsta / srca);
        return mult_chan (src, dst, Fa, 1.0);

    case PIXMAN_OP_CONJOINT_IN:
        if (srca == 0.0)
            Fa = 1.0;
        else
            Fa = MIN (1.0, dsta / srca);
        return mult_chan (src, dst, Fa, 0.0);

    case PIXMAN_OP_CONJOINT_IN_REVERSE:
        if (dsta == 0.0)
            Fb = 1.0;
        else
            Fb = MIN (1.0, srca / dsta);
        return mult_chan (src, dst, 0.0, Fb);

    case PIXMAN_OP_CONJOINT_OUT:
        if (srca == 0.0)
            Fa = 0.0;
        else
            Fa = MAX (0.0, 1.0 - dsta / srca);
        return mult_chan (src, dst, Fa, 0.0);

    case PIXMAN_OP_CONJOINT_OUT_REVERSE:
        if (dsta == 0.0)
            Fb = 0.0;
        else
            Fb = MAX (0.0, 1.0 - srca / dsta);
        return mult_chan (src, dst, 0.0, Fb);

    case PIXMAN_OP_CONJOINT_ATOP:
        if (srca == 0.0)
            Fa = 1.0;
        else
            Fa = MIN (1.0, dsta / srca);
        if (dsta == 0.0)
            Fb = 0.0;
        else
            Fb = MAX (0.0, 1.0 - srca / dsta);
        return mult_chan (src, dst, Fa, Fb);

    case PIXMAN_OP_CONJOINT_ATOP_REVERSE:
        if (srca == 0.0)
            Fa = 0.0;
        else
            Fa = MAX (0.0, 1.0 - dsta / srca);
        if (dsta == 0.0)
            Fb = 1.0;
        else
            Fb = MIN (1.0, srca / dsta);
        return mult_chan (src, dst, Fa, Fb);

    case PIXMAN_OP_CONJOINT_XOR:
        if (srca == 0.0)
            Fa = 0.0;
        else
            Fa = MAX (0.0, 1.0 - dsta / srca);
        if (dsta == 0.0)
            Fb = 0.0;
        else
            Fb = MAX (0.0, 1.0 - srca / dsta);
        return mult_chan (src, dst, Fa, Fb);

    /* Separable blend modes are not modeled by this reference. */
    case PIXMAN_OP_MULTIPLY:
    case PIXMAN_OP_SCREEN:
    case PIXMAN_OP_OVERLAY:
    case PIXMAN_OP_DARKEN:
    case PIXMAN_OP_LIGHTEN:
    case PIXMAN_OP_COLOR_DODGE:
    case PIXMAN_OP_COLOR_BURN:
    case PIXMAN_OP_HARD_LIGHT:
    case PIXMAN_OP_SOFT_LIGHT:
    case PIXMAN_OP_DIFFERENCE:
    case PIXMAN_OP_EXCLUSION:
    case PIXMAN_OP_HSL_HUE:
    case PIXMAN_OP_HSL_SATURATION:
    case PIXMAN_OP_HSL_COLOR:
    case PIXMAN_OP_HSL_LUMINOSITY:
    default:
        abort();
        return 0; /* silence MSVC */
    }
#undef mult_chan
}
/* Reference composite of a single pixel: apply @mask to @src, then run
 * every channel through calc_op().  When component_alpha is set the
 * mask modulates each channel separately; otherwise only its alpha is
 * used, and a NULL mask behaves like an all-ones mask.
 */
void
do_composite (pixman_op_t op,
              const color_t *src,
              const color_t *mask,
              const color_t *dst,
              color_t *result,
              pixman_bool_t component_alpha)
{
    color_t srcval, srcalpha;
    double mr, mg, mb, ma;

    /* Reduce the mask to one multiplicative factor per channel. */
    if (mask == NULL)
    {
        mr = mg = mb = ma = 1.0;
    }
    else if (component_alpha)
    {
        mr = mask->r;
        mg = mask->g;
        mb = mask->b;
        ma = mask->a;
    }
    else
    {
        mr = mg = mb = ma = mask->a;
    }

    srcval.r = src->r * mr;
    srcval.g = src->g * mg;
    srcval.b = src->b * mb;
    srcval.a = src->a * ma;

    srcalpha.r = src->a * mr;
    srcalpha.g = src->a * mg;
    srcalpha.b = src->a * mb;
    srcalpha.a = src->a * ma;

    result->r = calc_op (op, srcval.r, dst->r, srcalpha.r, dst->a);
    result->g = calc_op (op, srcval.g, dst->g, srcalpha.g, dst->a);
    result->b = calc_op (op, srcval.b, dst->b, srcalpha.b, dst->a);
    result->a = calc_op (op, srcval.a, dst->a, srcalpha.a, dst->a);
}
/* Quantize @p to an m-bit channel and convert back to [0.0, 1.0]:
 * scale to the 2^m grid, fold the top step down so that 1.0 lands
 * exactly on 2^m - 1, then renormalize by (2^m - 1).
 */
static double
round_channel (double p, int m)
{
    int q;

    q = (int) (p * (1 << m));
    q -= q >> m;

    return (double) q / (double) ((1 << m) - 1);
}
/* Quantize @color to the channel widths of @format.  Formats without
 * color channels collapse RGB to black; formats without alpha read as
 * fully opaque.
 */
void
round_color (pixman_format_code_t format, color_t *color)
{
    const int rbits = PIXMAN_FORMAT_R (format);
    const int abits = PIXMAN_FORMAT_A (format);

    if (rbits == 0)
    {
        color->r = 0.0;
        color->g = 0.0;
        color->b = 0.0;
    }
    else
    {
        color->r = round_channel (color->r, rbits);
        color->g = round_channel (color->g, PIXMAN_FORMAT_G (format));
        color->b = round_channel (color->b, PIXMAN_FORMAT_B (format));
    }

    color->a = (abits == 0) ? 1 : round_channel (color->a, abits);
}
/* Check whether @pixel is a valid quantization of the a, r, g, b
 * parameters. Some slack is permitted.
 */
void
pixel_checker_init (pixel_checker_t *checker, pixman_format_code_t format)
{
    assert (PIXMAN_FORMAT_VIS (format));

    checker->format = format;

    /* Compute the shift (position of the least significant bit) of
     * each channel.  Fields are packed back to back, so each shift is
     * the neighbouring field's shift offset by the width of the field
     * being placed.
     */
    switch (PIXMAN_FORMAT_TYPE (format))
    {
    case PIXMAN_TYPE_A:
        checker->bs = 0;
        checker->gs = 0;
        checker->rs = 0;
        checker->as = 0;
        break;

    case PIXMAN_TYPE_ARGB:
    case PIXMAN_TYPE_ARGB_SRGB:
        checker->bs = 0;
        checker->gs = checker->bs + PIXMAN_FORMAT_B (format);
        checker->rs = checker->gs + PIXMAN_FORMAT_G (format);
        checker->as = checker->rs + PIXMAN_FORMAT_R (format);
        break;

    case PIXMAN_TYPE_ABGR:
        checker->rs = 0;
        checker->gs = checker->rs + PIXMAN_FORMAT_R (format);
        checker->bs = checker->gs + PIXMAN_FORMAT_G (format);
        checker->as = checker->bs + PIXMAN_FORMAT_B (format);
        break;

    case PIXMAN_TYPE_BGRA:
        /* With BGRA formats we start counting at the high end of the pixel.
         * Each step down subtracts the width of the field being placed;
         * the previous code subtracted the preceding field's width, which
         * only coincides when all channel widths are equal (true for every
         * current BGRA format, so observable behavior is unchanged).
         */
        checker->bs = PIXMAN_FORMAT_BPP (format) - PIXMAN_FORMAT_B (format);
        checker->gs = checker->bs - PIXMAN_FORMAT_G (format);
        checker->rs = checker->gs - PIXMAN_FORMAT_R (format);
        checker->as = checker->rs - PIXMAN_FORMAT_A (format);
        break;

    case PIXMAN_TYPE_RGBA:
        /* With RGBA formats we also start at the high end of the pixel */
        checker->rs = PIXMAN_FORMAT_BPP (format) - PIXMAN_FORMAT_R (format);
        checker->gs = checker->rs - PIXMAN_FORMAT_G (format);
        checker->bs = checker->gs - PIXMAN_FORMAT_B (format);
        checker->as = checker->bs - PIXMAN_FORMAT_A (format);
        break;

    default:
        assert (0);
        break;
    }

    /* Per-channel masks (in pixel position) and widths.  A zero-width
     * channel yields a zero mask, which the accessors treat as
     * "channel absent".
     */
    checker->am = ((1 << PIXMAN_FORMAT_A (format)) - 1) << checker->as;
    checker->rm = ((1 << PIXMAN_FORMAT_R (format)) - 1) << checker->rs;
    checker->gm = ((1 << PIXMAN_FORMAT_G (format)) - 1) << checker->gs;
    checker->bm = ((1 << PIXMAN_FORMAT_B (format)) - 1) << checker->bs;

    checker->aw = PIXMAN_FORMAT_A (format);
    checker->rw = PIXMAN_FORMAT_R (format);
    checker->gw = PIXMAN_FORMAT_G (format);
    checker->bw = PIXMAN_FORMAT_B (format);
}
/* Decompose @pixel into its raw (unnormalized) channel values using
 * the masks and shifts computed by pixel_checker_init().
 */
void
pixel_checker_split_pixel (const pixel_checker_t *checker, uint32_t pixel,
                           int *a, int *r, int *g, int *b)
{
    *b = (int) ((pixel & checker->bm) >> checker->bs);
    *g = (int) ((pixel & checker->gm) >> checker->gs);
    *r = (int) ((pixel & checker->rm) >> checker->rs);
    *a = (int) ((pixel & checker->am) >> checker->as);
}
void
pixel_checker_get_masks (const pixel_checker_t *checker,
uint32_t *am,
uint32_t *rm,
uint32_t *gm,
uint32_t *bm)
{
if (am)
*am = checker->am;
if (rm)
*rm = checker->rm;
if (gm)
*gm = checker->gm;
if (bm)
*bm = checker->bm;
}
/* Convert a raw pixel into a normalized color_t.  Missing alpha reads
 * as opaque and missing color channels as zero; sRGB formats are
 * decoded back to linear light.
 */
void
pixel_checker_convert_pixel_to_color (const pixel_checker_t *checker,
                                      uint32_t pixel, color_t *color)
{
    int a, r, g, b;

    pixel_checker_split_pixel (checker, pixel, &a, &r, &g, &b);

    /* Normalize each channel by its maximum raw value. */
    color->a = (checker->am == 0) ? 1.0 : a / (double)(checker->am >> checker->as);
    color->r = (checker->rm == 0) ? 0.0 : r / (double)(checker->rm >> checker->rs);
    color->g = (checker->gm == 0) ? 0.0 : g / (double)(checker->gm >> checker->gs);
    color->b = (checker->bm == 0) ? 0.0 : b / (double)(checker->bm >> checker->bs);

    if (PIXMAN_FORMAT_TYPE (checker->format) == PIXMAN_TYPE_ARGB_SRGB)
    {
        /* Stored values are sRGB-encoded; alpha is always linear. */
        color->r = convert_srgb_to_linear (color->r);
        color->g = convert_srgb_to_linear (color->g);
        color->b = convert_srgb_to_linear (color->b);
    }
}
/* Quantize @v to a channel with the given bit @width (mask/shift give
 * the channel's position); a zero mask means the channel is absent and
 * @def is used instead.  Scales to [0, 2^width] and folds the top
 * value back into the representable range.
 */
static int32_t
convert (double v, uint32_t width, uint32_t mask, uint32_t shift, double def)
{
    int32_t scaled;

    if (mask == 0)
        v = def;

    scaled = (int32_t) (v * ((mask >> shift) + 1));
    scaled -= scaled >> width;

    return scaled;
}
/* Quantize @color shifted by @limit into raw channel values, giving
 * one end of the acceptance interval used by pixel_checker_check().
 * For sRGB formats the quantization happens in sRGB space.
 */
static void
get_limits (const pixel_checker_t *checker, double limit,
            color_t *color,
            int *ao, int *ro, int *go, int *bo)
{
    color_t encoded;

    if (PIXMAN_FORMAT_TYPE (checker->format) == PIXMAN_TYPE_ARGB_SRGB)
    {
        encoded.a = color->a;
        encoded.r = convert_linear_to_srgb (color->r);
        encoded.g = convert_linear_to_srgb (color->g);
        encoded.b = convert_linear_to_srgb (color->b);
        color = &encoded;
    }

    /* A missing alpha channel defaults to opaque, missing colors to 0. */
    *ao = convert (color->a + limit, checker->aw, checker->am, checker->as, 1.0);
    *ro = convert (color->r + limit, checker->rw, checker->rm, checker->rs, 0.0);
    *go = convert (color->g + limit, checker->gw, checker->gm, checker->gs, 0.0);
    *bo = convert (color->b + limit, checker->bw, checker->bm, checker->bs, 0.0);
}
/* The acceptable deviation in units of [0.0, 1.0]
 */
#define DEVIATION (0.0064)

/* Per channel, the largest raw value still accepted as a match for
 * @color (color + DEVIATION, quantized).
 */
void
pixel_checker_get_max (const pixel_checker_t *checker, color_t *color,
                       int *am, int *rm, int *gm, int *bm)
{
    get_limits (checker, DEVIATION, color, am, rm, gm, bm);
}
/* Per channel, the smallest raw value still accepted as a match for
 * @color (color - DEVIATION, quantized).
 */
void
pixel_checker_get_min (const pixel_checker_t *checker, color_t *color,
                       int *am, int *rm, int *gm, int *bm)
{
    get_limits (checker, - DEVIATION, color, am, rm, gm, bm);
}
/* TRUE iff every channel of @pixel lies within the tolerance window
 * [min, max] computed from @color.
 */
pixman_bool_t
pixel_checker_check (const pixel_checker_t *checker, uint32_t pixel,
                     color_t *color)
{
    int32_t a_lo, a_hi, r_lo, r_hi, g_lo, g_hi, b_lo, b_hi;
    int32_t ai, ri, gi, bi;

    pixel_checker_get_min (checker, color, &a_lo, &r_lo, &g_lo, &b_lo);
    pixel_checker_get_max (checker, color, &a_hi, &r_hi, &g_hi, &b_hi);
    pixel_checker_split_pixel (checker, pixel, &ai, &ri, &gi, &bi);

    if (ai < a_lo || ai > a_hi)
        return FALSE;
    if (ri < r_lo || ri > r_hi)
        return FALSE;
    if (gi < g_lo || gi > g_hi)
        return FALSE;
    if (bi < b_lo || bi > b_hi)
        return FALSE;

    return TRUE;
}
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__atan2_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__atan2_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__atan2_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__atan2_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__atan2_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__atan2_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__atan2_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__atan2_fp64)
// C=scalar+B GB (_bind1st__atan2_fp64)
// C=scalar+B' GB (_bind1st_tran__atan2_fp64)
// C=A+scalar GB (_bind2nd__atan2_fp64)
// C=A'+scalar GB (_bind2nd_tran__atan2_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = atan2 (aij, bij)
#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = atan2 (x, y) ;

// true if the binop must be flipped
// (atan2 is not commutative, so flipped application must be handled
// explicitly by the templates)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ATAN2 || GxB_NO_FP64 || GxB_NO_ATAN2_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (atan2 is none of these, so this kernel is compiled out.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The loop body is provided by the shared template; this wrapper only
// supplies the atan2/fp64 type and operator macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__atan2_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// B is pre-sliced into B_ntasks tasks (B_ek_slicing) for parallelism.
GrB_Info GB (_Cdense_accumB__atan2_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// p_bwork points to the scalar b, of type double.
GrB_Info GB (_Cdense_accumb__atan2_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // generator artifact removed: an unreachable duplicate
    // "return (GrB_SUCCESS) ;" used to follow the block above
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// Colscale is only generated for semiring multiply operators; the
// atan2 binop has no colscale kernel, so this is compiled out.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// Rowscale is only generated for semiring multiply operators; the
// atan2 binop has no rowscale kernel, so this is compiled out.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// The ek-slicing workspaces are declared here and released by
// GB_FREE_WORK; the numeric loops live in GB_add_template.c.
GrB_Info GB (_AaddB__atan2_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// General eWiseMult; the numeric phase is in GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__atan2_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// C<#> = A.*B (method 02: A sparse/hyper, B bitmap/full), z = atan2(x,y).
// atan2 is not commutative and has no flipped counterpart, so the flipxy
// case is handled by re-instantiating the template with GB_FLIPPED set,
// which evaluates fmult(y,x) instead of fmult(x,y).
GrB_Info GB (_AemultB_02__atan2_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// C<M> = A.*B (method 03: M sparse/hyper, A and B bitmap/full),
// z = atan2(x,y).  Implementation is entirely in the included template.
GrB_Info GB (_AemultB_03__atan2_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap, z = atan2(x,y).
// Implementation is entirely in the included bitmap template.
GrB_Info GB (_AemultB_bitmap__atan2_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = atan2 (x, Bx [p]) for each of the bnz entries of B: the scalar
// x is bound as the operator's first argument ("bind1st").
GrB_Info GB (_bind1st__atan2_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // per-entry presence flags, consulted via GBB
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries GBB reports as absent
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = atan2 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = atan2 (Ax [p], y) for each of the anz entries of A: the scalar
// y is bound as the operator's second argument ("bind2nd").
GrB_Info GB (_bind2nd__atan2_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries GBB reports as present are computed
        if (GBB (Ab, p))
        {
            double aij = GBX (Ax, p, false) ;
            Cx [p] = atan2 (aij, y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = atan2 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply atan2 with x bound first.
GrB_Info GB (_bind1st_tran__atan2_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish GB_ATYPE (same definition) for code following this kernel
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = atan2 (aij, y) ; \
}
// C = op (A', y): transpose A and apply atan2 with y bound second.
GrB_Info GB (_bind2nd_tran__atan2_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
knapsack.c | /* A Naive recursive implementation
of 0-1 Knapsack problem */
#include <stdio.h>
#include <omp.h>
#include "time.h"
// A utility function that returns
// maximum of two integers
// Return the larger of the two integers.
int max(int a, int b) { if (a > b) return a; return b; }
// Returns the maximum value that can be
// put in a knapsack of capacity W
// Returns the maximum value that fits in a knapsack of capacity W, choosing
// among the first n items (naive recursive 0-1 knapsack).
//
// Fix: the original wrapped each recursive call in "#pragma omp parallel"
// with the result variable shared — every thread redundantly recomputed the
// same call and all of them raced on the unsynchronized shared write
// (undefined behavior, and 16x wasted work rather than a speedup).  The
// recursion is now plain and correct; its value is unchanged.
int knapSack(int W, int wt[], int val[], int n)
{
    // Base case: no items left, or no capacity left.
    if (n == 0 || W == 0)
        return 0;

    // Item n-1 cannot fit: it must be excluded.
    if (wt[n - 1] > W)
        return knapSack(W, wt, val, n - 1);

    // Otherwise take the better of including or excluding item n-1.
    int with_item = val[n - 1] + knapSack(W - wt[n - 1], wt, val, n - 1);
    int without_item = knapSack(W, wt, val, n - 1);
    return (with_item > without_item) ? with_item : without_item;
}
// Driver program to test above function
// Driver: solves a small fixed 0-1 knapsack instance and reports the time.
//
// Fix: clock() measures CPU time summed over all threads, which misreports
// elapsed time for OpenMP code; use omp_get_wtime() (wall clock) when
// OpenMP is available and fall back to clock() otherwise.
int main()
{
#ifdef _OPENMP
    omp_set_num_threads(16);
#endif
    double begin, end;
#ifdef _OPENMP
    begin = omp_get_wtime();
#else
    begin = (double) clock() / CLOCKS_PER_SEC;
#endif
    int val[] = { 60, 100, 120 };
    int wt[] = { 10, 20, 30 };
    int W = 50;
    int n = sizeof(val) / sizeof(val[0]);
    printf("%d\n", knapSack(W, wt, val, n));
#ifdef _OPENMP
    end = omp_get_wtime();
    printf("knapSack took %g seconds (wall clock)\n", end - begin);
#else
    end = (double) clock() / CLOCKS_PER_SEC;
    printf("knapSack took %g seconds (CPU time)\n", end - begin);
#endif
    return 0;
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 *
 * NOTE: *y is used as scratch space and is modified by the carry
 * normalization below, exactly as in the classic GNU libc manual example.
 *
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y until y's microsecond field no longer
     * exceeds x's. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += borrow;
        y->tv_usec -= 1000000 * borrow;
    }
    /* Conversely, fold any surplus whole seconds of the microsecond
     * difference back into y's seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += 1000000 * carry;
    }
    /* After normalization the field-wise subtraction yields a
     * non-negative tv_usec. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative difference iff x's seconds are smaller after the carries. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver: allocates two time planes (Jacobi double buffering) plus the
 * coefficient field, runs the order-4 (25-point) wave stencil TESTS times,
 * and reports the best run.
 *
 * Fixes vs. the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when too few command-line
 *    arguments were given; they now have defaults.
 *  - roc2 was malloc'd and then immediately overwritten by a second
 *    malloc (leak); the first allocation is removed.
 *  - the data-initialization loops started at 1 and never touched A[1],
 *    yet the stencil reads index 0 (halo) and the A[(t+1)%2] plane; all
 *    of A[0], A[1] and roc2 are now fully initialized.
 *  - tile_size is freed before exit.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* defaults; +8 accounts for the 4-deep halo on each face */
    int Nx = 64 + 8, Ny = 64 + 8, Nz = 64 + 8, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    /* A[0]/A[1]: the two time planes; roc2: per-point coefficients */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    /* tile size information, including extra element to decide the list
     * length; the list is modified here before source-to-source
     * transformations */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 24;
    tile_size[3] = 512;
    tile_size[4] = -1;
    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;
    /* deterministic pseudo-random initial data; start at 0 so the halo
     * is defined before the stencil reads it */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0; /* previous plane is read at t=0 */
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    /* Free allocated arrays */
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    free(A);          /* top-level pointer array */
    free(tile_size);  /* was leaked */
    return 0;
}
|
tutorial_region.c | /*
* Copyright (c) 2015 - 2021, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY LOG OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <stdint.h>
#include <mpi.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "tutorial_region.h"
#ifdef TUTORIAL_ENABLE_MKL
#include "mkl.h"
#else
/* Terrible DGEMM implementation should only be used if there is no */
/* BLAS support. Build assumes that the Intel(R) Math Kernel Library */
/* is the only provider of BLAS. */
static inline
void dgemm(const char *transa, const char *transb, const int *M,
           const int *N, const int *K, const double *alpha,
           const double *A, const int *LDA, const double *B,
           const int *LDB, const double *beta, double *C, const int *LDC)
{
    /*
     * Fallback GEMM: C = A * B for row-major (*M x *K) A and (*K x *N) B
     * with row strides *LDA, *LDB, *LDC.  transa/transb/alpha/beta are
     * accepted for BLAS signature compatibility but ignored, as before.
     *
     * Fix: the inner product previously accumulated A[i][j] * B[j][k] —
     * the A term did not vary with k and the B indexing was transposed,
     * so the result was not a matrix product.  It now accumulates
     * A[i][k] * B[k][j].
     */
    (void)transa;
    (void)transb;
    (void)alpha;
    (void)beta;
#pragma omp parallel for
    for (int i = 0; i < *M; ++i) {
        for (int j = 0; j < *N; ++j) {
            C[i * *LDC + j] = 0;
            for (int k = 0; k < *K; ++k) {
                C[i * *LDC + j] += A[i * *LDA + k] * B[k * *LDB + j];
            }
        }
    }
}
#endif
/* Sleep for big_o seconds (fractional values allowed); a big_o of 0.0 is a
 * no-op.  Returns 0 on success or the clock_nanosleep() error code. */
int tutorial_sleep(double big_o, int do_report)
{
    if (big_o == 0.0) {
        return 0;
    }
    time_t whole_sec = (time_t)big_o;
    struct timespec request;
    request.tv_sec = whole_sec;
    request.tv_nsec = (long)((big_o - whole_sec) * 1E9);
    if (do_report) {
        printf("Sleeping for %e seconds\n", big_o);
        fflush(stdout);
    }
    return clock_nanosleep(CLOCK_REALTIME, 0, &request, NULL);
}
/* Run a single DGEMM whose working set scales with big_o
 * (matrix_size ~ cbrt(4e9 * big_o)); big_o of 0.0 is a no-op.
 * Returns 0 on success or the posix_memalign() error code.
 *
 * Fixes vs. the original:
 *  - "random() / RAND_MAX" was integer division, producing almost always
 *    0; a (double) cast now yields uniform values in [0, 1].
 *  - A and B are no longer leaked when a later allocation fails
 *    (free(NULL) is a no-op, so unconditional frees are safe). */
int tutorial_dgemm(double big_o, int do_report)
{
    int err = 0;
    if (big_o != 0.0) {
        int matrix_size = (int) pow(4e9 * big_o, 1.0/3.0);
        int pad_size = 64;
        size_t mem_size = sizeof(double) * (matrix_size * (matrix_size + pad_size));
        char transa = 'n';
        char transb = 'n';
        int M = matrix_size;
        int N = matrix_size;
        int K = matrix_size;
        int LDA = matrix_size + pad_size / sizeof(double);
        int LDB = matrix_size + pad_size / sizeof(double);
        int LDC = matrix_size + pad_size / sizeof(double);
        double alpha = 2.0;
        double beta = 3.0;
        double *A = NULL;
        double *B = NULL;
        double *C = NULL;
        err = posix_memalign((void **)&A, pad_size, mem_size);
        if (!err) {
            err = posix_memalign((void **)&B, pad_size, mem_size);
        }
        if (!err) {
            err = posix_memalign((void **)&C, pad_size, mem_size);
        }
        if (!err) {
            long num_elem = (long)(mem_size / sizeof(double));
#pragma omp parallel for
            for (long i = 0; i < num_elem; ++i) {
                /* (double) cast: integer division truncated to 0/1 before */
                A[i] = (double)random() / RAND_MAX;
                B[i] = (double)random() / RAND_MAX;
            }
            if (do_report) {
                printf("Executing a %d x %d DGEMM\n", matrix_size, matrix_size);
                fflush(stdout);
            }
            dgemm(&transa, &transb, &M, &N, &K, &alpha,
                  A, &LDA, B, &LDB, &beta, C, &LDC);
        }
        /* safe on every path: pointers start NULL and free(NULL) is a no-op */
        free(C);
        free(B);
        free(A);
    }
    return err;
}
/* Run a STREAM-triad (a = b + scalar*c) over ~big_o * 5e8 doubles; a big_o
 * of 0.0 is a no-op.  Returns 0 on success or the posix_memalign() error.
 *
 * Fixes vs. the original:
 *  - "(size_t)big_o * 500000000" truncated big_o BEFORE scaling, so any
 *    big_o < 1.0 collapsed to zero work; the product is truncated instead.
 *  - num_stream is size_t, so it is printed with %zu (%d was undefined
 *    behavior).
 *  - buffers are no longer leaked when a later allocation fails. */
int tutorial_stream(double big_o, int do_report)
{
    int err = 0;
    if (big_o != 0.0) {
        size_t cline_size = 64;
        /* scale first, then truncate */
        size_t num_stream = (size_t)(big_o * 500000000.0);
        size_t mem_size = sizeof(double) * num_stream;
        double *a = NULL;
        double *b = NULL;
        double *c = NULL;
        double scalar = 3.0;
        err = posix_memalign((void **)&a, cline_size, mem_size);
        if (!err) {
            err = posix_memalign((void **)&b, cline_size, mem_size);
        }
        if (!err) {
            err = posix_memalign((void **)&c, cline_size, mem_size);
        }
        if (!err) {
#pragma omp parallel for
            for (size_t i = 0; i < num_stream; i++) {
                a[i] = 0.0;
                b[i] = 1.0;
                c[i] = 2.0;
            }
            if (do_report) {
                printf("Executing STREAM triad on length %zu vectors.\n", num_stream);
                fflush(stdout);
            }
#pragma omp parallel for
            for (size_t i = 0; i < num_stream; ++i) {
                a[i] = b[i] + scalar * c[i];
            }
        }
        /* safe on every path: pointers start NULL and free(NULL) is a no-op */
        free(c);
        free(b);
        free(a);
    }
    return err;
}
/* Perform an MPI_Alltoall whose message size grows with big_o; a big_o of
 * 0.0 is a no-op.  Returns 0 on success, else the first MPI or
 * posix_memalign() error encountered.
 *
 * Fixes vs. the original:
 *  - "int err = MPI_Comm_size(...)" declared a NEW err that shadowed the
 *    outer one, so every failure in this function was silently dropped
 *    and the function always returned 0.
 *  - buffers are freed unconditionally (they were leaked on any error).
 *  - num_send is size_t and is printed with %zu, not %d. */
int tutorial_all2all(double big_o, int do_report)
{
    /* Best case scaling is O(ln(num_send) + num_rank) => */
    /* num_send = exp(big_o_n - factor * num_rank) */
    /* We have somewhat arbitrarily set factor to 1/128 */
    int err = 0;
    if (big_o != 0.0) {
        int num_rank = 0;
        err = MPI_Comm_size(MPI_COMM_WORLD, &num_rank);
        size_t num_send = (size_t)pow(2.0, 16 * big_o - num_rank / 128.0);
        num_send = num_send ? num_send : 1;
        size_t cline_size = 64;
        char *send_buffer = NULL;
        char *recv_buffer = NULL;
        if (!err) {
            err = posix_memalign((void **)&send_buffer, cline_size,
                                 num_rank * num_send * sizeof(char));
        }
        if (!err) {
            err = posix_memalign((void **)&recv_buffer, cline_size,
                                 num_rank * num_send * sizeof(char));
        }
        if (!err) {
            if (do_report) {
                printf("Executing all2all of %zu byte buffer on %d ranks.\n",
                       num_send * sizeof(char), num_rank);
                fflush(stdout);
            }
            err = MPI_Alltoall(send_buffer, num_send, MPI_CHAR, recv_buffer,
                               num_send, MPI_CHAR, MPI_COMM_WORLD);
        }
        if (!err) {
            err = MPI_Barrier(MPI_COMM_WORLD);
        }
        /* safe on every path: pointers start NULL, free(NULL) is a no-op */
        free(recv_buffer);
        free(send_buffer);
    }
    return err;
}
/* Like tutorial_dgemm(), but the matrices persist across calls and are only
 * (re)allocated when big_o changes; big_o of 0.0 frees the cached matrices.
 * Returns 0 on success or the posix_memalign() error code.
 *
 * Fixes vs. the original:
 *  - "random() / RAND_MAX" was integer division (values almost always 0).
 *  - big_o_last is updated only after a SUCCESSFUL allocation.  Before,
 *    it was updated first, so after an allocation failure — or after the
 *    big_o==0 teardown, which never reset it — a later call at the same
 *    size skipped allocation and handed NULL pointers to dgemm(). */
int tutorial_dgemm_static(double big_o, int do_report)
{
    static double big_o_last = 0.0;
    static double *A = NULL;
    static double *B = NULL;
    static double *C = NULL;
    int err = 0;
    if (big_o != 0.0) {
        int matrix_size = (int) pow(4e9 * big_o, 1.0/3.0);
        int pad_size = 64;
        size_t mem_size = sizeof(double) * (matrix_size * (matrix_size + pad_size));
        char transa = 'n';
        char transb = 'n';
        int M = matrix_size;
        int N = matrix_size;
        int K = matrix_size;
        int LDA = matrix_size + pad_size / sizeof(double);
        int LDB = matrix_size + pad_size / sizeof(double);
        int LDC = matrix_size + pad_size / sizeof(double);
        double alpha = 2.0;
        double beta = 3.0;
        if (big_o != big_o_last) {
            if (A) {
                free(C);
                free(B);
                free(A);
                A = NULL;
                B = NULL;
                C = NULL;
            }
            err = posix_memalign((void **)&A, pad_size, mem_size);
            if (!err) {
                err = posix_memalign((void **)&B, pad_size, mem_size);
            }
            if (!err) {
                err = posix_memalign((void **)&C, pad_size, mem_size);
            }
            if (!err) {
                long num_elem = (long)(mem_size / sizeof(double));
#pragma omp parallel for
                for (long i = 0; i < num_elem; ++i) {
                    /* (double) cast: integer division truncated to 0/1 */
                    A[i] = (double)random() / RAND_MAX;
                    B[i] = (double)random() / RAND_MAX;
                }
                big_o_last = big_o; /* remember only fully-allocated sizes */
            }
            else {
                free(C);
                free(B);
                free(A);
                A = NULL;
                B = NULL;
                C = NULL;
                big_o_last = 0.0;
            }
        }
        if (!err) {
            if (do_report) {
                printf("Executing a %d x %d DGEMM\n", matrix_size, matrix_size);
                fflush(stdout);
            }
            dgemm(&transa, &transb, &M, &N, &K, &alpha,
                  A, &LDA, B, &LDB, &beta, C, &LDC);
        }
    }
    else if (A) {
        free(C);
        free(B);
        free(A);
        A = NULL;
        B = NULL;
        C = NULL;
        big_o_last = 0.0; /* allow reallocation at the previous size */
    }
    return err;
}
|
openbsdsoftraid_fmt_plug.c | /*
* Copyright (c) 2014 Thiébaud Weksteen <thiebaud at weksteen dot fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Fixed BE issues, and build problems (Fall 2014), JimF.
*/
#include "arch.h"
#if FMT_EXTERNS_H
extern struct fmt_main fmt_openbsd_softraid;
#elif FMT_REGISTERS_H
john_register_one(&fmt_openbsd_softraid);
#else
#include "aes.h"
#include "hmac_sha.h"
#include "sha.h"
#include "common.h"
#include "formats.h"
#include "bcrypt_pbkdf.h"
#include "pbkdf2_hmac_sha1.h"
#include "loader.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "OpenBSD-SoftRAID"
#define FORMAT_NAME ""
#define FORMAT_TAG "$openbsd-softraid$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT " (8192 iterations)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define OPENBSD_SOFTRAID_SALTLENGTH 128
#define OPENBSD_SOFTRAID_KEYS 32
#define OPENBSD_SOFTRAID_KEYLENGTH 64 /* AES-XTS-256 keys are 512 bits long */
#define OPENBSD_SOFTRAID_MACLENGTH 20
#define BINARY_SIZE OPENBSD_SOFTRAID_MACLENGTH
#define BINARY_ALIGN sizeof(uint32_t)
static struct fmt_tests tests_openbsdsoftraid[] = {
// too long of line was causing my Sparc box to fail to compile this code
{"\
$openbsd-softraid$8192$c2891132ca5305d1189a7da94d32de29182abc2f56dc641d685e471935f2646e06b79f1d6c102c2f62f3757a20efb0a110b8ae207f9129f0dc5eea8ab05cc8280e0ba2460faf979dbac9f577c4a083349064364556b7ad15468c17c4d794c3da0ddf5990cc66751a6ded8d534531dd9aa9fce2f43e68d6a7200e135beb55e752$311c42d1d8daf1e47e0150c8d4a455a0567b062970c1838faaedcd3e43795545de64971c7598902a6e2c3fffcf8abe2ef78979164d0c9089fbb931c4c9dac8b86c85eeace11095e38487e41eb7b6094d96c339e86686121fbe1c32dbff3c00706926b22ec3a1329f346c599d132105b5d182a380161504d535f9836bb7286331adce1e47e4e251a0249612a94312bb309a6f4558568467731c1ae8c9b910d27102dca2a72228ffde7bfc60004c8ab33ca2b01aa476c4f42f99a3d1f904e3bbc56270edb314a62e92cf68185ace93731ef4ce08dff3c695c45e35b57ed8ab1552114635eb2ff531437ba5c3a08ebf3e73b6bbb7fe1ad98373da349f09284ae819b6a2f6fc5a10aec347f3c2331abc1d6617e77d68f314fdb683294f3ef351869491c4fb096969924215d711c15e5fce533dc5acaed4a473b14c595bababc178e62ef065770716520ecddc7cbf1cbed1250b7e004ab975bc29780c952087ec382bf6e77447720a10a8c2993262a2b21f8a3f47e35daa5b620573626b474d3e8abf8e73164664b041a18fe35c2a1905fad617bf6e6c380fdeeb680fa89b6c6dc7676ad93fde25076ecb8855d623b45af9a16a62a957d85c4c70896019be1827ad9320a69f18bdfc2674f04babdbfcd679c0ef22f7ab2a18818b9b425e61d8c06196a23babd0aefd5a00f1b297a66d973daae40f4dbd9be60d8953fafbd51f7745e2d04b5c80b63ad1f550cd939490b346d4fe7c1fc266d593bcafac0d8989994e174de6d1ef4ce78b3224ea4e68ccbf998654a067558537be332f5cae4b44c18664428d45b71cde5b53bedddf8a7daf47fce212578b72\
7e420c91de0baa1108683dd5b5534e81f4fe945d27fd9d28934afc8d15d95932952c0be717d4d87bb8255bf658a083c3aed643f7a6cfb56fbcbdab9e0a7348b0a3a91e3d560d1ec96f5769551e64beb54a499f6d6dd37e4361d484fe4f7bac4dc26c8a1a2609592d527b134c8212d71b3578217e0ec1da317c69e7e8c39d2d5b2d4073fa9c618a01a092b61613f6f1e41e6ab43d8ca010f177947aeab2884e9a4dd28453ff5bdadb765680733e7af1463ec1b20b879ae01c9256da0207811f956b3950f6db743a9e34a6d8f0fdfa5c47b4f807f0017c2092d72dc19d111711e796ffc4035da3a4caa6a5301491d0473b0d47cd01b705ff11a10263867013a11c65462c311fa5ac9a2598142779b55f09dbec89ac18049c29e5baf3aa38696a3b92d08b02cb10af5389e06058b3ad8be09b121e4e320520413775b7c6fbb3f2b332e3ac0295a4a4dfb4a56ea1c32bc28c149ffaa3b426f5a17a11afe56426b38966c86734654fe05a611c8f025ee4092656c097bbf59743c31508fa9e80ff86a2ae33d401ec316e65eef251d173e9565ffc1672b8b341174427a851a6a4c42554848c637283d13d4ba5b5414b4e61ade6ec7ef7b77186a81adff381e6a79d3dac2c68bf386f100fef1c354221a2ba3d8a7a10460f637eaa152ab79027ab94e5965660de3ed66dac4a0f8e75b85d768e51c8e82a26cb81249ca8d249d8c5cdc8bd55289679d3915a397d31863334df18e2fe3ef9069b064c4ef6b418e5388817040ae9922e5e9f57a8bf3b3fe04748b9cf5068ac86f942b4068853602a6c6c794423569b665b359d5f947c2e5ff194d23d953b435b2b3834513fdfda2b66fcea22883690b1cc56c2fcaa5600895ff8d8ae9e3a6a2b6258ff873242d1128b20e7d1e843ade1bd206b541eba02a214a95cd83860865f947cb4adbd465957055060df05e53fa9ea4b29867c92b224be939d3715be0e61b7aa0e24a8f25bccfa3b7901a3f0a8cb25498d7c9899d435b409220723dcde1d38ab6d4e7cfb42d443c9b65a37\
53891f46adb9bc52574699a7b642955702ed662d04cbe21aeec7c15db7e325dcaa74c85c5e3ed54424642d5bd8d3109c2d4c0079b3d2c5f2da12ad5b25407ae48f6fe4fc653b23a7f2d56a93c898dd0bd59ba02295934c9f7ffb433ef611d51b7c203f374cf9e8b69d4952ccc44593447ad41540270b0e30c349401048cbce10a0e1bae373de15c878982b0af837fb5432cd2471516d1e218296ce462a59fd5412921bbd3f75cf65070f7bafe21105ba83f7ffe8ece71534863c0dd731a2f3c29fff97b8ce798890a1b158a8891bb6f2dd751e75c0cb0db7ea152d7cdc91663f46f85d12ce0015351dba5225b2a87b64cc30518b23e31b2bfbb0b2a5042eeaea1234a57549a3e55ddd708e3380df032e93071b10b3e6902152c90ffd99bda0177a197779341307c5d9f335e698259ade70564eab9d2856aa1aa814211e71ba2885ef9cd5f5bdd225af2f6eebf775cc0bbdb3e519edb7c49a9a1984cc0cc012679aca8fd1d002fa64b2df095b4a9e2b496e3f4b544955c817efb29562cf8b3d2eeccbe4d364ce71d2d12b504b11de4747139ef505bdd12f382eb02fa3f5272b710644a9c20660ca5b4fa74be60984240b555c1f34261ee1d72d9eb2cc680f32b4603865503addc3a1fdc49d2b158d3407a282edd72ef51ad021338fdebf413726e1778e3bc3909b670d3f40e824391c5525b162ea01c29205e12f8e62bdd8cd0f21f6f7b44af4521c2dd23a7f3508e5dc6fffa3365e4ca1cac33bb515a5c5495dc059a94396de7d802758b65bb4cecb90bf69ab4126eab85958cb8b64eedf3a0955ab42cdc98ef90620e10cc854b9c02bfaff60742494a0c3bb34ef6d6bb861b275d975bdc4a10ac922dc70c1b03a4c01943a704af36ec8d79cf2f9ce0f602f01bef4a32edeb8fbba863c945552efc814410ac6bb839349ea65879644003bdda35d40eabdc9dcfb2d67d945b7f111ab62591763a0dd2d338594eff004237e5acce69dd9d2cdbb9ce121bd$5337e4ba9d877a1e84559688386fbc844c5fe557", "password1" },
// bcrypt PBKDF
{"$openbsd-softraid$16$981b56db39bb572998affacafe76d495efd75212b0c31dccb4e8e8f70ecc874ddbc51d5cdd2b4fff6d98ee589cb271738b55f43c33e620eaef93e21398963421031940e455c44bcfbbcf0e686e2b860585bea1ab4891cf666a147ae2da97243d068a7171a1227a667cbbda83c50ff3ed9fdd447ed4d9699844a5b68863f3b3df$8fb40eb55b0ced789a88138f70c06c3c71321c49fc4a8dace25aaac36cdf6615e09896f8f3eba0b9d0f1cedb67935b39d45667abf976a0e032082eff80f4ab6bbc004ed29a3c54c49607ec76ca8135d08b63a144b1337e2345e82247955576221d4a59e2f247b0037599328cf0f7c4f45dc4fd5c0cf6b9ca788cadd9fa7f02386d67004477d69e19d1ebdf2d8b9eb529189dcb7c6093353465c055b8b9cec24e6a5f47191b4e3759fb8f9b2e7dab90678c336ea25f0e2310063462b4ef38c30c0bb31ae5b1b6a3b237df30b278407ce94aac718b0784d95b6fa1ac4c1f1d8f570485967144ef5081a0b156db126c73bb74766d99f8691aa327d55525533983333949ab2180a90064ed4673740a599860778fc99bed8b72d153ad9d365e81e1000d3952856538fcef36c4892605eb9a10cb1820769b0387cd8a6617a8e6774f572ce1c0208e70364bbbdbe16afc6e9caddaecd34cc2d4d2e9ca9a87285dcc560625320378be238faaa00d143d5294e284471d906ea56c7c2d2c3575d83e78c68da2eaf3b80bfb8ad7ab6b9265c62e8b07e9e285cd37dd4b1eccf4ae031cf6f6855db85cf2238aa88c1c101371c8b3dfb03db1d732a7777644c2942c14e44ce4eaa2894c5392ff2ee0c0f02cf0cd6d50b645648a7aa039287b6a748f27b325e8b7206c915dafef0b11166860475d67c921a637aaddb25a98812be7076f9a2c62071552444d6d59d24227d8bb948834ea3b0a14ea1b0e3682ae4667e47c271b11847889995cd0c8bed5740b0b4c4873d285cef1213777f1227d2447c43fa9f6410e6f76571fffb47d7594ac0bf1ecdf27d13543bbe59f99f4d87f02ef62ce2ba34cf090410d024395c178cf22d5852289fddcfc4ab4088beacc6efbc365013eedc9a7d43070122cd2bfc96a7eb4802bb672b613f3d11ea108ef7cd03985e46340d4cf9fb4e91a5782a4c0c013e1670af8b6fd7c6f4b55fd4234bea13cd6bf431d037a315e93787385eefd70e652fa9f86198e74816e203a2425b7c2378984ed3d795719d7c6c05478729ce9e863791e65a33de5cb71bf4d3e9ba7ec7990d0dfcfaaa2a2405a7bad1f267ccdc2116e32d4637ee10912f56dd4f4f6ea9131769c7d818098d3b4a555a8c5b6b4ba601c1e939871d1b2eda01ad1190e7f94817a82474badd634df8bba93ce02d6384aa704476914490e8b648bf1d71b623d03
b97bc33661b6b744426da73cacf104af07eb49d1b0384a49635d1acd332a9047c39000b408f40b86767ada19cc87858134449fcbc43029a9c6192e127638c31b2093fa0c397303c35a0f1ea04a7206822ff05dd277e77d4ae99cffe13b4f56d13cfb54e5ae2af42ce24f1b7247fb91e14b3a829f1be11039978d84c71cde0156a00a042b8b337ccdb28fe7191e51f135b5278852ad749cdde3d90d024e1e20b35bc14fdfea745fe92dd35d13be1b61cf0b00f1d838f854b678d89f813bdc1d89e21fc5d9e4455f6655b8b2216e5826ceff8a2bcef2f0066525ad25384e21664187a6a1e5b9c38e04ab66bc557802ef29e93608cdabe78fe14f32e2c779e090ebce74ba472373857bb76795e81da34a0e637a5c8c4532ac1e0cfc6ca07a55c3f8a3c1d68e2225d48f29ab5da2c6034506bbe7507be4587781b780544bf15570689097ab78f09e2787183e0a6b1e068875cc228722a644d875f8e95f45e02e2f9178afc379c10a3c840eef997b76ec9a0860f0c0361ffd39bda157dacf54eb6197182edf6733b9ca3ef31f997389123f46c5d64a4fdcfadb1840e22a1ea7dddc2ab8be4dda87d4bfb543daa5af34d622b50432a354dde602910708c42661f29131d4d2dbcddb2447cc82e869f42e573d4b55ea21b6a6c8580a6b10a579ebf0b03cfe98a2e7d1e4eb492dcf79c937db4d386bae2b67f167f27d6771584a9a9466a7fc6856c39bde70dfd1451b49ea232f2e2d17fdcb11eec11d089c1ba0d0b11c21783ce42edda7dc9061e04098e81efb402f8f38f1a7c704843f2f865af40dc9e94b539979ba64696d7a284f2f46806decdc4b309aa562be7218e0706b3d075a170e3e9da0a6d8a28ea1afc5171f9ea324ed91cedf6a7fcf6222f65824571281eedf7423c42312c7b2152263cbef56100e8150c445cfac74b5af5c7ab36075fb8ac0b956c693245244fc4b00402323123158e9cdafb5f571090bb96b6cffb5de7ee37778f460c1a1616bef484ae69602e99a7d710b2f941c03849785dbcc85e90e0397b93765c17d097ad5c3de47699883316ab7ef9c3ae2f4a57d08d1ecc41e1d7753a8c04e735cbef995b7f44ac252d26d32c897e8e80965eb202bed3fae87b777f840d1f00cb788854e08f5eb4ab240c7d3ce2d41c532f19ad30f39ab7d1ce280ccb8bd5247d0c54cd066c11f5cbd9d78ac757719abe8f3bbdcc3a699d38ec9d32504f857c6cc8946e8ae67437a1c46e5e21fea38a91a5f357cb948feb892945926e23fd66d0ecfa6d0a5dc8a53ffa69230fda9adcd0cdfabfe56333a07ab711abe5ee78091c64ac2ea1418bf2f4f4f215ccab23123c553f82bc75324147cd0ce30393acf07bb355efca25272bba208d5fe7bd65317c3497d4e1ab551dbdd75fa6c0afaaab61698
3b5db9a343067a1c5f41a74c119d2635eade24e52cc6a5df7256256f00fb6e6b00075bd4570aafe4d382119d5d2aa90b9c6f170ca0a807678227bd953a6d5193a775e70c64d49a17512042fb9d1515be8bded24c25f63548670221598cb295a28dbd5a7bba55603cf0e928eb00aa1d3a10a00050885786b55bf4c80f786ee1d90e904940aa2f38c3d2ac806a2e119ee9dac6936645d2f81af80224060f23d8107f95d8cad4e55e39da0f66a7497150283508b5044bae24faa8bca6fe$c1459799ddcfb4f0da35506f7cb3bef58aceba2d$3", "openwall12345"},
{"$openbsd-softraid$8192$2fada40a4b317c3f8829abc2d046fa33fefc73dab99cc0f68577598ff5d673892145b48afe8eee76baaee531097bb60c6888e74097d56530738c6f572dc1a4e603c7e89631cfcfda6bb8bdcdf681960037c05c8e729b4ecd0c247b6cd504c27002cd8356deadb38c628104e307176218371afac51f7382b7dad8cfb7f65b509a$36ce839df6aaea0b274bed8e15783f57de169fe0361c924e0ad6466c6db52cf41fd07f3848b58e15c5e1647e1b9c513c83b4e09f46e3975d08ec615c74b2b722a7f6f230a94a127a278baaa15b911ad986e7ecc5026f3f012a12ad1b20c1490249ff83c3a782732017e7897e304f4c662b04d47b46b64abc11a98c953220f4210df74351d667a9a12d18883f4e309d1c694488c0cfa817afe354624333172b74d1f7e504d64643106d40cc35059276058b6d653faad6463458529890e109ec4313e3be4782ffb761d0444cf4096e54b663bb2cfe219aed10db7f2dc00aef69b34262cd0ae9c0a59f47c74665adf0a210e78e3b16d57aba9eba2bfc519648ac60941e0d736d6b30b52d8c6ae254127b6c156c5fa0388b3a90a592f26b8a9b66032ebaf2ca536fb8696b2dd4ae1d45488dd6c52bb94777ef2840fda049e0a7bb8b860f1a3b6ca5595076942ca985eefe4c1f97ddc6d16e48627f9f2e2aa31c965e67adf2c7594fcda50f8dde15bcb7045b0bae24b57da6f88154e268ffa68378ef424a5a2b988628c7630c59cd56157ea4e653614f8e55427e39d37d07b43647f8c1bafc6c2bc1c00cb37c13fa25e5205ef2f03b9613f8e4baafc48ad2d0b3feee9cd4807984e9e0650e9f639a45ce165e0478422d1e2aad9a3b056876be4b5219467149599dcc352fd8542ce28dc4f1350713b774c65e3bec345f1925a68afb71688fd683beb7a099f4ac13305abc4e2f40efb3c4147ca8b1b5b58cea0eeee71d2884e96e8c88214c86d326f56a6ede525bccff75784857fab35db71ab2cc6d06017fbc43a5e760cdf83608c47b28c98acfbff8a299ec222231aac7064768220414e829fc464f06b09bf0ef74f8eb356abeb253aaa3d6ee916bc19eacfff70fdc9b2f041bcacf107c8f4ccca06be2ec3fbc2c5734399f160e1f964feba8fce796f20c9c4899ca3e638d12c9a30d06f6cd19fb613be75146053dcfe008a6ff2324440695e70145b1c063fe594ad37f0197b4785cdd81f2de8776d8d7eafbe0bf3533c56dd3fd77456a087b17ce29a15b1df5b2129ba9ef54ffc19b67eb0c6c34024cc3f0fbbf3f81489d10a7d28348ceca62f28b040c9bc32fb9771fbf8ddb5ef34677c13f693318090d4d66b6f507817485ef438072892b10d20982ce778fe410699b4e4ab44647a2cf3af34d465784fbf22a387809adb60248d663
994cf68d5ce12278ae3421c48874808c035a445cafcee5be55e03b1c744c12a29327e3ff2cb5e78f8ccb00a2bef88e122b1381a459351bd45260ffb7b6be334719551750c313adf3a9b777095386e78786837e05c4c56fb5093eef9ba8365a103c78e36755619e60ad6781d4d9d1c2b7ee7b471d4c41e525e08d3a60c98916005c13dbb0afe8c4ccf2160390e7a955f208fcf9d59cdaf28ef917b25f5158755288a6f52ad46bf96e7af2a02a35d582ae8c412810355f6973ee9f99d28112374bbf11dd7fd27f7ff48703b85b1167061555a89a66bfbe89f981ae75171a344c66805e83c4bb306cb91e2b6f05eb2477838724fdef79571584d5b5577edf389b437db2e0e6008c4534b37f925aef1168c53bdbeaeb7538ca0f145db4d24e50af54b5019b7e201480c3869b11aad7ea786c130c9c14de99be26ab22fae57b5dff9d1159fe10188c0a5ed36a442422fd4e8805da3c6dbc16d5434ad2006638ec81904dc34a0f168b76874938ffcc7f1995388362c26259a6765b278895f33999e3d9ca4b4f64862cb99ae14ffb62e71ccf8ec722b65c1b334c44977bc99f5c7036f4fecc30249b5ee1f8ad444164a6c9055915c6454a09a4381874449dfaf9e76711c62e57ef4d247c77dae1cdd9329762b5ae694c7fb9ff8fc97a852e7b189a2423ff72c3c52782a98dea8d9a07a8481062a47762c771d5de93fd37416de39e9337f458ea682147a2f0359512e0d74ce5699563f2a93f88ea5be99e7e4948234e03a015d0c7eeba58e071e98cfeaf4d1dafa06171dc49f16df68cd2a2b280e53c588d91e298bb9de26408e6eebee1e5e55430947109c3c6b85d4919deab602f2b2e156565cbf8f833d7c208ab65f68c1fd4915f06c5034e05b027f0aec1907cca270b07db27aef70aa1f863341eac73f40602213cafa315eb7b04955e552efbdeb6e81b473977a04a92f723dc192b2c0a473b7c418e3fa7e9959be7eb1f801aabaea65e25b6e3035b9f4f6afb57d964d1bda864fad2733e645c11e602d430bd7061c1330b01c4313bc25c9c4b5605828f577f5aebd63482105344b9123781783d79a1e3a758482285b6cab6aabaa78cf1ada7767f717f2a3733de149909d997b3039c0de458bd3806267c63477ec77105266242a85f4127a03dd6fcaaff6e93089468e60c6fe8838c10b9db82c6810c9d11f63ecfb9c8679cedf70b58208274b9d35ac10c8ab85dca8cd0984487ddc923245a78d4c3359f6abb48d4540e7834e85fa290cc7ee12d188d94d816879015ef06383f9bb574fbf76e6ed2fefabe35f1f6ae41942b0229eed7c50df51a79f9566835e324b4450d1e7ec4ca9fc78450370cab0cb65795be6d7ac3e71e1bcab4f28d589ac0f99e0318fbbd96aa30d478a6204888851a66b3a9eb
c85e1e9194d59d15a71658f5fac3ce1a0e99307aaebde657332fa0a2645c5c430b2900726fa245620ae90a7eb3f718f7d576c9d0381f97dad43c7a02644f66966ce68202724d141a5876b7b33dbf65c9ab80000c886128ba8a18f5563b08771979031bc9f8617288a3ce0a9cb1d8cdab427fd8389e16c11b6a7724473658e98b56fb9e7b88120a6dffe198d3b3b5225d0a05132705dc8db6400e1507cc29cdbb9a78412cdb8a4f6bf775000d189a277efddac02dd299e05e3255dba148$c9ed11dcd424349ff64092492e4ff7357ab4d239$1", "openwall123"},
{NULL}
};
/* Per-candidate plaintext passwords, NUL-terminated. */
static char (*key_buffer)[PLAINTEXT_LENGTH + 1];
/* Per-candidate HMAC-SHA1 result, stored as 32-bit words. */
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
/* Parsed salt data for the hash currently being attacked. */
static struct custom_salt {
unsigned int num_iterations; /* KDF iteration count */
unsigned char salt[OPENBSD_SOFTRAID_SALTLENGTH];
unsigned char masked_keys[OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS];
int kdf_type; /* 1 = PBKDF2-SHA1, 3 = bcrypt-pbkdf (see valid()) */
} *cur_salt;
/* Allocate per-candidate buffers; under OpenMP, scale the key counts so
   each thread gets its own chunk of candidates. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
/* min stays at one chunk per thread; max is additionally scaled by OMP_SCALE */
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
key_buffer = mem_calloc(sizeof(*key_buffer), self->params.max_keys_per_crypt);
crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
/* Release the buffers allocated in init(). */
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(key_buffer);
}
/*
 * Validate a ciphertext line of the form:
 *   FORMAT_TAG iterations "$" salt(128B hex) "$" masked-keys(32*64B hex)
 *   "$" HMAC-SHA1(hex) [ "$" kdf-type ]
 * Returns 1 on a well-formed hash, 0 otherwise.
 */
static int valid(char* ciphertext, struct fmt_main *self)
{
char *ctcopy;
char *keeptr;
char *p;
int kdf_type;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
/* work on a copy: strtokm mutates its input */
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
if ((p = strtokm(ctcopy, "$")) == NULL)
goto err;
if (!isdec(p)) /* iterations */
goto err;
if ((p = strtokm(NULL, "$")) == NULL)
goto err;
if (strlen(p) != 2 * 128) /* salt */
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL)
goto err;
if (strlen(p) != 2 * 32 * 64) /* masked keys */
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL)
goto err;
if (strlen(p) != 2 * BINARY_SIZE) /* HMAC-SHA1 */
goto err;
if (!ishexlc(p))
goto err;
/* the kdf field is optional; older hashes omit it */
if ((p = strtokm(NULL, "$")) != NULL) { /* kdf type */
if (strlen(p) != 1)
goto err;
if (!isdec(p))
goto err;
kdf_type = atoi(p);
/* only PBKDF2-SHA1 (1) and bcrypt-pbkdf (3) are handled in crypt_all() */
if (kdf_type !=1 && kdf_type != 3)
goto err;
}
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
/* Install the salt chosen by the cracker loop for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/*
 * Parse a (already valid()-checked) ciphertext into the static
 * custom_salt record returned to the caller.
 */
static void* get_salt(char *ciphertext)
{
static struct custom_salt cs;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
memset(&cs, 0, sizeof(cs));
ctcopy += FORMAT_TAG_LEN;
p = strtokm(ctcopy, "$"); /* iterations */
cs.num_iterations = atoi(p);
p = strtokm(NULL, "$"); /* salt */
/* hex-decode two nibbles per output byte */
for (i = 0; i < OPENBSD_SOFTRAID_SALTLENGTH ; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$"); /* masked keys */
for (i = 0; i < OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS; i++)
cs.masked_keys[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$"); /* binary hash */
p = strtokm(NULL, "$"); /* kdf type */
/* hashes without the trailing field default to PBKDF2-SHA1 (type 1) */
if (p)
cs.kdf_type = atoi(p);
else
cs.kdf_type = 1;
MEM_FREE(keeptr);
return (void *)&cs;
}
/*
 * Extract the binary HMAC-SHA1 from the hash field.  Normally that is the
 * last '$'-separated field; when a one-character kdf-type field trails it,
 * the copy is truncated at the final '$' so strrchr() finds the hash.
 */
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy; /* forces alignment of c[] */
} buf;
unsigned char *out = buf.c;
char *p, *cc = NULL;
int i;
p = strrchr(ciphertext, '$') + 1;
if (strlen(p) == 1) { // hack, last field is kdf type
cc = strdup(ciphertext);
/* overwrite the final '$' (at len-2, just before the kdf digit) with NUL */
cc[strlen(ciphertext) - 2] = 0;
p = strrchr(cc, '$') + 1;
}
/* hex-decode BINARY_SIZE bytes */
for (i = 0; i < BINARY_SIZE; i++) {
out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
if (cc)
MEM_FREE(cc);
return out;
}
/*
 * For each candidate password: derive a 256-bit mask key with the salt's
 * KDF (PBKDF2-SHA1 or bcrypt-pbkdf), AES-256-decrypt the masked sector
 * keys, then HMAC-SHA1 the decrypted keys under SHA1(mask_key) and store
 * the result in crypt_out[].
 *
 * NOTE(review): the chunking `for` is inside #ifdef _OPENMP, so a
 * non-OpenMP build executes the body once (index 0 only) -- presumably
 * count <= MAX_KEYS_PER_CRYPT there; confirm against the format limits.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
AES_KEY akey;
unsigned char mask_key[MAX_KEYS_PER_CRYPT][32];
unsigned char unmasked_keys[OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS];
unsigned char hashed_mask_key[20];
int i, j;
/* derive masking key from password */
if (cur_salt->kdf_type == 1) {
#ifdef SSE_GROUP_SZ_SHA1
/* SIMD path: batch SSE_GROUP_SZ_SHA1 passwords per PBKDF2 call */
int lens[SSE_GROUP_SZ_SHA1];
unsigned char *pin[SSE_GROUP_SZ_SHA1], *pout[SSE_GROUP_SZ_SHA1];
for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
lens[i] = strlen(key_buffer[index+i]);
pin[i] = (unsigned char*)key_buffer[index+i];
pout[i] = mask_key[i];
}
pbkdf2_sha1_sse((const unsigned char **)pin, lens,
cur_salt->salt, OPENBSD_SOFTRAID_SALTLENGTH,
cur_salt->num_iterations, (unsigned char**)pout,
32, 0);
#else
/* scalar path: one PBKDF2 call per password */
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
pbkdf2_sha1((const unsigned char*)(key_buffer[index+i]),
strlen(key_buffer[index+i]),
cur_salt->salt, OPENBSD_SOFTRAID_SALTLENGTH,
cur_salt->num_iterations, mask_key[i],
32, 0);
}
#endif
} else if (cur_salt->kdf_type == 3) {
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
bcrypt_pbkdf((const char*)key_buffer[index+i],
strlen(key_buffer[index+i]),
cur_salt->salt, OPENBSD_SOFTRAID_SALTLENGTH,
mask_key[i], 32, cur_salt->num_iterations);
}
}
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
/* decrypt sector keys */
AES_set_decrypt_key(mask_key[i], 256, &akey);
for (j = 0; j < (OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS) / 16; j++) {
AES_decrypt(&cur_salt->masked_keys[16*j], &unmasked_keys[16*j], &akey);
}
/* get SHA1 of mask_key */
SHA1(mask_key[i], 32, hashed_mask_key);
hmac_sha1(hashed_mask_key, OPENBSD_SOFTRAID_MACLENGTH,
unmasked_keys, OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS,
(unsigned char*)crypt_out[index+i], 20);
}
}
return count;
}
/* Quick check: does any computed hash share its first 32 bits with binary? */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (((uint32_t*)binary)[0] == crypt_out[i][0])
			return 1;
	}
	return 0;
}
/* Compare one candidate's first 32 hash bits against binary. */
static int cmp_one(void *binary, int index)
{
	return ((uint32_t*)binary)[0] == crypt_out[index][0];
}
/* Full 20-byte comparison: re-derive the binary from the source line. */
static int cmp_exact(char *source, int index)
{
void *bin = get_binary(source);
return !memcmp(bin, crypt_out[index], 20);
}
/* Store a candidate password (truncated to PLAINTEXT_LENGTH). */
static void jtr_set_key(char* key, int index)
{
strnzcpyn(key_buffer[index], key, sizeof(*key_buffer));
}
/* Return the stored candidate password. */
static char *get_key(int index)
{
return key_buffer[index];
}
/* Report the KDF type (1 = PBKDF2-SHA1, 3 = bcrypt-pbkdf) as a tunable cost. */
static unsigned int get_kdf_type(void *salt)
{
	struct custom_salt *cs = (struct custom_salt*)salt;
	return (unsigned int)cs->kdf_type;
}
/* Report the KDF iteration count as a tunable cost. */
static unsigned int get_iteration_count(void *salt)
{
	struct custom_salt *cs = (struct custom_salt*)salt;
	return cs->num_iterations;
}
/* John the Ripper format descriptor: parameters block, then method table. */
struct fmt_main fmt_openbsd_softraid = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{
/* tunable cost names, matching the accessor order below */
"kdf",
"iteration count",
},
{ FORMAT_TAG },
tests_openbsdsoftraid
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
/* tunable cost accessors */
get_kdf_type,
get_iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
jtr_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
dotProduct.h | #pragma once
#include <vector>
#include <algorithm>
#include <memory>
#include <omp.h>
#include "_cuda.h"
#include "ceilDiv.h"
#include "sum.h"
using std::vector;
using std::unique_ptr;
using std::max;
// Finds sum of element-by-element product of 2 vectors.
template <class T>
T dotProduct(T *x, T *y, int N) {
  T acc = T();
  int k = 0;
  while (k < N) {
    acc += x[k] * y[k];
    ++k;
  }
  return acc;
}
// Vector overload: accumulate the element-wise products directly over the
// vectors' contents (length taken from x, as in the pointer overload).
template <class T>
T dotProduct(std::vector<T>& x, std::vector<T>& y) {
  T total = T();
  for (std::size_t i = 0; i < x.size(); ++i)
    total += x[i] * y[i];
  return total;
}
// OpenMP variant: the element-wise products are summed with a
// parallel-for reduction over the accumulator.
template <class T>
T dotProductOmp(T *x, T *y, int N) {
  T total = T();
  #pragma omp parallel for reduction (+:total)
  for (int k=0; k<N; k++)
    total += x[k] * y[k];
  return total;
}
// Vector overload: delegate to the raw-pointer OpenMP version over the
// vectors' backing storage.
template <class T>
T dotProductOmp(std::vector<T>& x, std::vector<T>& y) {
  int n = (int) x.size();
  return dotProductOmp(x.data(), y.data(), n);
}
// Device-side strided accumulation: starting at element i, stride by DI
// (the caller passes the total thread count, making this a grid-stride
// loop) and sum the products into a thread-private accumulator.
template <class T>
__device__ T dotProductKernelLoop(T *x, T *y, int N, int i, int DI) {
T a = T();
for (; i<N; i+=DI)
a += x[i] * y[i];
return a;
}
// Per-block partial dot product: each thread fills one shared-memory slot
// with its strided partial sum, the block reduces them (sumKernelReduce),
// and thread 0 writes the block's total to a[b].
template <class T>
__global__ void dotProductKernel(T *a, T *x, T *y, int N) {
DEFINE(t, b, B, G); // presumably thread idx, block idx, block dim, grid dim (macro from _cuda.h) -- confirm
__shared__ T cache[_THREADS];
cache[t] = dotProductKernelLoop(x, y, N, B*b+t, G*B);
sumKernelReduce(cache, B, t);
if (t == 0) a[b] = cache[0];
}
template <class T>
T dotProductCuda(T *x, T *y, int N) {
int threads = _THREADS;
int blocks = min(ceilDiv(N, threads), _BLOCKS);
size_t X1 = N * sizeof(T);
size_t A1 = blocks * sizeof(T);
unique_ptr<T> a(new T[A1]);
T *xD, *yD, *aD;
TRY( cudaMalloc(&xD, X1) );
TRY( cudaMalloc(&yD, X1) );
TRY( cudaMalloc(&aD, A1) );
TRY( cudaMemcpy(xD, x, X1, cudaMemcpyHostToDevice) );
TRY( cudaMemcpy(yD, y, X1, cudaMemcpyHostToDevice) );
dotProductKernel<<<blocks, threads>>>(aD, xD, yD, N);
TRY( cudaMemcpy(a.get(), aD, A1, cudaMemcpyDeviceToHost) );
TRY( cudaFree(yD) );
TRY( cudaFree(xD) );
TRY( cudaFree(aD) );
return sum(a.get(), blocks);
}
// Vector overload of the CUDA dot product; length taken from x
// (assumes y has at least x.size() elements -- TODO confirm at call sites).
template <class T>
T dotProductCuda(vector<T>& x, vector<T>& y) {
return dotProductCuda(x.data(), y.data(), x.size());
}
|
plot.h | #ifndef OPENMC_PLOT_H
#define OPENMC_PLOT_H
#include <unordered_map>
#include <sstream>
#include "pugixml.hpp"
#include "xtensor/xarray.hpp"
#include "hdf5.h"
#include "openmc/position.h"
#include "openmc/constants.h"
#include "openmc/cell.h"
#include "openmc/error.h"
#include "openmc/geometry.h"
#include "openmc/particle.h"
#include "openmc/xml_interface.h"
#include "openmc/random_lcg.h"
namespace openmc {
//===============================================================================
// Global variables
//===============================================================================
class Plot;
namespace model {
extern std::vector<Plot> plots; //!< Plot instance container
extern std::unordered_map<int, int> plot_map; //!< map of plot ids to index
extern uint64_t plotter_prn_seeds[N_STREAMS]; // Random number seeds used for plotter
extern int plotter_stream; // Stream index used by the plotter
} // namespace model
//===============================================================================
// RGBColor holds color information for plotted objects
//===============================================================================
struct RGBColor {
  // Constructors
  RGBColor() : red(0), green(0), blue(0) { };
  RGBColor(const int v[3]) : red(v[0]), green(v[1]), blue(v[2]) { };
  RGBColor(int r, int g, int b) : red(r), green(g), blue(b) { };

  //! Construct from a vector that must hold exactly 3 entries: {r, g, b}.
  //! \throws std::out_of_range if the vector size is not 3
  RGBColor(const std::vector<int> &v) {
    if (v.size() != 3) {
      throw std::out_of_range("Incorrect vector size for RGBColor.");
    }
    red = v[0];
    green = v[1];
    blue = v[2];
  }

  //! Component-wise equality.
  //! FIX: declared const so const instances (e.g. the WHITE/RED constants
  //! declared below) can appear on the left-hand side of ==.
  bool operator ==(const RGBColor& other) const {
    return red == other.red && green == other.green && blue == other.blue;
  }

  // Members
  uint8_t red, green, blue;
};
// some default colors
const RGBColor WHITE {255, 255, 255};
const RGBColor RED {255, 0, 0};
// 2D array of pixel colors forming a plot image
typedef xt::xtensor<RGBColor, 2> ImageData;
//! Accumulates per-pixel cell/material id results for a plot.
struct IdData {
// Constructor
IdData(size_t h_res, size_t v_res);
// Methods
//! Record values at pixel (y, x) derived from particle p at the given level
void set_value(size_t y, size_t x, const Particle& p, int level);
//! Mark pixel (y, x) as lying in overlapping cells
void set_overlap(size_t y, size_t x);
// Members
xt::xtensor<int32_t, 3> data_; //!< 2D array of cell & material ids
};
//! Accumulates per-pixel temperature/density results for a plot.
struct PropertyData {
// Constructor
PropertyData(size_t h_res, size_t v_res);
// Methods
//! Record values at pixel (y, x) derived from particle p at the given level
void set_value(size_t y, size_t x, const Particle& p, int level);
//! Mark pixel (y, x) as lying in overlapping cells
void set_overlap(size_t y, size_t x);
// Members
xt::xtensor<double, 3> data_; //!< 2D array of temperature & density data
};
//! Plot output mode: 2D slice image or 3D voxel file
enum class PlotType {
slice = 1,
voxel = 2
};
//! Orientation of the slice plane
enum class PlotBasis {
xy = 1,
xz = 2,
yz = 3
};
//! Whether pixels are colored by cell or by material
enum class PlotColorBy {
cells = 0,
mats = 1
};
//===============================================================================
// Plot class
//===============================================================================
//! Geometric description of a plot view; get_map() rasterizes the
//! geometry over this view into a data object of type T.
class PlotBase {
public:
//! Rasterize the view; T is IdData or PropertyData
template<class T> T get_map() const;
// Members
public:
Position origin_; //!< Plot origin in geometry
Position width_; //!< Plot width in geometry
PlotBasis basis_; //!< Plot basis (XY/XZ/YZ)
std::array<size_t, 3> pixels_; //!< Plot size in pixels
bool color_overlaps_; //!< Show overlapping cells?
int level_; //!< Plot universe level
};
//! Rasterize the plot view: for every pixel, place a particle at the
//! pixel center, locate its cell, and record the result in a T
//! (IdData or PropertyData).  Rows are distributed across OpenMP threads.
template<class T>
T PlotBase::get_map() const {
size_t width = pixels_[0];
size_t height = pixels_[1];
// get pixel size
double in_pixel = (width_[0])/static_cast<double>(width);
double out_pixel = (width_[1])/static_cast<double>(height);
// size data array
T data(width, height);
// setup basis indices and initial position centered on pixel
// in_i/out_i select which Position components vary horizontally/vertically
int in_i, out_i;
Position xyz = origin_;
switch(basis_) {
case PlotBasis::xy :
in_i = 0;
out_i = 1;
break;
case PlotBasis::xz :
in_i = 0;
out_i = 2;
break;
case PlotBasis::yz :
in_i = 1;
out_i = 2;
break;
default:
UNREACHABLE();
}
// set initial position: top-left pixel center
xyz[in_i] = origin_[in_i] - width_[0] / 2. + in_pixel / 2.;
xyz[out_i] = origin_[out_i] + width_[1] / 2. - out_pixel / 2.;
// arbitrary direction
Direction dir = {0.7071, 0.7071, 0.0};
#pragma omp parallel
{
// each thread uses a private particle to probe the geometry
Particle p;
p.r() = xyz;
p.u() = dir;
p.coord_[0].universe = model::root_universe;
int level = level_;
int j{};
// NOTE(review): y/x are int while height/width are size_t -- fine for
// realistic resolutions, but a signed/unsigned mix worth confirming.
#pragma omp for
for (int y = 0; y < height; y++) {
p.r()[out_i] = xyz[out_i] - out_pixel * y;
for (int x = 0; x < width; x++) {
p.r()[in_i] = xyz[in_i] + in_pixel * x;
p.n_coord_ = 1;
// local variables
bool found_cell = find_cell(&p, 0);
j = p.n_coord_ - 1;
// a non-negative level_ overrides the particle's coordinate depth
if (level >=0) {j = level + 1;}
if (found_cell) {
data.set_value(y, x, p, j);
}
if (color_overlaps_ && check_cell_overlap(&p, false)) {
data.set_overlap(y, x);
}
} // inner for
} // outer for
} // omp parallel
return data;
}
//! A single user-requested plot, constructed from a <plot> XML node.
class Plot : public PlotBase {
public:
// Constructor
Plot(pugi::xml_node plot);
// Methods -- each parses one aspect of the XML node into the members below
private:
void set_id(pugi::xml_node plot_node);
void set_type(pugi::xml_node plot_node);
void set_output_path(pugi::xml_node plot_node);
void set_bg_color(pugi::xml_node plot_node);
void set_basis(pugi::xml_node plot_node);
void set_origin(pugi::xml_node plot_node);
void set_width(pugi::xml_node plot_node);
void set_universe(pugi::xml_node plot_node);
void set_default_colors(pugi::xml_node plot_node);
void set_user_colors(pugi::xml_node plot_node);
void set_meshlines(pugi::xml_node plot_node);
void set_mask(pugi::xml_node plot_node);
void set_overlap_color(pugi::xml_node plot_node);
// Members
public:
int id_; //!< Plot ID
PlotType type_; //!< Plot type (Slice/Voxel)
PlotColorBy color_by_; //!< Plot coloring (cell/material)
int meshlines_width_; //!< Width of lines added to the plot
int index_meshlines_mesh_ {-1}; //!< Index of the mesh to draw on the plot
RGBColor meshlines_color_; //!< Color of meshlines on the plot
RGBColor not_found_ {WHITE}; //!< Plot background color
RGBColor overlap_color_ {RED}; //!< Plot overlap color
std::vector<RGBColor> colors_; //!< Plot colors
std::string path_plot_; //!< Plot output filename
};
//===============================================================================
// Non-member functions
//===============================================================================
//! Add mesh lines to image data of a plot object
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void draw_mesh_lines(Plot pl, ImageData& data);
//! Write a ppm image to file using a plot object's image data
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void output_ppm(Plot pl, const ImageData& data);
//! Initialize a voxel file
//! \param[in] id of an open hdf5 file
//! \param[in] dimensions of the voxel file (dx, dy, dz)
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to memory space of voxel data
void voxel_init(hid_t file_id, const hsize_t* dims, hid_t* dspace,
hid_t* dset, hid_t* memspace);
//! Write a section of the voxel data to hdf5
//! \param[in] voxel slice
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to data to write
void voxel_write_slice(int x, hid_t dspace, hid_t dset,
hid_t memspace, void* buf);
//! Close voxel file entities
//! \param[in] data space to close
//! \param[in] dataset to close
//! \param[in] memory space to close
void voxel_finalize(hid_t dspace, hid_t dset, hid_t memspace);
//===============================================================================
// External functions
//===============================================================================
//! Read plot specifications from a plots.xml file
void read_plots_xml();
//! Create a ppm image for a plot object
//! \param[in] plot object
void create_ppm(Plot pl);
//! Create an hdf5 voxel file for a plot object
//! \param[in] plot object
void create_voxel(Plot pl);
//! Create a randomly generated RGB color
//! \return RGBColor with random value
RGBColor random_color();
} // namespace openmc
#endif // OPENMC_PLOT_H
|
homomorphic_comparison_modified.c | struct plaintext_keyword
{
int bits[TABLE_CONTENT_SIZE][1024];
};
/* Zero every bit slot; operates on (and returns) a by-value copy. */
struct plaintext_keyword init_keyword(struct plaintext_keyword ptext)
{
	int row, col;

	for (row = 0; row < TABLE_CONTENT_SIZE; row++) {
		for (col = 0; col < 1024; col++) {
			ptext.bits[row][col] = 0;
		}
	}
	return ptext;
}
/* Plaintext data word: same layout as plaintext_keyword. */
struct plaintext
{
int bits[TABLE_CONTENT_SIZE][1024];
};
/* Zero every bit slot; operates on (and returns) a by-value copy. */
struct plaintext init_plaintext(struct plaintext ptext)
{
	int row, col;

	for (row = 0; row < TABLE_CONTENT_SIZE; row++) {
		for (col = 0; col < 1024; col++) {
			ptext.bits[row][col] = 0;
		}
	}
	return ptext;
}
/* One FV-encrypted bit: ciphertext polynomial pair (c0, c1),
 * each 5 limbs of 1024 coefficients. */
struct encrypted_bit
{
long long int c0[5][1024], c1[5][1024];
};
/* Encrypted keyword: one encrypted bit per keyword bit. */
struct encrypted_keyword
{
struct encrypted_bit bits[TABLE_CONTENT_SIZE];
};
/* Encrypted data word: same layout as encrypted_keyword. */
struct encrypted_data
{
struct encrypted_bit bits[TABLE_CONTENT_SIZE];
};
/* A 5-bit window of encrypted bits (name says 8-bit; the array holds 5). */
struct encrypted_data_8bit
{
struct encrypted_bit bits[5];
};
/* One precomputed window-table entry: one encrypted bit per keyword window. */
struct window_table_entry
{
struct encrypted_bit bits[2];
};
/* Table over all 32 possible 5-bit window values. */
struct window_table
{
struct window_table_entry window_table_entries[32];
};
/* Deep-copy an encrypted bit: both ciphertext polynomials c0 and c1. */
void copy_encrypted_bit(struct encrypted_bit *bit_in, struct encrypted_bit *bit_out)
{
	int limb, coeff;

	for (limb = 0; limb < 5; limb++) {
		for (coeff = 0; coeff < 1024; coeff++) {
			bit_out->c0[limb][coeff] = bit_in->c0[limb][coeff];
			bit_out->c1[limb][coeff] = bit_in->c1[limb][coeff];
		}
	}
}
/* Deep-copy an encrypted keyword, bit by bit. */
void copy_encrypted_keyword(struct encrypted_keyword *in, struct encrypted_keyword *out)
{
int k;
for(k=0; k<TABLE_CONTENT_SIZE; k++)
copy_encrypted_bit(&in->bits[k], &out->bits[k]);
}
/* Deep-copy an encrypted data word, bit by bit. */
void copy_encrypted_data(struct encrypted_data *in, struct encrypted_data *out)
{
int k;
for(k=0; k<TABLE_CONTENT_SIZE; k++)
copy_encrypted_bit(&in->bits[k], &out->bits[k]);
}
/* Presumably precomputed encryptions of constant data words (one /
 * all-ones) -- initialized elsewhere; confirm against the setup code. */
struct encrypted_data ed_const_one, ed_const_allone;
/* FV-encrypt each keyword bit; returns the filled-in by-value copy. */
struct encrypted_keyword encrypt_keyword(struct plaintext_keyword ptext, struct encrypted_keyword ed)
{
int i;
for(i=0; i<TABLE_CONTENT_SIZE; i++)
FV_enc_q(ptext.bits[i], ed.bits[i].c0, ed.bits[i].c1);
return(ed);
}
/* FV-encrypt each data bit; returns the filled-in by-value copy. */
struct encrypted_data encrypt_data(struct plaintext ptext, struct encrypted_data ed)
{
int i;
for(i=0; i<TABLE_CONTENT_SIZE; i++)
FV_enc_q(ptext.bits[i], ed.bits[i].c0, ed.bits[i].c1);
return(ed);
}
struct plaintext_keyword decrypt_keyword(struct plaintext_keyword ptext, struct encrypted_keyword ed)
{
int i;
for(i=0; i<10; i++)
FV_dec_q(ptext.bits[i], ed.bits[i].c0, ed.bits[i].c1);
return(ptext);
}
/* Decrypt an encrypted data word bit-by-bit (MSB first) and pack the
 * recovered bits into bytes of decoded_string (8 bits per output byte). */
void decrypt_data(struct encrypted_data ed, unsigned char decoded_string[])
{
	int i;
	struct plaintext ptext;
	unsigned char decoded_char = 0;

	/* FIX: init_plaintext() takes its argument by value and returns the
	 * zeroed copy, so the original `init_plaintext(ptext);` discarded the
	 * result and left ptext uninitialized.  Assign the return value. */
	ptext = init_plaintext(ptext);
	for (i = TABLE_CONTENT_SIZE - 1; i >= 0; i--) {
		FV_dec_q(ptext.bits[0], ed.bits[i].c0, ed.bits[i].c1);
		decoded_char = 2 * decoded_char + ptext.bits[0][0];
		if (i % 8 == 0) {
			decoded_string[i / 8] = decoded_char;
			decoded_char = 0;
		}
	}
}
/* Fixed encryptions of plaintext bits 0 and 1 (presumably initialized elsewhere -- confirm). */
struct encrypted_bit encryption_of_bit_zero, encryption_of_bit_one;
/* Shared window table filled by homomorphic_search_precomputation(). */
struct window_table wt;
/*
 * Precompute one 5-bit window of the keyword into the shared window table:
 * for each of the 32 possible plaintext window values, homomorphically add
 * each window bit to the matching keyword bit (recrypting after each add),
 * then multiply the five sums into a single encrypted bit stored in
 * wt.window_table_entries[i].bits[index].
 */
void homomorphic_search_precomputation(struct encrypted_keyword ed, int index)
{
int i, j, bit;
struct encrypted_data_8bit window; // 8-bit windows are formed for all possible combinations of the table_entry bits
struct encrypted_data_8bit result; // is the addition of (window + ed_window)
struct encrypted_bit multiplication_result; // is the multiplication of the bits of result
struct encrypted_data_8bit window_ed;
int thread_num; /* NOTE(review): unused here */
// copy of the bits of ed in window_ed.
for(i=0; i<5; i++)
copy_encrypted_bit(&ed.bits[5*index+i], &window_ed.bits[i]);
//#pragma omp parallel for private(j, bit, window, result, multiplication_result)
for(i=0; i<32; i++)
{
/* build the encrypted constant window for value i (LSB first) */
for(j=0; j<5; j++)
{
bit = (i>>j)%2;
if(bit==0) copy_encrypted_bit(&encryption_of_bit_zero, &window.bits[j]);
else copy_encrypted_bit(&encryption_of_bit_one, &window.bits[j]);
}
/* result[j] = window_ed[j] + window[j], recrypted to control noise */
for(j=0; j<5; j++)
{
FV_add(window_ed.bits[j].c0, window_ed.bits[j].c1, window.bits[j].c0, window.bits[j].c1, result.bits[j].c0, result.bits[j].c1);
if(IMPLEMENTATION_TYPE==0)
FV_recrypt1_HW(result.bits[j].c0, result.bits[j].c1);
else
FV_recrypt1(result.bits[j].c0, result.bits[j].c1);
}
/* fold the five sums into one bit by repeated multiplication */
for(j=1; j<5; j++)
{
FV_mul(result.bits[j-1].c0, result.bits[j-1].c1, result.bits[j].c0, result.bits[j].c1, multiplication_result.c0, multiplication_result.c1);
if(IMPLEMENTATION_TYPE==0)
FV_recrypt1_HW(multiplication_result.c0, multiplication_result.c1);
else
FV_recrypt1(multiplication_result.c0, multiplication_result.c1);
if(j<4) copy_encrypted_bit(&multiplication_result, &result.bits[j]);
}
copy_encrypted_bit(&multiplication_result, &wt.window_table_entries[i].bits[index]);
}
}
/*
 * Encrypted table lookup: precompute the two 5-bit keyword windows, then
 * for every (window0, window1) pair multiply the two table-entry bits to
 * form an encrypted selector, and accumulate the selected table row bits
 * into per-thread accumulators that are finally summed into acc_sum.
 */
struct encrypted_data homomorphic_search(struct encrypted_keyword keyword)
{
int index;
int thread_num;
int i, j, k, window_index0, window_index1, window_index2, window_index3;
unsigned int table_content; /* NOTE(review): unused, as are window_index2/3 */
unsigned char table_row[GENOMIC_STRING_LENGTH];
struct encrypted_bit multiplication_result0, multiplication_result1; // is the multiplication of the bits of result
struct encrypted_data acc[THREADS]; // this accumulates the sum of encrypted contents
struct encrypted_data acc_sum;
printf("\n[SERVER] Performing encrypted search ...\n");
/* fill the shared window table for both 5-bit keyword windows */
for(index=0; index<2; index++)
homomorphic_search_precomputation(keyword, index);
//printf("Precomp done\n");
struct plaintext ptext;
/* initialize all per-thread accumulators to encryptions of zero */
for(i=0; i<THREADS; i++)
{
//printf("i=%d\n", i);
init_plaintext(ptext);
acc[i] = encrypt_data(ptext, acc[i]);
}
ptext=init_plaintext(ptext);
acc_sum = encrypt_data(ptext, acc_sum);
/* the pragma applies only to the next statement: each thread writes i */
#pragma omp parallel
i = omp_get_num_threads();
//printf("num of threads %d\n", i);
// searching starts with the table entries
#pragma omp parallel for private(window_index0, window_index1, window_index2, window_index3, multiplication_result0, multiplication_result1, table_content, j, k, thread_num, table_row)
for(i=0; i<256*4; i++) // 256*4 = 1024 = 32*32 combinations of the two 5-bit window indices
{
thread_num = omp_get_thread_num();
window_index0 = i & 31;
window_index1 = (i>>5) & 31;
// multiplication_result0 = window_index0 & window_index1
FV_mul(wt.window_table_entries[window_index1].bits[1].c0, wt.window_table_entries[window_index1].bits[1].c1,
wt.window_table_entries[window_index0].bits[0].c0, wt.window_table_entries[window_index0].bits[0].c1,
multiplication_result0.c0, multiplication_result0.c1);
table(i, table_row);
/* add the selector into the accumulator wherever the row has a 1 bit */
for(j=0; j<GENOMIC_STRING_LENGTH; j++)
{
for(k=0; k<8; k++)
{
if( (table_row[j]>>k)%2 == 1 )
FV_add( acc[thread_num].bits[j*8+k].c0, acc[thread_num].bits[j*8+k].c1,
multiplication_result0.c0, multiplication_result0.c1,
acc[thread_num].bits[j*8+k].c0, acc[thread_num].bits[j*8+k].c1);
}
}
}
/* serial reduction of the per-thread accumulators */
for(i=0; i<THREADS; i++)
{
for(j=0; j<TABLE_CONTENT_SIZE; j++)
{
FV_add(acc[i].bits[j].c0, acc[i].bits[j].c1, acc_sum.bits[j].c0, acc_sum.bits[j].c1, acc_sum.bits[j].c0, acc_sum.bits[j].c1);
}
}
printf("\n[SERVER] Encrypted Search done\n");
return(acc_sum);
}
|
matrix_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file matrix_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <algorithm>
#include <utility>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "../channel_op_common.h"
#include "../mxnet_op.h"
#include "broadcast_reduce_op.h"
#include "./init_op.h"
#include "../../common/static_array.h"
#include "./slice-inl.h"
#if MXNET_USE_CUDA
#include <thrust/device_vector.h>
#endif
#ifdef __CUDACC__
#include "./pseudo2DTranspose_op-inl.cuh"
#endif
namespace mxnet {
namespace op {
struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
mxnet::TShape target_shape; // deprecated; use `shape`
bool keep_highest; // deprecated; only meaningful with target_shape
mxnet::Tuple<int> shape; // target spec; may contain 0/-1/-2/-3/-4 codes
bool reverse; // interpret `shape` right-to-left
DMLC_DECLARE_PARAMETER(ReshapeParam) {
DMLC_DECLARE_FIELD(shape)
.set_default(mxnet::Tuple<int>())
.describe("The target shape");
DMLC_DECLARE_FIELD(reverse)
.set_default(false)
.describe("If true then the special values are inferred from right to left");
DMLC_DECLARE_FIELD(target_shape)
.set_default(mxnet::TShape(0, -1))
.describe("(Deprecated! Use ``shape`` instead.) "
"Target new shape. One and only one dim can be 0, "
"in which case it will be inferred from the rest of dims");
DMLC_DECLARE_FIELD(keep_highest).set_default(false)
.describe("(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged."
"If set to true, then the first dim in target_shape is ignored,"
"and always fixed as input");
}
// Equality over all fields, used for parameter caching/hashing.
bool operator==(const ReshapeParam &other) const {
return this->target_shape == other.target_shape &&
this->keep_highest == other.keep_highest &&
this->shape == other.shape &&
this->reverse == other.reverse;
}
};
/*!
 * \brief Compute the Reshape output shape from the user spec.
 * Special codes in `shape`: 0 = copy dim from source; -1 = infer one dim
 * from the total size; -2 = copy all remaining source dims; -3 = merge the
 * next two source dims; -4 = split one source dim into the following two
 * spec dims (one of which may be -1).
 * \param shape user-provided shape tuple
 * \param dshape input data shape
 * \param reverse if true, process the spec right-to-left
 * \return the inferred output shape (entries may stay -1 if unknown)
 */
template<typename IType>
inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape,
const mxnet::TShape& dshape, bool reverse) {
std::vector<IType> dshape_vec;
std::vector<IType> param_shape_vec(shape.begin(), shape.end());
for (int i = 0; i < dshape.ndim(); ++i) {
dshape_vec.push_back(dshape[i]);
}
std::vector<IType> tmp;
size_t src_idx = 0; // next unconsumed source dim
int inf_idx = -1;   // position of the single -1 to infer, if any
if (reverse) {
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
}
auto dshape_len = dshape_vec.size();
auto params_len = param_shape_vec.size();
for (size_t i = 0; i < params_len; ++i) {
IType proposed_dim = param_shape_vec[i];
if (proposed_dim == 0) {
// keep same
CHECK_LT(src_idx, dshape_len);
tmp.push_back(dshape_vec[src_idx++]);
} else if (proposed_dim == -1) {
// infer
CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred";
inf_idx = i;
tmp.push_back(1); // placeholder, fixed up after the loop
src_idx++;
} else if (proposed_dim == -2) {
// copy all remaining dims from source
while (src_idx < dshape_len) {
const int dn = dshape_vec[src_idx++];
tmp.push_back(dn);
}
} else if (proposed_dim == -3) {
// merge two dims from source
CHECK_LT(src_idx, dshape_len-1);
const int d1 = dshape_vec[src_idx++];
const int d2 = dshape_vec[src_idx++];
if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
tmp.push_back(-1);
} else {
tmp.push_back(d1 * d2);
}
} else if (proposed_dim == -4) {
// split the source dim s into two dims
// read the left dim and then the right dim (either can be -1)
CHECK_LT(i + 2, params_len);
CHECK_LT(src_idx, dshape_len);
const int d0 = dshape_vec[src_idx++];
IType d1 = param_shape_vec[++i];
IType d2 = param_shape_vec[++i];
CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
if (d1 == -1 && d0 >= 0) d1 = d0 / d2; // d0 must be known to do this
if (d2 == -1 && d0 >= 0) d2 = d0 / d1; // d0 must be known to do this
CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1)) <<
"Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
tmp.push_back(d1);
tmp.push_back(d2);
} else {
// greater than 0, new shape
tmp.push_back(proposed_dim);
src_idx++;
}
}
// resolve the single inferred dim from the known total size
if (inf_idx >= 0) {
if (shape_is_known(dshape)) {
IType new_size = 1;
for (IType x : tmp) new_size *= x;
tmp[inf_idx] = dshape.Size() / new_size;
} else {
tmp[inf_idx] = -1;
}
}
if (reverse) {
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(tmp.begin(), tmp.end());
}
mxnet::TShape oshape(tmp.begin(), tmp.end());
return oshape;
}
/*!
 * \brief Fill in at most one unknown dim of `in` using the known total
 * size of `out`.  Returns true when `in` is (or becomes) fully known,
 * false if `out` is unknown or `in` has more than one unknown dim.
 */
inline bool ReverseReshapeInferShape(mxnet::TShape *in, const mxnet::TShape& out) {
if (shape_is_known(*in) && shape_is_known(out)) {
return true;
} else if (!shape_is_known(out)) {
return false;
} else {
int zero_axis = -1; // index of the single unknown dim in `in`
int known_dim_size_prod = 1;
for (int i = 0; i < in->ndim(); i++) {
if (!mxnet::dim_size_is_known(*in, i)) {
if (zero_axis != -1)
return false; // more than 1 zero found.
else
zero_axis = i;
} else {
known_dim_size_prod *= (*in)[i];
}
}
// exactly one unknown dim here (otherwise one of the branches above fired)
(*in)[zero_axis] = out.Size() / known_dim_size_prod;
return true;
}
}
/*!
 * \brief Shape inference for Reshape: derive the output shape from either
 * the `shape` spec, the deprecated `target_shape`, or (when neither is
 * set) backward from an already-known output shape.
 */
inline bool ReshapeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape &dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape)) return false;
mxnet::TShape oshape;
if (param_.shape.ndim() != 0) {
oshape = InferReshapeShape(param_.shape, dshape, param_.reverse);
} else if (param_.target_shape.ndim() != -1) {
// deprecated path: at most one 0 entry in target_shape is inferred
LOG(INFO) << "Using target_shape will be deprecated.";
oshape = param_.target_shape;
int neg_count = 0;
index_t inf_idx = 0;
index_t start_idx = param_.keep_highest ? 1 : 0;
if (param_.keep_highest) {
oshape[0] = dshape[0];
}
for (int i = start_idx; i < oshape.ndim(); ++i) {
if (oshape[i] == 0) {
neg_count++;
inf_idx = i;
}
}
if (neg_count == 1) {
// set to 1 first so oshape.Size() counts every other dim
oshape[inf_idx] = 1;
oshape[inf_idx] = dshape.Size() / oshape.Size();
}
} else {
// no spec given: infer the input backward from a known output
return shape_is_known((*out_attrs)[0])
&& ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
ReverseReshapeInferShape(&dshape, oshape);
#if 0
CHECK_EQ(oshape.Size(), dshape.Size())
<< "Target shape size is different to source. "
<< "Target: " << oshape
<< "\nSource: " << dshape;
#endif
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
/*!
 * \brief Shape inference for Flatten: keep the leading axis and collapse
 * every remaining axis into a single trailing dimension.
 */
inline bool FlattenShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape &dshape = (*in_attrs)[0];
  if (!shape_is_known(dshape)) return false;
  int trailing = 1;
  for (int axis = 1; axis < dshape.ndim(); ++axis) {
    trailing *= dshape[axis];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], trailing));
  return true;
}
struct TransposeParam : public dmlc::Parameter<TransposeParam> {
mxnet::TShape axes; // target axis order; empty means reverse all axes
DMLC_DECLARE_PARAMETER(TransposeParam) {
DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape(0, -1))
.describe("Target axis order. By default the axes will be inverted.");
}
// Equality used for parameter caching/hashing.
bool operator==(const TransposeParam &other) const {
return this->axes == other.axes;
}
};
/*!
* \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache
* \param in input tensor
* \param out output tensor
* \param row shape of dim 0 of input
* \param col shape of dim 1 of input
* \tparam DType Data type
* \tparam is_addto
*/
template<typename DType, bool is_addto>
MSHADOW_XINLINE void Transpose2D(const DType *in, DType *out, index_t row, index_t col) {
// ensure cache line hits and prevent cache miss for any configuration
// L1 cache size to be utilized = 32kb = 2^15
// Largest size of a single unit of any dtype <= 8 byte = 2^3
// Number of elements - (2^15/2^3) = 2^12
// Block-size - 2^6 v 2^6 (64 v 64)
// But we could leverage unrolling of for loops (for parallelization)
// Block-size - 2^5 v 2^5 (32 v 32) with potential 4 pragma for loop unrolled
// blocksize * blocksize * num_threads = cache_size / dtype_size
// Instead of explicit unroll, let compiler figure out optimal unroll factor
const index_t blocksize = 32;
// collapse 2 parallelizes 2 for loops
// inner 2 for loops aren't parallelized to prevent cache miss
// Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif  // _MSC_VER
for (index_t i = 0; i < row; i += blocksize) {
for (index_t j = 0; j < col; j += blocksize) {
// transpose the block
for (index_t a = j; (a < blocksize + j) && (a < col); ++a) {
for (index_t b = i; (b < blocksize + i) && (b < row); ++b) {
// is_addto accumulates into out instead of overwriting (kAddTo req)
if (!is_addto) {
out[a * row + b] = in[b * col + a];
} else {
out[a * row + b] += in[b * col + a];
}
}
}
}
}
}
inline bool IsIdentityTranspose(const TShape& axes) {
for (dim_t i = 0; i < axes.ndim(); i++) {
if (axes[i] != i) return false;
}
return true;
}
/*
 * Compute ret = transpose(src, axes) (or ret += ..., when is_addto) on the
 * given device. Supports tensors of up to 6 dimensions.
 * \tparam xpu      cpu or gpu
 * \tparam is_addto accumulate into ret instead of overwriting it
 */
template<typename xpu, bool is_addto = false>
void TransposeImpl(RunContext ctx,
const TBlob& src,
const TBlob& ret,
const mxnet::TShape& axes) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(src.type_flag_, ret.type_flag_);
// zero-size tensor, no need to compute
if (src.shape_.Size() == 0U) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
#ifdef __CUDACC__
// This transpose can be used only if there exist n and m such that:
// params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1)
// Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3).
if (isPseudo2DTranspose(axes)) {
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
transpose_pseudo2D<DType, is_addto>(ret, src, axes, s);
});
return;
}
#endif
// Special handle the identity case: a flat copy (or element-wise add) suffices.
if (IsIdentityTranspose(axes)) {
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(src.Size()), s);
Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(ret.Size()), s);
if (!is_addto) {
// Use memcpy to accelerate the speed
Copy(out, in, s);
} else {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, kAddTo>, xpu>::Launch(
s, ret.Size(), out.dptr_, in.dptr_);
}
});
return;
}
// Handle the general transpose case, dispatched on the tensor rank.
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
switch (axes.ndim()) {
case 2: {
Tensor<xpu, 2, DType> in = src.get<xpu, 2, DType>(s);
Tensor<xpu, 2, DType> out = ret.get<xpu, 2, DType>(s);
// 2-D CPU transposes use the cache-blocked kernel; on GPU this rank is
// always a pseudo-2D transpose and was handled above.
if (ctx.get_ctx().dev_mask() == cpu::kDevMask) {
Transpose2D<DType, is_addto>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]);
} else {
LOG(FATAL) << "Not Implemented. We should never reach here because the 2D case "
"in GPU has been covered by transpose_pseudo2D."
" Report an issue in Github.";
}
break;
}
case 3: {
Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s);
Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<3>());
} else {
out += transpose(in, axes.get<3>());
}
break;
}
case 4: {
Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s);
Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<4>());
} else {
out += transpose(in, axes.get<4>());
}
break;
}
case 5: {
Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s);
Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<5>());
} else {
out += transpose(in, axes.get<5>());
}
break;
}
case 6: {
Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s);
Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<6>());
} else {
out += transpose(in, axes.get<6>());
}
break;
}
default:
LOG(FATAL) << "Transpose support at most 6 dimensions";
break;
}
});
}
/*
 * Forward of the transpose operator. Resolves the axis permutation from
 * TransposeParam (an empty `axes` means "reverse all axes") and forwards to
 * TransposeImpl, accumulating when the request is kAddTo.
 */
template<typename xpu>
void Transpose(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  if (req[0] == kNullOp) {
    return;
  }
  CHECK(req[0] == kWriteTo || req[0] == kAddTo)
      << "Transpose only supports kNullOp, kWriteTo and kAddTo";
  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
  mxnet::TShape axes;
  if (param.axes.ndim() != 0) {
    // Normalize user-provided (possibly negative) axes.
    axes = common::CanonicalizeAxes(param.axes);
  } else {
    // Default: invert the axis order.
    const int nd = inputs[0].ndim();
    axes = mxnet::TShape(nd, -1);
    for (int k = 0; k < nd; ++k) {
      axes[k] = nd - 1 - k;
    }
  }
  if (req[0] == kAddTo) {
    TransposeImpl<xpu, true>(ctx.run_ctx, inputs[0], outputs[0], axes);
  } else {
    TransposeImpl<xpu, false>(ctx.run_ctx, inputs[0], outputs[0], axes);
  }
}
/*
 * Shape inference for transpose. Infers in both directions:
 * `ret` is the output shape deduced from the input shape, and `get` is the
 * input shape deduced from the (possibly partially known) output shape.
 */
inline bool TransposeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& shp = (*in_attrs)[0];
mxnet::TShape& out_shp = (*out_attrs)[0];
CHECK_LE(shp.ndim(), 6) << "Transpose support at most 6 dimensions";
if (shp.ndim() == -1 && out_shp.ndim() == -1)
return false;  // none of the shapes is known
if (out_shp.ndim() >= 0 && shp.ndim() >= 0)
CHECK_EQ(out_shp.ndim(), shp.ndim());
// `get`: candidate input shape; `ret`: candidate output shape.
mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1);
mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1);
if (param.axes.ndim() == 0) {
// Default permutation reverses all axes.
for (int i = 0; i < shp.ndim(); ++i) {
ret[i] = shp[shp.ndim()-1-i];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[shp.ndim()-1-i] = out_shp[i];
}
} else {
// Explicit permutation: out[i] = in[axes[i]], so in[axes[i]] = out[i].
CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim());
for (int i = 0; i < shp.ndim(); ++i) {
CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim()));
ret[i] = shp[param.axes[i]];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[param.axes[i]] = out_shp[i];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, 0, get);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
return shape_is_known(ret);
}
/*! \brief Parameters of the expand_dims operator. */
struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> {
// Position of the inserted length-1 axis; negatives count from the end.
int axis;
DMLC_DECLARE_PARAMETER(ExpandDimParam) {
DMLC_DECLARE_FIELD(axis)
.describe("Position where new axis is to be inserted. Suppose that "
"the input `NDArray`'s dimension is `ndim`, the range of "
"the inserted axis is `[-ndim, ndim]`");
}
// Two parameter sets are equal iff they insert at the same axis.
bool operator==(const ExpandDimParam &other) const {
return this->axis == other.axis;
}
};
/*
 * Shape inference for expand_dims. Works in both directions: the output shape
 * is the input shape with a 1 inserted at `axis`, and the input shape can be
 * recovered from a known output shape by removing that axis.
 */
inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
if (!mxnet::ndim_is_known(in_attrs->at(0)) && !mxnet::ndim_is_known(out_attrs->at(0))) {
return false;
}
mxnet::TShape& ishape = (*in_attrs)[0];
mxnet::TShape& oshape = (*out_attrs)[0];
int indim = ishape.ndim();
bool unknown_ishape = false;
// When only the output rank is known, derive the input rank from it.
if (-1 == indim) {
indim = oshape.ndim() - 1;
unknown_ishape = true;
}
int axis = param.axis;
if (axis < 0) {
axis += indim + 1;
}
CHECK(axis >= 0 && axis <= indim)
<< "axis must be in the range [" << -indim << ", " << indim << "] ("
<< param.axis << " provided)";
// Forward: input shape with a 1 spliced in at `axis` (-1 marks unknown dims).
mxnet::TShape ret(indim + 1, -1);
for (int i = 0; i < axis; ++i) {
ret[i] = (unknown_ishape? -1 : ishape[i]);
}
ret[axis] = 1;
for (int i = axis+1; i < indim+1; ++i) {
ret[i] = (unknown_ishape? -1 : ishape[i-1]);
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
// Backward: output shape with the inserted axis removed.
ret = mxnet::TShape(indim, -1);
for (int i = 0; i < axis; ++i) ret[i] = oshape[i];
for (int i = axis+1; i < indim+1; ++i) ret[i-1] = oshape[i];
SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret);
return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0));
}
// MKLDNN can only handle slices whose step is unset or uniformly 1.
inline bool SupportMKLDNNSlice(const SliceParam& param) {
  if (param.step.ndim() == 0U) return true;
  bool trivial = true;
  for (int i = 0; trivial && i < param.step.ndim(); ++i) {
    if (param.step[i].has_value() && param.step[i].value() != 1) {
      trivial = false;
    }
  }
  return trivial;
}
/*
 * Storage-type inference for the slice forward pass.
 * Dense input dispatches to MKLDNN (when available and the step is trivial)
 * or the plain FCompute path; CSR input with a trivial step keeps CSR output;
 * anything else falls back to dense.
 */
inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1);
CHECK_EQ(out_attrs->size(), 1);
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
const auto& in_stype = in_attrs->at(0);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const auto dispatch_ex = DispatchMode::kFComputeEx;
// If step = 1, no need to fallback; otherwise fallback to dense
bool trivial_step = false;
if (param.step.ndim() == 0U) {
trivial_step = true;
} else if (param.step.ndim() == 1U
&& (!param.step[0].has_value() || param.step[0].value() == 1)) {
trivial_step = true;
}
if (in_stype == kDefaultStorage) {
#if MXNET_USE_MKLDNN == 1
// Prefer the MKLDNN kernel on CPU when the env enables it and the slice
// has no non-unit step.
if (dev_mask == Context::kCPU && MKLDNNEnvSet()
&& SupportMKLDNNSlice(param)) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, dispatch_ex);
}
#endif
if (!dispatched) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
}
// CSR stays CSR only for trivial steps (see SliceCsrImpl).
if (!dispatched && in_stype == kCSRStorage && trivial_step) {
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
// Kernel rebasing a sliced csr indptr: out[i] = in[i] - *base, so the
// output's first row pointer starts at zero.
struct SliceCsrIndPtr {
  template<typename T>
  MSHADOW_XINLINE static void Map(int i, T* out, const T* in, const T* base) {
    KERNEL_ASSIGN(out[i], kWriteTo, in[i] - *base);
  }
};
/*
 * Launch the SliceCsrIndPtr kernel: copy src[begin .. end] into
 * dst[0 .. end - begin], subtracting src[begin] so dst is rebased to zero.
 */
template<typename xpu, typename IType>
void SliceCsrIndPtrImpl(const int begin, const int end, RunContext ctx,
                        const IType* src, IType* dst) {
  using namespace mshadow;
  using namespace mxnet_op;
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const int n_entries = end - begin + 1;
  Kernel<SliceCsrIndPtr, xpu>::Launch(s, n_entries, dst, src + begin, src + begin);
}
/*
 * Slice a CSR NDArray for first dimension
 * Copies rows [begin[0], end[0]) of `in` into `out`: rebases the indptr,
 * then copies the corresponding ranges of column indices and values.
 */
template<typename xpu>
void SliceDimOneCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
const NDArray &in, const NDArray &out) {
using namespace mshadow;
using namespace mxnet_op;
using namespace csr;
nnvm::dim_t begin_row = begin[0];
nnvm::dim_t end_row = end[0];
nnvm::dim_t indptr_len = end_row - begin_row + 1;
out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len));
// assume idx indptr share the same type
MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, {
MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, {
MSHADOW_TYPE_SWITCH(in.dtype(), DType, {
RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>();
RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>();
// Rebase the indptr slice so the output's first row starts at 0.
SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr);
Stream<xpu> *s = ctx.get_stream<xpu>();
// Read the slice's nnz back to the host from the last rebased indptr entry.
// NOTE(review): the device->host Copy is issued on stream s — assumes it has
// completed before nnz is read below; confirm for gpu builds.
RType nnz = 0;
mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)),
Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s));
// return csr zeros if nnz = 0
if (nnz == 0) {
out.set_aux_shape(kIdx, Shape1(0));
return;
}
// copy indices and values
out.CheckAndAllocAuxData(kIdx, Shape1(nnz));
out.CheckAndAllocData(Shape1(nnz));
IType* in_idx = in.aux_data(kIdx).dptr<IType>();
IType* out_idx = out.aux_data(kIdx).dptr<IType>();
DType* in_data = in.data().dptr<DType>();
DType* out_data = out.data().dptr<DType>();
// Offset of the first retained nonzero inside the input's idx/data arrays.
RType offset = 0;
mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)),
Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s));
mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s),
Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s), s);
mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s),
Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s), s);
});
});
});
}
/*!
 * \brief slice a CSRNDArray for two dimensions
 */
struct SliceDimTwoCsrAssign {
  /*!
   * \brief For row i, copy every entry whose column lies in
   *        [begin_col, end_col) into the output csr, shifting column
   *        indices down by begin_col.
   * \param i loop index (row of the output)
   * \param out_idx output csr ndarray column indices
   * \param out_data output csr ndarray data
   * \param out_indptr output csr ndarray row index pointer
   * \param in_idx input csr ndarray column indices
   * \param in_data input csr ndarray data
   * \param in_indptr input csr ndarray row index pointer
   * \param begin_col begin column index (inclusive)
   * \param end_col end column index (exclusive)
   */
  template<typename IType, typename RType, typename DType>
  MSHADOW_XINLINE static void Map(int i,
                                  IType* out_idx, DType* out_data,
                                  const RType* out_indptr,
                                  const IType* in_idx, const DType* in_data,
                                  const RType* in_indptr,
                                  const int begin_col, const int end_col) {
    RType dst = out_indptr[i];
    for (RType k = in_indptr[i]; k < in_indptr[i+1]; ++k) {
      const IType c = in_idx[k];
      // Column indices are in ascending order within a row, so we can stop
      // at the first index past the slice.
      if (c >= end_col) {
        break;
      }
      if (c >= begin_col) {
        out_idx[dst] = c - begin_col;
        out_data[dst] = in_data[k];
        ++dst;
      }
    }
  }
};
/*
 * Slice a CSR NDArray for two dimensions
 * (declaration only; the implementation is backend-specific).
 */
template<typename xpu>
void SliceDimTwoCsrImpl(const mxnet::TShape &begin, const mxnet::TShape &end, const OpContext& ctx,
const NDArray &in, const NDArray &out);
/*
 * Slice a CSR NDArray. Computes per-axis begin/end from the param and the
 * output shape, then dispatches on rank (1-D or 2-D only).
 * NOTE(review): param.end/param.step are not consulted here — `end` is derived
 * as begin + oshape, which presumes a unit step; the storage-type inference
 * appears to guarantee this for the CSR path — confirm.
 */
template<typename xpu>
void SliceCsrImpl(const SliceParam &param, const OpContext& ctx,
const NDArray &in, OpReqType req, const NDArray &out) {
if (req == kNullOp) return;
CHECK_NE(req, kAddTo) << "kAddTo for Slice on CSR input is not supported";
CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported";
const mxnet::TShape ishape = in.shape();
const mxnet::TShape oshape = out.shape();
int N = ishape.ndim();
mxnet::TShape begin(N, -1), end(N, -1);
for (int i = 0; i < N; ++i) {
int s = 0;
// Missing begin entries default to 0; negatives count from the end.
if (i < param.begin.ndim() && param.begin[i]) {
s = *param.begin[i];
if (s < 0) s += ishape[i];
}
begin[i] = s;
end[i] = s + oshape[i];
}
switch (N) {
case 1: {
SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
case 2: {
SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
default:
LOG(FATAL) << "CSR is only for 2-D shape";
break;
}
}
/*
 * Dispatch the slice operator for non-default storage inputs. Only CSR input
 * is implemented; any other sparse storage type is a fatal error.
 */
template<typename xpu>
void SliceEx(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<NDArray>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs.size(), 1);
  CHECK_EQ(outputs.size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  auto in_stype = inputs[0].storage_type();
  if (in_stype == kCSRStorage) {
    SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]);
  } else {
    // Fixed: the message was missing a space before the streamed storage type.
    LOG(FATAL) << "Slice not implemented for storage type " << in_stype;
  }
}
/*
 * Normalize the per-axis begin/end/step of a slice against `dshape`.
 * Fills `begin`, `end`, `step` (unspecified trailing axes get the full range
 * with step 1).
 */
template<int ndim>
inline bool GetIndexRange(const mxnet::TShape& dshape,
const mxnet::Tuple<dmlc::optional<index_t>>& param_begin,
const mxnet::Tuple<dmlc::optional<index_t>>& param_end,
const mxnet::Tuple<dmlc::optional<index_t>>& param_step,
common::StaticArray<index_t, ndim>* begin,
common::StaticArray<index_t, ndim>* end,
common::StaticArray<index_t, ndim>* step) {
// Returns true if any sliced axis ends up empty (b == e), i.e. the output is
// zero-sized; false otherwise. (Callers skip the kernel launch when true.)
bool zero_size_shape = false;
CHECK_NE(dshape.ndim(), 0U);
CHECK_LE(param_begin.ndim(), dshape.ndim())
<< "Slicing axis exceeds data dimensions";
CHECK_LE(param_end.ndim(), dshape.ndim())
<< "Slicing axis exceeds data dimensions";
CHECK_EQ(param_begin.ndim(), param_end.ndim())
<< "begin and end must have the same length";
CHECK_EQ(ndim, dshape.ndim())
<< "Static array size=" << ndim
<< " is not equal to data shape ndim=" << dshape.ndim();
if (param_step.ndim() > 0) {
CHECK_EQ(param_step.ndim(), param_begin.ndim())
<< "step and begin must have the same length";
}
for (int i = 0; i < param_begin.ndim(); ++i) {
// Missing step defaults to 1; step 0 is invalid.
index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1;
CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0";
index_t b = 0, e = 0;
const index_t len = dshape[i];
if (len > 0) {
// Missing begin/end default to the axis extremes in the step direction.
b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0);
e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len);
// Negative indices count from the end of the axis.
if (b < 0) {
b += len;
}
if (e < 0 && param_end[i].has_value()) {
e += len;
}
// move the begin and end to correct position for calculating dim size
b = (b < 0 && s > 0) ? 0 : b;
b = (b > len - 1 && s < 0) ? len - 1 : b;
// if the start value lead to empty tensor under step s, use -1 for indication
b = (b < 0 || b > len - 1) ? -1 : b;
e = e > -1 ? e : -1;
e = e > len ? len : e;
} else if (len == 0) {
b = 0;
e = 0;
}
(*begin)[i] = b;
(*end)[i] = e;
(*step)[i] = s;
// checking begin==end
if (b == e) {
zero_size_shape = true;
}
}
// Axes beyond the param tuples keep their full extent.
for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) {
(*begin)[i] = 0;
(*end)[i] = dshape[i];
(*step)[i] = 1;
}
return zero_size_shape;
}
/*
 * Set (*oshape)[i] to the length of axis i after slicing [b, e) with step s,
 * given the (normalized) bounds produced by GetIndexRange.
 */
inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape,
                                    const index_t i, const index_t b,
                                    const index_t e, const index_t s,
                                    mxnet::TShape* oshape) {
  // An unknown input dim stays unknown in the output.
  if (!mxnet::dim_size_is_known(dshape, i)) {
    (*oshape)[i] = -1;
    return;
  }
  index_t len = 0;
  if (b >= 0 && e != b) {
    // ceil(|e - b| / |s|) elements in the direction of the step.
    if (s > 0 && e > b) {
      len = (e - b - 1) / s + 1;
    } else if (s < 0 && e < b) {
      len = (b - e - 1) / (-s) + 1;
    }
  }
  (*oshape)[i] = len;
}
/*
 * Shape inference for slice: normalize the per-axis ranges, then compute each
 * output dimension from its (begin, end, step) triple.
 */
inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape)) return false;
CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0";
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
// Start from the input shape; only sliced axes are overwritten below.
mxnet::TShape oshape = dshape;
MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
for (int i = 0; i < param.begin.ndim(); ++i) {
const index_t b = begin[i], e = end[i], s = step[i];
SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape);
}
})
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return shape_is_known(dshape) && shape_is_known(oshape);
}
// Forward kernel of the slice operator; specialized for cpu and gpu below.
template<int ndim, int req, typename xpu>
struct slice_forward;
template<int ndim, int req>
struct slice_forward<ndim, req, gpu> {
// i indexes one element of the flattened output: SliceOpForward launches
// rows * last_dim threads for gpu, so each thread copies a single element.
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
const mshadow::Shape<ndim> dshape,
const mshadow::Shape<ndim> oshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = dshape[ndim-1];
const index_t out_last_dim_size = oshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
// Split i into (row of the flattened 2-D output, column j in the last dim).
const index_t j = i % out_last_dim_size;
index_t irow = 0;  // row id of flattend 2D data
index_t stride = 1;
index_t idx = i / out_last_dim_size;
// Decompose the output row into per-axis coordinates and map each through
// begin/step to the corresponding input row.
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
idx /= oshape[k];
stride *= dshape[k];
}
KERNEL_ASSIGN(out[i], req,
data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
}
};
template<int ndim, int req>
struct slice_forward<ndim, req, cpu> {
// i is the i-th row after flattening out into 2D tensor; each call copies a
// whole row of the output (the inner loop walks the last dimension).
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* data,
const mshadow::Shape<ndim> dshape,
const mshadow::Shape<ndim> oshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = dshape[ndim-1];
const index_t out_last_dim_size = oshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
index_t out_offset = i * out_last_dim_size;
for (index_t j = 0; j < out_last_dim_size; ++j) {
index_t irow = 0;  // row id of flattend 2D data
index_t stride = 1;
index_t idx = i;
// Map the output row coordinates through begin/step to the input row.
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
idx /= oshape[k];
stride *= dshape[k];
}
KERNEL_ASSIGN(out[out_offset++], req,
data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
}
}
};
/*
 * Forward of the slice operator: gather the strided slice of `data` into
 * `out`. On cpu one kernel invocation handles a row; on gpu one per element.
 */
template<typename xpu>
void SliceOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
if (req[0] == kNullOp) return;
using namespace mshadow;
Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& out = outputs[0];
// Zero-sized output: nothing to do.
if (out.Size() == 0) return;
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
// cpu: one thread per flattened-2D row; gpu: one per output element.
size_t num_threads = out.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= out.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
out.dptr<DType>(), data.dptr<DType>(),
data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
})
})
})
}
// Kernel scattering a value tensor into a strided slice of the output;
// specialized for cpu and gpu below.
template<int ndim, int req, typename xpu>
struct slice_assign;
template<int ndim, int req>
struct slice_assign<ndim, req, cpu> {
// i is the i-th row after flattening out into 2D tensor; each call scatters a
// whole row of `val` (shape vshape) into the sliced region of `out` (oshape).
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
const mshadow::Shape<ndim> oshape,
const mshadow::Shape<ndim> vshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = oshape[ndim-1];
const index_t out_last_dim_size = vshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
index_t offset = i * out_last_dim_size;
for (index_t j = 0; j < out_last_dim_size; ++j) {
index_t irow = 0;  // row id of flattend 2D out
index_t stride = 1;
index_t idx = i;
// Map the value-tensor row coordinates through begin/step to the
// destination row in `out`.
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
idx /= vshape[k];
stride *= oshape[k];
}
KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
req, val[offset++]);
}
}
};
template<int ndim, int req>
struct slice_assign<ndim, req, gpu> {
// i indexes one element of the flattened value tensor: the gpu launch uses
// rows * last_dim threads, so each thread writes a single element.
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* val,
const mshadow::Shape<ndim> oshape,
const mshadow::Shape<ndim> vshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = oshape[ndim-1];
const index_t out_last_dim_size = vshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
// Split i into (value row, column j in the last dim).
const index_t j = i % out_last_dim_size;
index_t irow = 0;  // row id of flattend 2D out
index_t stride = 1;
index_t idx = i / out_last_dim_size;
// Map the value-tensor coordinates through begin/step to the output row.
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
idx /= vshape[k];
stride *= oshape[k];
}
KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim],
req, val[i]);
}
};
/*
 * Backward of the slice operator: scatter the output gradient `ograd` into
 * the sliced region of the input gradient `igrad`. On kWriteTo the gradient
 * is zero-filled first so positions outside the slice get zero.
 */
template<typename xpu>
void SliceOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp) return;
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_backward does not support kWriteInplace";
  }
  if (ograd.Size() == 0) return;
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // Fixed: was `int`, which could overflow for large tensors; use
        // size_t to match SliceOpForward.
        size_t num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          // gpu launches one thread per gradient element, not per row.
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
            igrad.dptr<DType>(), ograd.dptr<DType>(),
            igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*
 * Shape inference for _slice_assign: input 0 is the destination array,
 * input 1 the value tensor. The value shape must equal the shape of the
 * sliced region; the output shape equals the destination shape.
 */
inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs,
                               mxnet::ShapeVector *in_attrs,
                               mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape)) return false;
  mxnet::TShape vshape = dshape;  // vshape is the value shape on the right hand side
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      // Fixed: keep index_t (previously narrowed to int), matching
      // SliceOpShape and avoiding overflow on very large dimensions.
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
/*
 * Forward of _slice_assign: out = data with out[slice] = val. On kWriteTo the
 * destination is first copied wholesale into the output, then the value
 * tensor is scattered into the sliced region.
 */
template<typename xpu>
void SliceAssignOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 2U);  // data[index] = val, data and val are two inputs
CHECK_EQ(outputs.size(), 1U);
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& val = inputs[1];
const TBlob& out = outputs[0];
if (req[0] == kWriteTo) {
// Copy the unmodified destination into the output first.
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
Copy(out, in, s);
});
} else if (req[0] != kWriteInplace) {
LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace";
}
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
&begin, &end, &step);
if (zero_size_shape) {
return;  // slice_assign of zero-sized subspace needs no operation.
}
MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
// cpu: one thread per value row; gpu: one per value element.
int num_threads = val.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= val.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
out.dptr<DType>(), val.dptr<DType>(),
out.shape_.get<ndim>(), val.shape_.get<ndim>(), begin, step);
})
})
})
}
/*! \brief Parameters of _slice_assign_scalar: a slice spec plus the scalar
 *  value to write into the sliced region. */
struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
// Scalar written into every element of the slice (cast to the output dtype).
double scalar;
mxnet::Tuple<dmlc::optional<index_t>> begin, end;
mxnet::Tuple<dmlc::optional<index_t>> step;
DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) {
DMLC_DECLARE_FIELD(scalar)
.set_default(0)
.describe("The scalar value for assignment.");
DMLC_DECLARE_FIELD(begin)
.describe("starting indices for the slice operation, supports negative indices.");
DMLC_DECLARE_FIELD(end)
.describe("ending indices for the slice operation, supports negative indices.");
DMLC_DECLARE_FIELD(step)
.set_default(mxnet::Tuple<dmlc::optional<index_t>>())
.describe("step for the slice operation, supports negative values.");
}
};
/*
 * Shape inference for _slice_assign_scalar: the output shape is simply the
 * (fully known) input shape.
 */
inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs,
                                     mxnet::ShapeVector *in_attrs,
                                     mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = in_attrs->front();
  if (!shape_is_known(dshape)) {
    return false;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
/*
 * Kernel writing a scalar into every element of a strided slice of `out`.
 * `vshape` is the shape of the sliced region.
 */
template<int ndim>
struct slice_assign_scalar {
// i is the i-th row after flattening out into 2D tensor; each call fills a
// whole row of the sliced region.
template<typename DType>
MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType val,
const OpReqType req,
const mshadow::Shape<ndim> oshape,
const mshadow::Shape<ndim> vshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = oshape[ndim-1];
const index_t out_last_dim_size = vshape[ndim-1];
const index_t step_last_dim = step[ndim-1];
const index_t begin_last_dim = begin[ndim-1];
for (index_t j = 0; j < out_last_dim_size; ++j) {
index_t irow = 0;  // row id of flattend 2D out
index_t stride = 1;
index_t idx = i;
// Map the slice-region coordinates through begin/step to the output row.
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
idx /= vshape[k];
stride *= oshape[k];
}
KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val);
}
}
};
/*
 * Forward of _slice_assign_scalar: out = data with out[slice] = scalar.
 * On kWriteTo the input is copied into the output first, then the scalar is
 * written over the sliced region.
 */
template<typename xpu>
void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
using namespace mshadow;
Stream<xpu> *s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& out = outputs[0];
if (req[0] == kWriteTo) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
Copy(out, in, s);
});
} else if (req[0] != kWriteInplace) {
LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace";
}
// vshape becomes the shape of the sliced region (computed below).
mxnet::TShape vshape = data.shape_;
const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
bool zero_size_shape = GetIndexRange(data.shape_, param.begin, param.end, param.step,
&begin, &end, &step);
if (zero_size_shape) {
return;  // slice_assign of zero-sized subspace needs no operation.
}
for (index_t i = 0; i < param.begin.ndim(); ++i) {
// NOTE(review): b/e/s are narrowed from index_t to int here — could
// overflow for dimensions larger than INT_MAX; confirm intended.
const int b = begin[i], e = end[i], s = step[i];
SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
}
MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s, vshape.FlatTo2D()[0],
out.dptr<DType>(), static_cast<DType>(param.scalar), req[0],
out.shape_.get<ndim>(), vshape.get<ndim>(), begin, step);
})
})
}
/*! \brief Parameters of the slice_axis operator: a [begin, end) range on a
 *  single axis. */
struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
int axis;
index_t begin;
// Unset `end` means "to the end of the axis" (see GetSliceAxisParams).
dmlc::optional<index_t> end;
DMLC_DECLARE_PARAMETER(SliceAxisParam) {
DMLC_DECLARE_FIELD(axis)
.describe("Axis along which to be sliced, supports negative indexes.");
DMLC_DECLARE_FIELD(begin)
.describe("The beginning index along the axis to be sliced, "
" supports negative indexes.");
DMLC_DECLARE_FIELD(end)
.describe("The ending index along the axis to be sliced, "
" supports negative indexes.");
}
};
/*
 * Normalize the slice_axis parameters against `ishape`: resolves a negative
 * axis, negative begin/end, and an unset end, and validates the range.
 * Outputs satisfy 0 <= *begin < *end <= ishape[*axis] (or begin == end == 0
 * for a zero-sized axis).
 */
inline void GetSliceAxisParams(const SliceAxisParam& param, const mxnet::TShape& ishape,
int* axis, index_t* begin, index_t* end) {
*axis = param.axis;
if (*axis < 0) {
*axis += ishape.ndim();
}
CHECK(*axis < ishape.ndim() && *axis >= 0) <<
"Transformed axis must be smaller than the source ndim and larger than zero! Recieved axis=" <<
param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
index_t axis_size = static_cast<index_t>(ishape[*axis]);
*begin = param.begin;
*end = -1;
// Negative begin counts from the end of the axis.
if (*begin < 0) {
*begin += axis_size;
}
if (axis_size > 0) {
// Unset end means "slice to the end of the axis".
if (!static_cast<bool>(param.end)) {
*end = axis_size;
} else {
*end = param.end.value();
if (*end < 0) {
*end += axis_size;
}
}
CHECK(*end <= axis_size) << "Invalid end for end=" << *end << " as axis_size is " << axis_size;
CHECK((*begin < *end))
<< "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
} else {
// Zero-sized axis: the only valid slice is empty.
*begin = 0;
*end = 0;
}
CHECK(*end >= 0)
<< "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
CHECK(*begin >= 0) << "Invalid begin for begin=" << param.begin;
}
/*
 * Shape inference for slice_axis: the output keeps every input dimension
 * except the sliced axis, which becomes end - begin.
 */
inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& ishape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(ishape)) return false;
int axis;
index_t begin, end;
GetSliceAxisParams(param, ishape, &axis, &begin, &end);
// If the sliced axis itself is unknown, propagate what we have and retry
// later.
if (!mxnet::dim_size_is_known(ishape, axis)) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
return false;
}
mxnet::TShape shape(ishape.ndim(), -1);
for (int i = 0; i < ishape.ndim(); ++i) {
if (i == axis) {
shape[i] = static_cast<index_t>(end - begin);
} else {
shape[i] = ishape[i];
}
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
return shape_is_known(shape);
}
/*!
 * \brief Forward of slice_axis: copies the [begin, end) range of the chosen
 *        axis from inputs[0] into outputs[0].
 * When the sliced axis is the last one the tensors are flattened to 2D,
 * otherwise to 3D with the sliced axis in the middle, so that mshadow's
 * slice<1> expression always addresses the target axis.
 */
template<typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow::expr;
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].ndim();
  if (axis + 1 == ndim) {
    // Sliced axis is last: view both blobs as (rest, axis).
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 2, DType> in =
            inputs[0].FlatTo2D<xpu, DType>(s);
        mshadow::Tensor<xpu, 2, DType> out =
            outputs[0].FlatTo2D<xpu, DType>(s);
        ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
      });
  } else {
    // View as (leading, axis, trailing) and slice the middle dimension.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 3, DType> in =
            inputs[0].FlatTo3D<xpu, DType>(axis, s);
        mshadow::Tensor<xpu, 3, DType> out =
            outputs[0].FlatTo3D<xpu, DType>(axis, s);
        ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
      });
  }
}
// Backward pass of slice_axis over the given axis: the incoming gradient is
// scattered back into the [begin, end) range of the (larger) input gradient;
// everything outside the slice receives zero (for kWriteTo).
template<typename xpu>
void SliceAxisGrad_(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  // Nothing to do for an empty input gradient.
  if (outputs[0].shape_.Size() == 0) {
    return;
  }
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  using namespace mshadow::op;
  using namespace mshadow::expr;
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  // Note: ranges are resolved against the *input* gradient's shape (outputs[0]).
  GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].shape_.ndim();
  if (axis + 1 == ndim) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 2, DType> ograd =
            inputs[0].FlatTo2D<xpu, DType>(s);
        mshadow::Tensor<xpu, 2, DType> igrad =
            outputs[0].FlatTo2D<xpu, DType>(s);
        if (req[0] == kAddTo) {
          slice<1>(igrad, begin, end) += F<identity>(ograd);
        } else if (req[0] == kWriteTo) {
          // Zero the whole gradient first; only the slice gets real values.
          igrad = 0.0f;
          slice<1>(igrad, begin, end) = F<identity>(ograd);
        } else {
          CHECK_EQ(req[0], kNullOp);
        }
      });
  } else {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        mshadow::Tensor<xpu, 3, DType> ograd =
            inputs[0].FlatTo3D<xpu, DType>(axis, s);
        mshadow::Tensor<xpu, 3, DType> igrad =
            outputs[0].FlatTo3D<xpu, DType>(axis, s);
        if (req[0] == kAddTo) {
          slice<1>(igrad, begin, end) += F<identity>(ograd);
        } else if (req[0] == kWriteTo) {
          igrad = 0.0f;
          slice<1>(igrad, begin, end) = F<identity>(ograd);
        } else {
          CHECK_EQ(req[0], kNullOp);
        }
      });
  }
}
// Parameters for slice_like: slice the first input so the selected axes match
// the sizes of the second input.
struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> {
  mxnet::Tuple<int> axes;  // axes to match; empty tuple means all axes
  DMLC_DECLARE_PARAMETER(SliceLikeParam) {
    DMLC_DECLARE_FIELD(axes).set_default(mxnet::Tuple<int>())
      .describe("List of axes on which input data will be sliced according to the "
                "corresponding size of the second input. By default will slice on "
                "all axes. Negative axes are supported.");
  }
};
/*!
 * \brief Shape inference for slice_like.
 * With no axes given, the output takes the second input's shape verbatim
 * (ndims must match). Otherwise only the listed axes are shrunk to the second
 * input's sizes; all other axes keep the first input's sizes.
 */
inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *in_attrs,
                           mxnet::ShapeVector *out_attrs) {
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& from_shape = (*in_attrs)[1];
  if (param.axes.ndim() == 0) {
    // Slice on every axis: shapes must be rank-compatible and each target
    // size must not exceed the source size.
    CHECK_EQ(ishape.ndim(), from_shape.ndim())
      << "By default slice_axis performs slice on all axes, but ndim mismatch "
         "for inputs: " << ishape.ndim() << " vs. " << from_shape.ndim();
    for (int i = 0; i < ishape.ndim(); ++i) {
      CHECK_GE(ishape[i], from_shape[i])
        << "Slice axis " << i << " with size " << from_shape[i]
        << "exceeds limit of input with size " << ishape[i];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape);
  } else {
    // Start from the first input's shape and overwrite the selected axes.
    mxnet::TShape shape(ishape);
    for (int i = 0; i < param.axes.ndim(); ++i) {
      int axis = param.axes[i];
      if (axis < 0) {
        axis += ishape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << param.axes[i] << " too small";
      CHECK_GT(ishape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds first input: " << ishape.ndim();
      CHECK_GT(from_shape.ndim(), axis)
        << "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim();
      shape[axis] = from_shape[axis];
      CHECK_GE(ishape[axis], from_shape[axis])
        << "Slice axis " << axis << " with size " << from_shape[axis]
        << "exceeds limit of input with size " << ishape[axis];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return true;
}
/*!
 * \brief Translate a slice_like request into per-axis begin/end/step tuples
 *        consumable by the generic slice kernels.
 * Axes not being sliced keep unset (dmlc::optional) entries; sliced axes get
 * begin = 0, end = fshape[axis], step = 1.
 * \param dshape shape of the data (first) input
 * \param fshape shape of the shape-providing (second) input
 * \param axes axes to slice on; empty tuple means all axes
 * \param param_begin out: per-axis begin indices
 * \param param_end out: per-axis end indices
 * \param param_step out: per-axis steps
 */
inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
                                 const mxnet::TShape& fshape,
                                 const mxnet::Tuple<int>& axes,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_end,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
  std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
  std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
  std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
  if (axes.ndim() == 0) {
    // Slice every axis down to the second input's size.
    for (int i = 0; i < dshape.ndim(); ++i) {
      pb[i] = 0;
      pe[i] = fshape[i];
      ps[i] = 1;
    }
  } else {
    for (int i = 0; i < axes.ndim(); ++i) {
      int axis = axes[i];
      if (axis < 0) {
        axis += dshape.ndim();
      }
      CHECK_GE(axis, 0)
        << "Slice axis: " << axes[i] << " too small";
      CHECK_LT(axis, dshape.ndim())
        << "Slice axis: " << axis << " exceeds first input: " << dshape.ndim();
      // Fixed copy-paste bug: this bound belongs to the *second* input, but
      // the message previously said "first input".
      CHECK_LT(axis, fshape.ndim())
        << "Slice axis: " << axis << " exceeds second input: " << fshape.ndim();
      pb[axis] = 0;
      pe[axis] = fshape[axis];
      ps[axis] = 1;
    }
  }
  *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
  *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
  *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
}
/*!
 * \brief Forward of slice_like: slices inputs[0] so the configured axes match
 *        the sizes of inputs[1], writing the result into outputs[0].
 * The second input contributes only its shape; its data is never read.
 */
template<typename xpu>
void SliceLikeForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow::expr;
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  const mxnet::TShape& ishape = data.shape_;
  const mxnet::TShape& from_shape = inputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  // Convert the slice_like request into generic slice begin/end/step tuples.
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        // CPU parallelizes over leading rows; GPU additionally over the
        // innermost dimension for more threads.
        // NOTE(review): num_threads is int; extremely large shapes could
        // overflow it -- confirm upstream shape limits.
        int num_threads = out.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= out.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(s,
          num_threads, out.dptr<DType>(), data.dptr<DType>(),
          data.shape_.get<ndim>(), out.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
/*!
 * \brief Backward of slice_like: scatters the output gradient back into the
 *        first input's gradient; the second input's gradient is zero.
 */
template<typename xpu>
void SliceLikeBackward(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 2U);
  CHECK_EQ(req.size(), 2U);
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  if (req[1] != kNullOp && req[1] != kAddTo) {
    Fill(s, outputs[1], req[1], 0);  // Second input not relevant to gradients.
  }
  if (req[0] == kNullOp) return;
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // Zero first: only the sliced region is written by the kernel below.
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_like_backward does not support kWriteInplace";
  }
  const mxnet::TShape& ishape = ograd.shape_;
  const mxnet::TShape& from_shape = outputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        int num_threads = ograd.shape_.FlatTo2D()[0];
        if (std::is_same<xpu, gpu>::value) {
          num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
        }
        mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(s, num_threads,
          igrad.dptr<DType>(), ograd.dptr<DType>(),
          igrad.shape_.get<ndim>(), ograd.shape_.get<ndim>(), begin, step);
      })
    })
  })
}
// Parameters for clip: clamp every element into the closed range [a_min, a_max].
struct ClipParam : public dmlc::Parameter<ClipParam> {
  real_t a_min, a_max;  // lower / upper clamp bounds
  DMLC_DECLARE_PARAMETER(ClipParam) {
    DMLC_DECLARE_FIELD(a_min)
      .describe("Minimum value");
    DMLC_DECLARE_FIELD(a_max)
      .describe("Maximum value");
  }
};
/*! \brief Elementwise kernel clamping each value of `datas` into [a_min, a_max]. */
struct clip {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType val = datas[i];
    // Both comparisons are false for NaN, so NaN propagates unchanged,
    // exactly as the original branch chain behaved.
    out[i] = (val > a_max) ? static_cast<DType>(a_max)
                           : ((val < a_min) ? static_cast<DType>(a_min) : val);
  }
};
/*!
 * \brief Gradient kernel for clip: the gradient passes through where the
 *        input was inside [a_min, a_max] and is zeroed where it was clamped.
 */
struct clip_grad {
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const DType* grad, const DType* datas,
                                  const float a_min, const float a_max) {
    const DType val = datas[i];
    const bool saturated = (val > a_max) || (val < a_min);
    out[i] = saturated ? static_cast<DType>(0) : grad[i];
  }
};
/*!
 * \brief Forward of clip: launches the `clip` kernel over every element of
 *        inputs[0], writing the clamped values to outputs[0].
 */
template<typename xpu>
void Clip(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
                                                   inputs[0].dptr<DType>(),
                                                   param.a_min, param.a_max);
  });
}
/*!
 * \brief Clip for non-default (sparse) storage: delegates to the dense
 *        Clip<xpu> on the stored values via UnaryOp::MapToFCompute.
 * Input and output must share dtype and (non-default) storage type.
 */
template<typename xpu>
void ClipEx(const nnvm::NodeAttrs& attrs,
            const OpContext& ctx,
            const std::vector<NDArray>& inputs,
            const std::vector<OpReqType>& req,
            const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs[0].dtype(), outputs[0].dtype());
  CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type());
  CHECK_NE(inputs[0].storage_type(), kDefaultStorage);
  UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>);
}
/*!
 * \brief Backward of clip: launches the `clip_grad` kernel.
 * inputs[0] is the output gradient, inputs[1] is the original data
 * (matching clip_grad::Map's grad/datas argument order).
 */
template<typename xpu>
void ClipGrad_(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<clip_grad, xpu>::Launch(s, outputs[0].Size(), outputs[0].dptr<DType>(),
                                   inputs[0].dptr<DType>(), inputs[1].dptr<DType>(),
                                   param.a_min, param.a_max);
  });
}
/*!
 * \brief The parameters of the repeat operator include
 * the number of repeating time and axis (optional).
 * The parameters will be later used to deduce the
 * output ndarray shape in bool RepeatShape() function.
 */
struct RepeatParam : public dmlc::Parameter<RepeatParam> {
  int repeats = 1;           // how many times each element is repeated
  dmlc::optional<int> axis;  // axis to repeat along; unset => flatten first
  DMLC_DECLARE_PARAMETER(RepeatParam) {
    DMLC_DECLARE_FIELD(repeats)
      .describe("The number of repetitions for each element.");
    DMLC_DECLARE_FIELD(axis)
      .set_default(dmlc::optional<int>())
      .describe("The axis along which to repeat values."
                " The negative numbers are interpreted counting from the backward."
                " By default, use the flattened input array,"
                " and return a flat output array.");
  }
};
/*!
 * \brief Extract and sanity-check the user parameters of the repeat operator.
 * Validates that repeats is non-negative and that the optional axis (after
 * normalizing a negative value) lies inside the input's rank.
 */
inline void GetRepeatParams(const RepeatParam& param, const mxnet::TShape& ishape,
                            int* repeats, dmlc::optional<int>* axisOpt) {
  *repeats = param.repeats;
  CHECK_GE(*repeats, 0) << "repeats cannot be a negative number";
  *axisOpt = param.axis;
  if (!static_cast<bool>(*axisOpt)) return;  // no axis -> nothing more to check
  const int ndims = ishape.ndim();
  int ax = axisOpt->value();
  if (ax < 0) ax += ndims;
  CHECK(ax >= 0 && ax < ndims) << "axis = " << axisOpt->value() << " out of bounds";
}
/*!
 * \brief Shape inference for repeat.
 * repeats == 0 yields an empty 1-D array; with an axis, that axis's size is
 * multiplied by repeats; without an axis the output is a flat array of
 * size input.Size() * repeats.
 */
inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs,
                          mxnet::ShapeVector *in_attrs,
                          mxnet::ShapeVector *out_attrs) {
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  // If 0 repeats, return an empty 1-dim, 0-size array
  if (0 == repeats) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0));
    return true;
  }
  // If repeats > 0, multiply the size of the corresponding axis by repeats
  if (static_cast<bool>(axisOpt)) {
    int ndims = ishape.ndim();
    int axis = axisOpt.value();
    if (axis < 0) {
      axis += ndims;  // normalize negative axis
    }
    mxnet::TShape shape(ishape.ndim(), -1);
    for (int i = 0; i < ishape.ndim(); ++i) {
      if (i == axis) {
        shape[i] = repeats * ishape[i];
      } else {
        shape[i] = ishape[i];
      }
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  } else {  // If axis is not input by user, return a flat 1D array of size = in.size*repeats
    mxnet::TShape shape(1, ishape.Size() * repeats);
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return shape_is_known(out_attrs->at(0));
}
/*!
 * \brief Type inference for repeat: the dtype flows from whichever side
 *        (input or output) is already known; -1 marks "unknown".
 */
inline bool RepeatOpType(const nnvm::NodeAttrs& attrs,
                         std::vector<int>* in_attrs,
                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = in_attrs->at(0);
  if (in_type != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
  } else if (out_attrs->at(0) != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  }
  return true;
}
/*!
 * \brief Reshape the input and output tensors for
 * using broadcast_to to achieve the funcitonality
 * of operator repeat.
 * \return a pair of mxnet::TShape's, first is the reshaped
 * input shape, second is the reshaped output shape.
 */
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp(
    const mxnet::TShape& ishape,
    const dmlc::optional<int>& axisOpt,
    const int repeats) {
  if (static_cast<bool>(axisOpt)) {
    int axis = axisOpt.value();
    int ndim = ishape.ndim();
    if (axis < 0) {
      axis += ndim;  // normalize negative axis
    }
    CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid input of axis";
    // reshape the input tensor by adding a dim at the (axis+1)-th dim
    mxnet::TShape rshape(ishape.ndim()+1, 1);
    // the shape we want to broadcast to
    mxnet::TShape bshape(rshape.ndim(), 1);
    int i = 0;
    // Dims up to and including `axis` are copied unchanged.
    while (i <= axis) {
      rshape[i] = bshape[i] = ishape[i];
      ++i;
    }
    // The inserted dim is 1 in the input and `repeats` in the broadcast target.
    rshape[i] = 1;
    bshape[i] = repeats;
    // Remaining dims shift right by one.
    while (i < ishape.ndim()) {
      rshape[i+1] = ishape[i];
      bshape[i+1] = ishape[i];
      ++i;
    }
    return std::make_pair(rshape, bshape);
  } else {
    // axis is not input by user
    // reshape the tensor into shape (ishape.Size(), 1)
    // then add one dim at axis = 1 and broadcast to
    // shape (ishape.Size(), repeats)
    mxnet::TShape rshape(2, 1);
    rshape[0] = ishape.Size();
    rshape[1] = 1;
    mxnet::TShape bshape(2, 1);
    bshape[0] = rshape[0];
    bshape[1] = repeats;
    return std::make_pair(rshape, bshape);
  }
}
/*!
 * \brief Forward of repeat: implemented by reshaping the input (adding a
 *        unit dim after the repeat axis) and broadcasting it to the output
 *        shape via BroadcastCompute.
 */
template<typename xpu>
void RepeatOpForward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  const TBlob& iTBlob = inputs[0];
  const mxnet::TShape& ishape = iTBlob.shape_;
  if (!shape_is_known(ishape)) return;
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  if (0 == repeats) return;  // empty output, nothing to compute
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = \
    ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats);
  // reshaped input tblob (same data, new view)
  TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // reshaped output tblob (same data, new view)
  TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
 * \brief Compute the gradient of the loss function
 * with respect to the input of the operator.
 * Backpropagation is employed to implement the
 * chain rule. Since forward is a broadcast, backward reduces (sums) the
 * output gradient over the inserted repeat dimension.
 * \param inputs the gradient of the loss function
 * with respect to the outputs of the operator
 * \param outputs the gradient of the loss function
 * with respect to the inputs of the operator
 */
template<typename xpu>
void RepeatOpBackward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  const mxnet::TShape& oshape = outputs[0].shape_;
  if (!shape_is_known(oshape)) return;
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, oshape, &repeats, &axisOpt);
  if (0 == repeats) return;
  // Note: shapes are derived from the *input gradient* shape (oshape), the
  // mirror image of the forward pass.
  std::pair<mxnet::TShape, mxnet::TShape> rshapes =
    ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats);
  // reshaped output grad tblob
  TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  // reshaped input grad tblob
  TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // Sum-reduce the broadcast dimension back down to the input gradient shape.
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, newInputs, req, newOutputs, rshapes.first);
}
// Parameters for tile: per-dimension repetition counts, numpy-style.
struct TileParam : public dmlc::Parameter<TileParam> {
  mxnet::Tuple<int> reps;  // repetition count per dimension
  DMLC_DECLARE_PARAMETER(TileParam) {
    DMLC_DECLARE_FIELD(reps)
      .describe("The number of times for repeating the tensor a. Each dim size of reps"
                " must be a positive integer."
                " If reps has length d, the result will have dimension of max(d, a.ndim);"
                " If a.ndim < d, a is promoted to be d-dimensional by prepending new axes."
                " If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it.");
  }
};
/*!
 * \brief Shape inference for tile.
 * Aligns ishape and reps from the right (numpy semantics): missing leading
 * dims on either side are treated as 1, and each output dim is the product
 * of the input dim and its rep count.
 */
inline bool TileOpShape(const nnvm::NodeAttrs& attrs,
                        mxnet::ShapeVector *in_attrs,
                        mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const TileParam& param = nnvm::get<TileParam>(attrs.parsed);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  if (!shape_is_known(ishape)) {
    return false;
  }
  const mxnet::Tuple<int>& reps = param.reps;
  // If reps is empty, return a identical input array
  if (reps.ndim() == 0) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return true;
  }
  mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1);
  // Walk both shapes from their last dims; exhausted sides contribute 1.
  int i1 = ishape.ndim() - 1;
  int i2 = reps.ndim() - 1;
  for (int i = oshape.ndim() - 1; i >= 0; --i) {
    if (i1 >= 0 && i2 >= 0) {
      oshape[i] = ishape[i1--] * reps[i2--];
    } else if (i1 >= 0) {
      oshape[i] = ishape[i1--];
    } else if (i2 >= 0) {
      oshape[i] = reps[i2--];
    }
  }
  // If reps contains 0s, oshape is a zero-size shape.
  // Need to distinguish between np_shape mode and legacy mode.
  if (!Imperative::Get()->is_np_shape()) {
    common::ConvertToNumpyShape(&oshape);
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
/*!
 * \brief Type inference for tile: the dtype flows from whichever side
 *        (input or output) is already known; -1 marks "unknown".
 */
inline bool TileOpType(const nnvm::NodeAttrs& attrs,
                       std::vector<int>* in_attrs,
                       std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = in_attrs->at(0);
  if (in_type != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
  } else if (out_attrs->at(0) != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  }
  return true;
}
/*!
 * \brief Reshape the input and output tensors for
 * using broadcast_to to achieve the functionality
 * of operator tile.
 * \return a pair of mxnet::TShape's, first is the reshaped
 * input shape, second is the reshaped output shape.
 */
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp(
    const mxnet::TShape& ishape,
    const mxnet::Tuple<int>& reps) {
  if (reps.ndim() == 0) {
    return std::make_pair(ishape, ishape);
  }
  // The shape we want to broadcast to
  mxnet::TShape bshape(std::max(ishape.ndim(), reps.ndim()) * 2, 1);
  // The shape of the input tensor after adding new axes before each dim
  mxnet::TShape rshape(bshape.ndim(), 1);
  int i1 = ishape.ndim() - 1;
  int i2 = reps.ndim() - 1;
  // Interleave (rep, dim) pairs from the right: even slots carry the rep
  // counts (size-1 axes in the input view), odd slots carry the data dims.
  for (int i = bshape.ndim() - 1; i >= 0; --i) {
    if (0 == (i & 1)) {
      bshape[i] = (i2 >= 0? reps[i2--] : 1);
      rshape[i] = 1;
    } else {
      rshape[i] = bshape[i] = (i1 >= 0? ishape[i1--] : 1);
    }
  }
  return std::make_pair(rshape, bshape);
}
/*!
 * \brief Implementation of tiling the input tensor a based
 * on the user-input shape, reps.
 * If a.ndim < reps.ndim, new axes are pre-pended to a. For example,
 * the input tensor has shape (3,), and the reps is (2, 4); the input
 * tensor would be reshaped to (1, 3).
 * If a.ndim > reps.ndim, pre-pending 1's to reps. For example,
 * the input tensor has shape (2, 3, 4, 5), and reps is (2, 2);
 * the reps would be changed to (1, 1, 2, 2).
 * Suppose we have a.ndim = reps.ndim now. To achieve tiling,
 * we utilize the operator broadcast_to. For example, for a tensor
 * of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape
 * the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding
 * one axis before each dimension. Then, we want to broadcast
 * the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final
 * output tensor would have shape (2*2, 8*3, 9*4, 3*5).
 */
template<typename xpu>
void TileOpForward(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0) return;  // nothing to tile
  const mxnet::TShape& ishape = inputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // If any one of the number in reps is zero, return immediately
  for (int i = 0; i < reps.ndim(); ++i) {
    if (0 == reps[i]) return;
  }
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(ishape, reps);
  // reshaped input tblob (same data, interleaved-axes view)
  TBlob iblob(inputs[0].dptr_, rshapes.first, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // reshaped output tblob
  TBlob oblob(outputs[0].dptr_, rshapes.second, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
 * \brief Compute the gradient of the loss function
 * with respect to the input of the operator.
 * Backpropagation is employed to implement the
 * chain rule. Since forward is a broadcast, backward sum-reduces the output
 * gradient over the inserted tiling axes.
 * \param inputs the gradient of the loss function
 * with respect to the outputs of the operator
 * \param outputs the gradient of the loss function
 * with respect to the inputs of the operator
 */
template<typename xpu>
void TileOpBackward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0) return;
  // Shapes are derived from the *input gradient* shape, mirroring forward.
  const mxnet::TShape& oshape = outputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // If any one of the number in reps is zero, return immediately
  for (int i = 0; i < reps.ndim(); ++i) {
    if (0 == reps[i]) return;
  }
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(oshape, reps);
  // reshaped output grad tblob
  TBlob oblob(outputs[0].dptr_, rshapes.first, outputs[0].dev_mask(),
              outputs[0].type_flag_, outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  // reshaped input grad tblob
  TBlob iblob(inputs[0].dptr_, rshapes.second, inputs[0].dev_mask(),
              inputs[0].type_flag_, inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // Sum-reduce the tiling axes back down to the input gradient shape.
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, newInputs, req, newOutputs, rshapes.first);
}
// Parameters for reverse: the set of axes whose elements are flipped.
struct ReverseParam : public dmlc::Parameter<ReverseParam> {
  mxnet::Tuple<int> axis;  // axes to reverse along
  DMLC_DECLARE_PARAMETER(ReverseParam) {
    DMLC_DECLARE_FIELD(axis)
      .describe("The axis which to reverse elements.");
  }
};
// Upper bound on the number of axes that can be reversed at once; also the
// size of the per-block shared-memory index tables in the CUDA path.
#define REVERSE_MAX_DIM 10U
/*!
 * \brief Kernel for reverse: for each source element, computes the index it
 *        maps to after flipping every requested axis, then scatter-copies.
 */
struct reverse {
  // Maps a flat source index to its flat destination index by mirroring the
  // coordinate along each reversed axis (stride_[i] = axis size,
  // trailing_[i] = product of the sizes of the dims after that axis).
  MSHADOW_XINLINE static index_t ReverseIndex(index_t idx,
                                              index_t nreversedim,
                                              const index_t * stride_,
                                              const index_t * trailing_) {
    index_t outputIndex = idx;
    for (index_t i = 0; i < nreversedim; ++i) {
      const index_t low = outputIndex % trailing_[i];
      index_t high = outputIndex / trailing_[i];
      const index_t x = high%stride_[i];
      high /= stride_[i];
      // Replace coordinate x along this axis with (size - 1 - x).
      outputIndex = (high*stride_[i] + stride_[i] - 1 - x)*trailing_[i] + low;
    }
    return outputIndex;
  }
#ifdef __CUDACC__
  // GPU variant: stages the stride/trailing tables into shared memory once
  // per block before computing the scatter index.
  template<typename DType>
  __device__  static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                              const index_t * stride_,
                              const index_t * trailing_) {
    __shared__ index_t stride_share[REVERSE_MAX_DIM];
    __shared__ index_t trailing_share[REVERSE_MAX_DIM];
    if (threadIdx.x < REVERSE_MAX_DIM) {
      stride_share[threadIdx.x] = stride_[threadIdx.x];
      trailing_share[threadIdx.x] = trailing_[threadIdx.x];
    }
    __syncthreads();
    index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
    dst[new_idx] = src[index];
  }
#else
  // CPU variant: reads the tables directly.
  template<typename DType>
  MSHADOW_XINLINE  static void Map(index_t index, index_t nreversedim, const DType *src, DType *dst,
                                   const index_t * stride_,
                                   const index_t * trailing_) {
    index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
    dst[new_idx] = src[index];
  }
#endif
};
/*!
 * \brief Forward of reverse: flips the input along every axis in param.axis.
 * Builds per-axis (stride, trailing) tables on the host; in the CUDA build
 * they are copied into a device workspace asynchronously on the op's stream
 * before launching the kernel.
 */
template<typename xpu>
void ReverseOpForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const mxnet::TShape& ishape = inputs[0].shape_;
  // stride_[k] = size of the k-th reversed axis;
  // trailing_[k] = number of elements spanned by one step along it.
  std::vector<index_t> stride_(param.axis.ndim());
  std::vector<index_t> trailing_(param.axis.ndim());
  index_t reverse_index = 0;
  for (int axis : param.axis) {
    // NOTE(review): only an upper-bound check here; a negative axis would
    // index ishape out of range -- confirm callers always pass axis >= 0.
    CHECK_LT(axis, ishape.ndim());
    stride_[reverse_index] = ishape[axis];
    trailing_[reverse_index] = 1;
    for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) {
      trailing_[reverse_index] *= ishape[i2];
    }
    reverse_index++;
  }
#ifdef __CUDACC__
  // Stage both tables into one device workspace: [strides | trailings].
  mshadow::Tensor<xpu, 1, uint8_t> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, uint8_t>(
      mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s);
  auto stride_workspace = workspace.dptr_;
  auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t);
  // raw_pointer_cast on a host std::vector's data is presumably a pass-through
  // here; the copies are ordered on the op's stream.
  cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()),
                  stride_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
  cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()),
                  trailing_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s));
#endif
#ifdef __CUDACC__
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
      inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
      reinterpret_cast<index_t*>(stride_workspace), reinterpret_cast<index_t*>(trailing_workspace));
  });
#else
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s, inputs[0].Size(), reverse_index,
      inputs[0].dptr<DType>(), outputs[0].dptr<DType>(),
      stride_.data(), trailing_.data());
  });
#endif
}
// Parameters for stack: joins num_args same-shape arrays along a new axis.
struct StackParam : public dmlc::Parameter<StackParam> {
  int axis;      // position of the new axis in the output
  int num_args;  // number of arrays being stacked
  DMLC_DECLARE_PARAMETER(StackParam) {
    DMLC_DECLARE_FIELD(axis)
      .set_default(0)
      .describe("The axis in the result array along which the input arrays are stacked.");
    DMLC_DECLARE_FIELD(num_args).set_lower_bound(1)
      .describe("Number of inputs to be stacked.");
  }
};
/*!
 * \brief Shape inference for stack: all inputs must agree on one shape;
 *        the output gains one extra axis of size num_args at `axis`.
 */
inline bool StackOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  mxnet::TShape dshape;
  // Unify all input shapes into one common shape.
  for (const mxnet::TShape& i : (*in_attrs)) {
    shape_assign(&dshape, i);
  }
  if (!shape_is_known(dshape)) return false;
  mxnet::TShape oshape(dshape.ndim() + 1, -1);
  int axis = CheckAxis(param.axis, oshape.ndim());
  for (int i = 0; i < axis; ++i) {
    oshape[i] = dshape[i];
  }
  oshape[axis] = param.num_args;
  // Use a signed index here (was index_t) to match the other loops above and
  // avoid a signed/unsigned comparison against oshape.ndim(), which is int.
  for (int i = axis + 1; i < oshape.ndim(); ++i) {
    oshape[i] = dshape[i-1];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
/*!
 * \brief Forward of stack: views every input as (leading, 1, trailing), the
 *        output as (leading, num_args, trailing), and concatenates along the
 *        middle (new) axis.
 */
template<typename xpu>
void StackOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, outputs[0].ndim());
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
    std::vector<Tensor<xpu, 3, DType> > data(inputs.size());
    Tensor<xpu, 3, DType> out;
    // leading/trailing: products of output dims before/after the stack axis.
    size_t leading = 1, trailing = 1;
    for (int i = 0; i < axis; ++i) {
      leading *= outputs[0].shape_[i];
    }
    for (int i = axis + 1; i < outputs[0].ndim(); ++i) {
      trailing *= outputs[0].shape_[i];
    }
    size_t mid = outputs[0].shape_[axis];
    Shape<3> oshape = Shape3(leading, mid, trailing);
    out = outputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
    for (size_t i = 0; i < inputs.size(); ++i) {
      Shape<3> dshape = Shape3(leading, 1, trailing);
      data[i] = inputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
    }
    Concatenate(data, &out, 1, req[0]);
  })
}
/*!
 * \brief Backward of stack: splits the output gradient along the stacked
 *        axis back into one gradient per input (the inverse of the forward
 *        Concatenate).
 */
template<typename xpu>
void StackOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  Stream<xpu> *s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
    std::vector<Tensor<xpu, 3, DType> > grad_in(outputs.size());
    Tensor<xpu, 3, DType> grad;
    // leading/trailing: products of dims before/after the stack axis of the
    // incoming (stacked) gradient.
    size_t leading = 1, trailing = 1;
    for (int i = 0; i < axis; ++i) {
      leading *= inputs[0].shape_[i];
    }
    for (int i = axis + 1; i < inputs[0].ndim(); ++i) {
      trailing *= inputs[0].shape_[i];
    }
    size_t mid = inputs[0].shape_[axis];
    Shape<3> oshape = Shape3(leading, mid, trailing);
    grad = inputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
    for (size_t i = 0; i < outputs.size(); ++i) {
      Shape<3> dshape = Shape3(leading, 1, trailing);
      grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
    }
    Split(grad, &grad_in, 1, req);
  })
}
// Parameters for squeeze: which size-1 axes to remove (all of them if unset).
struct SqueezeParam : public dmlc::Parameter<SqueezeParam> {
  dmlc::optional<mxnet::Tuple<int>> axis;  // axes to squeeze; unset => all size-1 axes
  DMLC_DECLARE_PARAMETER(SqueezeParam) {
    DMLC_DECLARE_FIELD(axis)
      .set_default(dmlc::optional<mxnet::Tuple<int>>())
      .describe("Selects a subset of the single-dimensional entries in the shape."
                " If an axis is selected with shape entry greater than one, an error is raised.");
  }
};
// Given a shape in which the dimensions to drop have been marked with -1,
// move all the -1 entries to the end of the shape array
// while keeping the relative order of the remaining values.
// Returns the number of dimensions left after moving the -1s to the end.
// Compacts a shape in place: every dimension not marked -1 is shifted to the
// front (order preserved); returns how many dimensions remain.
inline size_t SqueezeShapeHelper(mxnet::TShape* shape) {
  CHECK(shape != nullptr);
  size_t removed = 0;  // number of -1 markers encountered so far
  const int ndim = shape->ndim();
  for (int dim = 0; dim < ndim; ++dim) {
    if ((*shape)[dim] != -1) {
      // Slide this kept dimension left over the -1 slots already passed.
      std::swap((*shape)[dim], (*shape)[dim - removed]);
    } else {
      ++removed;
    }
  }
  return ndim - removed;
}
// Shape inference for `squeeze`: removes the requested (or all) size-1
// dimensions.  Dims to drop are marked with -1, then compacted by
// SqueezeShapeHelper.
inline bool SqueezeShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector *in_attrs,
                         mxnet::ShapeVector *out_attrs) {
  const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = in_attrs->at(0);
  const int dndim = dshape.ndim();
  if (!shape_is_known(dshape)) return false;  // defer until input shape is known
  mxnet::TShape oshape = dshape;
  if (param.axis.has_value()) {
    // preprocess axis
    mxnet::Tuple<int> axes = param.axis.value();
    for (int i = 0; i < axes.ndim(); ++i) {
      if (axes[i] < 0) {
        // Normalize a negative axis into [0, dndim).
        axes[i] += dndim;
        CHECK_GE(axes[i], 0)
          << "axis " << axes[i] - dndim << " is out of bounds for array of dimension " << dndim;
      }
      CHECK_LT(axes[i], dndim)
        << "axis " << axes[i] << " is out of bounds for array of dimension " << dndim;
      CHECK_EQ(dshape[axes[i]], 1)
        << "cannot select an axis to squeeze out which has size="
        << dshape[axes[i]] << " not equal to one";
      // An entry already set to -1 means the same axis was listed twice.
      CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis";
      oshape[axes[i]] = -1;
    }
  } else {
    // No axes given: squeeze every dimension of size 1.
    for (int i = 0; i < oshape.ndim(); ++i) {
      if (oshape[i] == 1) oshape[i] = -1;
    }
  }
  size_t oshape_size = SqueezeShapeHelper(&oshape);
  if (oshape_size == 0) {  // corner case when dshape is (1, 1, 1, 1)
    oshape[0] = 1;
    oshape_size = 1;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data()+oshape_size));
  return true;
}
// Parameters shared by the depth_to_space and space_to_depth operators.
struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> {
  int block_size;  // spatial block edge length; depth changes by block_size^2
  DMLC_DECLARE_PARAMETER(DepthToSpaceParam) {
    DMLC_DECLARE_FIELD(block_size)
    // Typo fix: the description previously read "[block_size. block_size]".
    .describe("Blocks of [block_size, block_size] are moved");
  }
};
// Shape inference for depth_to_space: (N, C, H, W) -> (N, C/b^2, H*b, W*b).
inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor";
  mxnet::TShape expected_out(4, -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  int block = param.block_size;
  // CHECK_GT (not CHECK_NE) so a negative block_size is rejected here with a
  // clear message instead of silently producing a negative output shape below.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_EQ(in_shape[1] % (block * block), 0)
    << "Cannot perform Depth To Space operation on the specified tensor."
       " Dimension:1(depth dimension) should be a multiple of 'block^2'";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] / (block * block);  // depth shrinks by block^2
  // Spatial dims (2, 3) each grow by a factor of block.
  int i = 2;
  while (i < expected_out.ndim()) {
    expected_out[i] = in_shape[i] * block;
    ++i;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
// Type inference for depth_to_space: output dtype equals input dtype.
// The assignment is done both ways so a known output dtype also
// back-propagates to an unknown input dtype.
inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;  // true once the dtype is resolved
}
/*!
* \brief This function updates the value of input index from where the data element
* needs to be fetched and written out to the ith location in output tensor
* \param index_position index within offset array to get offset of given dimension
* \param dim_size size of current dimension
* \param idx output tensor index
* \param inp_index index within input tensor from where value is retrieved
* \param offset_arr array containing the linear offset of input tensor
*/
// Peels one dimension off the running index `*idx`: the coordinate along the
// current dimension (idx % dim_size) is scaled by the input stride stored at
// offset_arr[index_position] and accumulated into *inp_index; *idx is then
// reduced to idx / dim_size for processing the next dimension.
MSHADOW_XINLINE void update_index(index_t index_position, index_t dim_size, index_t *idx,
                                  index_t *inp_index, const index_t* offset_arr) {
  const index_t quotient = *idx / dim_size;
  const index_t coord = *idx - quotient * dim_size;  // == *idx % dim_size
  *inp_index += coord * offset_arr[index_position];
  *idx = quotient;
}
/*!
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
* (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped
* to the ith index of output tensor
* \param i tensor index
* \param out_data output tensor
* \param in_data input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size array containing the size of each dimension of input tensor
* \param offset_arr array containing the linear offset of input tensor
*/
template<int req>
struct depth_to_space_forward {
  // For output element i, derives the corresponding linear input index by
  // peeling output coordinates innermost-first (see update_index), then
  // writes the value honoring the request type `req`.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data,
                                  const int block, const index_t* size, const index_t* offset_arr) {
    index_t inp_index = 0, idx = i, dim_size;
    // The offset_arr positions follow the (0, 3, 4, 1, 5, 2) transpose of the
    // 6-D view documented above; dims are consumed innermost-first.
    dim_size = block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[3];
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2];
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[1] / (block * block);
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};
/*!
* \brief This function calculates the linear offset for each dimension of
* input tensor and stores them in an array, which is later used in
* performing depth_to_space operation
* \param i global thread id
* \param offset_arr array to be populated with offset values
* \param size array to be populated with size of each dimension of input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size0 size of Dim 0 of input tensor
* \param size1 size of Dim 1 of input tensor
* \param size2 size of Dim 2 of input tensor
* \param size3 size of Dim 3 of input tensor
*/
template<int req>
struct compute_offset_for_depth_to_space {
  // Launched with a single element: records the input tensor's dimension
  // sizes and the six strides of the blocked view consumed by
  // depth_to_space_forward.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
                                  const index_t size0, const index_t size1, const index_t size2,
                                  const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    // Strides are built innermost-out, starting from a unit stride.
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * size[3];
    offset_arr[3] = offset_arr[4] * size[2];
    offset_arr[2] = offset_arr[3] * size[1] / (block * block);
    offset_arr[1] = offset_arr[2] * block;
    offset_arr[0] = offset_arr[1] * block;
  }
};
// Forward pass of depth_to_space.  Uses a device workspace of 10 index_t
// values — 6 strides (offset_arr) followed by 4 dimension sizes — filled by a
// one-element kernel before the main elementwise kernel runs.
template<typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;
  // Workspace layout: [0, 6) strides, [6, 10) dimension sizes.
  mshadow::Tensor<xpu, 1, char> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // One-element launch populates the stride/size arrays on the device.
      Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch(
        s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
        in_data.shape_[2], in_data.shape_[3]);
      Kernel<depth_to_space_forward<req_type>, xpu>::Launch(
        s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
        block, size, offset_arr);
    });
  });
}
// Shape inference for space_to_depth: (N, C, H, W) -> (N, C*b^2, H/b, W/b).
inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires exactly 4D tensor";
  mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  int block = param.block_size;
  // CHECK_GT (not CHECK_NE) so a negative block_size is rejected with a clear
  // message instead of flowing into the divisions below.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[0], 0)
    << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_NE(in_shape[2], 0)
    << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  // These two error messages previously said "Depth To Space" even though
  // this is the space_to_depth operator.
  CHECK_EQ(in_shape[2] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:2(1st Space dimension) should be a multiple of 'block' ";
  CHECK_NE(in_shape[3], 0)
    << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  CHECK_EQ(in_shape[3] % block, 0)
    << "Cannot perform Space To Depth operation on the specified tensor."
       " Dimension:3(2nd space dimension) should be a multiple of 'block' ";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] * block * block;  // depth grows by block^2
  // Spatial dims (2, 3) each shrink by a factor of block.
  int i = 2;
  while (i < expected_out.ndim()) {
    expected_out[i] = in_shape[i] / block;
    ++i;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
// Type inference for space_to_depth: output dtype equals input dtype, with
// bidirectional assignment so a known output dtype also fills in the input.
inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;  // true once the dtype is resolved
}
/*!
 * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
* (0, 3, 5, 1, 2, 4) by computing linear index within input tensor to be mapped
* to the ith index of output tensor
* \param i tensor index
* \param out_data output tensor
* \param in_data input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size array containing the size of each dimension of input tensor
* \param offset_arr array containing the linear offset of input tensor
*/
template<int req>
struct space_to_depth_forward {
  // For output element i, derives the corresponding linear input index by
  // peeling output coordinates innermost-first (see update_index), then
  // writes the value honoring the request type `req`.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* out_data, const DType* in_data, const int block,
                                  const index_t* size, const index_t* offset_arr) {
    index_t inp_index = 0, idx = i, dim_size;
    // The offset_arr positions follow the (0, 3, 5, 1, 2, 4) transpose of the
    // 6-D view documented above; dims are consumed innermost-first.
    dim_size = size[3] / block;
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2] / block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[1];
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};
/*!
* \brief This function calculates the linear offset for each dimension of
* input tensor and stores them in an array, which is later used in
* performing space_to_depth operation
* \param i global thread id
* \param offset_arr array to be populated with offset values
* \param size array to be populated with size of each dimension of input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size0 size of Dim 0 of input tensor
* \param size1 size of Dim 1 of input tensor
* \param size2 size of Dim 2 of input tensor
* \param size3 size of Dim 3 of input tensor
*/
template<int req>
struct compute_offset_for_space_to_depth {
  // Launched with a single element: records the input tensor's dimension
  // sizes and the six strides of the blocked view consumed by
  // space_to_depth_forward.
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType* offset_arr, DType* size, const int block,
                                  const index_t size0, const index_t size1,
                                  const index_t size2, const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    // Strides are built innermost-out, starting from a unit stride.
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * block;
    offset_arr[3] = offset_arr[4] * size[3] / block;
    offset_arr[2] = offset_arr[3] * block;
    offset_arr[1] = offset_arr[2] * size[2] / block;
    offset_arr[0] = offset_arr[1] * size[1];
  }
};
// Forward pass of space_to_depth.  Same workspace scheme as
// DepthToSpaceOpForward: 6 strides followed by 4 dimension sizes, filled by a
// one-element kernel before the main elementwise kernel runs.
template<typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;
  // Workspace layout: [0, 6) strides, [6, 10) dimension sizes.
  mshadow::Tensor<xpu, 1, char> workspace =
    ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // One-element launch populates the stride/size arrays on the device.
      Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch(
        s, 1, offset_arr, size, block, in_data.shape_[0], in_data.shape_[1],
        in_data.shape_[2], in_data.shape_[3]);
      Kernel<space_to_depth_forward<req_type>, xpu>::Launch(
        s, out_data.Size(), out_data.dptr<DType>(), in_data.dptr<DType>(),
        block, size, offset_arr);
    });
  });
}
namespace split_enum {
// Input index of the split operator: a single data tensor.
enum SplitOpInputs {kData};
} // namespace split_enum
// Parameters for the split operator.
struct SplitParam : public dmlc::Parameter<SplitParam> {
  mxnet::TShape indices;  // split boundaries along `axis` (used when sections == 0)
  int axis;               // axis to split along; may be negative
  bool squeeze_axis;      // drop the split axis when every section has length 1
  int sections;           // number of equal sections; 0 means "use indices"
  DMLC_DECLARE_PARAMETER(SplitParam) {
    DMLC_DECLARE_FIELD(indices)
    .describe("Indices of splits. The elements should denote the boundaries of at which split"
              " is performed along the `axis`.");
    DMLC_DECLARE_FIELD(axis).set_default(1)
    .describe("Axis along which to split.");
    DMLC_DECLARE_FIELD(squeeze_axis).set_default(0)
    .describe("If true, Removes the axis with length 1 from the shapes of the output arrays."
              " **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1"
              " only along the `axis` which it is split."
              " Also `squeeze_axis` can be set to ``true``"
              " only if ``input.shape[axis] == num_outputs``.")
    DMLC_DECLARE_FIELD(sections).set_default(0)
    .describe("Number of sections if equally splitted. Default to 0 which means split by indices.");
  }
};  // struct SplitParam
// Builds boundary indices for splitting ishape[axis] into `sections`
// nearly-equal parts: the first (ishape[axis] % sections) parts receive one
// extra element.  Returns sections + 1 boundaries, starting at 0.
inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) {
  mxnet::TShape indices(sections + 1, -1);
  indices[0] = 0;
  const int64_t base = static_cast<int64_t>(ishape[axis] / sections);
  const int num_larger = ishape[axis] % sections;  // sections sized base + 1
  for (int i = 0; i < sections; ++i) {
    indices[i + 1] = (i < num_larger) ? (base + 1) * (i + 1)
                                      : indices[i] + base;
  }
  return indices;
}
// Type inference for split: every output inherits the input dtype.
inline bool SplitOpType(const nnvm::NodeAttrs& attrs,
                        std::vector<int>* in_attrs,
                        std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int dtype = (*in_attrs)[0];
  CHECK_NE(dtype, -1) << "First input must have specified type";
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  const int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim();
  // Replace any previous contents with num_outputs copies of the input dtype
  // (equivalent to the former clear() + push_back loop).
  out_attrs->assign(num_outputs, dtype);
  return true;
}
// Shape inference shared by split/split_v2 once the axis is normalized:
// assigns each output's shape from the split boundaries, then back-propagates
// a (possibly more complete) shape to the input.
inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs,
                             mxnet::ShapeVector* in_attrs,
                             mxnet::ShapeVector* out_attrs,
                             const int real_axis) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  mxnet::TShape ishape = in_attrs->at(split_enum::kData);
  // Boundaries come either from equal sections or from explicit indices.
  const mxnet::TShape indices =
    (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim();
  // Pre-compute squeezed output shape for future usage
  mxnet::TShape squeezed_dshape = dshape;
  // Shift dims after real_axis left by one, then drop the trailing entry.
  for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) {
    squeezed_dshape[d] = squeezed_dshape[d+1];
  }
  squeezed_dshape = mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim()-1]);
  // Assign shape to every output
  for (int i = 0; i < num_outputs; ++i) {
    int start = indices[i];
    // With explicit indices the last section runs to the end of the axis.
    int end = (i < num_outputs - 1) ? indices[i + 1] : ishape[real_axis];
    if (ishape[real_axis] == 0U) {
      end = start;  // zero-length axis: every section is empty
    } else {
      CHECK(start <= end)
        << "start " << start << " is not less than end " << end << "for subarray " << i;
      CHECK(end <= ishape[real_axis])
        << "end " << end << " is no less than the size of the axis " << ishape[real_axis];
    }
    dshape[real_axis] = (end - start);
    if (param.squeeze_axis) {
      CHECK_EQ(end - start, 1U) << "expected axis size of 1 but got " << end - start;
      SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape);
    } else {
      SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape);
    }
  }
  // Back-propagation: reconstruct the input shape from the output shapes.
  mxnet::TShape back_calculate_dshape = ishape;
  back_calculate_dshape[real_axis] = 0;
  for (int d = 0; d < real_axis; ++d) {
    back_calculate_dshape[d] = (*out_attrs)[0][d];
  }
  if (param.squeeze_axis) {
    back_calculate_dshape[real_axis] = num_outputs;
  } else {
    // Split-axis length is the sum of all section lengths.
    for (int i = 0; i < num_outputs; ++i) {
      back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis];
    }
  }
  for (int d = real_axis + 1; d < ishape.ndim(); ++d) {
    if (param.squeeze_axis) {
      back_calculate_dshape[d] = (*out_attrs)[0][d - 1];  // outputs lost one dim
    } else {
      back_calculate_dshape[d] = (*out_attrs)[0][d];
    }
  }
  SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape);
  return true;
}
// Shape inference entry point for split: validates the axis, normalizes a
// negative axis, and delegates to SplitOpShapeImpl.
inline bool SplitOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  if (!mxnet::ndim_is_known(dshape)) return false;
  if (param.axis >= 0) {
    CHECK_LT(param.axis, dshape.ndim());
  } else {
    // A negative axis is valid only if it maps into [0, ndim) after wrapping.
    // The previous check, CHECK_LT(axis + ndim, ndim), merely restated
    // axis < 0 and never rejected out-of-range values such as -ndim - 1.
    CHECK_GE(param.axis + dshape.ndim(), 0);
  }
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += dshape.ndim();
  }
  return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis);
}
struct SplitKernel {
  /*!
   * \brief Map function for forward split_v2 operator
   * \param i global thread id
   * \param in_data ptr to input buffer
   * \param out_data ptr to ptr of outputs buffer
   * \param indices ptr to indices buffer
   * \param num_sections # of sections after split
   * \param axis_size size of axis to be splitted on
   * \param trailing_size step size within the data buffer of the axis to be splitted on
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  const DType *in_data, DType** out_data, const size_t* indices,
                                  const size_t num_sections, const size_t axis_size,
                                  const size_t trailing_size) {
    // Coordinate of element i along the split axis.
    size_t idx = i / trailing_size % axis_size;
    // Linear scan for the last section whose start boundary is <= idx.
    size_t target = 0;
    for (size_t section = 0;
         section < num_sections && indices[section] <= idx;
         target = section++) {}
    DType* target_data = out_data[target];
    // Decompose i into (head, mid, tail) and recompose within the section.
    const size_t mid_idx = idx - indices[target];
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    const size_t section_size = indices[target + 1] - indices[target];
    const size_t target_idx =
      head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    target_data[target_idx] = in_data[i];
  }
};
struct ConcatenateKernel {
  /*!
   * \brief Map function for backward split_v2 operator
   * \param i global thread id
   * \param out_grad ptr to ptr of out grads buffer
   * \param in_grad ptr to input grad buffer
   * \param indices ptr to indices buffer
   * \param num_sections # of sections after split
   * \param axis_size size of axis to be splitted on
   * \param trailing_size step size within the data buffer of the axis to be splitted on
   */
  template<typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  DType** out_grad, DType* in_grad, const size_t* indices,
                                  const size_t num_sections, const size_t axis_size,
                                  const size_t trailing_size) {
    // Coordinate of element i along the split axis.
    size_t idx = i / trailing_size % axis_size;
    // Linear scan for the last section whose start boundary is <= idx.
    size_t src = 0;
    for (size_t section = 0;
         section < num_sections && indices[section] <= idx;
         src = section++) {}
    DType* src_grad = out_grad[src];
    // Decompose i into (head, mid, tail) and read from that section's slot.
    const size_t mid_idx = idx - indices[src];
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    const size_t section_size = indices[src + 1] - indices[src];
    const size_t src_idx =
      head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    in_grad[i] = src_grad[src_idx];
  }
};
// Forward implementation shared by split/split_v2: scatters each input
// element into its target output section with a single elementwise kernel.
// Device workspace layout: section boundary indices (size_t) followed by the
// output data pointers (DType*), both copied host -> device before launch.
template<typename xpu>
inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs,
                               const OpContext& ctx,
                               const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs,
                               const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  const TBlob& input_data = inputs[split_enum::kData];
  CHECK_LT(real_axis, input_data.ndim());
  size_t mid = input_data.shape_[real_axis];
  // Product of dims after the split axis; the product of the dims before it
  // (formerly computed as `leading`) was never used by the kernel, so the
  // dead computation has been removed.
  size_t trailing = 1;
  for (int i = real_axis + 1; i < input_data.ndim(); ++i) {
    trailing *= input_data.shape_[i];
  }
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_data.shape_;
  const mxnet::TShape split_pts =
    (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    // Explicit indices omit the final boundary; append the axis length.
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, {
    std::vector<DType*> output_data;
    for (const TBlob& data : outputs) {
      output_data.push_back(data.dptr<DType>());
    }
    workspace_size += output_data.size() * sizeof(DType*);
    Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
      reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
      reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
      Shape1(output_data.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    Kernel<SplitKernel, xpu>::Launch(
      s, input_data.Size(), input_data.dptr<DType>(), ptrs_xpu_tensor.dptr_,
      indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}
// Forward entry point for split: validates input/output counts, normalizes a
// negative axis, and delegates to SplitOpForwardImpl.
template<typename xpu>
inline void SplitOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim());
  const TBlob& input_data = inputs[split_enum::kData];
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += input_data.ndim();  // wrap negative axis
  }
  SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}
// Backward implementation for split: gathers each element of the input
// gradient from the matching section's output gradient (the inverse of
// SplitOpForwardImpl), using the same device workspace layout of boundary
// indices followed by gradient pointers.
template<typename xpu>
inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs,
                                const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu> *s = ctx.get_stream<xpu>();
  TBlob input_grad = outputs[split_enum::kData];
  // NOTE(review): `leading` is computed below but never used afterwards; the
  // kernel launch only needs `mid` and `trailing`.
  size_t leading = 1, trailing = 1;
  CHECK_LT(real_axis, input_grad.ndim());
  size_t mid = input_grad.shape_[real_axis];
  for (int i = 0; i < real_axis; ++i) {
    leading *= input_grad.shape_[i];
  }
  for (int i = real_axis + 1; i < input_grad.ndim(); ++i) {
    trailing *= input_grad.shape_[i];
  }
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_grad.shape_;
  const mxnet::TShape split_pts =
    (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    // Explicit indices omit the final boundary; append the axis length.
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, {
    std::vector<DType*> out_grads;
    for (const TBlob& output_grad : inputs) {
      out_grads.push_back(output_grad.dptr<DType>());
    }
    workspace_size += out_grads.size() * sizeof(DType*);
    Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(
      reinterpret_cast<size_t*>(workspace.dptr_), Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
      reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
      Shape1(inputs.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    Kernel<ConcatenateKernel, xpu>::Launch(
      s, input_grad.Size(), ptrs_xpu_tensor.dptr_, input_grad.dptr<DType>(),
      indices_xpu_tensor.dptr_, indices.size() - 1, mid, trailing);
  });
}
// Backward entry point for split: checks that one output gradient was
// provided per split section, normalizes the axis, and delegates to
// SplitOpBackwardImpl.
template<typename xpu>
inline void SplitOpBackward(const nnvm::NodeAttrs& attrs,
                            const OpContext& ctx,
                            const std::vector<TBlob>& inputs,
                            const std::vector<OpReqType>& req,
                            const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  // Error message typo fixed ("mush match" -> "must match").
  CHECK_EQ(inputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim())
    << "out grad vector size must match the output size";
  CHECK_EQ(outputs.size(), 1U);
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += outputs[split_enum::kData].ndim();  // wrap negative axis
  }
  SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}
// Number of outputs produced by split: `sections` when splitting equally,
// otherwise one output per entry in `indices`.
inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) {
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  if (param.sections > 0) {
    return param.sections;
  }
  return param.indices.ndim();
}
} // namespace op
} // namespace mxnet
namespace std {
// Hash specializations so the op parameter structs can be used as keys in
// unordered containers.  The call operators are now const-qualified: the
// C++ Hash named requirement calls the hasher on a const object, so a
// non-const operator() fails to compile in e.g. std::unordered_map.
template<>
struct hash<mxnet::op::TransposeParam> {
  size_t operator()(const mxnet::op::TransposeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axes);
    return ret;
  }
};
template<>
struct hash<mxnet::op::ReshapeParam> {
  size_t operator()(const mxnet::op::ReshapeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.target_shape);
    ret = dmlc::HashCombine(ret, val.keep_highest);
    ret = dmlc::HashCombine(ret, val.shape);
    ret = dmlc::HashCombine(ret, val.reverse);
    return ret;
  }
};
template<>
struct hash<mxnet::op::ExpandDimParam> {
  size_t operator()(const mxnet::op::ExpandDimParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    return ret;
  }
};
}  // namespace std
#endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
|
GrB_Matrix_exportSize.c | //------------------------------------------------------------------------------
// GrB_Matrix_exportSize: determine sizes of arrays for GrB_Matrix_export
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB_transpose.h"
#define GB_FREE_ALL ;
GrB_Info GrB_Matrix_exportSize  // determine sizes of user arrays for export
(
    GrB_Index *Ap_len,          // # of entries required for Ap (not # of bytes)
    GrB_Index *Ai_len,          // # of entries required for Ai (not # of bytes)
    GrB_Index *Ax_len,          // # of entries required for Ax (not # of bytes)
    GrB_Format format,          // export format
    GrB_Matrix A                // matrix to export
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GrB_Matrix_exportSize (&Ap_len, &Ai_len, &Ax_len, format, A)") ;
    GB_BURBLE_START ("GrB_Matrix_exportSize") ;
    GB_RETURN_IF_NULL_OR_FAULTY (A) ;
    GB_RETURN_IF_NULL (Ap_len) ;
    GB_RETURN_IF_NULL (Ai_len) ;
    GB_RETURN_IF_NULL (Ax_len) ;
    GrB_Info info ;
    GrB_Index nvals ;
    // Ax always needs one entry per stored value, regardless of format.
    GB_OK (GB_nvals (&nvals, A, Context)) ;
    (*Ax_len) = nvals ;

    //--------------------------------------------------------------------------
    // determine the sizes of Ap and Ai for each format
    //--------------------------------------------------------------------------

    switch (format)
    {
        case GrB_CSR_FORMAT :
            // CSR: one row pointer per row plus the terminator; Ai holds
            // one column index per entry.
            (*Ap_len) = GB_NROWS (A) + 1 ;
            (*Ai_len) = nvals ;
            break ;

        case GrB_CSC_FORMAT :
            // CSC: one column pointer per column plus the terminator.
            (*Ap_len) = GB_NCOLS (A) + 1 ;
            (*Ai_len) = nvals ;
            break ;

        // case GrB_DENSE_ROW_FORMAT :
        // case GrB_DENSE_COL_FORMAT :
        // (*Ap_len) = 0 ;
        // (*Ai_len) = 0 ;
        // if (!GB_is_dense (A))
        // {
        // // A must dense or full
        // return (GrB_INVALID_VALUE) ;
        // }
        // break ;

        case GrB_COO_FORMAT :
            // COO (tuples): one row index and one column index per entry.
            (*Ap_len) = nvals ;
            (*Ai_len) = nvals ;
            break ;

        default :
            // unknown format
            return (GrB_INVALID_VALUE) ;
    }

    // make the computed sizes visible to all threads before returning
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
GB_unop__identity_int32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int32_fp32
// op(A') function: GB_unop_tran__identity_int32_fp32
// C type: int32_t
// A type: float
// cast: int32_t cij = GB_cast_to_int32_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = GB_cast_to_int32_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with a float -> int32_t typecast to every entry
// of A.  NOTE(review): this file is auto-generated (see header); any lasting
// change belongs in the Generator/ template, not here.
GrB_Info GB_unop_apply__identity_int32_fp32
(
    int32_t *Cx,                    // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // pure identity with matching types: one parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            float aij = Ax [p] ;
            int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting float values to int32_t while
// applying the identity operator. All of the work is done by the shared
// transpose template, specialized via the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_int32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,       // workspace for the parallel transpose
    const int64_t *GB_RESTRICT A_slice,     // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // the template applies GB_CAST_OP to each entry while transposing
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_uint16_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_fp64)
// op(A') function: GB (_unop_tran__identity_uint16_fp64)
// C type: uint16_t
// A type: double
// cast: uint16_t cij = GB_cast_to_uint16_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator, casting double to
// uint16_t. Handles the dense case (Ab == NULL) and the bitmap case.
GrB_Info GB (_unop_apply__identity_uint16_fp64)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap, NULL otherwise
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case: only positions with Ab [k] set hold entries
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                // Cx [k] = op (cast (Ax [k]))
                GB_CAST_OP (k, k) ;
            }
        }
    }
    else
    {
        // dense case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            GB_CAST_OP (k, k) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting double values to uint16_t while
// applying the identity operator. All of the work is done by the shared
// transpose template, specialized via the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint16_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // workspace for the parallel transpose
    const int64_t *restrict A_slice,    // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // the template applies GB_CAST_OP to each entry while transposing
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
eavlScatterOp.h | // Copyright 2010-2013 UT-Battelle, LLC. See LICENSE.txt for more information.
#ifndef EAVL_SCATTER_OP_H
#define EAVL_SCATTER_OP_H
#include "eavlCUDA.h"
#include "eavlDataSet.h"
#include "eavlArray.h"
#include "eavlOpDispatch.h"
#include "eavlOperation.h"
#include "eavlException.h"
#include <time.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#ifndef DOXYGEN
struct eavlScatterOp_CPU
{
    static inline eavlArray::Location location() { return eavlArray::HOST; }
    // Host scatter: for each sparse position s, copy inputs[s] into outputs
    // at the dense position named by the index array.
    template <class F, class IN, class OUT, class INDEX>
    static void call(int nitems, int,
                     const IN inputs, OUT outputs,
                     INDEX indices, F&)
    {
        int *idxmap = get<0>(indices).array;
#pragma omp parallel for
        for (int s = 0; s < nitems; ++s)
        {
            const int d = idxmap[get<0>(indices).indexer.index(s)];
            // CopyFrom is used instead of operator= because assignment is
            // ambiguous with only one input and one output array (no functor
            // to force a cast to a known type).
            collect(d, outputs).CopyFrom(collect(s, inputs));
        }
    }
};
#if defined __CUDACC__
// Device scatter kernel: each thread walks the sparse indices with a
// grid-stride loop (stride = blockDim.x * gridDim.x), copying inputs[sparse]
// into outputs at the dense position named by the index array.
template <class IN, class OUT, class INDEX>
__global__ void
eavlScatterOp_kernel(int nitems,
                     const IN inputs, OUT outputs,
                     INDEX indices)
{
    int *denseindices = get<0>(indices).array;
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    for (int sparseindex = threadID; sparseindex < nitems; sparseindex += numThreads)
    {
        int denseindex = denseindices[get<0>(indices).indexer.index(sparseindex)];
        // can't use operator= because it's ambiguous when only
        // one input and one output array (without a functor that
        // would force a cast to a known type situation).
        collect(denseindex, outputs).CopyFrom(collect(sparseindex, inputs));
    }
}
struct eavlScatterOp_GPU
{
    static inline eavlArray::Location location() { return eavlArray::DEVICE; }
    // Launch the scatter kernel with a fixed configuration of 32 blocks of
    // 256 threads; the grid-stride loop in the kernel covers any nitems.
    template <class F, class IN, class OUT, class INDEX>
    static void call(int nitems, int,
                     const IN inputs, OUT outputs,
                     INDEX indices, F&)
    {
        int numThreads = 256;
        dim3 threads(numThreads, 1, 1);
        dim3 blocks (32, 1, 1);
        eavlScatterOp_kernel<<< blocks, threads >>>(nitems,
                                                    inputs, outputs,
                                                    indices);
        CUDA_CHECK_ERROR();
    }
};
#endif
#endif
#include "eavlExplicitConnectivity.h"
// ****************************************************************************
// Class: eavlScatterOp
//
// Purpose:
/// A simple scatter operation on a single input and output array; copies
/// the values specified by the indices array from the source array to
/// the destination array.
/// Example:
/// output array before operation: [0 0 0 0 0]
/// input : [8 5 9]
/// indexes : [2 1 4]
/// output array after operation : [0 5 8 0 9]
//
// Programmer: Jeremy Meredith
// Creation: August 1, 2013
//
// Modifications:
// Matt Larsen- 2/5/2014 (used eavlGatherOp as a template to create op)
// Matt Larsen- 7/10/2014 Added support for operating on subset of input
// ****************************************************************************
// Scatter operation: copies values from the input array into the positions of
// the output array named by the indices array. An optional item count limits
// the operation to a leading subset of the input.
template <class I, class O, class INDEX>
class eavlScatterOp : public eavlOperation
{
  protected:
    DummyFunctor functor;
    I inputs;
    O outputs;
    INDEX indices;
    int nitems;
    // Number of items to process: the explicit count when one was supplied
    // (and positive), otherwise the full length of the first input array.
    int ItemCount()
    {
        return (nitems > 0) ? nitems : inputs.first.length();
    }
  public:
    eavlScatterOp(I i, O o, INDEX ind)
        : inputs(i), outputs(o), indices(ind), nitems(-1)
    {
    }
    eavlScatterOp(I i, O o, INDEX ind, int itemsToProcess)
        : inputs(i), outputs(o), indices(ind), nitems(itemsToProcess)
    {
    }
    virtual void GoCPU()
    {
        int dummy;
        eavlOpDispatch<eavlScatterOp_CPU>(ItemCount(), dummy, inputs,
                                          outputs, indices, functor);
    }
    virtual void GoGPU()
    {
#ifdef HAVE_CUDA
        int dummy;
        eavlOpDispatch<eavlScatterOp_GPU>(ItemCount(), dummy, inputs,
                                          outputs, indices, functor);
#else
        THROW(eavlException,"Executing GPU code without compiling under CUDA compiler.");
#endif
    }
};
// helper function for type deduction
// Construct a scatter op over the full input, deducing the template types.
template <class I, class O, class INDEX>
eavlScatterOp<I,O,INDEX> *new_eavlScatterOp(I in, O out, INDEX idx)
{
    return new eavlScatterOp<I,O,INDEX>(in, out, idx);
}
// Construct a scatter op over the first itemsToProcess inputs, deducing the
// template types.
template <class I, class O, class INDEX>
eavlScatterOp<I,O,INDEX> *new_eavlScatterOp(I in, O out, INDEX idx, int itemsToProcess)
{
    return new eavlScatterOp<I,O,INDEX>(in, out, idx, itemsToProcess);
}
#endif
|
GB_binop__lor_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__lor_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__lor_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_fp32)
// A*D function (colscale): GB (_AxD__lor_fp32)
// D*A function (rowscale): GB (_DxB__lor_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_fp32)
// C=scalar+B GB (_bind1st__lor_fp32)
// C=scalar+B' GB (_bind1st_tran__lor_fp32)
// C=A+scalar GB (_bind2nd__lor_fp32)
// C=A'+scalar GB (_bind2nd_tran__lor_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_FP32 || GxB_NO_LOR_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled: dense C += A+B requires the op to be one of the arithmetic
// operators listed below; LOR does not qualify, so this kernel is compiled
// out and GB ((none)) is never defined.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the LOR operator, where all three matrices are dense.
GrB_Info GB (_Cdense_ewise3_noaccum__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // the template computes Cx [p] = Ax [p] "+" Bx [p] via GB_BINOP (LOR)
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B with the LOR operator: accumulate a sparse matrix into a dense C.
GrB_Info GB (_Cdense_accumB__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        // subassign method 23: C += B where C is dense
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b with the LOR operator: accumulate a scalar into a dense matrix C.
GrB_Info GB (_Cdense_accumb__lor_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // the scalar b, passed as untyped bytes
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        // subassign method 22: C += b where C is dense and b is a scalar
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // FIX: removed an unreachable duplicate "return (GrB_SUCCESS) ;" that
    // followed the block above; the return inside the block always executes.
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, using LOR.
GrB_Info GB (_AxD__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // only the values of C are computed here; its pattern comes from A
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, using LOR.
GrB_Info GB (_DxB__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // only the values of C are computed here; its pattern comes from B
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the LOR operator on fp32 values.
GrB_Info GB (_AaddB__lor_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,         // optional mask
    const bool Mask_struct,     // if true, use the mask structurally
    const bool Mask_comp,       // if true, complement the mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // workspaces used to slice M, A, and B; released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the LOR operator.
GrB_Info GB (_AemultB_01__lor_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,         // optional mask
    const bool Mask_struct,     // if true, use the mask structurally
    const bool Mask_comp,       // if true, complement the mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full, with the LOR operator.
GrB_Info GB (_AemultB_02__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,         // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,          // if true, compute z = fmult (b, a)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
#else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (LOR is commutative, so GB_BINOP_FLIP is 0 and this branch is taken.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
#endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full, with the LOR operator.
GrB_Info GB (_AemultB_03__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap, with the
// LOR operator.
GrB_Info GB (_AemultB_bitmap__lor_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,         // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the LOR operator with the scalar bound to the
// first argument.
GrB_Info GB (_bind1st__lor_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap, NULL otherwise
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in B (all of them when Bb is NULL)
        if (GBB (Bb, k))
        {
            GB_GETB (bij, Bx, k) ;
            // Cx [k] = ((x != 0) || (bij != 0))
            GB_BINOP (GB_CX (k), x, bij, 0, 0) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the LOR operator with the scalar bound to the
// second argument.
GrB_Info GB (_bind2nd__lor_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap, NULL otherwise
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in A (all of them when Ab is NULL)
        if (GBB (Ab, k))
        {
            GB_GETA (aij, Ax, k) ;
            // Cx [k] = ((aij != 0) || (y != 0))
            GB_BINOP (GB_CX (k), aij, y, 0, 0) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply LOR with the scalar bound to the
// first argument; the GB_CAST_OP macro redefined above does the per-entry work.
GrB_Info GB (_bind1st_tran__lor_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE for any code compiled after this kernel
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// C = op (A', y): transpose A and apply LOR with the scalar bound to the
// second argument; the GB_CAST_OP macro redefined above does the per-entry work.
GrB_Info GB (_bind2nd_tran__lor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
Threads1xxx.c | #include <stdio.h>
#include <omp.h>
int thdLim =1024;
//__attribute__((amdgpu_flat_work_group_size(1024, 1024)))
// Test program: offload a teams/distribute region, have each team fill its
// slice of Res with the global index and record the compute-unit id of each
// iteration, then verify Res on the host.
int main()
{
    int numTeams = 128;
    int N = 12;
    int NN = 1024;
    // NOTE(review): these VLAs total roughly 1 MB on the stack; confirm the
    // host stack limit is large enough.
    int CUs[numTeams*NN];
    int lims[N], threads[N], Res[numTeams*NN];
    int i;
    for (i = 0; i < N; i++) lims[i] = threads[i] = -1;
    // BUG FIX: initialize all numTeams*NN elements of Res and CUs (the old
    // bound N*NN covered only the first 12K of 128K entries, so the check
    // below could read uninitialized memory if the target region misbehaved).
    for (i = 0; i < numTeams*NN; i++) { Res[i] = -1; CUs[i] = -2; }
#pragma omp target teams num_teams(numTeams) thread_limit(1024) map (tofrom: CUs, lims, threads, Res)
#pragma omp distribute
    for (int j = 0; j < numTeams; j++) {
        if (j < N) {
            // record team count / thread count as seen by the first N teams
            lims[j] = omp_get_num_teams();
            threads[j] = omp_get_num_threads();
        }
#pragma omp parallel for
        for (int i = j*NN; i < (j+1)*NN; i++) {
            Res[i] = i;
            CUs[i] = omp_ext_get_smid();   // AMD extension: compute-unit id
            if (i == 1) printf("i= %d\n", i);
        }
        if (j == 1) printf("Res 1 CUs 1 %d %d\n", Res[1], CUs[1]);
    }
    for (i = 0; i < N; i++) {
        printf("i=%d lims[%d] threads[%d]\n", i, lims[i], threads[i]);
    }
    for (i = 0; i < numTeams*NN; i++) {
        if (Res[i] != i) {
            printf("Failed %d %d\n", i, Res[i]);
            // BUG FIX: guard the look-ahead print so it cannot read one past
            // the end of Res when the last element fails.
            if (i + 1 < numTeams*NN)
                printf("Failed %d %d\n", i+1, Res[i+1]);
            return 1;
        }
    }
    //for (i=0; i <numTeams*NN; i++)
    // printf("CUs %d\n",CUs[i]);
    return 0;
}
|
rt_dlacpy.c | #include "runtime.h"
#include "core_blas-gpu.h"
#ifdef PLASMA_WITH_SMP
// OmpSs SMP task: copy matrix A (or its upper/lower part, per uplo) into B
// using LAPACK. The in/out clauses cover the column-major footprint
// lda*(n-1)+m (resp. ldb*(n-1)+m) of each tile.
#pragma omp target device (smp) copy_deps
#pragma omp task in([lda*(n-1)+m]A) out([ldb*(n-1)+m]B) label(dlacpy_smp)
void CORE_dlacpy_ompss(PLASMA_enum uplo, int m, int n, double *A, int lda, double *B, int ldb)
{
    LAPACKE_dlacpy_work(LAPACK_COL_MAJOR, lapack_const(uplo),
                        m, n, A, lda, B, ldb);
}
#endif
/*
*/
#ifdef PLASMA_WITH_CUDA_PURE
// OmpSs CUDA task: copy tile A into B on the device, using the cuBLAS handle
// and stream provided by the Nanos runtime.
#pragma omp target device (cuda) copy_deps
#pragma omp task in([lda*(n-1)+m]A) out([ldb*(n-1)+m]B) label(dlacpy_cuda)
void CORE_dlacpy_ompss(PLASMA_enum uplo, int m, int n, double *A, int lda, double *B, int ldb)
{
    cublasHandle_t handle = nanos_get_cublas_handle();
    cudaStream_t stream = nanos_get_kernel_execution_stream();
    cublasSetStream(handle, stream);
    //printf("=========> cublasDlacpy m %d n %d lda %d ldb %d sizeA %d sizeB %d\n\n", m, n, lda, ldb, lda*(n-1)+m, ldb*(n-1)+m);
    // NOTE(review): cublasDlacpy is called with the stream as its first
    // argument even though the handle was just bound to that stream above —
    // confirm this matches the expected signature of the (custom) routine.
    cublasDlacpy(stream, uplo, m, n, A, lda, B, ldb );
    //LAPACKE_dlacpy_work(LAPACK_COL_MAJOR, lapack_const(uplo),
    //m, n, A, lda, B, ldb);
}
#endif
// Dispatch a tile-copy (dlacpy) task to whichever runtime PLASMA is using:
// QUARK gets a queued task, OmpSs calls the task function directly. Other
// runtime values are silently ignored, as before.
void RT_CORE_dlacpy(Quark *quark, Quark_Task_Flags *task_flags,
                    PLASMA_enum uplo, int m, int n, int nb,
                    const double *A, int lda,
                    double *B, int ldb)
{
    plasma_context_t *ctx = plasma_context_self();
    if (ctx->runtime == PLASMA_OMPSS) {
        CORE_dlacpy_ompss(uplo, m, n, A, lda, B, ldb);
    }
    else if (ctx->runtime == PLASMA_QUARK) {
        QUARK_CORE_dlacpy(quark, task_flags, uplo,
                          m, n, nb,
                          A, lda,
                          B, ldb);
    }
}
|
mca-api-openmp.c | /* A simple program to test MCA API integration with OpenMP 3 tasks
instead of hard-core pthreads
Ronan.Keryell@wild-systems.com
*/
#include "mca-api-common/smecy-mca-common.h"
enum {
PRODUCER_DOMAIN = 42,
PRODUCER_NODE = 3,
CONSUMER_DOMAIN = 42,
CONSUMER_NODE = 7,
SEND_MSG_PORT = 11,
RECEIVE_MSG_PORT = 13,
SEND_PKT_PORT = 41,
RECEIVE_PKT_PORT = 43,
N_MSG = 10
};
#include "mca-api-common/producer.h"
#include "mca-api-common/consumer.h"
// Run the MCA-API producer/consumer pair (from the headers included above)
// as two OpenMP tasks instead of explicit pthreads; they communicate over
// the MCAPI ports declared in the enum above.
int main() {
    /* Increase the level to trace down MCA things */
    mcapi_set_debug_level(0);
    /* Launch 2 threads */
#pragma omp parallel num_threads(2)
    {
        /* But only one can run now. Useless to wait at the end of single
           section since there is the join at the end of the parallel
           section */
#pragma omp single nowait
        {
            /* Execute producer() in a new task */
#pragma omp task
            producer(NULL);
            /* We can keep consumer() in the current task, so no need for the
               following pragma */
            //#pragma omp task
            consumer(NULL);
        }
    }
    return EXIT_SUCCESS;
}
|
ejercicio7.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
//#define PRINTF_ALL
main(int argc, char **argv)
{
if(argc < 4) {
fprintf(stderr,"Falta fila y columna\n");
exit(-1);
}
omp_set_num_threads(16);
omp_set_dynamic(0);
struct timespec cgt1,cgt2; double ncgt; //para tiempo de ejecución
int i,k, n = atoi(argv[1]);
int kind = atoi(argv[2]);
int chunk=atoi(argv[3]);
omp_set_schedule(kind, chunk);
double *v1,*v2;
v1 = (double*)malloc(n*sizeof(double));
v2 = (double*)malloc(n*sizeof(double));
double sumalocal=0;
double **m;
m = (double**)malloc(n*sizeof(double*));
//Inicializo v1 y reservo el espacio para la matriz
#pragma omp parallel for
for(i=0;i<n;++i){
m[i]=(double*)malloc(n*sizeof(double));
v1[i]=2;
}
//Inicializo la matriz
#pragma omp parallel private(k)
{
#pragma omp for
for (i=0; i<n; i++)
for(k=0;k<n;++k)
if(k<i+1)
m[i][k]=2;
else
m[i][k]=0;
}
//Calculo la multiplicacion de la matriz por el vector y obtengo el tiempo
clock_gettime(CLOCK_REALTIME,&cgt1);
#pragma omp parallel private(k,sumalocal)
{
sumalocal=0;
#pragma omp for
for (i=0; i<n; i++){
for(k=0;k<i+1;++k)
sumalocal+=m[i][k]*v1[k];
#pragma omp critical
{
v2[i]=sumalocal;
sumalocal=0;
}
}
}
clock_gettime(CLOCK_REALTIME,&cgt2);
ncgt=(double) (cgt2.tv_sec-cgt1.tv_sec)+(double) ((cgt2.tv_nsec-cgt1.tv_nsec)/(1.e+9));
//Imprimo los resultados
#ifdef PRINTF_ALL
printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\n",ncgt,n);
for (i=0; i<n; i++){
for(k=0;k<i+1;++k){
printf("/ m[%d][%d]*V1[%d] (%8.6f*%8.6f)+ ",i,k,k,m[i][k],v1[k]);
}
printf("=v2[%i] =%8.6f/ \n",i,v2[i]);
}
#else
printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\t/ m[0][0]*V1[0]=V2[0](%8.6f+%8.6f=%8.6f) // m[%d][%d]*V1[%d]=V2[%d](%8.6f+%8.6f=%8.6f) /\n", ncgt,n,m[0][0],v1[0],v2[0],n-1,n-1,n-1,n-1,m[n-1][n-1],v1[n-1],v2[n-1]);
#endif
free(v1); // libera el espacio reservado para v1
free(v2); // libera el espacio reservado para v2
for(i=0;i<n;++i){
free(m[i]);
}
free(m); // libera el espacio reservado para m
}
|
convdw5x5s2_pack4_neon.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
// Depthwise 5x5 convolution with stride 2 on pack4 float data (4 floats per
// pixel, one SIMD lane group per channel group), implemented with ARM NEON
// intrinsics.
//
// bottom_blob : input,  w x h x group, pack4 layout
// top_blob    : output, outw x outh x group, pack4 layout (pre-allocated)
// kernel      : per-group 5x5 weights, 25 float32x4 taps per group
// _bias       : optional per-group bias (may be empty; treated as zero)
// opt         : threading options (opt.num_threads drives the OpenMP loop)
static void convdw5x5s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // depthwise: one input channel per output channel
    const int group = bottom_blob.c;

    // After one output row the row pointers have advanced by 2*outw pixels
    // (stride 2); skip the remainder of the current row plus one full row to
    // land on the next row pair.  The *4 accounts for the pack4 layout.
    const int tailstep = (w - 2 * outw + w) * 4;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // per-group bias vector (4 packed lanes), or zero when no bias given
        float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);

        const float* k0 = kernel.row(g);

        float* outptr0 = out;

        const Mat img0 = bottom_blob.channel(g);

        // five consecutive input rows feed one output row (5x5 kernel)
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // main loop: 4 output pixels per iteration.  Adjacent outputs are
            // 2 input pixels apart (stride 2), so 11 input vectors per row
            // cover all four 5-tap windows.
            for (; j + 3 < outw; j += 4)
            {
                float32x4_t _sum0 = _bias0;
                float32x4_t _sum1 = _bias0;
                float32x4_t _sum2 = _bias0;
                float32x4_t _sum3 = _bias0;

                // kernel row 0 applied to input row r0
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r01 = vld1q_f32(r0 + 4);
                float32x4_t _r02 = vld1q_f32(r0 + 8);
                float32x4_t _r03 = vld1q_f32(r0 + 12);
                float32x4_t _r04 = vld1q_f32(r0 + 16);
                float32x4_t _r05 = vld1q_f32(r0 + 20);
                float32x4_t _r06 = vld1q_f32(r0 + 24);
                float32x4_t _r07 = vld1q_f32(r0 + 28);
                float32x4_t _r08 = vld1q_f32(r0 + 32);
                float32x4_t _r09 = vld1q_f32(r0 + 36);
                float32x4_t _r010 = vld1q_f32(r0 + 40);

                float32x4_t _k00 = vld1q_f32(k0);
                float32x4_t _k01 = vld1q_f32(k0 + 4);
                float32x4_t _k02 = vld1q_f32(k0 + 8);
                float32x4_t _k03 = vld1q_f32(k0 + 12);
                float32x4_t _k04 = vld1q_f32(k0 + 16);
                k0 += 20;

                // _sumN starts its window at input pixel 2*N of this row
                _sum0 = vmlaq_f32(_sum0, _k00, _r00);
                _sum0 = vmlaq_f32(_sum0, _k01, _r01);
                _sum0 = vmlaq_f32(_sum0, _k02, _r02);
                _sum0 = vmlaq_f32(_sum0, _k03, _r03);
                _sum0 = vmlaq_f32(_sum0, _k04, _r04);
                _sum1 = vmlaq_f32(_sum1, _k00, _r02);
                _sum1 = vmlaq_f32(_sum1, _k01, _r03);
                _sum1 = vmlaq_f32(_sum1, _k02, _r04);
                _sum1 = vmlaq_f32(_sum1, _k03, _r05);
                _sum1 = vmlaq_f32(_sum1, _k04, _r06);
                _sum2 = vmlaq_f32(_sum2, _k00, _r04);
                _sum2 = vmlaq_f32(_sum2, _k01, _r05);
                _sum2 = vmlaq_f32(_sum2, _k02, _r06);
                _sum2 = vmlaq_f32(_sum2, _k03, _r07);
                _sum2 = vmlaq_f32(_sum2, _k04, _r08);
                _sum3 = vmlaq_f32(_sum3, _k00, _r06);
                _sum3 = vmlaq_f32(_sum3, _k01, _r07);
                _sum3 = vmlaq_f32(_sum3, _k02, _r08);
                _sum3 = vmlaq_f32(_sum3, _k03, _r09);
                _sum3 = vmlaq_f32(_sum3, _k04, _r010);

                // kernel row 1 applied to input row r1
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r11 = vld1q_f32(r1 + 4);
                float32x4_t _r12 = vld1q_f32(r1 + 8);
                float32x4_t _r13 = vld1q_f32(r1 + 12);
                float32x4_t _r14 = vld1q_f32(r1 + 16);
                float32x4_t _r15 = vld1q_f32(r1 + 20);
                float32x4_t _r16 = vld1q_f32(r1 + 24);
                float32x4_t _r17 = vld1q_f32(r1 + 28);
                float32x4_t _r18 = vld1q_f32(r1 + 32);
                float32x4_t _r19 = vld1q_f32(r1 + 36);
                float32x4_t _r110 = vld1q_f32(r1 + 40);

                float32x4_t _k10 = vld1q_f32(k0);
                float32x4_t _k11 = vld1q_f32(k0 + 4);
                float32x4_t _k12 = vld1q_f32(k0 + 8);
                float32x4_t _k13 = vld1q_f32(k0 + 12);
                float32x4_t _k14 = vld1q_f32(k0 + 16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k10, _r10);
                _sum0 = vmlaq_f32(_sum0, _k11, _r11);
                _sum0 = vmlaq_f32(_sum0, _k12, _r12);
                _sum0 = vmlaq_f32(_sum0, _k13, _r13);
                _sum0 = vmlaq_f32(_sum0, _k14, _r14);
                _sum1 = vmlaq_f32(_sum1, _k10, _r12);
                _sum1 = vmlaq_f32(_sum1, _k11, _r13);
                _sum1 = vmlaq_f32(_sum1, _k12, _r14);
                _sum1 = vmlaq_f32(_sum1, _k13, _r15);
                _sum1 = vmlaq_f32(_sum1, _k14, _r16);
                _sum2 = vmlaq_f32(_sum2, _k10, _r14);
                _sum2 = vmlaq_f32(_sum2, _k11, _r15);
                _sum2 = vmlaq_f32(_sum2, _k12, _r16);
                _sum2 = vmlaq_f32(_sum2, _k13, _r17);
                _sum2 = vmlaq_f32(_sum2, _k14, _r18);
                _sum3 = vmlaq_f32(_sum3, _k10, _r16);
                _sum3 = vmlaq_f32(_sum3, _k11, _r17);
                _sum3 = vmlaq_f32(_sum3, _k12, _r18);
                _sum3 = vmlaq_f32(_sum3, _k13, _r19);
                _sum3 = vmlaq_f32(_sum3, _k14, _r110);

                // kernel row 2 applied to input row r2
                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r21 = vld1q_f32(r2 + 4);
                float32x4_t _r22 = vld1q_f32(r2 + 8);
                float32x4_t _r23 = vld1q_f32(r2 + 12);
                float32x4_t _r24 = vld1q_f32(r2 + 16);
                float32x4_t _r25 = vld1q_f32(r2 + 20);
                float32x4_t _r26 = vld1q_f32(r2 + 24);
                float32x4_t _r27 = vld1q_f32(r2 + 28);
                float32x4_t _r28 = vld1q_f32(r2 + 32);
                float32x4_t _r29 = vld1q_f32(r2 + 36);
                float32x4_t _r210 = vld1q_f32(r2 + 40);

                float32x4_t _k20 = vld1q_f32(k0);
                float32x4_t _k21 = vld1q_f32(k0 + 4);
                float32x4_t _k22 = vld1q_f32(k0 + 8);
                float32x4_t _k23 = vld1q_f32(k0 + 12);
                float32x4_t _k24 = vld1q_f32(k0 + 16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k20, _r20);
                _sum0 = vmlaq_f32(_sum0, _k21, _r21);
                _sum0 = vmlaq_f32(_sum0, _k22, _r22);
                _sum0 = vmlaq_f32(_sum0, _k23, _r23);
                _sum0 = vmlaq_f32(_sum0, _k24, _r24);
                _sum1 = vmlaq_f32(_sum1, _k20, _r22);
                _sum1 = vmlaq_f32(_sum1, _k21, _r23);
                _sum1 = vmlaq_f32(_sum1, _k22, _r24);
                _sum1 = vmlaq_f32(_sum1, _k23, _r25);
                _sum1 = vmlaq_f32(_sum1, _k24, _r26);
                _sum2 = vmlaq_f32(_sum2, _k20, _r24);
                _sum2 = vmlaq_f32(_sum2, _k21, _r25);
                _sum2 = vmlaq_f32(_sum2, _k22, _r26);
                _sum2 = vmlaq_f32(_sum2, _k23, _r27);
                _sum2 = vmlaq_f32(_sum2, _k24, _r28);
                _sum3 = vmlaq_f32(_sum3, _k20, _r26);
                _sum3 = vmlaq_f32(_sum3, _k21, _r27);
                _sum3 = vmlaq_f32(_sum3, _k22, _r28);
                _sum3 = vmlaq_f32(_sum3, _k23, _r29);
                _sum3 = vmlaq_f32(_sum3, _k24, _r210);

                // kernel row 3 applied to input row r3
                float32x4_t _r30 = vld1q_f32(r3);
                float32x4_t _r31 = vld1q_f32(r3 + 4);
                float32x4_t _r32 = vld1q_f32(r3 + 8);
                float32x4_t _r33 = vld1q_f32(r3 + 12);
                float32x4_t _r34 = vld1q_f32(r3 + 16);
                float32x4_t _r35 = vld1q_f32(r3 + 20);
                float32x4_t _r36 = vld1q_f32(r3 + 24);
                float32x4_t _r37 = vld1q_f32(r3 + 28);
                float32x4_t _r38 = vld1q_f32(r3 + 32);
                float32x4_t _r39 = vld1q_f32(r3 + 36);
                float32x4_t _r310 = vld1q_f32(r3 + 40);

                float32x4_t _k30 = vld1q_f32(k0);
                float32x4_t _k31 = vld1q_f32(k0 + 4);
                float32x4_t _k32 = vld1q_f32(k0 + 8);
                float32x4_t _k33 = vld1q_f32(k0 + 12);
                float32x4_t _k34 = vld1q_f32(k0 + 16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k30, _r30);
                _sum0 = vmlaq_f32(_sum0, _k31, _r31);
                _sum0 = vmlaq_f32(_sum0, _k32, _r32);
                _sum0 = vmlaq_f32(_sum0, _k33, _r33);
                _sum0 = vmlaq_f32(_sum0, _k34, _r34);
                _sum1 = vmlaq_f32(_sum1, _k30, _r32);
                _sum1 = vmlaq_f32(_sum1, _k31, _r33);
                _sum1 = vmlaq_f32(_sum1, _k32, _r34);
                _sum1 = vmlaq_f32(_sum1, _k33, _r35);
                _sum1 = vmlaq_f32(_sum1, _k34, _r36);
                _sum2 = vmlaq_f32(_sum2, _k30, _r34);
                _sum2 = vmlaq_f32(_sum2, _k31, _r35);
                _sum2 = vmlaq_f32(_sum2, _k32, _r36);
                _sum2 = vmlaq_f32(_sum2, _k33, _r37);
                _sum2 = vmlaq_f32(_sum2, _k34, _r38);
                _sum3 = vmlaq_f32(_sum3, _k30, _r36);
                _sum3 = vmlaq_f32(_sum3, _k31, _r37);
                _sum3 = vmlaq_f32(_sum3, _k32, _r38);
                _sum3 = vmlaq_f32(_sum3, _k33, _r39);
                _sum3 = vmlaq_f32(_sum3, _k34, _r310);

                // kernel row 4 applied to input row r4
                float32x4_t _r40 = vld1q_f32(r4);
                float32x4_t _r41 = vld1q_f32(r4 + 4);
                float32x4_t _r42 = vld1q_f32(r4 + 8);
                float32x4_t _r43 = vld1q_f32(r4 + 12);
                float32x4_t _r44 = vld1q_f32(r4 + 16);
                float32x4_t _r45 = vld1q_f32(r4 + 20);
                float32x4_t _r46 = vld1q_f32(r4 + 24);
                float32x4_t _r47 = vld1q_f32(r4 + 28);
                float32x4_t _r48 = vld1q_f32(r4 + 32);
                float32x4_t _r49 = vld1q_f32(r4 + 36);
                float32x4_t _r410 = vld1q_f32(r4 + 40);

                float32x4_t _k40 = vld1q_f32(k0);
                float32x4_t _k41 = vld1q_f32(k0 + 4);
                float32x4_t _k42 = vld1q_f32(k0 + 8);
                float32x4_t _k43 = vld1q_f32(k0 + 12);
                float32x4_t _k44 = vld1q_f32(k0 + 16);
                k0 -= 80; // rewind to the start of this group's 5x20 weights

                _sum0 = vmlaq_f32(_sum0, _k40, _r40);
                _sum0 = vmlaq_f32(_sum0, _k41, _r41);
                _sum0 = vmlaq_f32(_sum0, _k42, _r42);
                _sum0 = vmlaq_f32(_sum0, _k43, _r43);
                _sum0 = vmlaq_f32(_sum0, _k44, _r44);
                _sum1 = vmlaq_f32(_sum1, _k40, _r42);
                _sum1 = vmlaq_f32(_sum1, _k41, _r43);
                _sum1 = vmlaq_f32(_sum1, _k42, _r44);
                _sum1 = vmlaq_f32(_sum1, _k43, _r45);
                _sum1 = vmlaq_f32(_sum1, _k44, _r46);
                _sum2 = vmlaq_f32(_sum2, _k40, _r44);
                _sum2 = vmlaq_f32(_sum2, _k41, _r45);
                _sum2 = vmlaq_f32(_sum2, _k42, _r46);
                _sum2 = vmlaq_f32(_sum2, _k43, _r47);
                _sum2 = vmlaq_f32(_sum2, _k44, _r48);
                _sum3 = vmlaq_f32(_sum3, _k40, _r46);
                _sum3 = vmlaq_f32(_sum3, _k41, _r47);
                _sum3 = vmlaq_f32(_sum3, _k42, _r48);
                _sum3 = vmlaq_f32(_sum3, _k43, _r49);
                _sum3 = vmlaq_f32(_sum3, _k44, _r410);

                // write 4 pack4 output pixels
                vst1q_f32(outptr0, _sum0);
                vst1q_f32(outptr0 + 4, _sum1);
                vst1q_f32(outptr0 + 8, _sum2);
                vst1q_f32(outptr0 + 12, _sum3);

                // 4 outputs * stride 2 = 8 input pixels consumed
                r0 += 8 * 4;
                r1 += 8 * 4;
                r2 += 8 * 4;
                r3 += 8 * 4;
                r4 += 8 * 4;
                outptr0 += 16;
            }
            // tail: 2 output pixels per iteration (same scheme, 7 input
            // vectors per row cover both 5-tap windows)
            for (; j + 1 < outw; j += 2)
            {
                float32x4_t _sum0 = _bias0;
                float32x4_t _sum1 = _bias0;

                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r01 = vld1q_f32(r0 + 4);
                float32x4_t _r02 = vld1q_f32(r0 + 8);
                float32x4_t _r03 = vld1q_f32(r0 + 12);
                float32x4_t _r04 = vld1q_f32(r0 + 16);
                float32x4_t _r05 = vld1q_f32(r0 + 20);
                float32x4_t _r06 = vld1q_f32(r0 + 24);

                float32x4_t _k00 = vld1q_f32(k0);
                float32x4_t _k01 = vld1q_f32(k0 + 4);
                float32x4_t _k02 = vld1q_f32(k0 + 8);
                float32x4_t _k03 = vld1q_f32(k0 + 12);
                float32x4_t _k04 = vld1q_f32(k0 + 16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k00, _r00);
                _sum0 = vmlaq_f32(_sum0, _k01, _r01);
                _sum0 = vmlaq_f32(_sum0, _k02, _r02);
                _sum0 = vmlaq_f32(_sum0, _k03, _r03);
                _sum0 = vmlaq_f32(_sum0, _k04, _r04);
                _sum1 = vmlaq_f32(_sum1, _k00, _r02);
                _sum1 = vmlaq_f32(_sum1, _k01, _r03);
                _sum1 = vmlaq_f32(_sum1, _k02, _r04);
                _sum1 = vmlaq_f32(_sum1, _k03, _r05);
                _sum1 = vmlaq_f32(_sum1, _k04, _r06);

                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r11 = vld1q_f32(r1 + 4);
                float32x4_t _r12 = vld1q_f32(r1 + 8);
                float32x4_t _r13 = vld1q_f32(r1 + 12);
                float32x4_t _r14 = vld1q_f32(r1 + 16);
                float32x4_t _r15 = vld1q_f32(r1 + 20);
                float32x4_t _r16 = vld1q_f32(r1 + 24);

                float32x4_t _k10 = vld1q_f32(k0);
                float32x4_t _k11 = vld1q_f32(k0 + 4);
                float32x4_t _k12 = vld1q_f32(k0 + 8);
                float32x4_t _k13 = vld1q_f32(k0 + 12);
                float32x4_t _k14 = vld1q_f32(k0 + 16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k10, _r10);
                _sum0 = vmlaq_f32(_sum0, _k11, _r11);
                _sum0 = vmlaq_f32(_sum0, _k12, _r12);
                _sum0 = vmlaq_f32(_sum0, _k13, _r13);
                _sum0 = vmlaq_f32(_sum0, _k14, _r14);
                _sum1 = vmlaq_f32(_sum1, _k10, _r12);
                _sum1 = vmlaq_f32(_sum1, _k11, _r13);
                _sum1 = vmlaq_f32(_sum1, _k12, _r14);
                _sum1 = vmlaq_f32(_sum1, _k13, _r15);
                _sum1 = vmlaq_f32(_sum1, _k14, _r16);

                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r21 = vld1q_f32(r2 + 4);
                float32x4_t _r22 = vld1q_f32(r2 + 8);
                float32x4_t _r23 = vld1q_f32(r2 + 12);
                float32x4_t _r24 = vld1q_f32(r2 + 16);
                float32x4_t _r25 = vld1q_f32(r2 + 20);
                float32x4_t _r26 = vld1q_f32(r2 + 24);

                float32x4_t _k20 = vld1q_f32(k0);
                float32x4_t _k21 = vld1q_f32(k0 + 4);
                float32x4_t _k22 = vld1q_f32(k0 + 8);
                float32x4_t _k23 = vld1q_f32(k0 + 12);
                float32x4_t _k24 = vld1q_f32(k0 + 16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k20, _r20);
                _sum0 = vmlaq_f32(_sum0, _k21, _r21);
                _sum0 = vmlaq_f32(_sum0, _k22, _r22);
                _sum0 = vmlaq_f32(_sum0, _k23, _r23);
                _sum0 = vmlaq_f32(_sum0, _k24, _r24);
                _sum1 = vmlaq_f32(_sum1, _k20, _r22);
                _sum1 = vmlaq_f32(_sum1, _k21, _r23);
                _sum1 = vmlaq_f32(_sum1, _k22, _r24);
                _sum1 = vmlaq_f32(_sum1, _k23, _r25);
                _sum1 = vmlaq_f32(_sum1, _k24, _r26);

                float32x4_t _r30 = vld1q_f32(r3);
                float32x4_t _r31 = vld1q_f32(r3 + 4);
                float32x4_t _r32 = vld1q_f32(r3 + 8);
                float32x4_t _r33 = vld1q_f32(r3 + 12);
                float32x4_t _r34 = vld1q_f32(r3 + 16);
                float32x4_t _r35 = vld1q_f32(r3 + 20);
                float32x4_t _r36 = vld1q_f32(r3 + 24);

                float32x4_t _k30 = vld1q_f32(k0);
                float32x4_t _k31 = vld1q_f32(k0 + 4);
                float32x4_t _k32 = vld1q_f32(k0 + 8);
                float32x4_t _k33 = vld1q_f32(k0 + 12);
                float32x4_t _k34 = vld1q_f32(k0 + 16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k30, _r30);
                _sum0 = vmlaq_f32(_sum0, _k31, _r31);
                _sum0 = vmlaq_f32(_sum0, _k32, _r32);
                _sum0 = vmlaq_f32(_sum0, _k33, _r33);
                _sum0 = vmlaq_f32(_sum0, _k34, _r34);
                _sum1 = vmlaq_f32(_sum1, _k30, _r32);
                _sum1 = vmlaq_f32(_sum1, _k31, _r33);
                _sum1 = vmlaq_f32(_sum1, _k32, _r34);
                _sum1 = vmlaq_f32(_sum1, _k33, _r35);
                _sum1 = vmlaq_f32(_sum1, _k34, _r36);

                float32x4_t _r40 = vld1q_f32(r4);
                float32x4_t _r41 = vld1q_f32(r4 + 4);
                float32x4_t _r42 = vld1q_f32(r4 + 8);
                float32x4_t _r43 = vld1q_f32(r4 + 12);
                float32x4_t _r44 = vld1q_f32(r4 + 16);
                float32x4_t _r45 = vld1q_f32(r4 + 20);
                float32x4_t _r46 = vld1q_f32(r4 + 24);

                float32x4_t _k40 = vld1q_f32(k0);
                float32x4_t _k41 = vld1q_f32(k0 + 4);
                float32x4_t _k42 = vld1q_f32(k0 + 8);
                float32x4_t _k43 = vld1q_f32(k0 + 12);
                float32x4_t _k44 = vld1q_f32(k0 + 16);
                k0 -= 80; // rewind weights for the next output position

                _sum0 = vmlaq_f32(_sum0, _k40, _r40);
                _sum0 = vmlaq_f32(_sum0, _k41, _r41);
                _sum0 = vmlaq_f32(_sum0, _k42, _r42);
                _sum0 = vmlaq_f32(_sum0, _k43, _r43);
                _sum0 = vmlaq_f32(_sum0, _k44, _r44);
                _sum1 = vmlaq_f32(_sum1, _k40, _r42);
                _sum1 = vmlaq_f32(_sum1, _k41, _r43);
                _sum1 = vmlaq_f32(_sum1, _k42, _r44);
                _sum1 = vmlaq_f32(_sum1, _k43, _r45);
                _sum1 = vmlaq_f32(_sum1, _k44, _r46);

                vst1q_f32(outptr0, _sum0);
                vst1q_f32(outptr0 + 4, _sum1);

                // 2 outputs * stride 2 = 4 input pixels consumed
                r0 += 4 * 4;
                r1 += 4 * 4;
                r2 += 4 * 4;
                r3 += 4 * 4;
                r4 += 4 * 4;
                outptr0 += 8;
            }
            // tail: one output pixel at a time
            for (; j < outw; j++)
            {
                float32x4_t _sum0 = _bias0;

                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r01 = vld1q_f32(r0 + 4);
                float32x4_t _r02 = vld1q_f32(r0 + 8);
                float32x4_t _r03 = vld1q_f32(r0 + 12);
                float32x4_t _r04 = vld1q_f32(r0 + 16);

                float32x4_t _k00 = vld1q_f32(k0);
                float32x4_t _k01 = vld1q_f32(k0 + 4);
                float32x4_t _k02 = vld1q_f32(k0 + 8);
                float32x4_t _k03 = vld1q_f32(k0 + 12);
                float32x4_t _k04 = vld1q_f32(k0 + 16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k00, _r00);
                _sum0 = vmlaq_f32(_sum0, _k01, _r01);
                _sum0 = vmlaq_f32(_sum0, _k02, _r02);
                _sum0 = vmlaq_f32(_sum0, _k03, _r03);
                _sum0 = vmlaq_f32(_sum0, _k04, _r04);

                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r11 = vld1q_f32(r1 + 4);
                float32x4_t _r12 = vld1q_f32(r1 + 8);
                float32x4_t _r13 = vld1q_f32(r1 + 12);
                float32x4_t _r14 = vld1q_f32(r1 + 16);

                float32x4_t _k10 = vld1q_f32(k0);
                float32x4_t _k11 = vld1q_f32(k0 + 4);
                float32x4_t _k12 = vld1q_f32(k0 + 8);
                float32x4_t _k13 = vld1q_f32(k0 + 12);
                float32x4_t _k14 = vld1q_f32(k0 + 16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k10, _r10);
                _sum0 = vmlaq_f32(_sum0, _k11, _r11);
                _sum0 = vmlaq_f32(_sum0, _k12, _r12);
                _sum0 = vmlaq_f32(_sum0, _k13, _r13);
                _sum0 = vmlaq_f32(_sum0, _k14, _r14);

                float32x4_t _r20 = vld1q_f32(r2);
                float32x4_t _r21 = vld1q_f32(r2 + 4);
                float32x4_t _r22 = vld1q_f32(r2 + 8);
                float32x4_t _r23 = vld1q_f32(r2 + 12);
                float32x4_t _r24 = vld1q_f32(r2 + 16);

                float32x4_t _k20 = vld1q_f32(k0);
                float32x4_t _k21 = vld1q_f32(k0 + 4);
                float32x4_t _k22 = vld1q_f32(k0 + 8);
                float32x4_t _k23 = vld1q_f32(k0 + 12);
                float32x4_t _k24 = vld1q_f32(k0 + 16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k20, _r20);
                _sum0 = vmlaq_f32(_sum0, _k21, _r21);
                _sum0 = vmlaq_f32(_sum0, _k22, _r22);
                _sum0 = vmlaq_f32(_sum0, _k23, _r23);
                _sum0 = vmlaq_f32(_sum0, _k24, _r24);

                float32x4_t _r30 = vld1q_f32(r3);
                float32x4_t _r31 = vld1q_f32(r3 + 4);
                float32x4_t _r32 = vld1q_f32(r3 + 8);
                float32x4_t _r33 = vld1q_f32(r3 + 12);
                float32x4_t _r34 = vld1q_f32(r3 + 16);

                float32x4_t _k30 = vld1q_f32(k0);
                float32x4_t _k31 = vld1q_f32(k0 + 4);
                float32x4_t _k32 = vld1q_f32(k0 + 8);
                float32x4_t _k33 = vld1q_f32(k0 + 12);
                float32x4_t _k34 = vld1q_f32(k0 + 16);
                k0 += 20;

                _sum0 = vmlaq_f32(_sum0, _k30, _r30);
                _sum0 = vmlaq_f32(_sum0, _k31, _r31);
                _sum0 = vmlaq_f32(_sum0, _k32, _r32);
                _sum0 = vmlaq_f32(_sum0, _k33, _r33);
                _sum0 = vmlaq_f32(_sum0, _k34, _r34);

                float32x4_t _r40 = vld1q_f32(r4);
                float32x4_t _r41 = vld1q_f32(r4 + 4);
                float32x4_t _r42 = vld1q_f32(r4 + 8);
                float32x4_t _r43 = vld1q_f32(r4 + 12);
                float32x4_t _r44 = vld1q_f32(r4 + 16);

                float32x4_t _k40 = vld1q_f32(k0);
                float32x4_t _k41 = vld1q_f32(k0 + 4);
                float32x4_t _k42 = vld1q_f32(k0 + 8);
                float32x4_t _k43 = vld1q_f32(k0 + 12);
                float32x4_t _k44 = vld1q_f32(k0 + 16);
                k0 -= 80; // rewind weights for the next output position

                _sum0 = vmlaq_f32(_sum0, _k40, _r40);
                _sum0 = vmlaq_f32(_sum0, _k41, _r41);
                _sum0 = vmlaq_f32(_sum0, _k42, _r42);
                _sum0 = vmlaq_f32(_sum0, _k43, _r43);
                _sum0 = vmlaq_f32(_sum0, _k44, _r44);

                vst1q_f32(outptr0, _sum0);

                // 1 output * stride 2 = 2 input pixels consumed
                r0 += 2 * 4;
                r1 += 2 * 4;
                r2 += 2 * 4;
                r3 += 2 * 4;
                r4 += 2 * 4;
                outptr0 += 4;
            }

            // move all five row pointers down two input rows (stride 2)
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
            r3 += tailstep;
            r4 += tailstep;
        }
    }
}
}
|
GB_transpose.c | //------------------------------------------------------------------------------
// GB_transpose: C=A' or C=op(A'), with typecasting
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// CALLS: GB_builder
// Transpose a matrix, C=A', and optionally apply a unary operator and/or
// typecast the values. The transpose may be done in-place, in which case C or
// A are modified in-place.
// If the input matrix has a single vector, it must be already sorted on input.
// The input matrix may have shallow components (even if in-place), and the
// output may also have shallow components (even if the input matrix is not
// shallow).
// This function is CSR/CSC agnostic; it sets the output matrix format from
// C_is_csc but otherwise ignores the CSR/CSC type of A and C.
// If A_in is NULL, then C = (*Chandle) is transposed in-place. If out of
// memory, (*Chandle) is always returned as NULL, which frees the input matrix
// C if the transpose is done in-place.
// If A_in is not NULL and Chandle is NULL, then A is modified in-place, and
// the A_in matrix is not freed when done.
// The bucket sort is parallel, but not highly scalable. If e=nnz(A) and A is
// m-by-n, then at most O(e/n) threads are used. The GB_builder method is more
// scalable, but not as fast with a modest number of threads.
#include "GB_transpose.h"
#include "GB_build.h"
#include "GB_apply.h"
// GB_FREE_ALL: intentionally a no-op here; cleanup on error paths in this
// file is handled by the more specific macros below.
#define GB_FREE_ALL ;
// free prior content of A, if transpose is done in-place.  Ap/Ah/Ab/Ai/Ax
// are local copies of A's arrays; each is freed only if A owns it (the
// corresponding *_shallow flag is false).  Frees the contents, not &A itself.
#define GB_FREE_IN_PLACE_A                                      \
{                                                               \
    if (in_place)                                               \
    {                                                           \
        /* A is being transposed in-place */                    \
        /* free prior content of A but not &A itself */         \
        if (!Ap_shallow) GB_FREE (Ap) ;                         \
        if (!Ah_shallow) GB_FREE (Ah) ;                         \
        if (!Ab_shallow) GB_FREE (Ab) ;                         \
        if (!Ai_shallow) GB_FREE (Ai) ;                         \
        if (!Ax_shallow) GB_FREE (Ax) ;                         \
    }                                                           \
    else                                                        \
    {                                                           \
        /* A is not modified; it is purely an input matrix */   \
        ;                                                       \
    }                                                           \
}
// free the new C matrix, unless C=A' is being done in-place of A
// (in that case Chandle aliases &A, which the caller still owns)
#define GB_FREE_C                                               \
{                                                               \
    if (!in_place_A)                                            \
    {                                                           \
        /* free all of C and all its contents &C */             \
        GB_Matrix_free (Chandle) ;                              \
    }                                                           \
}
// free both A (if in-place) and C (if not in-place of A); used on error
// paths after both matrices may hold allocated content
#define GB_FREE_A_AND_C                                         \
{                                                               \
    GB_FREE_IN_PLACE_A ;                                        \
    GB_FREE_C ;                                                 \
}
//------------------------------------------------------------------------------
// GB_transpose
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_transpose // C=A', C=(ctype)A or C=op(A')
(
GrB_Matrix *Chandle, // output matrix C, possibly modified in-place
GrB_Type ctype, // desired type of C; if NULL use A->type.
// ignored if op is present (cast to op->ztype)
const bool C_is_csc, // desired CSR/CSC format of C
const GrB_Matrix A_in, // input matrix
// no operator is applied if both op1 and op2 are NULL
const GrB_UnaryOp op1_in, // unary operator to apply
const GrB_BinaryOp op2_in, // binary operator to apply
const GxB_Scalar scalar, // scalar to bind to binary operator
bool binop_bind1st, // if true, binop(x,A) else binop(A,y)
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs and determine if transpose is done in-place
//--------------------------------------------------------------------------
GrB_Info info ;
GBURBLE ("(transpose) ") ;
GrB_Matrix A, C ;
bool in_place_C, in_place_A ;
if (A_in == NULL)
{
//----------------------------------------------------------------------
// C = C' ; &C is transposed in-place
//----------------------------------------------------------------------
// GB_transpose (&C, ctype, csc, NULL, op) ;
// C=A' is transposed in-place, in the matrix C.
// The matrix C is freed if an error occurs and C is set to NULL.
ASSERT (Chandle != NULL) ; // at least &C or A must be non-NULL
A = (*Chandle) ;
C = A ; // C must be freed if an error occurs
in_place_C = true ; // C is modified in-place
in_place_A = false ;
ASSERT (A == C && A == (*Chandle)) ;
}
else if (Chandle == NULL || (*Chandle) == A_in)
{
//----------------------------------------------------------------------
// A = A' ; A is transposed in-place; reuse the header of A
//----------------------------------------------------------------------
// GB_transpose (NULL, ctype, csc, A, op) ;
// GB_transpose (&A, ctype, csc, A, op) ;
// C=A' is transposed in-place, in the matrix A.
// The matrix A_in is not freed if an error occurs.
A = A_in ;
Chandle = &A ; // C must not be freed if an error occurs
C = A ;
in_place_C = false ;
in_place_A = true ; // A is modified in-place
ASSERT (A == C && A == (*Chandle)) ;
}
else
{
//----------------------------------------------------------------------
// C = A' ; C and A are different
//----------------------------------------------------------------------
// GB_transpose (&C, ctype, csc, A, op) ;
// C and A are both non-NULL, and not aliased.
// C=A' where C is a new matrix constructed here.
// The matrix C is freed if an error occurs, and C is set to NULL.
A = A_in ;
C = NULL ;
(*Chandle) = NULL ; // C must be allocated; freed on error
in_place_C = false ; // C and A are different matrices
in_place_A = false ;
ASSERT (A != C && A != (*Chandle)) ;
}
bool in_place = (in_place_A || in_place_C) ;
ASSERT_MATRIX_OK (A, "A input for GB_transpose", GB0) ;
ASSERT_TYPE_OK_OR_NULL (ctype, "ctype for GB_transpose", GB0) ;
ASSERT_UNARYOP_OK_OR_NULL (op1_in, "unop for GB_transpose", GB0) ;
ASSERT_BINARYOP_OK_OR_NULL (op2_in, "binop for GB_transpose", GB0) ;
ASSERT_SCALAR_OK_OR_NULL (scalar, "scalar for GB_transpose", GB0) ;
// get the current sparsity control of A
float A_hyper_switch = A->hyper_switch ;
int A_sparsity = A->sparsity ;
// wait if A has pending tuples or zombies, but leave it jumbled
GB_MATRIX_WAIT_IF_PENDING_OR_ZOMBIES (A) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
GrB_Type atype = A->type ;
size_t asize = atype->size ;
GB_Type_code acode = atype->code ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
int64_t anzmax = A->nzmax ;
// if in-place, these must be freed when done, whether successful or not
int64_t *GB_RESTRICT Ap = A->p ;
int64_t *GB_RESTRICT Ah = A->h ;
int64_t *GB_RESTRICT Ai = A->i ;
int8_t *GB_RESTRICT Ab = A->b ;
GB_void *GB_RESTRICT Ax = (GB_void *) A->x ;
bool A_is_bitmap = GB_IS_BITMAP (A) ;
bool A_is_packed = GB_is_packed (A) ;
bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;
bool Ap_shallow = A->p_shallow ;
bool Ah_shallow = A->h_shallow ;
bool Ai_shallow = A->i_shallow ;
bool Ax_shallow = A->x_shallow ;
bool Ab_shallow = A->b_shallow ;
int64_t anz = GB_NNZ (A) ;
int64_t anvec = A->nvec ;
int64_t anvals = A->nvals ;
//--------------------------------------------------------------------------
// determine the max number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// determine the type of C and get the unary or binary operator
//--------------------------------------------------------------------------
// If a unary or binary operator is present, C is always returned as
// the ztype of the operator. The input ctype is ignored.
GrB_UnaryOp op1 = NULL ;
GrB_BinaryOp op2 = NULL ;
GB_Opcode opcode = GB_NOP_opcode ;
if (op1_in != NULL)
{
// get the unary operator
opcode = op1_in->opcode ;
if (atype == op1_in->xtype && opcode == GB_IDENTITY_opcode)
{
// op1 is a built-in identity operator, with the same type as A, so
// do not apply the operator and do not typecast. op1 is NULL.
ctype = atype ;
}
else
{
// apply the operator, z=op1(x)
op1 = op1_in ;
ctype = op1->ztype ;
}
}
else if (op2_in != NULL)
{
// get the binary operator
GrB_Type op2_intype = binop_bind1st ? op2_in->xtype : op2_in->ytype ;
opcode = op2_in->opcode ;
// only GB_apply calls GB_transpose with op2_in, and it ensures this
// condition holds: the first(A,y), second(x,A), and any(...) have
// been renamed to identity(A), so these cases do not occur here.
ASSERT (!
((opcode == GB_ANY_opcode) ||
(opcode == GB_FIRST_opcode && !binop_bind1st) ||
(opcode == GB_SECOND_opcode && binop_bind1st))) ;
// apply the operator, z=op2(A,y) or op2(x,A)
op2 = op2_in ;
ctype = op2->ztype ;
}
else
{
// no operator. both op1 and op2 are NULL
if (ctype == NULL)
{
// no typecasting if ctype is NULL
ctype = atype ;
}
}
GB_Type_code ccode = ctype->code ;
size_t csize = ctype->size ;
//--------------------------------------------------------------------------
// check for positional operators
//--------------------------------------------------------------------------
bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
GrB_UnaryOp save_op1 = op1 ;
GrB_BinaryOp save_op2 = op2 ;
if (op_is_positional)
{
// do not apply the op until after the transpose
op1 = NULL ;
op2 = NULL ;
// replace op1 with the ONE operator, as a placeholder
ASSERT (ctype == GrB_INT64 || ctype == GrB_INT32) ;
op1 = (ctype == GrB_INT64) ? GxB_ONE_INT64 : GxB_ONE_INT32 ;
}
//--------------------------------------------------------------------------
// C = A'
//--------------------------------------------------------------------------
ASSERT (GB_IMPLIES (avlen == 0 || avdim == 0, anz == 0)) ;
bool allocate_new_Cx = (ctype != atype) || (op1 != NULL) || (op2 != NULL) ;
if (anz == 0)
{
//======================================================================
// quick return if A is empty
//======================================================================
// free prior space of A, if transpose is done in-place
GB_FREE_IN_PLACE_A ;
// A is empty; create a new empty matrix C, with the new type and
// dimensions. C is hypersparse for now but may convert when
// returned.
info = GB_new_bix (Chandle, // hyper, old or new header
ctype, avdim, avlen, GB_Ap_calloc, C_is_csc,
GxB_HYPERSPARSE, true, A_hyper_switch, 1, 1, true, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
ASSERT_MATRIX_OK (*Chandle, "C transpose empty", GB0) ;
ASSERT (!GB_JUMBLED (*Chandle)) ;
}
else if (A_is_packed)
{
//======================================================================
// transpose a packed or bitmap matrix or vector
//======================================================================
// A is packed if it is either: (a) bitmap, (b) full, or (c) sparse or
// hypersparse with all entries present, no zombies, no pending tuples,
// and not jumbled. For (c), the matrix A can be treated as if it was
// full, and the pattern (A->p, A->h, and A->i) can be ignored.
int sparsity = (A_is_bitmap) ? GxB_BITMAP : GxB_FULL ;
bool T_cheap = // T can be done quickly if:
(avlen == 1 || avdim == 1) // A is a row or column vector,
&& op1 == NULL && op2 == NULL // no operator to apply, and
&& atype == ctype ; // no typecasting
// allocate T
GrB_Matrix T = NULL ;
if (T_cheap)
{
// allocate just the header of T, not T->b or T->x
info = GB_new (&T, // bitmap or full, new header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
sparsity, A_hyper_switch, 1, Context) ;
}
else
{
// allocate all of T, including T->b and T->x
info = GB_new_bix (&T, // bitmap or full, new header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
sparsity, true, A_hyper_switch, 1, anzmax, true, Context) ;
}
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (GrB_OUT_OF_MEMORY) ;
}
T->magic = GB_MAGIC ;
if (sparsity == GxB_BITMAP)
{
T->nvals = anvals ; // for bitmap case only
}
//----------------------------------------------------------------------
// T = A'
//----------------------------------------------------------------------
// Since A is full, # threads to use is nthreads, and the
// nworkspaces parameter is not used
int64_t anz_held = GB_NNZ_HELD (A) ;
int nthreads = GB_nthreads (anz_held + anvec, chunk, nthreads_max) ;
if (T_cheap)
{
// no work to do. Transposing does not change A->b or A->x
T->b = Ab ;
T->x = Ax ;
T->nzmax = A->nzmax ;
if (in_place)
{
// transplant A->b and A->x into T
T->b_shallow = Ab_shallow ;
T->x_shallow = Ax_shallow ;
Ab = NULL ; // do not free prior Ab
Ax = NULL ; // do not free prior Ax
A->b = NULL ;
A->x = NULL ;
}
else
{
// T is a purely shallow copy of A
T->b_shallow = (Ab != NULL) ;
T->x_shallow = true ;
}
}
else if (op1 == NULL && op2 == NULL)
{
// do not apply an operator; optional typecast to C->type
GB_transpose_ix (T, A, NULL, NULL, 0, nthreads) ;
}
else
{
// apply an operator, C has type op->ztype
GB_transpose_op (T, op1, op2, scalar, binop_bind1st, A,
NULL, NULL, 0, nthreads) ;
}
ASSERT_MATRIX_OK (T, "T dense/bitmap", GB0) ;
ASSERT (!GB_JUMBLED (T)) ;
// free prior space of A, if transpose is done in-place
GB_FREE_IN_PLACE_A ;
//----------------------------------------------------------------------
// transplant T into C
//----------------------------------------------------------------------
// allocate the output matrix C as a full or bitmap matrix
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
info = GB_new (Chandle, // bitmap or full, old or new header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
sparsity, A_hyper_switch, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in-place
GB_FREE_C ;
GB_Matrix_free (&T) ;
return (info) ;
}
// Transplant T into the result C, making a copy if T is shallow
info = GB_transplant (*Chandle, ctype, &T, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_A_AND_C ;
return (GrB_OUT_OF_MEMORY) ;
}
ASSERT_MATRIX_OK (*Chandle, "Chandle, GB_transpose, bitmap/full", GB0) ;
}
else if (avdim == 1)
{
//======================================================================
// transpose a "column" vector into a "row"
//======================================================================
// transpose a vector (avlen-by-1) into a "row" matrix (1-by-avlen).
// A must be sorted first.
ASSERT_MATRIX_OK (A, "the vector A must already be sorted", GB0) ;
GB_MATRIX_WAIT (A) ;
ASSERT (!GB_JUMBLED (A)) ;
//----------------------------------------------------------------------
// allocate space
//----------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, C->b, or C->x
// content, and initialize the type and dimension of C. The new matrix
// is hypersparse. This step does not allocate anything if in-place.
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
info = GB_new (Chandle, // hyper; old or new header
ctype, 1, avlen, GB_Ap_null, C_is_csc,
GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in-place
GB_FREE_C ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// allocate new space for the values and pattern
int64_t *GB_RESTRICT Cp = NULL ;
int64_t *GB_RESTRICT Ci = NULL ;
GB_void *GB_RESTRICT Cx = NULL ;
bool ok = true ;
Cp = GB_MALLOC (anz+1, int64_t) ;
Ci = GB_CALLOC (anz , int64_t) ;
ok = (Cp != NULL && Ci != NULL) ;
if (allocate_new_Cx)
{
// allocate new space for the new typecasted numerical values of C
Cx = GB_MALLOC (anz * ctype->size, GB_void) ;
ok = ok && (Cx != NULL) ;
}
if (!ok)
{
// out of memory
GB_FREE (Cp) ;
GB_FREE (Ci) ;
GB_FREE (Cx) ;
GB_FREE_A_AND_C ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// fill the content of C
//----------------------------------------------------------------------
// numerical values: apply the operator, typecast, or make shallow copy
if (op1 != NULL || op2 != NULL)
{
// Cx = op (A)
info = GB_apply_op ( // op1 != identity of same types
(GB_void *) Cx, op1, op2, scalar, binop_bind1st, A, Context) ;
// GB_apply_op can only fail if op1/op2 are positional
ASSERT (!GB_OP_IS_POSITIONAL (op1)) ;
ASSERT (!GB_OP_IS_POSITIONAL (op2)) ;
ASSERT (info == GrB_SUCCESS) ;
C->x = Cx ;
C->x_shallow = false ;
// prior Ax will be freed
}
else if (ctype != atype)
{
// copy the values from A into C and cast from atype to ctype
C->x = Cx ;
C->x_shallow = false ;
GB_cast_array (Cx, ccode, Ax, acode, Ab, asize, anz, 1) ;
// prior Ax will be freed
}
else // ctype == atype
{
// no type change; numerical values of C are a shallow copy of A.
C->x = Ax ;
C->x_shallow = (in_place) ? Ax_shallow : true ;
Ax = NULL ; // do not free prior Ax
}
// each entry in A becomes a non-empty vector in C
// C is a hypersparse 1-by-avlen matrix
C->h = Ai ;
C->h_shallow = (in_place) ? Ai_shallow : true ;
Ai = NULL ; // do not free prior Ai
// C->p = 0:anz and C->i = zeros (1,anz), newly allocated
C->plen = anz ;
C->nvec = anz ;
C->nvec_nonempty = anz ;
C->i = Ci ;
C->p = Cp ;
// fill the vector pointers C->p
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k <= anz ; k++)
{
Cp [k] = k ;
}
C->nzmax = anz ;
C->magic = GB_MAGIC ;
// free prior space of A, if transpose is done in-place
GB_FREE_IN_PLACE_A ;
}
else if (avlen == 1)
{
//======================================================================
// transpose a "row" into a "column" vector
//======================================================================
// transpose a "row" matrix (1-by-avdim) into a vector (avdim-by-1).
// if A->vlen is 1, all vectors of A are implicitly sorted
ASSERT_MATRIX_OK (A, "1-by-n input A already sorted", GB0) ;
//----------------------------------------------------------------------
// allocate workspace, if needed
//----------------------------------------------------------------------
int ntasks = 0 ;
int nth = GB_nthreads (avdim, chunk, nthreads_max) ;
int64_t *GB_RESTRICT Count = NULL ;
if (nth > 1 && !A_is_hyper)
{
// ntasks and Count are not needed if nth == 1
ntasks = 8 * nth ;
ntasks = GB_IMIN (ntasks, avdim) ;
ntasks = GB_IMAX (ntasks, 1) ;
Count = GB_CALLOC (ntasks+1, int64_t) ;
if (Count == NULL)
{
// out of memory
GB_FREE_C ;
return (GrB_OUT_OF_MEMORY) ;
}
}
// Allocate the header of C, with no C->p, C->h, C->i, or C->x content,
// and initialize the type and dimension of C. If in-place, A->p,
// A->h, A->i, and A->x are all NULL. The new matrix is sparse, but
// can be CSR or CSC. This step does not allocate anything if in
// place.
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
info = GB_new (Chandle, // sparse; old or new header
ctype, avdim, 1, GB_Ap_null, C_is_csc,
GxB_SPARSE, A_hyper_switch, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in-place
GB_FREE_C ;
GB_FREE (Count) ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// allocate new space for the values and pattern
GB_void *GB_RESTRICT Cx = NULL ;
int64_t *GB_RESTRICT Cp = NULL ;
int64_t *GB_RESTRICT Ci = NULL ;
bool ok = true ;
Cp = GB_CALLOC (2, int64_t) ;
ok = ok && (Cp != NULL) ;
if (!A_is_hyper)
{
// A is sparse, so new space is needed for Ci
Ci = GB_MALLOC (anz, int64_t) ;
ok = ok && (Ci != NULL) ;
}
if (allocate_new_Cx)
{
// allocate new space for the new typecasted numerical values of C
Cx = GB_MALLOC (anz * ctype->size, GB_void) ;
ok = ok && (Cx != NULL) ;
}
if (!ok)
{
// out of memory
GB_FREE (Cp) ;
GB_FREE (Ci) ;
GB_FREE (Cx) ;
GB_FREE_A_AND_C ;
GB_FREE (Count) ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// numerical values of C: apply the op, typecast, or make shallow copy
//----------------------------------------------------------------------
// numerical values: apply the operator, typecast, or make shallow copy
if (op1 != NULL || op2 != NULL)
{
// Cx = op (A)
info = GB_apply_op ( // op1 != identity of same types
(GB_void *) Cx, op1, op2, scalar, binop_bind1st, A, Context) ;
// GB_apply_op can only fail if op1/op2 are positional
ASSERT (!GB_OP_IS_POSITIONAL (op1)) ;
ASSERT (!GB_OP_IS_POSITIONAL (op2)) ;
ASSERT (info == GrB_SUCCESS) ;
C->x = Cx ;
C->x_shallow = false ;
// prior Ax will be freed
}
else if (ctype != atype)
{
// copy the values from A into C and cast from atype to ctype
C->x = Cx ;
C->x_shallow = false ;
GB_cast_array (Cx, ccode, Ax, acode, Ab, asize, anz, 1) ;
// prior Ax will be freed
}
else // ctype == atype
{
// no type change; numerical values of C are a shallow copy of A
C->x = Ax ;
C->x_shallow = (in_place) ? Ax_shallow : true ;
Ax = NULL ; // do not free prior Ax
}
//----------------------------------------------------------------------
// pattern of C
//----------------------------------------------------------------------
if (A_is_hyper)
{
//------------------------------------------------------------------
// each non-empty vector in A becomes an entry in C
//------------------------------------------------------------------
C->i = Ah ;
C->i_shallow = (in_place) ? Ah_shallow : true ;
ASSERT (anvec == anz) ;
Ah = NULL ; // do not free prior Ah
}
else
{
//------------------------------------------------------------------
// find the non-empty vectors of A, which become entries in C
//------------------------------------------------------------------
ASSERT (Ah == NULL) ;
if (nth == 1)
{
//--------------------------------------------------------------
// construct Ci with a single thread
//--------------------------------------------------------------
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (Ap [j] < Ap [j+1])
{
Ci [k++] = j ;
}
}
ASSERT (k == anz) ;
}
else
{
//--------------------------------------------------------------
// construct Ci in parallel
//--------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = 0 ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap [j] < Ap [j+1])
{
k++ ;
}
}
Count [tid] = k ;
}
GB_cumsum (Count, ntasks, NULL, 1) ;
ASSERT (Count [ntasks] == anz) ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = Count [tid] ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap [j] < Ap [j+1])
{
Ci [k++] = j ;
}
}
}
}
#ifdef GB_DEBUG
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (Ap [j] < Ap [j+1])
{
ASSERT (Ci [k] == j) ;
k++ ;
}
}
ASSERT (k == anz) ;
#endif
C->i = Ci ;
C->i_shallow = false ;
}
//---------------------------------------------------------------------
// vector pointers of C
//---------------------------------------------------------------------
// C->p = [0 anz] and C->h = NULL
ASSERT (C->plen == 1) ;
ASSERT (C->nvec == 1) ;
ASSERT (C->h == NULL) ;
C->p = Cp ;
C->p_shallow = false ;
C->nvec_nonempty = (anz == 0) ? 0 : 1 ;
// fill the vector pointers C->p
Cp [0] = 0 ;
Cp [1] = anz ;
C->nzmax = anz ;
C->magic = GB_MAGIC ;
ASSERT (!GB_JUMBLED (C)) ;
// free prior space of A, if transpose done in-place, and free workspace
GB_FREE_IN_PLACE_A ;
GB_FREE (Count) ;
}
else
{
//======================================================================
// transpose a general sparse or hypersparse matrix
//======================================================================
ASSERT_MATRIX_OK (A, "A for GB_transpose", GB0) ;
// T=A' with optional typecasting, or T=op(A')
//----------------------------------------------------------------------
// select the method
//----------------------------------------------------------------------
int nworkspaces_bucket, nthreads_bucket ;
bool use_builder = GB_transpose_method (A,
&nworkspaces_bucket, &nthreads_bucket, Context) ;
//----------------------------------------------------------------------
// transpose the matrix with the selected method
//----------------------------------------------------------------------
if (use_builder)
{
//==================================================================
// transpose via GB_builder
//==================================================================
//------------------------------------------------------------------
// allocate and create iwork
//------------------------------------------------------------------
// allocate iwork of size anz
int64_t *iwork = GB_MALLOC (anz, int64_t) ;
if (iwork == NULL)
{
// out of memory
GB_FREE_C ;
return (GrB_OUT_OF_MEMORY) ;
}
// Construct the "row" indices of C, which are "column" indices of
// A. This array becomes the permanent T->i on output. This phase
// must be done before Chandle is created below, since that step
// destroys A.
int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;
GB_extract_vector_list (iwork, A, nthreads) ;
//------------------------------------------------------------------
// allocate the output matrix and additional space (jwork and S)
//------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in-place.
// if *Chandle == NULL, allocate a new header; otherwise reuse
info = GB_new (Chandle, // hyper, old or new header
ctype, avdim, avlen, GB_Ap_null, C_is_csc,
GxB_HYPERSPARSE, A_hyper_switch, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in-place
GB_FREE (iwork) ;
GB_FREE_C ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// if in_place, the prior Ap and Ah can now be freed
if (in_place)
{
if (!Ap_shallow) GB_FREE (Ap) ;
if (!Ah_shallow) GB_FREE (Ah) ;
}
int64_t *jwork = NULL ;
GB_Type_code scode ;
GB_void *S = NULL ;
GB_void *Swork = NULL ;
// for the GB_builder method, if the transpose is done in-place and
// A->i is not shallow, A->i can be used and then freed.
// Otherwise, A->i is not modified at all.
bool ok = true ;
bool recycle_Ai = (in_place && !Ai_shallow) ;
if (!recycle_Ai)
{
// allocate jwork of size anz
jwork = GB_MALLOC (anz, int64_t) ;
ok = ok && (jwork != NULL) ;
}
if (op1 != NULL || op2 != NULL)
{
// allocate Swork of size anz * csize
Swork = GB_MALLOC (anz * csize, GB_void) ;
ok = ok && (Swork != NULL) ;
}
if (!ok)
{
// out of memory
GB_FREE (iwork) ;
GB_FREE (jwork) ;
GB_FREE (Swork) ;
GB_FREE_A_AND_C ;
return (GrB_OUT_OF_MEMORY) ;
}
//------------------------------------------------------------------
// construct jwork and Swork
//------------------------------------------------------------------
// "row" indices of A become "column" indices of C
if (recycle_Ai)
{
// Ai is used as workspace for the "column" indices of C.
// jwork is a shallow copy of Ai, and is freed by GB_builder.
jwork = Ai ;
ASSERT (in_place) ;
// set Ai to NULL so it is not freed by GB_FREE_IN_PLACE_A
Ai = NULL ;
}
else
{
// jwork = Ai, making a deep copy. jwork is freed by
// GB_builder. A->i is not modified, even if out of memory.
GB_memcpy (jwork, Ai, anz * sizeof (int64_t), nthreads) ;
}
// numerical values: apply the op, typecast, or make shallow copy
if (op1 != NULL || op2 != NULL)
{
// Swork = op (A)
info = GB_apply_op ( // op1 != identity of same types
(GB_void *) Swork, op1, op2, scalar, binop_bind1st,
A, Context) ;
// GB_apply_op can only fail if op1/op2 are positional
ASSERT (!GB_OP_IS_POSITIONAL (op1)) ;
ASSERT (!GB_OP_IS_POSITIONAL (op2)) ;
ASSERT (info == GrB_SUCCESS) ;
// GB_builder will not need to typecast Swork to T->x, and it
// may choose to transplant it into T->x
scode = ccode ;
#if 0
if (in_place && !Ax_shallow)
{
// A is being transposed in-place so A->x is no longer
// needed. If A->x is shallow this can be skipped. T->x
// will not be shallow if the op is present. A->x should
// be freed early to free up space for GB_builder.
// However, in the current usage, when op is used, A is not
// transposed in-place, so this step is not needed.
ASSERT (GB_DEAD_CODE) ;
GB_FREE (Ax) ;
}
#endif
}
else
{
// GB_builder will typecast S from atype to ctype if needed.
// S is a shallow copy of Ax, and must not be modified.
S = Ax ;
scode = acode ;
}
//------------------------------------------------------------------
// build the matrix: T = (ctype) A' or op ((xtype) A')
//------------------------------------------------------------------
// internally, jwork is freed and then T->x is allocated, so the
// total high-water memory usage is anz * max (csize,
// sizeof(int64_t)). T is always hypersparse.
// If op is not NULL, then Swork can be transplanted into T in
// GB_builder, instead. However, this requires the tuples to be
// sorted on input, which is possible but rare for GB_transpose.
GrB_Matrix T = NULL ;
info = GB_builder
(
&T, // create T
ctype, // T is of type ctype
avdim, // T->vlen = A->vdim, always > 1
avlen, // T->vdim = A->vlen, always > 1
C_is_csc, // T has the same CSR/CSC format as C
&iwork, // iwork_handle, becomes T->i on output
&jwork, // jwork_handle, freed on output
&Swork, // Swork_handle, freed on output
false, // tuples are not sorted on input
true, // tuples have no duplicates
anz, // size of iwork, jwork, and Swork
true, // is_matrix: unused
NULL, NULL, // original I,J indices: not used here
S, // array of values of type scode, not modified
anz, // number of tuples
NULL, // no dup operator needed (input has no duplicates)
scode, // type of S or Swork
Context
) ;
// GB_builder always frees jwork, and either frees iwork or
// transplants it in to T->i and sets iwork to NULL. So iwork and
// jwork are always NULL on output. GB_builder does not modify S.
ASSERT (iwork == NULL && jwork == NULL && Swork == NULL) ;
//------------------------------------------------------------------
// free prior space and transplant T into C
//------------------------------------------------------------------
// Free the prior content of the input matrix, if done in-place.
// Ap, Ah, and Ai have already been freed, but Ax has not.
GB_FREE_IN_PLACE_A ;
if (info != GrB_SUCCESS)
{
// out of memory in GB_builder
GB_FREE_A_AND_C ;
return (info) ;
}
// Transplant T in to the result C. The matrix T is not shallow
// and no typecasting is done, so this will always succeed.
ASSERT (!GB_JUMBLED (T)) ;
info = GB_transplant (*Chandle, ctype, &T, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
//==================================================================
// transpose via bucket sort
//==================================================================
// This method does not operate on the matrix in-place, so it must
// create a temporary matrix T. Then the input matrix is freed and
// replaced with the new matrix T.
// T is also typecasted to ctype, if not NULL
GrB_Matrix T = NULL ;
info = GB_transpose_bucket (&T, ctype, C_is_csc, A,
op1, op2, scalar, binop_bind1st,
nworkspaces_bucket, nthreads_bucket, Context) ;
// free prior content, if C=A' is being done in-place
if (in_place_A)
{
// free all content of A, but not the header, if in-place of A
GB_phbix_free (A) ; // transpose in-place
}
else if (in_place_C)
{
// free all of C, including the header, if done in-place of C
GB_Matrix_free (Chandle) ;
}
if (info != GrB_SUCCESS)
{
// out of memory in GB_transpose_bucket
GB_FREE_C ;
return (info) ;
}
ASSERT_MATRIX_OK (T, "T from bucket", GB0) ;
ASSERT (GB_JUMBLED_OK (T)) ;
if (in_place_A)
{
// The header of A has not been freed, since it is used for the
// output. Transplant T back into A and free T. T is not
// shallow and no typecast is done so this will always succeed.
info = GB_transplant (A, ctype, &T, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
// If C=A' is done in-place of C, then the header and content
// of the input C has been freed. The output T can now be
// moved to the Chandle.
ASSERT (*Chandle == NULL) ;
(*Chandle) = T ;
}
}
}
//--------------------------------------------------------------------------
// get the output matrix
//--------------------------------------------------------------------------
C = (*Chandle) ;
ASSERT (GB_JUMBLED_OK (C)) ;
//--------------------------------------------------------------------------
// apply a positional operator, after transposing the matrix
//--------------------------------------------------------------------------
if (op_is_positional)
{
// the positional operator is applied in-place to the values of C
op1 = save_op1 ;
op2 = save_op2 ;
// Cx = op (C)
info = GB_apply_op (C->x, op1, // positional unary/binary op only
op2, scalar, binop_bind1st, C, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
}
//--------------------------------------------------------------------------
// conform the result to the desired sparsity structure of A
//--------------------------------------------------------------------------
// transplant the hyper_switch and sparsity structure from A to C
C->hyper_switch = A_hyper_switch ;
C->sparsity = A_sparsity ; // transplant sparsity control into C
ASSERT_MATRIX_OK (C, "C to conform in GB_transpose", GB0) ;
info = GB_conform (C, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
ASSERT_MATRIX_OK (*Chandle, "Chandle conformed in GB_transpose", GB0) ;
return (GrB_SUCCESS) ;
}
|
spmm_blocking_libxsmm.h | /*!
* Copyright (c) 2021 Intel Corporation
 * \file array/cpu/spmm_blocking_libxsmm.h
* \brief SPMM CPU kernel function header.
* \author Sanchit Misra <sanchit.misra@intel.com>,
* Ramanarayan Mohanty <ramanarayan.mohanty@intel.com>,
* Vasimuddin Md <vasimuddin.md@intel.com>,
* Sasikanth Avancha <sasikanth.avancha@intel.com>
*/
#ifndef DGL_ARRAY_CPU_SPMM_BLOCKING_LIBXSMM_H_
#define DGL_ARRAY_CPU_SPMM_BLOCKING_LIBXSMM_H_
#include <dgl/array.h>
#include <dgl/bcast.h>
#include <dmlc/logging.h>
#include <algorithm>
#if !defined(_WIN32)
#ifdef USE_AVX
#ifdef USE_LIBXSMM
#include <unistd.h>
#include <libxsmm.h>
#ifdef DEBUG
#include <x86intrin.h>
#endif // DEBUG
#include <dmlc/omp.h>
#define NUM_BLOCKS_PER_THREAD 20
#define BLOCKING_HEURISTIC_PARAM 500
namespace dgl {
namespace aten {
namespace cpu {
/*!
 * \brief Lightweight non-owning view of a CSR matrix (or of one tile of a
 *        larger matrix). Unlike dgl::aten::CSRMatrix, the arrays are raw
 *        pointers into memory owned elsewhere, so instances can cheaply
 *        alias sub-blocks created by SpMMCreateBlocks.
 */
template <typename IdType, typename DType>
struct CSRMatrixInternal {
  IdType num_rows;  // number of rows in this (sub-)matrix
  IdType num_cols;  // number of columns in this (sub-)matrix
  IdType *indptr;   // row pointers; indptr[i]..indptr[i+1] delimit row i
  IdType *indices;  // column indices of the nonzeros
  DType *data;      // per-nonzero data (edge ids); may be unused/null
};
/*!
 * \brief Query the size in bytes of the last-level (L3) cache.
 * \return The L3 cache size reported by sysconf, or the compile-time
 *         fallback DGL_CPU_LLC_SIZE when the query fails (sysconf returns
 *         a negative value on error or when the key is unsupported).
 * \note Marked inline: this function is defined in a header, so a
 *       non-inline definition would violate the ODR (duplicate symbol)
 *       whenever the header is included from more than one translation
 *       unit.
 */
inline int32_t GetLLCSize() {
  int32_t cache_size = sysconf(_SC_LEVEL3_CACHE_SIZE);
  if (cache_size < 0) cache_size = DGL_CPU_LLC_SIZE;
  return cache_size;
}
/*!
* \brief Tile the CSR matrix to roughly make sure that the column tiles and
* corresponding neighbor features fit into LLC and the row tiles
* are assigned to OMP threads.
* \param csr The Csr matrix.
* \param block_csr_array The array containing csr matrices of all blocks.
* \param num_M_blocks Number of blocks to create along the rows of adjacency matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency matrix.
* \param M_block_size block size along the rows of adjacency matrix.
* \param K_block_size block size along the columns of adjacency matrix.
* \param use_lhs Whether to use lhs.
* \param use_rhs Whether to use rhs.
*/
template <typename IdType>
inline void SpMMCreateBlocks(
    const CSRMatrix& csr,
    CSRMatrixInternal<IdType, IdType> *block_csr_array,
    IdType num_M_blocks,
    IdType num_K_blocks,
    IdType M_block_size,
    IdType K_block_size,
    bool use_lhs, bool use_rhs) {
  const IdType M = csr.num_rows;
  const IdType K = csr.num_cols;
  IdType* indptr = csr.indptr.Ptr<IdType>();
  IdType* indices = csr.indices.Ptr<IdType>();
  IdType* edges = csr.data.Ptr<IdType>();
  CHECK_NOTNULL(indptr);
  if (use_lhs)
    CHECK_NOTNULL(indices);
  if (use_rhs)
    CHECK_NOTNULL(edges);

  if (num_K_blocks > 1) {
    // 2-D tiling: every (m, k) tile receives a compacted CSR carved out of
    // these three shared buffers. Tile (m, k) owns a fixed (M_block_size+1)
    // slice of indptr_block_buf; indices/edges slots are assigned while
    // scanning the rows, so the tiles of one M block pack contiguously.
    IdType *indptr_block_buf = reinterpret_cast<IdType *>(aligned_alloc(64,
        (M_block_size + 1) * num_M_blocks *
        num_K_blocks * sizeof(IdType)));
    IdType *indices_block_buf = reinterpret_cast<IdType *>(aligned_alloc(64,
        indptr[M] * sizeof(IdType)));
    IdType *edges_block_buf = reinterpret_cast<IdType *>(aligned_alloc(64,
        indptr[M] * sizeof(IdType)));
    // BUGFIX: fail loudly on allocation failure instead of dereferencing
    // a null pointer later.
    CHECK_NOTNULL(indptr_block_buf);
    CHECK_NOTNULL(indices_block_buf);
    CHECK_NOTNULL(edges_block_buf);
#pragma omp parallel
    {
      // Per-row cursor: slot 2*i holds the next unconsumed edge of row i,
      // slot 2*i+1 holds the end of that row.
      IdType *my_cur_col_id = reinterpret_cast<IdType *>(aligned_alloc(64,
          2 * M_block_size * sizeof(IdType)));
      CHECK_NOTNULL(my_cur_col_id);
#pragma omp for
      for (IdType m = 0; m < num_M_blocks; m++) {
        const IdType M_start = m * M_block_size;
        const IdType M_end = std::min((m + 1) * M_block_size, M);
        const IdType nnz = indptr[M_end] - indptr[M_start];

        IdType cur_indices_id = 0;
        // BUGFIX: initialize to nullptr so unused buffers are never left
        // indeterminate (silences maybe-uninitialized warnings as well).
        IdType *my_indices_block_buf = nullptr, *my_edges_block_buf = nullptr;
        if (use_lhs)
          my_indices_block_buf = indices_block_buf + indptr[M_start];
        if (use_rhs)
          my_edges_block_buf = edges_block_buf + indptr[M_start];

        for (IdType i = M_start; i < M_end; i++) {
          my_cur_col_id[(i - M_start) * 2] = indptr[i];
          my_cur_col_id[(i - M_start) * 2 + 1] = indptr[i + 1];
        }
        for (IdType k = 0; k < num_K_blocks; k++) {
          const IdType K_start = k * K_block_size;
          const IdType K_end = std::min((k + 1) * K_block_size, K);
          // Build the compacted CSR of tile (m, k).
          CSRMatrixInternal<IdType, IdType> cur_csr;
          cur_csr.num_rows = M_end - M_start;
          cur_csr.num_cols = K_end - K_start;
          IdType *cur_csr_indptr =
              indptr_block_buf + (m * num_K_blocks + k) * (M_block_size + 1);
          IdType *cur_csr_indices = nullptr, *cur_csr_edges = nullptr;
          if (use_lhs)
            cur_csr_indices = my_indices_block_buf + cur_indices_id;
          if (use_rhs)
            cur_csr_edges = my_edges_block_buf + cur_indices_id;
          IdType cur_nnz = 0;
          for (IdType i = M_start; i < M_end; i++) {
            const IdType row_start = my_cur_col_id[(i - M_start) * 2];
            const IdType row_end = my_cur_col_id[(i - M_start) * 2 + 1];
            cur_csr_indptr[i - M_start] = cur_nnz;
            IdType eid;
            for (eid = row_start; eid < row_end; eid++) {
              const IdType src = indices[eid];
              // Columns within a row are assumed sorted, so the first index
              // past this K tile ends the row's contribution to the tile.
              if (src >= K_end) {
                break;
              }
              CHECK_LT(cur_indices_id + cur_nnz, nnz);
              if (use_lhs)
                cur_csr_indices[cur_nnz] = src;
              if (use_rhs)
                // BUGFIX: read edges[] only when edge data is used; edges
                // may be null otherwise (it is only CHECK'd under use_rhs),
                // but the original dereferenced it unconditionally.
                cur_csr_edges[cur_nnz] = edges[eid];
              cur_nnz++;
            }
            my_cur_col_id[(i - M_start) * 2] = eid;
          }
          cur_csr_indptr[cur_csr.num_rows] = cur_nnz;
          cur_indices_id += cur_nnz;
          cur_csr.indptr = cur_csr_indptr;
          if (use_lhs)
            cur_csr.indices = cur_csr_indices;
          if (use_rhs)
            cur_csr.data = cur_csr_edges;
          block_csr_array[m * num_K_blocks + k] = cur_csr;
        }
        CHECK_EQ(nnz, cur_indices_id);
      }
      free(my_cur_col_id);
    }
  } else {
    // Single K block: tiles are plain views into the caller's CSR arrays.
    // BUGFIX: the original used an orphaned "#pragma omp for" here; with no
    // enclosing parallel region it executed serially, defeating the intent.
#pragma omp parallel for
    for (IdType m = 0; m < num_M_blocks; m++) {
      const IdType M_start = m * M_block_size;
      const IdType M_end = std::min((m + 1) * M_block_size, M);
      CSRMatrixInternal<IdType, IdType> cur_csr;
      cur_csr.num_rows = M_end - M_start;
      cur_csr.num_cols = K;
      cur_csr.indptr = indptr + M_start;
      cur_csr.indices = indices;
      cur_csr.data = edges;
      block_csr_array[m] = cur_csr;
    }
  }
}
/*!
* \brief Create libxsmm kernel.
* \param has_idx For the edge features, are there indices available.
* \param N Feature size.
* \param redop_flag Flag specifying the reduction operation.
* \param is_cmp Is the reduction operation a compare operation.
* \note libxsmm_dispatch_meltw_opreduce_vecs_idx creates a JIT'ed kernel.
* Given a node u, the kernel performs an elementwise "Op" on the
* features of the neighbors and/or the edges incident on u.
* Subsequently, it performs an elementwise "Redop" on all such
* features created and stores into the feature of node u.
* It uses a SIMD and a cache efficient design and also provides
* support to enable software prefetching if needed. For IdType,
* it supports INT32 and INT64. For DType, it supports BF16 and FP32.
* It supports all the "Ops" and "Redops" supported by DGL. Once a
* kernel is generated by libxsmm_dispatch_meltw_opreduce_vecs_idx,
* it is cached for the entire duration of the execution of a program
* so that subsequently if the kernel is needed again, it just returns
* the cached copy.
*/
template <typename IdType, typename DType, typename Op>
inline libxsmm_meltwfunction_opreduce_vecs_idx SpMMCreateLibxsmmKernel(
    bool has_idx,
    IdType N,
    libxsmm_meltw_opreduce_vecs_flags redop_flag,
    bool is_cmp) {
  int _ld = N;
  // BUGFIX: initialize the flags. The original left opredop_flags
  // uninitialized; for an Op not matched by the chain below, the later
  // bitwise-ORs would read an indeterminate value (undefined behavior).
  libxsmm_meltw_opreduce_vecs_flags opredop_flags =
      static_cast<libxsmm_meltw_opreduce_vecs_flags>(0);
  // First, set the Op in the opredop_flags
  if (std::is_same<Op, op::Add<DType>>::value) {
    opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_ADD;
  } else if (std::is_same<Op, op::Sub<DType>>::value) {
    opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_SUB;
  } else if (std::is_same<Op, op::Mul<DType>>::value) {
    opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_MUL;
  } else if (std::is_same<Op, op::Div<DType>>::value) {
    opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_DIV;
  } else if (std::is_same<Op, op::CopyLhs<DType>>::value) {
    opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_COPY;
  } else if (std::is_same<Op, op::CopyRhs<DType>>::value) {
    opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_COPY;
  }
  // Second, set which of lhs or rhs is considered first and second operand.
  // This is needed since libxsmm assumes that the copy operation always copies
  // the first operand. So, if we need to copy rhs, we need to set that as the
  // first operand. For rhs, we also set whether to use implicit indices or
  // provided indices.
  if (std::is_same<Op, op::CopyLhs<DType>>::value) {
    opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
        LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OPORDER_VECIDX_VECIN);
  } else if (std::is_same<Op, op::CopyRhs<DType>>::value) {
    opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
        LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OPORDER_VECIN_VECIDX);
    if (!has_idx) {
      opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
          LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_IMPLICIT_INDEXED_VECIDX);
    }
  } else {
    opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
        LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OPORDER_VECIDX_VECIN);
    if (has_idx) {
      opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
          LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_INDEXED_VEC);
    } else {
      opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
          LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_IMPLICIT_INDEXED_VEC);
    }
  }
  // Third, we set the Redop in the opredop_flags
  opredop_flags =
      (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags | redop_flag);
  // Fourth, in case of Cmp Redop, set whether to record argmax/argmin for
  // lhs/rhs
  if (is_cmp) {
    if (Op::use_lhs) {
      opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
          LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_RECORD_ARGOP_OFF_VEC_0);
    }
    if (Op::use_rhs) {
      opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
          LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_RECORD_ARGOP_OFF_VEC_1);
    }
  }
  // Only FP32 features are dispatched here; any other DType leaves kernel
  // null and triggers the fatal log below.
  libxsmm_meltwfunction_opreduce_vecs_idx kernel = nullptr;
  if (std::is_same<DType, float>::value) {
    kernel = libxsmm_dispatch_meltw_opreduce_vecs_idx(
        N, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32,
        (sizeof(IdType) == 8) ? LIBXSMM_DATATYPE_I64 : LIBXSMM_DATATYPE_I32,
        opredop_flags);
  }
  if (kernel == nullptr) {
    LOG(FATAL) << "Failed to generate libxsmm kernel for the SpMM operation!";
  }
  return kernel;
}
/*!
* \brief Use libxsmm to perform SpMM-Sum on all blocks.
* \param block_csr_array The array containing csr matrices of all blocks.
* \param B The feature on source nodes.
* \param E The feature on edges.
* \param C The result feature on destination nodes.
* \param has_idx For the edge features, are there indices available.
* \param N Feature size.
* \param num_M_blocks Number of blocks to create along the rows of adjacency matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency matrix.
* \param M_block_size block size along the rows of adjacency matrix.
* \param kernel The libxsmm kernel.
*/
template <typename IdType, typename DType>
inline void SpMMBlockwiseOpSum(
    CSRMatrixInternal<IdType, IdType> *block_csr_array,
    const DType *B, const DType *E, DType *C, bool has_idx, IdType N,
    IdType num_M_blocks, IdType num_K_blocks, IdType M_block_size,
    libxsmm_meltwfunction_opreduce_vecs_idx kernel) {
  // View the flat feature buffers as 2-D arrays with N features per row.
  DType (*in_matrix1)[N] = (DType (*)[N])B;
  DType (*in_matrix2)[N] = (DType (*)[N])E;
  DType (*output)[N] = (DType (*)[N])C;
#pragma omp parallel
  {
    // All threads sweep the K blocks in lockstep; within each K phase the
    // M blocks are divided among threads, and the implicit barrier at the
    // end of the omp-for keeps phases (and thus writes to the same output
    // row across K blocks) from overlapping.
    for (IdType k = 0; k < num_K_blocks; k++) {
#pragma omp for schedule(dynamic)
      for (IdType m = 0; m < num_M_blocks; m++) {
        CSRMatrixInternal<IdType, IdType> cur_csr =
            block_csr_array[m * num_K_blocks + k];
        const IdType M_start = m * M_block_size;
        for (IdType i = 0; i < cur_csr.num_rows; i++) {
          const IdType row_start = cur_csr.indptr[i];
          const IdType row_end = cur_csr.indptr[i + 1];
          const IdType dst = i + M_start;
          libxsmm_meltw_opreduce_vecs_idx_param params;
          params.n = row_end - row_start;
          params.indices = &cur_csr.indices[row_start];
          params.in_matrix = in_matrix1;
          params.out_vec = &output[dst][0];
          params.scale_vals = nullptr;
          if (has_idx) {
            // Edge features are gathered through the explicit edge ids.
            params.in_matrix2 = in_matrix2;
            params.indices2 = &cur_csr.data[row_start];
          } else {
            // Edge ids are implicit; hand the kernel the row's slice.
            params.in_matrix2 = &in_matrix2[row_start];
          }
          // BUGFIX: restore "&params"; the original text was corrupted to
          // "&para;ms" (an HTML-entity mangling), which does not compile.
          kernel(&params);
        }
      }
    }
  }
}
/*!
* \brief Use libxsmm to perform SpMM-Max/Min on all blocks.
* \param block_csr_array The array containing csr matrices of all blocks.
* \param B The feature on source nodes.
* \param E The feature on edges.
* \param C The result feature on destination nodes.
* \param argB Arg-Min/Max on source nodes.
* \param argE Arg-Min/Max on edges.
* \param has_idx For the edge features, are there indices available.
* \param N Feature size.
* \param num_M_blocks Number of blocks to create along the rows of adjacency matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency matrix.
* \param M_block_size block size along the rows of adjacency matrix.
* \param kernel The libxsmm kernel.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
inline void SpMMBlockwiseOpCmp(
    CSRMatrixInternal<IdType, IdType> *block_csr_array,
    const DType *B, const DType *E, DType *C, IdType *argB, IdType *argE,
    bool has_idx, IdType N,
    IdType num_M_blocks, IdType num_K_blocks, IdType M_block_size,
    libxsmm_meltwfunction_opreduce_vecs_idx kernel) {
  // View the flat feature buffers as 2-D arrays with N features per row.
  DType (*in_matrix1)[N] = (DType (*)[N])B;
  DType (*in_matrix2)[N] = (DType (*)[N])E;
  DType (*output)[N] = (DType (*)[N])C;
  // Per-element argmin/argmax offsets for the lhs and rhs operands.
  IdType (*out_matrix1)[N] = (IdType (*)[N])argB;
  IdType (*out_matrix2)[N] = (IdType (*)[N])argE;
#pragma omp parallel
  {
    // All threads sweep the K blocks in lockstep; within each K phase the
    // M blocks are divided among threads, and the implicit barrier at the
    // end of the omp-for keeps phases (and thus writes to the same output
    // row across K blocks) from overlapping.
    for (IdType k = 0; k < num_K_blocks; k++) {
#pragma omp for schedule(dynamic)
      for (IdType m = 0; m < num_M_blocks; m++) {
        CSRMatrixInternal<IdType, IdType> cur_csr =
            block_csr_array[m * num_K_blocks + k];
        const IdType M_start = m * M_block_size;
        for (IdType i = 0; i < cur_csr.num_rows; i++) {
          const IdType row_start = cur_csr.indptr[i];
          const IdType row_end = cur_csr.indptr[i + 1];
          const IdType dst = i + M_start;
          libxsmm_meltw_opreduce_vecs_idx_param params;
          params.n = row_end - row_start;
          params.indices = &cur_csr.indices[row_start];
          params.in_matrix = in_matrix1;
          params.out_vec = &output[dst][0];
          params.argop_off_vec_0 = &out_matrix1[dst][0];
          params.argop_off_vec_1 = &out_matrix2[dst][0];
          params.scale_vals = nullptr;
          if (has_idx) {
            // Edge features are gathered through the explicit edge ids.
            params.in_matrix2 = in_matrix2;
            params.indices2 = &cur_csr.data[row_start];
          } else {
            // Edge ids are implicit; hand the kernel the row's slice.
            params.in_matrix2 = &in_matrix2[row_start];
          }
          // BUGFIX: restore "&params"; the original text was corrupted to
          // "&para;ms" (an HTML-entity mangling), which does not compile.
          kernel(&params);
        }
      }
    }
  }
}
/*!
* \brief Free the tiled CSR matrix data.
* \param block_csr_array The array containing csr matrices of all blocks.
* \param num_M_blocks Number of blocks to create along the rows of adjacency matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency matrix.
* \param use_lhs Whether to use lhs.
* \param use_rhs Whether to use rhs.
*/
template <typename IdType>
inline void SpMMFreeBlocks(
    CSRMatrixInternal<IdType, IdType> *block_csr_array,
    IdType num_M_blocks, IdType num_K_blocks,
    bool use_lhs, bool use_rhs) {
  // When K was tiled, SpMMCreateBlocks carved every tile out of three large
  // shared buffers; tile (0, 0) points at the start of each buffer, so
  // freeing through it releases the storage of all tiles at once. With a
  // single K block the tiles merely alias the caller's CSR arrays, and only
  // the tile array itself was allocated.
  const bool k_was_tiled = (num_K_blocks > 1);
  if (k_was_tiled) {
    free(block_csr_array[0].indptr);
    if (use_lhs) free(block_csr_array[0].indices);
    if (use_rhs) free(block_csr_array[0].data);
  }
  free(block_csr_array);
}
/*!
* \brief Optimized CPU kernel of SpMM-Sum/Max/Min on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes.
* \param arge Arg-Min/Max on edges.
* \note it uses libxsmm, blocking and dynamic thread scheduling.
*/
template <typename IdType, typename DType, typename Op, typename Redop>
void SpMMRedopCsrOpt(
    const BcastOff& bcast,
    const CSRMatrix& csr,
    NDArray ufeat, NDArray efeat,
    NDArray out,
    NDArray argu, NDArray arge) {
  int32_t llc_size = GetLLCSize();
#ifdef DEBUG
  uint64_t startTick, endTick;
  startTick = __rdtsc();
#endif  // DEBUG
  const bool has_idx = !IsNullArray(csr.data);
  DType* C = out.Ptr<DType>();
  const DType* B = ufeat.Ptr<DType>();
  const DType* E = efeat.Ptr<DType>();
  // Initialize to nullptr so these are never left indeterminate: only the
  // Max/Min (arg-tracking) branches below assign and later consume them.
  IdType* argB = nullptr;
  IdType* argE = nullptr;
  if (std::is_same<Redop, op::Max<DType>>::value || std::is_same<Redop, op::Min<DType>>::value) {
    argB = argu.Ptr<IdType>();
    argE = arge.Ptr<IdType>();
  }
  const int nthreads = omp_get_max_threads();
  const IdType M = csr.num_rows;
  const IdType N = bcast.out_len;
  const IdType K = csr.num_cols;
  const IdType* indptr = csr.indptr.Ptr<IdType>();
  CHECK_NOTNULL(indptr);
  // BUGFIX: was `const int`, which silently truncates indptr[M] when IdType
  // is int64_t and the graph holds more than INT32_MAX nonzeros.
  const IdType total_nnz = indptr[M];
  if (M <= 0 || K <= 0 || N <= 0 || total_nnz <= 0) return;
  const double avg_degree = total_nnz * 1.0 / M;
  const double nnz_prob = avg_degree / K;
  // K blocks are sized so the expected touched portion of the dense operand
  // fits in the LLC; M blocks are small enough for dynamic load balancing.
  IdType K_block_size = std::min((int64_t)K, (int64_t)(llc_size / (N * sizeof(DType) *
                        nnz_prob * BLOCKING_HEURISTIC_PARAM)));
  IdType M_block_size = M / (nthreads * NUM_BLOCKS_PER_THREAD);
  if (M_block_size == 0) M_block_size = 1;
  if (K_block_size == 0) K_block_size = 1;
  IdType num_M_blocks = (M + M_block_size - 1) / M_block_size;
  IdType num_K_blocks = (K + K_block_size - 1) / K_block_size;
  // NOTE(review): C11 aligned_alloc formally requires size to be a multiple
  // of the alignment; glibc is lenient but this is worth auditing.
  CSRMatrixInternal<IdType, IdType> *block_csr_array =
    (CSRMatrixInternal<IdType, IdType> *)aligned_alloc(64,
    sizeof(CSRMatrixInternal<IdType, IdType>) * num_M_blocks * num_K_blocks);
  // Fail fast on allocation failure instead of writing through nullptr
  // inside SpMMCreateBlocks.
  CHECK_NOTNULL(block_csr_array);
#ifdef DEBUG
  endTick = __rdtsc();
  if (std::is_same<Redop, op::Max<DType>>::value) {
    LOG(INFO) << "Redop = Max";
  } else if (std::is_same<Redop, op::Min<DType>>::value) {
    LOG(INFO) << "Redop = Min";
  } else if (std::is_same<Redop, op::Add<DType>>::value) {
    LOG(INFO) << "Redop = Add";
  }
  LOG(INFO) << "nthreads = " << nthreads << ", llc_size = " << llc_size;
  LOG(INFO) << "M = " << M << ", K = " << K << ", N = " << N;
  LOG(INFO) << "use_lhs = " << Op::use_lhs << ", use_rhs = " << Op::use_rhs;
  LOG(INFO) << "total_nnz = " << total_nnz << ", avg_degree = " << avg_degree;
  LOG(INFO) << "has_idx = " << has_idx;
  LOG(INFO) << "nnz_prob = " << nnz_prob;
  LOG(INFO) << "K_block_size = " << K_block_size << ", M_block_size = " << M_block_size;
  LOG(INFO) << "num_K_blocks = " << num_K_blocks << ", num_M_blocks = " << num_M_blocks;
  LOG(INFO) << "stage0 ticks = " << (endTick - startTick);
  startTick = __rdtsc();
#endif  // DEBUG
  // Stage 1: partition the CSR matrix into cache-friendly sub-blocks.
  SpMMCreateBlocks(csr, block_csr_array, num_M_blocks, num_K_blocks, M_block_size, K_block_size,
      Op::use_lhs, Op::use_rhs);
#ifdef DEBUG
  endTick = __rdtsc();
  LOG(INFO) << "stage1 ticks = " << (endTick - startTick);
  startTick = __rdtsc();
#endif  // DEBUG
  // Stage 2: JIT-compile the libxsmm op+reduce kernel matching Redop.
  libxsmm_meltwfunction_opreduce_vecs_idx kernel = nullptr;
  if (std::is_same<Redop, op::Max<DType>>::value) {
    kernel = SpMMCreateLibxsmmKernel<IdType, DType, Op>(has_idx, N,
      LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_MAX,
      true);
  } else if (std::is_same<Redop, op::Min<DType>>::value) {
    kernel = SpMMCreateLibxsmmKernel<IdType, DType, Op>(has_idx, N,
      LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_MIN,
      true);
  } else if (std::is_same<Redop, op::Add<DType>>::value) {
    kernel = SpMMCreateLibxsmmKernel<IdType, DType, Op>(has_idx, N,
      LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_SUM,
      false);
  }
#ifdef DEBUG
  endTick = __rdtsc();
  LOG(INFO) << "stage2 ticks = " << (endTick - startTick);
  startTick = __rdtsc();
#endif  // DEBUG
  // Stage 3: run the blocked SpMM; the Cmp path also fills argB/argE.
  if (std::is_same<Redop, op::Max<DType>>::value || std::is_same<Redop, op::Min<DType>>::value) {
    SpMMBlockwiseOpCmp<IdType, DType, Op, Redop>(block_csr_array, B, E, C, argB, argE, has_idx, N,
        num_M_blocks, num_K_blocks, M_block_size, kernel);
  } else {
    SpMMBlockwiseOpSum(block_csr_array, B, E, C, has_idx, N, num_M_blocks, num_K_blocks,
        M_block_size, kernel);
  }
#ifdef DEBUG
  endTick = __rdtsc();
  LOG(INFO) << "stage3 ticks = " << (endTick - startTick);
  startTick = __rdtsc();
#endif  // DEBUG
  // Stage 4: release per-block index/value buffers and the block array.
  SpMMFreeBlocks(block_csr_array, num_M_blocks, num_K_blocks, Op::use_lhs, Op::use_rhs);
#ifdef DEBUG
  endTick = __rdtsc();
  LOG(INFO) << "stage4 ticks = " << (endTick - startTick);
#endif  // DEBUG
}
/*!
* \brief Optimized CPU kernel of SpMM-Sum on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note it uses libxsmm, blocking and dynamic thread scheduling.
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCsrLibxsmm(const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out) {
NDArray dummy;
SpMMRedopCsrOpt<IdType, DType, Op, op::Add<DType>>(bcast, csr, ufeat, efeat, out, dummy, dummy);
}
/*!
 * \brief Optimized CPU kernel of SpMM-Min/Max on Csr format.
 * \param bcast Broadcast information.
 * \param csr The Csr matrix.
 * \param ufeat The feature on source nodes.
 * \param efeat The feature on edges.
 * \param out The result feature on destination nodes.
 * \param argu Arg-Min/Max on source nodes.
 * \param arge Arg-Min/Max on edges.
 * \note Thin wrapper: forwards straight to the generic blocked libxsmm
 *       kernel, which also fills the arg-min/arg-max arrays.
 */
template <typename IdType, typename DType, typename Op, typename RedOp>
void SpMMCmpCsrLibxsmm(const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat,
                       NDArray efeat, NDArray out, NDArray argu, NDArray arge) {
  SpMMRedopCsrOpt<IdType, DType, Op, RedOp>(
      bcast, csr, ufeat, efeat, out, argu, arge);
}
} // namespace cpu
} // namespace aten
} // namespace dgl
#endif // USE_LIBXSMM
#endif // USE_AVX
#endif // _WIN32
#endif // DGL_ARRAY_CPU_SPMM_BLOCKING_LIBXSMM_H_
|
GB_binop__isgt_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__isgt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_fp32)
// A*D function (colscale): GB (_AxD__isgt_fp32)
// D*A function (rowscale): GB (_DxB__isgt_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_fp32)
// C=scalar+B GB (_bind1st__isgt_fp32)
// C=scalar+B' GB (_bind1st_tran__isgt_fp32)
// C=A+scalar GB (_bind2nd__isgt_fp32)
// C=A'+scalar GB (_bind2nd_tran__isgt_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij > bij)
// Type/operator configuration consumed by the generic template files that
// are #include'd by the kernel stubs below (ISGT on fp32: cij = (aij > bij),
// with the result stored as a float 0 or 1).
#define GB_ATYPE \
    float
#define GB_BTYPE \
    float
#define GB_CTYPE \
    float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)
// NOTE(review): the next macro body ends in a trailing '\', so the comment
// line that follows is spliced into the macro; it is stripped as a comment
// during preprocessing, leaving the value 0 — harmless, but a generator quirk.
// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_FP32 || GxB_NO_ISGT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled stub: this kernel exists only for the accumulator op set listed
// below, which does not include ISGT, so the generator emits it under #if 0.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Kernel stub: the actual loop lives in the #include'd template, specialized
// here via the GB_* macros defined at the top of this file.
void GB (_Cdense_ewise3_noaccum__isgt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Kernel stub: body supplied by the #include'd template. Returns GrB_NO_VALUE
// when this operator is compiled out (GB_DISABLE), telling the caller to fall
// back to the generic kernel.
GrB_Info GB (_Cdense_accumB__isgt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Kernel stub: unwraps the typeless scalar to float, then the #include'd
// template performs the accumulation.
GrB_Info GB (_Cdense_accumb__isgt_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): appears unreachable (the block above already returns);
    // a harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Kernel stub: exposes C's value array as Cx for the #include'd template.
GrB_Info GB (_AxD__isgt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Kernel stub: exposes C's value array as Cx for the #include'd template.
GrB_Info GB (_DxB__isgt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Kernel stub for eWiseAdd. The alpha/beta scalars are only unwrapped for the
// eWiseUnion variant (which substitutes them for missing entries); the
// numeric work is in the #include'd template.
GrB_Info GB (_AaddB__isgt_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Kernel stub: body supplied by the #include'd meta-template.
GrB_Info GB (_AemultB_08__isgt_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Kernel stub. GB_BINOP_FLIP is 0 for ISGT (see the macros at the top of this
// file), so the second branch is compiled: flipxy was already resolved by the
// caller and a single non-flipped template instantiation suffices.
GrB_Info GB (_AemultB_02__isgt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Kernel stub: body supplied by the #include'd template.
GrB_Info GB (_AemultB_04__isgt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Kernel stub: body supplied by the #include'd template.
GrB_Info GB (_AemultB_bitmap__isgt_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]), stored as float 0 or 1, over all bnz entries.
// Entries absent from the bitmap Bb are skipped (GBB presumably treats a
// NULL Bb as "all present" — defined in GB.h; confirm there).
GrB_Info GB (_bind1st__isgt_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    // Embarrassingly parallel: entry p of Cx depends only on entry p of Bx.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y), stored as float 0 or 1, over all anz entries.
// Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__isgt_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    // Embarrassingly parallel: entry p of Cx depends only on entry p of Ax.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x > aij) ; \
}
// Transpose A while applying (x > aij) per entry; the traversal lives in the
// #include'd GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__isgt_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // (Here A's type equals the operator's x type — both float — so the
    // redefinitions before and after are identical; generator boilerplate.)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij > y) ; \
}
// Transpose A while applying (aij > y) per entry; the traversal lives in the
// #include'd GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__isgt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
base_split.h | #include "callback.h"
#include <omp.h>
/* With the combined parallel-for construct (base.h), the return-addresses are hard to compare.
With the separate parallel and for-nowait construct, the addresses become more predictable,
but the begin of the for-loop still generates additional code, so the offset of loop-begin
to the label is >4 Byte.
*/
int main()
{
  // Loop variable is declared outside the parallel region; the OpenMP `for`
  // construct makes the loop iteration variable private per thread, so this
  // is race-free despite being shared at declaration.
  unsigned int i;
#pragma omp parallel num_threads(4)
  {
    print_current_address(0);
    // `nowait` removes the implicit barrier so the loop-end return address
    // (label 2) is reached without synchronization — see the file comment
    // about making return addresses predictable for the OMPT callbacks.
#pragma omp for schedule(SCHEDULE) nowait
    for (i = 0; i < 4; i++) {
      print_fuzzy_address(1);
    }
    print_fuzzy_address(2);
  }
  print_fuzzy_address(3);
  // The CHECK lines below are the FileCheck oracle for this lit test; they
  // must match the tool output verbatim and must not be edited.
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_work'
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[PARALLEL_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker={{[0-9]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id={{[0-9]+}}, codeptr_ra=[[LOOP_BEGIN_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_END_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[LOOP_END_RETURN_ADDRESS]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, invoker={{[0-9]+}}, codeptr_ra=[[PARALLEL_RETURN_ADDRESS]]
  // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[PARALLEL_RETURN_ADDRESS]]
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_END_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[THREAD_ID]]: fuzzy_address={{.*}}[[LOOP_END_RETURN_ADDRESS]]
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_END_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[THREAD_ID]]: fuzzy_address={{.*}}[[LOOP_END_RETURN_ADDRESS]]
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id={{[0-9]+}}, codeptr_ra=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, codeptr_ra=[[LOOP_END_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[THREAD_ID]]: fuzzy_address={{.*}}[[LOOP_END_RETURN_ADDRESS]]
  // CHECK-LOOP: 0: NULL_POINTER=[[NULL:.*$]]
  // CHECK-LOOP: 0: ompt_event_runtime_shutdown
  // CHECK-LOOP: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra={{0x[0-f]+}}, invoker={{[0-9]+}}
  // CHECK-LOOP: {{^}}[[MASTER_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id={{[0-9]+}}, codeptr_ra=[[LOOP_BEGIN_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK-LOOP: {{^}}{{[0-9]+}}: fuzzy_address={{.*}}[[LOOP_BEGIN_RETURN_ADDRESS]]
  // CHECK-LOOP: {{^}}{{[0-9]+}}: fuzzy_address={{.*}}[[LOOP_BEGIN_RETURN_ADDRESS]]
  // CHECK-LOOP: {{^}}{{[0-9]+}}: fuzzy_address={{.*}}[[LOOP_BEGIN_RETURN_ADDRESS]]
  // CHECK-LOOP: {{^}}{{[0-9]+}}: fuzzy_address={{.*}}[[LOOP_BEGIN_RETURN_ADDRESS]]
  return 0;
}
|
DRB077-single-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A single directive is used to protect a write.
*/
#include <stdio.h>
/* Race-free by construction: the update of the shared counter is executed by
   exactly one thread of the team, protected by the single construct. */
int main()
{
  int counter = 0;
#pragma omp parallel shared(counter)
  {
#pragma omp single
    counter++;
  }
  printf ("count= %d\n", counter);
  return 0;
}
|
GB_unaryop__minv_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint16_uint16
// op(A') function: GB_tran__minv_uint16_uint16
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)
// Type/operator configuration consumed by the generic template code below
// (MINV on uint16: cij = GB_IMINV_UNSIGNED (aij, 16), the 16-bit modular
// multiplicative inverse — see GB.h for the macro's definition).
#define GB_ATYPE \
    uint16_t
#define GB_CTYPE \
    uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 16) ;
// casting
#define GB_CASTING(z, aij) \
    uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */  \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */  \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies the MINV operator entry-wise; returns GrB_NO_VALUE when compiled
// out (GB_DISABLE), telling the caller to fall back to the generic kernel.
GrB_Info GB_unop__minv_uint16_uint16
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Embarrassingly parallel: entry p of Cx depends only on entry p of Ax.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = GB_IMINV_UNSIGNED ((uint16_t) Ax [p], 16), via the macros above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Kernel stub: the transpose traversal lives in the #include'd template,
// specialized by the GB_* macros above; only numeric phase 2 is compiled here.
GrB_Info GB_tran__minv_uint16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
streamingbc_internal_fine.c | #include <omp.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include "streamingbc_aux.h"
#define PARENT_ANCHORED 3
#define SIBLING_ANCHORED 4
void addEdgeWithoutMovementBrandesFG(bcForest * forest, struct stinger * sStinger,
int64_t currRoot, int64_t startVertex, int64_t parentVertex,
int64_t addedPathsToRoot, extraArraysPerThread * eAPT, int64_t cores)
{
bcTree * tree = forest->forest[currRoot];
int64_t * QueueDown = eAPT->QueueDown;
int64_t * QueueUp = eAPT->QueueUp;
int64_t * QueueDownBorders = eAPT->QueueSame;
int64_t NV = forest->NV;
eAPT->sV[startVertex].newSigma = tree->vArr[startVertex].sigma;
eAPT->sV[parentVertex].newEdgesBelow = tree->vArr[parentVertex].edgesBelow;
eAPT->sV[startVertex].newEdgesBelow = tree->vArr[startVertex].edgesBelow;
eAPT->sV[parentVertex].newEdgesAbove = tree->vArr[parentVertex].edgesAbove;
eAPT->sV[startVertex].newEdgesAbove = tree->vArr[startVertex].edgesAbove;
eAPT->sV[startVertex].touched = 1;
eAPT->sV[startVertex].newSigma += addedPathsToRoot;
eAPT->sV[startVertex].diffPath = addedPathsToRoot;
eAPT->sV[startVertex].newEdgesAbove += eAPT->sV[parentVertex].newEdgesAbove + 1;
eAPT->sV[parentVertex].newEdgesBelow += eAPT->sV[startVertex].newEdgesBelow + 1;
QueueDown[0] = startVertex;
int64_t * qStart = &(eAPT->qStart);
int64_t * qEnd = &(eAPT->qEnd);
int64_t * qStart_nxt = &(eAPT->qStart_nxt);
int64_t * qEnd_nxt = &(eAPT->qEnd_nxt);
int64_t qDownBIndex = 0;
*qEnd = 1;
*qStart_nxt = 1;
*qEnd_nxt = 1;
int64_t deepestLevel = tree->vArr[startVertex].level;
int64_t intialLevel = tree->vArr[startVertex].level;
int64_t qDownEndMarker = -1;
int64_t qDownEnd = -1;
// Starting BFS decent from "startVertex", down to all the vertices that have shortest paths through "startVertex"
// All elements that will be touched will receive a positive value in their touched field.
// In this implementation, "STACKS" are not used for the "moving up" stage. Rather, a multi-level queue is used.
// Each level in the tree(max depth NV) has a queue and a counter specifiying how deep a specific deepth-queue is.
// For simplicity, all elements are pushed both into the multi-level queue and into the regular queue which is used
// for the BFS traversal.
#pragma omp parallel num_threads(cores)
{
while (*qStart < *qEnd) {
#pragma omp master
{
QueueDownBorders[qDownBIndex++] = *qStart;
QueueDownBorders[qDownBIndex++] = *qEnd;
}
#pragma omp barrier
#pragma omp for
for (int64_t i = *qStart; i < *qEnd; i++) {
int64_t currElement = QueueDown[i];
int64_t levelCurrPlusOne = tree->vArr[currElement].level + 1;
int64_t touchedCurrPlusOne = eAPT->sV[currElement].touched + 1;
STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
int64_t k = STINGER_EDGE_DEST;
// if this vertex has not been added yet
if (levelCurrPlusOne == (tree->vArr[k].level)) {
if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, currElement)) {
// Checking if a "deeper level" has been reached.
if (deepestLevel < tree->vArr[k].level) {
deepestLevel = tree->vArr[k].level;
}
__atomic_fetch_add(&(eAPT->sV[k].newEdgesAbove), tree->vArr[k].edgesAbove - tree->vArr[currElement].edgesAbove +
eAPT->sV[currElement].newEdgesAbove, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[k].newSigma), tree->vArr[k].sigma, __ATOMIC_RELAXED);
// insert this vertex into the BFS queue
QueueDown[__atomic_fetch_add(qEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
// indicate that it is in the next level of the BFS
// add new paths to root that go through current BFS Vertex
__atomic_fetch_add(&(eAPT->sV[k].newSigma), eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
// pass on my new paths to root for its search
__atomic_fetch_add(&(eAPT->sV[k].diffPath), eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
}
// otherwise if it has been touched, but is specifically in the next level
// of the search (meaning it has more than one edge to the current level)
else if (eAPT->sV[k].touched != currElement) {
__atomic_fetch_add(&(eAPT->sV[k].newEdgesAbove), -tree->vArr[currElement].edgesAbove, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[k].newEdgesAbove), eAPT->sV[currElement].newEdgesAbove, __ATOMIC_RELAXED);
// add new paths to root that go through current BFS Vertex
__atomic_fetch_add(&(eAPT->sV[k].newSigma), eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
// pass on my new paths to root for its search
__atomic_fetch_add(&(eAPT->sV[k].diffPath), eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
}
}
}
STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
eAPT->dynamicTraverseVerticeCounter++;
#endif
}
#pragma omp master
{
*qStart = *qStart_nxt;
*qEnd = *qEnd_nxt;
*qStart_nxt = *qEnd;
*qEnd_nxt = *qStart_nxt;
}
#pragma omp barrier
}
#pragma omp master
{
qDownEndMarker = *qEnd - 1;
}
#pragma omp barrier
#pragma omp master
{
*qStart = 0;
*qEnd = 0;
*qStart_nxt = 0;
*qEnd_nxt = 0;
}
#pragma omp barrier
// Starting Multi-Level "BFS" ascent.
// The ascent continues going up as long as the root has not been reached and that there
// are elements in the current level of the ascent. The ascent starts in the deepest level
// of the graph.
// It is worth noting that in the ascent stage:
// 1) All previously untouched elements that are touched are marked with "-1".
// 2) On the way up, it is possible that elements that were touched in the BFS descent will
// touch elements that were not touched in the descent and that are below "vertex". These
// are elements that do not have shortest paths going through "vertex"; however, their BC
// values have changed due to the changes occurring below them. Because of this, they are
// placed in the multi-level queue.
//#pragma omp parallel num_threads(cores)
//{
while (!(qDownBIndex <= 0 && *qStart >= *qEnd && *qStart_nxt >= *qEnd_nxt)) {
if (qDownBIndex >= 2) {
#pragma omp for
for (int64_t i = QueueDownBorders[qDownBIndex - 2]; i < QueueDownBorders[qDownBIndex - 1]; i++) {
int64_t currElement = QueueDown[i];
int64_t levelCurrMinusOne = tree->vArr[currElement].level - 1;
STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
int64_t k = STINGER_EDGE_DEST;
if (tree->vArr[k].level == levelCurrMinusOne) {
// Checking to see if "k" has been touched before.
if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
eAPT->sV[k].newDelta += tree->vArr[k].delta;
// Marking element as touched in the ascent stage.
QueueUp[__atomic_fetch_add(qEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
__atomic_fetch_add(&(eAPT->sV[k].newSigma), tree->vArr[k].sigma, __ATOMIC_RELAXED);
if (k != parentVertex) {
__atomic_fetch_add(&(eAPT->sV[k].newEdgesBelow), tree->vArr[k].edgesBelow, __ATOMIC_RELAXED);
}
}
if (k != parentVertex && tree->vArr[k].level <= tree->vArr[parentVertex].level) {
__atomic_fetch_add(&(eAPT->sV[k].newEdgesBelow), -tree->vArr[currElement].edgesBelow, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[k].newEdgesBelow), eAPT->sV[currElement].newEdgesBelow, __ATOMIC_RELAXED);
}
}
if (tree->vArr[k].level == tree->vArr[currElement].level + 1 && eAPT->sV[k].touched != 0) {
eAPT->sV[currElement].newDelta +=
((bc_t)eAPT->sV[currElement].newSigma / (bc_t)eAPT->sV[k].newSigma) *
(bc_t)(eAPT->sV[k].newDelta + 1);
// For the elements that are touched in the ascent stage it is necessary to
// to reduce the values that they previously had.
// In addition to this, the "parentVertex" that is connected to "vertex", i.e.
// the vertices of the new edge, needs to increase its betweenness centrality
// following the new connection, without removing the old delta value.
if (eAPT->sV[currElement].touched < 0 && (currElement != parentVertex || k != startVertex)) {
eAPT->sV[currElement].newDelta -=
((bc_t)tree->vArr[currElement].sigma / (bc_t)tree->vArr[k].sigma) *
(bc_t)(tree->vArr[k].delta + 1);
}
}
}
STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
eAPT->dynamicTraverseVerticeCounter++;
#endif
if (currElement != currRoot) {
eAPT->sV[currElement].totalBC += eAPT->sV[currElement].newDelta - tree->vArr[currElement].delta;
}
}
}
#pragma omp master
{
qDownBIndex -= 2;
}
#pragma omp barrier
#pragma omp master
{
*qStart = *qStart_nxt;
*qEnd = *qEnd_nxt;
*qStart_nxt = *qEnd;
*qEnd_nxt = *qStart_nxt;
}
#pragma omp barrier
#pragma omp for
for (int64_t i = *qStart; i < *qEnd; i++) {
int64_t currElement = QueueUp[i];
int64_t levelCurrMinusOne = tree->vArr[currElement].level - 1;
STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
int64_t k = STINGER_EDGE_DEST;
if (tree->vArr[k].level == levelCurrMinusOne) {
// Checking to see if "k" has been touched before.
if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
eAPT->sV[k].newDelta += tree->vArr[k].delta;
// Marking element as touched in the ascent stage.
QueueUp[__atomic_fetch_add(qEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
__atomic_fetch_add(&(eAPT->sV[k].newSigma), tree->vArr[k].sigma, __ATOMIC_RELAXED);
if (k != parentVertex) {
__atomic_fetch_add(&(eAPT->sV[k].newEdgesBelow), tree->vArr[k].edgesBelow, __ATOMIC_RELAXED);
}
}
if (k != parentVertex && tree->vArr[k].level <= tree->vArr[parentVertex].level) {
__atomic_fetch_add(&(eAPT->sV[k].newEdgesBelow), -tree->vArr[currElement].edgesBelow, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[k].newEdgesBelow), eAPT->sV[currElement].newEdgesBelow, __ATOMIC_RELAXED);
}
}
if (tree->vArr[k].level == tree->vArr[currElement].level + 1 && eAPT->sV[k].touched != 0) {
eAPT->sV[currElement].newDelta +=
((bc_t)eAPT->sV[currElement].newSigma / (bc_t)eAPT->sV[k].newSigma) *
(bc_t)(eAPT->sV[k].newDelta + 1);
// For the elements that are touched in the ascent stage it is necessary to
// to reduce the values that they previously had.
// In addition to this, the "parentVertex" that is connected to "vertex", i.e.
// the vertices of the new edge, needs to increase its betweenness centrality
// following the new connection, without removing the old delta value.
if (eAPT->sV[currElement].touched < 0 && (currElement != parentVertex || k != startVertex)) {
eAPT->sV[currElement].newDelta -=
((bc_t)tree->vArr[currElement].sigma / (bc_t)tree->vArr[k].sigma) *
(bc_t)(tree->vArr[k].delta + 1);
}
}
}
STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
eAPT->dynamicTraverseVerticeCounter++;
#endif
if (currElement != currRoot) {
eAPT->sV[currElement].totalBC += eAPT->sV[currElement].newDelta - tree->vArr[currElement].delta;
}
}
#pragma omp master
{
*qStart = *qEnd;
}
#pragma omp barrier
}
#pragma omp for
for (int64_t c = 0; c <= qDownEndMarker; c++) {
int64_t k = QueueDown[c];
tree->vArr[k].delta = eAPT->sV[k].newDelta;
tree->vArr[k].sigma = eAPT->sV[k].newSigma;
tree->vArr[k].edgesAbove = eAPT->sV[k].newEdgesAbove;
eAPT->sV[k].diffPath = 0;
eAPT->sV[k].touched = 0;
eAPT->sV[k].newDelta = 0.0;
eAPT->sV[k].newSigma = 0;
eAPT->sV[k].newEdgesAbove = 0;
}
eAPT->sV[startVertex].newEdgesAbove = 0;
eAPT->sV[parentVertex].newEdgesAbove = 0;
#pragma omp for
for (int64_t c = 0; c < *qEnd; c++) {
int64_t k = QueueUp[c];
tree->vArr[k].delta = eAPT->sV[k].newDelta;
tree->vArr[k].edgesBelow = eAPT->sV[k].newEdgesBelow;
eAPT->sV[k].diffPath = 0;
eAPT->sV[k].touched = 0;
eAPT->sV[k].newDelta = 0.0;
eAPT->sV[k].newEdgesBelow = 0;
eAPT->sV[k].newSigma = 0;
eAPT->sV[k].newEdgesBelow = 0;
}
}
eAPT->sV[parentVertex].newEdgesBelow = 0;
eAPT->sV[startVertex].newEdgesBelow = 0;
eAPT->qStart = 0;
eAPT->qEnd = 0;
eAPT->qStart_nxt = 0;
eAPT->qEnd_nxt = 0;
}
// moveUpTreeBrandesFG
// -------------------
// Fine-grained (OpenMP-parallel) repair of the betweenness-centrality (BC)
// BFS tree rooted at "currRoot" after an edge insertion that moves
// "startVertex" UP the tree: its new parent "parentVertex" gives it a shorter
// distance to the root. "prevDist" is stored as startVertex's movementDelta,
// i.e. the number of levels it (and possibly a whole subtree) must move up
// — presumably the old-minus-new distance; TODO confirm against the caller.
//
// Parameters:
//   forest       - forest of per-root BC trees; forest->forest[currRoot] is
//                  updated in place (level, sigma, delta, edgesAbove/Below).
//   sStinger     - STINGER graph, traversed with STINGER_FORALL_EDGES_OF_VTX_*.
//   currRoot     - root of the tree being repaired.
//   startVertex  - vertex that moves up; endpoint of the inserted edge.
//   parentVertex - the other endpoint; becomes startVertex's new parent.
//   prevDist     - initial movementDelta assigned to startVertex.
//   eAPT         - per-thread scratch: queues (QueueDown/Up/Same), the
//                  border stack, the multi-level queue, and the per-vertex
//                  shadow state sV[] (new* fields, touched, diffPath, IMoved).
//                  Assumed zeroed on entry; it is re-zeroed before return.
//   cores        - number of OpenMP threads for the parallel regions.
//
// Stages (all visible below):
//   1) Parallel BFS descent from startVertex marking every vertex whose level
//      and/or sigma changes; each settled vertex is pushed into a per-level
//      multi-level queue (appendDS2) after its level is adjusted.
//   2) The master thread drains the multi-level queue in increasing level
//      order into QueueDown, so QueueDown ends up sorted by (new) level.
//   3) Dependency-accumulation ascent interleaving three work queues
//      (operation 0 = descent levels in reverse, 1 = newly touched "up"
//      vertices, 2 = same-level vertices whose sigma shrank); each round the
//      master picks the queue whose current depth is greatest so vertices are
//      processed deepest-first across all three queues.
//   4) Cleanup: copy shadow (new*) values back into the tree and zero eAPT.
//
// Concurrency notes (grounded in the code below):
//   - qStart/qEnd and qStart_nxt/qEnd_nxt implement a double-buffered frontier;
//     only the master swaps them, always between barriers.
//   - depthDown/depthUp/depthSame, operation, upCounter and qDownBIndex are
//     declared before the parallel region and are therefore OpenMP-shared;
//     QUpStart/QUpEnd/QSameStart/QSameEnd are declared inside it and private.
//   - NOTE(review): upCounter is incremented by many threads without atomics
//     (a race); it is never read for results here, so it looks benign — verify.
void moveUpTreeBrandesFG(bcForest * forest, struct stinger * sStinger,
int64_t currRoot, int64_t startVertex, int64_t parentVertex,
int64_t prevDist, extraArraysPerThread * eAPT, int64_t cores)
{
bcTree * tree = forest->forest[currRoot];
int64_t NV = forest->NV;
int64_t * QueueDown = eAPT->QueueDown;
int64_t * QueueUp = eAPT->QueueUp;
int64_t * QueueSame = eAPT->QueueSame;
// The "Stack" scratch array records [start,end) index pairs of each descent
// frontier so the ascent can walk the descent levels in reverse order.
int64_t * QueueDownBorders = eAPT->Stack;
list_ptr * multiLevelQueues = eAPT->multiLevelQueues;
queue_t * queue = eAPT->queue;
level_node_t * levelIndices = eAPT->levelIndices;
eAPT->sV[parentVertex].newSigma = tree->vArr[parentVertex].sigma;
eAPT->sV[startVertex].newSigma = tree->vArr[startVertex].sigma;
int64_t * qStart = &(eAPT->qStart);
int64_t * qEnd = &(eAPT->qEnd);
int64_t * qStart_nxt = &(eAPT->qStart_nxt);
int64_t * qEnd_nxt = &(eAPT->qEnd_nxt);
int64_t qDownBIndex = 0;
int64_t * qStartSame = &(eAPT->qStartSame);
int64_t * qEndSame = &(eAPT->qEndSame);
int64_t * qStartSame_nxt = &(eAPT->qStartSame_nxt);
int64_t * qEndSame_nxt = &(eAPT->qEndSame_nxt);
// Seed the descent queue with startVertex only.
*qEnd = 1;
*qStart_nxt = 1;
*qEnd_nxt = 1;
int64_t qDownEndMarker = -1;
int64_t depthDown = -1, depthUp = -1, depthSame = -1;
int64_t upCounter = -1;
int64_t currElement = 0; //dummy initilization - variable will be initialized in function.
int operation = -1; // 0 - down, 1 - up, 2 - same for dependency accumulation.
QueueDown[0] = startVertex;
// startVertex inherits parent's path count and must move up prevDist levels.
eAPT->sV[startVertex].touched = 1;
eAPT->sV[startVertex].newSigma = eAPT->sV[parentVertex].newSigma;
eAPT->sV[startVertex].diffPath = eAPT->sV[parentVertex].newSigma;
eAPT->sV[startVertex].movementDelta = prevDist;
eAPT->sV[startVertex].IMoved = 1;
eAPT->sV[parentVertex].newEdgesAbove = tree->vArr[parentVertex].edgesAbove;
eAPT->sV[startVertex].newEdgesAbove = eAPT->sV[parentVertex].newEdgesAbove + 1;
int64_t deepestLevel = tree->vArr[parentVertex].level + 1;
// Starting BFS descent from "startVertex", down to all the vertices that have shortest paths through "startVertex".
// All elements that will be touched will receive a positive value in their touched field.
// In this implementation, "STACKS" are not used for the "moving up" stage. Rather, a multi-level queue is used.
// Each level in the tree (max depth NV) has a queue and a counter specifying how deep a specific depth-queue is.
// For simplicity, all elements are pushed both into the multi-level queue and into the regular queue which is used
// for the BFS traversal.
#pragma omp parallel num_threads(cores)
{
while (*qStart < *qEnd) {
#pragma omp master
{
// Record this frontier's [start,end) so the ascent can replay levels in reverse.
QueueDownBorders[qDownBIndex++] = *qStart;
QueueDownBorders[qDownBIndex++] = *qEnd;
}
#pragma omp barrier
#pragma omp for
for (int64_t i = *qStart; i < *qEnd; i++) {
int64_t currElement = QueueDown[i];
int64_t touchedCurrPlusOne = eAPT->sV[currElement].touched + 1;
// Atomically zero newEdgesAbove (x += -x); it is recomputed from scratch below.
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), -eAPT->sV[currElement].newEdgesAbove, __ATOMIC_RELAXED);
STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
int64_t k = STINGER_EDGE_DEST;
// computedDelta: how far neighbor k would still need to move up if reached
// through currElement (movement shrinks by the level gap crossed).
int64_t computedDelta = eAPT->sV[currElement].movementDelta -
(tree->vArr[currElement].level - tree->vArr[k].level + 1);
// NOTE(review): newCurrLevel/newKLevel are thread-local temporaries; the
// atomic adds on them are unnecessary (plain arithmetic would do) but harmless.
int64_t newCurrLevel = 0;
__atomic_fetch_add(&newCurrLevel, tree->vArr[currElement].level, __ATOMIC_RELAXED);
__atomic_fetch_add(&newCurrLevel, -eAPT->sV[currElement].movementDelta, __ATOMIC_RELAXED);
int64_t newKLevel = 0;
__atomic_fetch_add(&newKLevel, tree->vArr[k].level, __ATOMIC_RELAXED);
__atomic_fetch_add(&newKLevel, -computedDelta, __ATOMIC_RELAXED);
if (computedDelta < 0 && eAPT->sV[k].touched == 0) {
// NOTE(review): this inner branch requires computedDelta >= 0 while the
// enclosing if requires computedDelta < 0 — it is dead code as written.
// Possibly a copy/paste remnant of the CAS branch below; confirm intent
// before removing.
if (computedDelta >= 0 && newKLevel < newCurrLevel) {
__atomic_fetch_add(&(eAPT->sV[k].newEdgesAbove), tree->vArr[k].edgesAbove, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), eAPT->sV[k].newEdgesAbove + 1, __ATOMIC_RELAXED);
}
// Untouched neighbor that will remain above currElement after the move:
// count its (unchanged) edgesAbove into currElement's new tally.
if (computedDelta < 0 && tree->vArr[k].level < newCurrLevel) {
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), tree->vArr[k].edgesAbove + 1, __ATOMIC_RELAXED);
}
}
// First thread to touch k claims it (touched = parent's touched + 1).
else if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, touchedCurrPlusOne)) {
if (computedDelta >= 0 && newKLevel < newCurrLevel) {
__atomic_fetch_add(&(eAPT->sV[k].newEdgesAbove), tree->vArr[k].edgesAbove, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), eAPT->sV[k].newEdgesAbove + 1, __ATOMIC_RELAXED);
}
if (computedDelta < 0 && tree->vArr[k].level < newCurrLevel) {
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), tree->vArr[k].edgesAbove + 1, __ATOMIC_RELAXED);
}
// if the adjacent vertex should be moved, put it in the queue
if (computedDelta > 0) {
__atomic_fetch_add(&(eAPT->sV[k].newSigma), eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[k].diffPath), eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[k].movementDelta), computedDelta, __ATOMIC_RELAXED);
// IMoved becomes positive (initial value is presumably -1; +2 -> 1). TODO confirm.
__atomic_fetch_add(&(eAPT->sV[k].IMoved), 2, __ATOMIC_RELAXED);
QueueDown[__atomic_fetch_add(qEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
}
// Vertex that will not be moved has been found.
else if (computedDelta == 0) {
// k keeps its level but gains the paths arriving through the moved subtree.
__atomic_fetch_add(&(eAPT->sV[k].newSigma), tree->vArr[k].sigma, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[k].newSigma), eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[k].diffPath), eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[k].movementDelta), computedDelta, __ATOMIC_RELAXED);
// Atomically zero IMoved (x += -x): k did not move.
__atomic_fetch_add(&(eAPT->sV[k].IMoved), -eAPT->sV[k].IMoved, __ATOMIC_RELAXED);
QueueDown[__atomic_fetch_add(qEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
}
// Vertex that the number of shortest path to the root does not change has been found.
// This vertex is not marked as it might be touched on the way up.
// if adjacent and in the next level
}
// k was already claimed by another vertex of this same frontier.
else if (eAPT->sV[k].touched == touchedCurrPlusOne) {
if (computedDelta >= 0 && newKLevel < newCurrLevel) {
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), eAPT->sV[k].newEdgesAbove + 1, __ATOMIC_RELAXED);
}
if (computedDelta < 0 && tree->vArr[k].level < newCurrLevel) {
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), eAPT->sV[k].newEdgesAbove + 1, __ATOMIC_RELAXED);
}
if (computedDelta >= 0) {
__atomic_fetch_add(&(eAPT->sV[k].newSigma), eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[k].diffPath), eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
}
} else if (computedDelta >= 0 && newKLevel < newCurrLevel) {
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), eAPT->sV[k].newEdgesAbove + 1, __ATOMIC_RELAXED);
} else if (computedDelta < 0 && tree->vArr[k].level < newCurrLevel) {
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), eAPT->sV[k].newEdgesAbove + 1, __ATOMIC_RELAXED);
}
}
STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
eAPT->dynamicTraverseVerticeCounter++;
#endif
// move ourself and retire
// Apply the movement: level -= movementDelta, then file currElement in the
// multi-level queue at its new level for the later level-ordered drain.
__atomic_fetch_add(&(tree->vArr[currElement].level), -eAPT->sV[currElement].movementDelta, __ATOMIC_RELAXED);
appendDS2(queue, levelIndices, tree->vArr[currElement].level, currElement, omp_get_thread_num());
// Checking if a "deeper level" has been reached.
// NOTE(review): deepestLevel is shared and updated without atomics (benign-looking
// max-race; the value is not read again in this function — verify).
if (deepestLevel < tree->vArr[currElement].level) {
deepestLevel = tree->vArr[currElement].level;
}
}
#pragma omp master
{
// Swap frontier buffers: current := next, next becomes empty at current end.
*qStart = *qStart_nxt;
*qEnd = *qEnd_nxt;
*qStart_nxt = *qEnd;
*qEnd_nxt = *qStart_nxt;
}
#pragma omp barrier
}
// Starting Multi-Level "BFS" ascent.
#pragma omp master
{
// Drain the multi-level queue in increasing level order so QueueDown is
// sorted by new level; the ascent then walks it back-to-front (deepest first).
*qEnd = 0;
queue_node_t * temp_node;
for (int lev = tree->vArr[startVertex].level; lev < NV; lev++) {
temp_node = getFirstDS(queue, levelIndices, lev);
while (temp_node != NULL) {
QueueDown[(*qEnd)++] = temp_node->data;
deleteFirstDS(queue, levelIndices, lev);
temp_node = getFirstDS(queue, levelIndices, lev);
}
}
}
#pragma omp barrier
#pragma omp master
{
// qEnd now indexes the last (deepest) element; remember it for cleanup.
(*qEnd)--;
qDownEndMarker = *qEnd;
}
#pragma omp barrier
int64_t QUpStart = 0, QUpEnd = 0;
int64_t QSameStart = 0, QSameEnd = 0;
currElement = 0; //dummy initilization - variable will be initialized in function.
upCounter = 0;
// NOTE(review): every thread writes these shared depth/queue counters here
// (same values each, so the races look benign) — verify.
depthDown = tree->vArr[QueueDown[*qEnd]].level, depthUp = -1, depthSame = -1;
*qStart = 0;
*qEnd = 0;
*qStart_nxt = 0;
*qEnd_nxt = 0;
*qStartSame = 0;
*qEndSame = 0;
*qStartSame_nxt = 0;
*qEndSame_nxt = 0;
// The ascent continues going up as long as the root has not been reached and there
// are elements in the current level of the ascent. The ascent starts in the deepest level
// of the graph.
// It is worth noting that in the ascent stage:
// 1) All previously untouched elements that are touched are marked with "-1".
// 2) On the way up, it is possible that elements that were touched in the BFS descent will
// touch elements that were not touched in the descent and that are below "startVertex". These
// are elements that do not have shortest paths going through "startVertex"; however, their BC
// values have changed due to the changes occurring below them. Because of this, they are
// placed in the multi-level queue.
// 3) There are vertices that did not move and that one of their neighbors moved up (such that
// the vertices are now in the same level). Consequently, the number of shortest paths going
// through the vertex that did not move was reduced. These vertices will be touched as -2
// and added to the queue, and the "BFS ascent" will continue from these vertices as well.
while (!(qDownBIndex <= 0 && *qStart >= *qEnd && *qStart_nxt >= *qEnd_nxt && *qStartSame >= *qEndSame && *qStartSame_nxt >= *qEndSame_nxt)
&& !(depthDown == -1 && depthSame == -1 && depthUp == -1)) {
#pragma omp master
{
operation = -1; // 0 - down, 1 - up, 2 - same
}
#pragma omp barrier
#pragma omp master
{
// Pick the queue whose pending work is deepest, so dependency accumulation
// always proceeds from deeper levels toward the root across all three queues.
// NOTE(review): the refresh tests below look inverted — a NON-empty next
// queue sets its depth to -1 (exhausted), while an empty one dereferences
// its front entry. The down-branch also leaves depthDown stale when
// qDownBIndex == 2. Behavior may depend on these exact comparisons
// interacting with the outer while-condition; confirm before changing.
if (depthUp >= depthSame && depthUp >= depthDown)
{
operation = 1;
if (*qEnd_nxt > *qStart_nxt)
depthUp = -1;
else {
depthUp = tree->vArr[QueueUp[*qStart_nxt]].level;
}
} else if (depthDown >= depthSame && depthDown >= depthUp)
{
operation = 0;
if (qDownBIndex < 2 || QueueDownBorders[qDownBIndex - 2] > QueueDownBorders[qDownBIndex - 1])
depthDown = -1;
else if (qDownBIndex > 2) {
depthDown = tree->vArr[QueueDown[QueueDownBorders[qDownBIndex - 2] - 1]].level;
}
}
else if (depthDown <= depthSame && depthUp <= depthSame)
{
operation = 2;
if (*qEndSame_nxt > *qStartSame_nxt)
depthSame = -1;
else
depthSame = tree->vArr[QueueSame[*qStartSame_nxt]].level;
}
}
#pragma omp barrier
// operation 0: process the most recent (deepest) recorded descent frontier.
if (operation == 0 && qDownBIndex >= 2) {
#pragma omp for
for (int64_t i = QueueDownBorders[qDownBIndex - 2]; i < QueueDownBorders[qDownBIndex - 1]; i++) {
int64_t currElement = QueueDown[i];
int64_t levelCurrMinusOne = tree->vArr[currElement].level - 1;
eAPT->sV[currElement].newEdgesBelow = 0;
STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
int64_t k = STINGER_EDGE_DEST;
// Checking that the vertices are in different levels.
if (tree->vArr[k].level == tree->vArr[currElement].level + 1) {
// Recount edgesBelow from children: untouched children contribute their
// old count, touched ones their updated count.
if (eAPT->sV[k].touched == 0) {
eAPT->sV[currElement].newEdgesBelow += tree->vArr[k].edgesBelow + 1;
} else {
eAPT->sV[currElement].newEdgesBelow += eAPT->sV[k].newEdgesBelow + 1;
}
}
if (tree->vArr[k].level == levelCurrMinusOne) {
// Checking to see if "k" has been touched before.
if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
eAPT->sV[k].newDelta += tree->vArr[k].delta;
upCounter++;
// Marking element as touched in the ascent stage.
// NOTE(review): redundant store — the CAS above already set touched to -1.
eAPT->sV[k].touched = -1;
__sync_bool_compare_and_swap(&depthUp, -1, tree->vArr[k].level);
QueueUp[__atomic_fetch_add(qEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
if (k != parentVertex)
eAPT->sV[k].newSigma += tree->vArr[k].sigma;
}
}
if (tree->vArr[k].level == tree->vArr[currElement].level + 1 && eAPT->sV[k].touched != 0) {
// Standard Brandes dependency accumulation with the updated sigma/delta.
eAPT->sV[currElement].newDelta +=
((bc_t)eAPT->sV[currElement].newSigma / (bc_t)eAPT->sV[k].newSigma) *
(bc_t)(eAPT->sV[k].newDelta + 1);
// For the elements that are touched in the ascent stage it is necessary
// to reduce the values that they previously had.
// In addition to this, the "parentVertex" that is connected to "startVertex", i.e.
// the vertices of the new edge, needs to increase its betweenness centrality
// following the new connection, without removing the old delta value.
if (eAPT->sV[currElement].touched < 0 && ( currElement != parentVertex || k != startVertex)) {
eAPT->sV[currElement].newDelta -=
((bc_t)tree->vArr[currElement].sigma / (bc_t)tree->vArr[k].sigma) *
(bc_t)(tree->vArr[k].delta + 1);
}
}
// Vertices that did not move and that one of their neighbors moved up (such that
// the vertices are now in the same level).
if (tree->vArr[k].level == tree->vArr[currElement].level && ((eAPT->sV[currElement].IMoved == 1 && eAPT->sV[k].IMoved < 0) )) {
// Checking to see if "k" has been touched before.
if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
eAPT->sV[k].newDelta += tree->vArr[k].delta;
upCounter++;
// Marking element as touched (-2 = same-level case) in the ascent stage.
eAPT->sV[k].touched = -2;
__sync_bool_compare_and_swap(&depthSame, -1, tree->vArr[k].level);
QueueSame[__atomic_fetch_add(qEndSame_nxt, 1, __ATOMIC_RELAXED)] = k;
eAPT->sV[k].newSigma += tree->vArr[k].sigma;
}
// Paths that previously went through this vertex no longer go through it, thus the
// shortest path count (BC) is reduced.
}
if (tree->vArr[k].level == tree->vArr[currElement].level && ((eAPT->sV[k].IMoved == 1 && eAPT->sV[currElement].IMoved < 0) )) {
eAPT->sV[currElement].newDelta -=
((bc_t)tree->vArr[currElement].sigma / (bc_t)tree->vArr[k].sigma) *
(bc_t)(tree->vArr[k].delta + 1);
}
}
STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
eAPT->dynamicTraverseVerticeCounter++;
#endif
if (currElement != currRoot) {
eAPT->sV[currElement].totalBC += eAPT->sV[currElement].newDelta - tree->vArr[currElement].delta;
}
}
#pragma omp master
{
// Pop this frontier's [start,end) pair off the border stack.
qDownBIndex -= 2;
}
#pragma omp barrier
}
// operation 1: process the newly touched "up" queue (same accumulation body).
if (operation == 1) {
#pragma omp master
{
*qStart = *qStart_nxt;
* qEnd = *qEnd_nxt;
* qStart_nxt = *qEnd;
* qEnd_nxt = *qStart_nxt;
}
#pragma omp barrier
#pragma omp for
for (int64_t i = *qStart; i < *qEnd; i++) {
int64_t currElement = QueueUp[i];
int64_t levelCurrMinusOne = tree->vArr[currElement].level - 1;
eAPT->sV[currElement].newEdgesBelow = 0;
STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
int64_t k = STINGER_EDGE_DEST;
// Checking that the vertices are in different levels.
if (tree->vArr[k].level == tree->vArr[currElement].level + 1) {
if (eAPT->sV[k].touched == 0) {
eAPT->sV[currElement].newEdgesBelow += tree->vArr[k].edgesBelow + 1;
} else {
eAPT->sV[currElement].newEdgesBelow += eAPT->sV[k].newEdgesBelow + 1;
}
}
if (tree->vArr[k].level == levelCurrMinusOne) {
// Checking to see if "k" has been touched before.
if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
eAPT->sV[k].newDelta += tree->vArr[k].delta;
upCounter++;
// Marking element as touched in the ascent stage.
eAPT->sV[k].touched = -1;
__sync_bool_compare_and_swap(&depthUp, -1, tree->vArr[k].level);
QueueUp[__atomic_fetch_add(qEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
if (k != parentVertex)
eAPT->sV[k].newSigma += tree->vArr[k].sigma;
}
}
if (tree->vArr[k].level == tree->vArr[currElement].level + 1 && eAPT->sV[k].touched != 0) {
eAPT->sV[currElement].newDelta +=
((bc_t)eAPT->sV[currElement].newSigma / (bc_t)eAPT->sV[k].newSigma) *
(bc_t)(eAPT->sV[k].newDelta + 1);
// For the elements that are touched in the ascent stage it is necessary
// to reduce the values that they previously had.
// In addition to this, the "parentVertex" that is connected to "startVertex", i.e.
// the vertices of the new edge, needs to increase its betweenness centrality
// following the new connection, without removing the old delta value.
if (eAPT->sV[currElement].touched < 0 && ( currElement != parentVertex || k != startVertex)) {
eAPT->sV[currElement].newDelta -=
((bc_t)tree->vArr[currElement].sigma / (bc_t)tree->vArr[k].sigma) *
(bc_t)(tree->vArr[k].delta + 1);
}
}
// Vertices that did not move and that one of their neighbors moved up (such that
// the vertices are now in the same level).
if (tree->vArr[k].level == tree->vArr[currElement].level && ((eAPT->sV[currElement].IMoved == 1 && eAPT->sV[k].IMoved < 0) )) {
// Checking to see if "k" has been touched before.
if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
eAPT->sV[k].newDelta += tree->vArr[k].delta;
upCounter++;
// Marking element as touched (-2 = same-level case) in the ascent stage.
eAPT->sV[k].touched = -2;
__sync_bool_compare_and_swap(&depthSame, -1, tree->vArr[k].level);
QueueSame[__atomic_fetch_add(qEndSame_nxt, 1, __ATOMIC_RELAXED)] = k;
eAPT->sV[k].newSigma += tree->vArr[k].sigma;
}
// Paths that previously went through this vertex no longer go through it, thus the
// shortest path count (BC) is reduced.
}
if (tree->vArr[k].level == tree->vArr[currElement].level && ((eAPT->sV[k].IMoved == 1 && eAPT->sV[currElement].IMoved < 0) )) {
eAPT->sV[currElement].newDelta -=
((bc_t)tree->vArr[currElement].sigma / (bc_t)tree->vArr[k].sigma) *
(bc_t)(tree->vArr[k].delta + 1);
}
}
STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
eAPT->dynamicTraverseVerticeCounter++;
#endif
if (currElement != currRoot) {
eAPT->sV[currElement].totalBC += eAPT->sV[currElement].newDelta - tree->vArr[currElement].delta;
}
}
#pragma omp master
{
*qStart = *qEnd;
}
#pragma omp barrier
}
// operation 2: process the same-level queue (vertices whose sigma shrank).
if (operation == 2) {
#pragma omp master
{
*qStartSame = *qStartSame_nxt;
* qEndSame = *qEndSame_nxt;
* qStartSame_nxt = *qEndSame;
* qEndSame_nxt = *qStartSame_nxt;
}
#pragma omp barrier
#pragma omp for
for (int64_t i = *qStartSame; i < *qEndSame; i++) {
int64_t currElement = QueueSame[i];
int64_t levelCurrMinusOne = tree->vArr[currElement].level - 1;
eAPT->sV[currElement].newEdgesBelow = 0;
STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
int64_t k = STINGER_EDGE_DEST;
// Checking that the vertices are in different levels.
if (tree->vArr[k].level == tree->vArr[currElement].level + 1) {
if (eAPT->sV[k].touched == 0) {
eAPT->sV[currElement].newEdgesBelow += tree->vArr[k].edgesBelow + 1;
} else {
eAPT->sV[currElement].newEdgesBelow += eAPT->sV[k].newEdgesBelow + 1;
}
}
if (tree->vArr[k].level == levelCurrMinusOne) {
// Checking to see if "k" has been touched before.
if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
eAPT->sV[k].newDelta += tree->vArr[k].delta;
upCounter++;
// Marking element as touched in the ascent stage.
eAPT->sV[k].touched = -1;
__sync_bool_compare_and_swap(&depthUp, -1, tree->vArr[k].level);
QueueUp[__atomic_fetch_add(qEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
if (k != parentVertex)
eAPT->sV[k].newSigma += tree->vArr[k].sigma;
}
}
if (tree->vArr[k].level == tree->vArr[currElement].level + 1 && eAPT->sV[k].touched != 0) {
eAPT->sV[currElement].newDelta +=
((bc_t)eAPT->sV[currElement].newSigma / (bc_t)eAPT->sV[k].newSigma) *
(bc_t)(eAPT->sV[k].newDelta + 1);
// For the elements that are touched in the ascent stage it is necessary
// to reduce the values that they previously had.
// In addition to this, the "parentVertex" that is connected to "startVertex", i.e.
// the vertices of the new edge, needs to increase its betweenness centrality
// following the new connection, without removing the old delta value.
if (eAPT->sV[currElement].touched < 0 && ( currElement != parentVertex || k != startVertex)) {
eAPT->sV[currElement].newDelta -=
((bc_t)tree->vArr[currElement].sigma / (bc_t)tree->vArr[k].sigma) *
(bc_t)(tree->vArr[k].delta + 1);
}
}
// Vertices that did not move and that one of their neighbors moved up (such that
// the vertices are now in the same level).
if (tree->vArr[k].level == tree->vArr[currElement].level && ((eAPT->sV[currElement].IMoved == 1 && eAPT->sV[k].IMoved < 0) )) {
// Checking to see if "k" has been touched before.
if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
eAPT->sV[k].newDelta += tree->vArr[k].delta;
upCounter++;
// Marking element as touched (-2 = same-level case) in the ascent stage.
eAPT->sV[k].touched = -2;
__sync_bool_compare_and_swap(&depthSame, -1, tree->vArr[k].level);
QueueSame[__atomic_fetch_add(qEndSame_nxt, 1, __ATOMIC_RELAXED)] = k;
eAPT->sV[k].newSigma += tree->vArr[k].sigma;
}
// Paths that previously went through this vertex no longer go through it, thus the
// shortest path count (BC) is reduced.
}
if (tree->vArr[k].level == tree->vArr[currElement].level && ((eAPT->sV[k].IMoved == 1 && eAPT->sV[currElement].IMoved < 0) )) {
eAPT->sV[currElement].newDelta -=
((bc_t)tree->vArr[currElement].sigma / (bc_t)tree->vArr[k].sigma) *
(bc_t)(tree->vArr[k].delta + 1);
}
}
STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
eAPT->dynamicTraverseVerticeCounter++;
#endif
if (currElement != currRoot) {
eAPT->sV[currElement].totalBC += eAPT->sV[currElement].newDelta - tree->vArr[currElement].delta;
}
}
#pragma omp master
{
*qStartSame = *qEndSame;
}
#pragma omp barrier
}
}
// Cleanup stage 1: commit shadow values for every vertex visited in the
// descent, and reset its per-vertex scratch state for the next invocation.
#pragma omp for
for (int64_t c = 0; c <= qDownEndMarker; c++) {
int64_t k = QueueDown[c];
tree->vArr[k].delta = eAPT->sV[k].newDelta;
tree->vArr[k].sigma = eAPT->sV[k].newSigma;
tree->vArr[k].edgesBelow = eAPT->sV[k].newEdgesBelow;
tree->vArr[k].edgesAbove = eAPT->sV[k].newEdgesAbove;
eAPT->sV[k].diffPath = 0;
eAPT->sV[k].touched = 0;
eAPT->sV[k].newDelta = 0.0;
eAPT->sV[k].movementDelta = 0;
eAPT->sV[k].IMoved = -1;
eAPT->sV[k].newSigma = 0;
eAPT->sV[k].newEdgesAbove = 0;
eAPT->sV[k].newEdgesBelow = 0;
}
// NOTE(review): executed once per thread (no omp single); the redundant
// stores of 0 are idempotent, so this looks benign — verify.
eAPT->sV[startVertex].newEdgesAbove = 0;
eAPT->sV[parentVertex].newEdgesAbove = 0;
// Cleanup stage 2: same-level queue — only delta/edgesBelow were recomputed.
#pragma omp for
for (int64_t c = 0; c < *qEndSame; c++) {
int64_t k = QueueSame[c];
tree->vArr[k].delta = eAPT->sV[k].newDelta;
tree->vArr[k].edgesBelow = eAPT->sV[k].newEdgesBelow;
eAPT->sV[k].diffPath = 0;
eAPT->sV[k].touched = 0;
eAPT->sV[k].newDelta = 0.0;
eAPT->sV[k].movementDelta = 0;
eAPT->sV[k].IMoved = -1;
eAPT->sV[k].newSigma = 0;
eAPT->sV[k].newEdgesBelow = 0;
}
// Cleanup stage 3: up queue — likewise only delta/edgesBelow are committed.
#pragma omp for
for (int64_t c = 0; c < *qEnd; c++) {
int64_t k = QueueUp[c];
tree->vArr[k].delta = eAPT->sV[k].newDelta;
tree->vArr[k].edgesBelow = eAPT->sV[k].newEdgesBelow;
eAPT->sV[k].diffPath = 0;
eAPT->sV[k].touched = 0;
eAPT->sV[k].newDelta = 0.0;
eAPT->sV[k].movementDelta = 0;
eAPT->sV[k].IMoved = -1;
eAPT->sV[k].newSigma = 0;
eAPT->sV[k].newEdgesBelow = 0;
}
}
// Final serial reset so eAPT is clean for the next edge update.
eAPT->sV[startVertex].newEdgesBelow = 0;
eAPT->sV[parentVertex].newEdgesBelow = 0;
queue->size = 0;
eAPT->qStart = 0;
eAPT->qEnd = 0;
eAPT->qStart_nxt = 0;
eAPT->qEnd_nxt = 0;
eAPT->qStartSame = 0;
eAPT->qEndSame = 0;
eAPT->qStartSame_nxt = 0;
eAPT->qEndSame_nxt = 0;
}
// Case 2
void removeEdgeWithoutMovementBrandesFG(bcForest * forest, struct stinger * sStinger, int64_t currRoot,
int64_t startVertex, int64_t parentVertex, int64_t deletedPathsFromRoot,
extraArraysPerThread * eAPT, int64_t cores)
{
bcTree * tree = forest->forest[currRoot];
int64_t NV = forest->NV;
int64_t * Queue = eAPT->QueueSame;
int64_t * QueueDown = eAPT->QueueDown;
int64_t * QueueUp = eAPT->QueueUp;
int64_t * QueueDownBorders = eAPT->Stack;
eAPT->sV[startVertex].newEdgesBelow = tree->vArr[startVertex].edgesBelow;
eAPT->sV[parentVertex].newEdgesBelow = tree->vArr[parentVertex].edgesBelow;
eAPT->sV[startVertex].newEdgesAbove = tree->vArr[startVertex].edgesAbove;
eAPT->sV[parentVertex].newEdgesAbove = tree->vArr[parentVertex].edgesAbove;
eAPT->sV[startVertex].newSigma = tree->vArr[startVertex].sigma;
eAPT->sV[startVertex].touched = 1;
eAPT->sV[startVertex].newSigma -= deletedPathsFromRoot;
eAPT->sV[startVertex].diffPath = deletedPathsFromRoot;
eAPT->sV[startVertex].newEdgesAbove -= eAPT->sV[parentVertex].newEdgesAbove + 1;
eAPT->sV[parentVertex].newEdgesBelow -= eAPT->sV[startVertex].newEdgesBelow + 1;
QueueDown[0] = startVertex;
int64_t * qDownStart = &(eAPT->qStart);
int64_t * qDownEnd = &(eAPT->qEnd);
int64_t * qDownStart_nxt = &(eAPT->qStart_nxt);
int64_t * qDownEnd_nxt = &(eAPT->qEnd_nxt);
int64_t qDownBIndex = 0;
*qDownEnd = 1;
*qDownStart_nxt = 1;
*qDownEnd_nxt = 1;
int64_t deepestLevel = tree->vArr[startVertex].level;
queue_t * queue = eAPT->queue;
level_node_t * levelIndices = eAPT->levelIndices;
// Starting BFS decent from "startVertex", down to all the vertices that have shortest paths through "startVertex"
// All elements that will be touched will receive a positive value in their touched field.
// In this implementation, "STACKS" are not used for the "moving up" stage. Rather, a multi-level queue is used.
// Each level in the tree(max depth NV) has a queue and a counter specifiying how deep a specific deepth-queue is.
// For simplicity, all elements are pushed both into the multi-level queue and into the regular queue which is used
// for the BFS traversal.
while (*qDownStart != *qDownEnd) {
QueueDownBorders[qDownBIndex++] = *qDownStart;
QueueDownBorders[qDownBIndex++] = *qDownEnd;
int64_t thread_nums = cores;
if ((*qDownEnd - *qDownStart) < cores) {
thread_nums = *qDownEnd - *qDownStart;
}
#pragma omp parallel num_threads(thread_nums)
{
#pragma omp for
for (int64_t i = *qDownStart; i < *qDownEnd; i++) {
int64_t currElement = QueueDown[i];
if (currElement != startVertex) {
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), tree->vArr[currElement].edgesAbove, __ATOMIC_RELAXED);
}
STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
int64_t k = STINGER_EDGE_DEST;
if (currElement != startVertex
&& tree->vArr[currElement].level - 1 == tree->vArr[k].level
&& tree->vArr[currElement].level >= tree->vArr[startVertex].level) {
if (eAPT->sV[k].touched != 0) {
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), -tree->vArr[k].edgesAbove, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), eAPT->sV[k].newEdgesAbove, __ATOMIC_RELAXED);
}
}
// if this vertex has not been added yet
if ((tree->vArr[currElement].level + 1) == (tree->vArr[k].level)) {
if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, currElement)) {
// Checking if a "deeper level" has been reached.
if (deepestLevel < tree->vArr[k].level)
deepestLevel = tree->vArr[k].level;
// insert this vertex into the BFS queue
QueueDown[__atomic_fetch_add(qDownEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
__atomic_fetch_add(&(eAPT->sV[k].newSigma), tree->vArr[k].sigma, __ATOMIC_RELAXED);
// indicate that it is in the next level of the BFS
// add new paths to root that go through current BFS Vertex
__atomic_fetch_add(&(eAPT->sV[k].newSigma), -eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
// pass on my new paths to root for its search
__atomic_fetch_add(&(eAPT->sV[k].diffPath), eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
}
// otherwise if it has been touched, but is specifically in the next level
// of the search (meaning it has more than one edge to the current level)
else if (eAPT->sV[k].touched != currElement) {
// add new paths to root that go through current BFS Vertex
__atomic_fetch_add(&(eAPT->sV[k].newSigma), -eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
// pass on my new paths to root for its search
__atomic_fetch_add(&(eAPT->sV[k].diffPath), eAPT->sV[currElement].diffPath, __ATOMIC_RELAXED);
}
}
}
STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
eAPT->dynamicTraverseVerticeCounter++;
#endif
}
}
*qDownStart = *qDownStart_nxt;
*qDownEnd = *qDownEnd_nxt;
*qDownStart_nxt = *qDownEnd;
*qDownEnd_nxt = *qDownStart_nxt;
}
// The parent vertex needs to be placed in the queue for the dependency accumulation stage.
// Also, it no longer has a child and so the delta from the child needs to be removed.
int64_t qUpStart = 0, qUpEnd = 0;
(*qDownEnd)--;
int64_t qDownEndMarker = *qDownEnd;
// Starting Multi-Level "BFS" ascent.
// The ascent continues going up as long as the root has not been reached and that there
// are elements in the current level of the ascent. The ascent starts in the deepest level
// of the graph.
// It is worth noting that in the ascent stage:
// 1) All previously untouched elements that are touched are marked with "-1".
// 2) On the way up, it is possible that elements that were touched in the BFS descent will
// touch elements that were not touched in the descent and that are below "vertex". These
// are elements that do not have shortest paths going through "vertex"; however, their BC
// values have changed due to the changes occurring below them. Because of this, they are
// placed in the multi-level queue.
*qDownStart = 0;
*qDownEnd = 0;
*qDownStart_nxt = 0;
*qDownEnd_nxt = 0;
while (!(qDownBIndex <= 0 && *qDownStart >= *qDownEnd && *qDownStart_nxt >= *qDownEnd_nxt)) {
if (qDownBIndex >= 2) {
int64_t thread_nums = cores;
if ((QueueDownBorders[qDownBIndex - 1] - QueueDownBorders[qDownBIndex - 2]) < cores) {
thread_nums = QueueDownBorders[qDownBIndex - 1] - QueueDownBorders[qDownBIndex - 2];
}
#pragma omp parallel num_threads(thread_nums)
{
#pragma omp for
for (int64_t i = QueueDownBorders[qDownBIndex - 2]; i < QueueDownBorders[qDownBIndex - 1]; i++) {
int64_t currElement = QueueDown[i];
if (currElement != parentVertex && currElement != startVertex) {
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesBelow), tree->vArr[currElement].edgesBelow, __ATOMIC_RELAXED);
}
STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
int64_t k = STINGER_EDGE_DEST;
if (currElement != parentVertex
&& tree->vArr[currElement].level <= tree->vArr[parentVertex].level
&& tree->vArr[k].level > tree->vArr[currElement].level) {
if (eAPT->sV[k].touched != 0) {
__atomic_fetch_add(&(eAPT->sV[k].newEdgesBelow), -tree->vArr[k].edgesBelow, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[k].newEdgesBelow), eAPT->sV[k].newEdgesBelow, __ATOMIC_RELAXED);
}
}
if (tree->vArr[k].level == tree->vArr[parentVertex].level && __sync_bool_compare_and_swap(&(eAPT->sV[parentVertex].touched), 0, -1)) {
QueueUp[__atomic_fetch_add(qDownEnd_nxt, 1, __ATOMIC_RELAXED)] = parentVertex;
__atomic_fetch_add(&(eAPT->sV[parentVertex].newSigma), tree->vArr[parentVertex].sigma, __ATOMIC_RELAXED);
eAPT->sV[parentVertex].newDelta = tree->vArr[parentVertex].delta -
((bc_t)tree->vArr[parentVertex].sigma / (bc_t)tree->vArr[startVertex].sigma) *
(bc_t)(tree->vArr[startVertex].delta + 1);
}
// Checking that the vertices are in different levels.
if (tree->vArr[k].level == (tree->vArr[currElement].level - 1)) {
// Checking to see if "k" has been touched before.
if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
eAPT->sV[k].newDelta = tree->vArr[k].delta;
// Marking element as touched in the ascent stage.
__atomic_fetch_add(&(eAPT->sV[k].newSigma), tree->vArr[k].sigma, __ATOMIC_RELAXED);
QueueUp[__atomic_fetch_add(qDownEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
}
}
if (tree->vArr[k].level == tree->vArr[currElement].level + 1 && eAPT->sV[k].touched != 0) {
eAPT->sV[currElement].newDelta +=
((bc_t)eAPT->sV[currElement].newSigma / (bc_t)eAPT->sV[k].newSigma) *
(bc_t)(eAPT->sV[k].newDelta + 1);
// For the elements that are touched in the ascent stage it is necessary to
// to reduce the values that they previously had.
// In addition to this, the "parentVertex" that is connected to "vertex", i.e.
// the vertices of the new edge, needs to increase its betweenness centrality
// following the new connection, without removing the old delta value.
if (eAPT->sV[currElement].touched < 0 && ( currElement != parentVertex || k != startVertex)) {
eAPT->sV[currElement].newDelta -=
((bc_t)tree->vArr[currElement].sigma / (bc_t)tree->vArr[k].sigma) *
(bc_t)(tree->vArr[k].delta + 1);
}
}
}
STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
eAPT->dynamicTraverseVerticeCounter++;
#endif
if (currElement != currRoot) {
eAPT->sV[currElement].totalBC += eAPT->sV[currElement].newDelta - tree->vArr[currElement].delta;
}
}
}
}
qDownBIndex -= 2;
*qDownStart = *qDownStart_nxt;
*qDownEnd = *qDownEnd_nxt;
*qDownStart_nxt = *qDownEnd;
*qDownEnd_nxt = *qDownStart_nxt;
int64_t thread_nums = cores;
if (*qDownEnd - *qDownStart < cores) {
thread_nums = *qDownEnd - *qDownStart;
}
#pragma omp parallel num_threads(thread_nums)
{
#pragma omp for
for (int64_t i = *qDownStart; i < *qDownEnd; i++) {
int64_t currElement = QueueUp[i];
if (currElement != parentVertex && currElement != startVertex) {
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesBelow), tree->vArr[currElement].edgesBelow, __ATOMIC_RELAXED);
}
STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
int64_t k = STINGER_EDGE_DEST;
if (currElement != parentVertex
&& tree->vArr[currElement].level <= tree->vArr[parentVertex].level
&& tree->vArr[k].level > tree->vArr[currElement].level) {
if (eAPT->sV[k].touched != 0) {
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesBelow), -tree->vArr[k].edgesBelow, __ATOMIC_RELAXED);
__atomic_fetch_add(&(eAPT->sV[currElement].newEdgesBelow), eAPT->sV[k].newEdgesBelow, __ATOMIC_RELAXED);
}
}
if (tree->vArr[k].level == tree->vArr[parentVertex].level && __sync_bool_compare_and_swap(&(eAPT->sV[parentVertex].touched), 0, -1)) {
QueueUp[__atomic_fetch_add(qDownEnd_nxt, 1, __ATOMIC_RELAXED)] = parentVertex;
__atomic_fetch_add(&(eAPT->sV[parentVertex].newSigma), tree->vArr[parentVertex].sigma, __ATOMIC_RELAXED);
eAPT->sV[parentVertex].newDelta = tree->vArr[parentVertex].delta -
((bc_t)tree->vArr[parentVertex].sigma / (bc_t)tree->vArr[startVertex].sigma) *
(bc_t)(tree->vArr[startVertex].delta + 1);
}
// Checking that the vertices are in different levels.
if (tree->vArr[k].level == (tree->vArr[currElement].level - 1)) {
// Checking to see if "k" has been touched before.
if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
eAPT->sV[k].newDelta = tree->vArr[k].delta;
// Marking element as touched in the ascent stage.
__atomic_fetch_add(&(eAPT->sV[k].newSigma), tree->vArr[k].sigma, __ATOMIC_RELAXED);
QueueUp[__atomic_fetch_add(qDownEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
}
}
if (tree->vArr[k].level == tree->vArr[currElement].level + 1 && eAPT->sV[k].touched != 0) {
eAPT->sV[currElement].newDelta +=
((bc_t)eAPT->sV[currElement].newSigma / (bc_t)eAPT->sV[k].newSigma) *
(bc_t)(eAPT->sV[k].newDelta + 1);
// For the elements that are touched in the ascent stage it is necessary to
// to reduce the values that they previously had.
// In addition to this, the "parentVertex" that is connected to "vertex", i.e.
// the vertices of the new edge, needs to increase its betweenness centrality
// following the new connection, without removing the old delta value.
if (eAPT->sV[currElement].touched < 0 && ( currElement != parentVertex || k != startVertex)) {
eAPT->sV[currElement].newDelta -=
((bc_t)tree->vArr[currElement].sigma / (bc_t)tree->vArr[k].sigma) *
(bc_t)(tree->vArr[k].delta + 1);
}
}
}
STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
eAPT->dynamicTraverseVerticeCounter++;
#endif
if (currElement != currRoot) {
eAPT->sV[currElement].totalBC += eAPT->sV[currElement].newDelta - tree->vArr[currElement].delta;
}
}
}
*qDownStart = *qDownEnd;
}
for (int64_t q = 0; q <= qDownEndMarker; q++) {
int64_t k = QueueDown[q];
if (eAPT->sV[k].touched != 0) {
tree->vArr[k].delta = eAPT->sV[k].newDelta;
tree->vArr[k].sigma = eAPT->sV[k].newSigma;
}
tree->vArr[k].edgesAbove = eAPT->sV[k].newEdgesAbove;
eAPT->sV[k].newEdgesAbove = 0;
eAPT->sV[k].newEdgesBelow = 0;
eAPT->sV[k].diffPath = 0;
eAPT->sV[k].touched = 0;
eAPT->sV[k].newDelta = 0.0;
eAPT->sV[k].newSigma = 0;
}
eAPT->sV[startVertex].newEdgesAbove = 0;
eAPT->sV[parentVertex].newEdgesAbove = 0;
for (int64_t q = 0; q < *qDownEnd; q++) {
int64_t k = QueueUp[q];
if (eAPT->sV[k].touched != 0) {
tree->vArr[k].delta = eAPT->sV[k].newDelta;
tree->vArr[k].sigma = eAPT->sV[k].newSigma;
}
tree->vArr[k].edgesBelow = eAPT->sV[k].newEdgesBelow;
eAPT->sV[k].newEdgesBelow = 0;
eAPT->sV[k].newEdgesAbove = 0;
eAPT->sV[k].diffPath = 0;
eAPT->sV[k].touched = 0;
eAPT->sV[k].newDelta = 0.0;
eAPT->sV[k].newSigma = 0;
}
eAPT->sV[startVertex].newEdgesBelow = 0;
eAPT->sV[parentVertex].newEdgesBelow = 0;
queue->size = 0;
eAPT->qStart = 0;
eAPT->qEnd = 0;
eAPT->qStart_nxt = 0;
eAPT->qEnd_nxt = 0;
}
// Fine-grained (FG) parallel update of one Brandes shortest-path tree (rooted
// at currRoot) after a change that moves the subtree rooted at "startVertex"
// DOWN (away from the root); "parentVertex" is its former parent.  Stages:
//   1) BFS descent over the old subtree, marking affected vertices and finding
//      those still anchored to the rest of the tree via an outside parent or
//      same-level sibling (collected in topQueue).
//   2) A level-ordered BFS that recomputes levels, path counts (newSigma) and
//      edgesAbove for the affected vertices.
//   3) A multi-level ascent recomputing dependencies (newDelta) and edgesBelow,
//      accumulating per-vertex BC corrections into eAPT->sV[*].totalBC.
//   4) Commit of the new values into the tree and reset of the eAPT scratch
//      state so the buffers can be reused by the next update.
// NOTE(review): the exact meaning of the "touched" sentinel values
// (1, -1, -2, 3-offsets, 5, PARENT_ANCHORED, SIBLING_ANCHORED) is inferred
// from their use below — confirm against the streaming-BC algorithm write-up.
void moveDownTreeBrandesFG(bcForest * forest, struct stinger * sStinger, int64_t currRoot,
        int64_t startVertex, int64_t parentVertex, extraArraysPerThread * eAPT, int64_t cores)
{
    bcTree * tree = forest->forest[currRoot];
    int64_t NV = forest->NV;   // NOTE(review): NV appears unused in this function.
    // Scratch queues/arrays preallocated in the per-thread workspace eAPT.
    int64_t * Queue = eAPT->QueueDown;
    int64_t * QueueUp = eAPT->QueueUp;
    int64_t * topQueue = eAPT->QueueSame;
    int64_t * QueueDownBorders = eAPT->Stack;   // per-level [start,end) pairs of the stage-2 BFS
    int64_t * tqBorders = eAPT->tqBorders;      // per-level [lo,hi) pairs inside topQueue
    int64_t * touchedVerticesDown = eAPT->touchedVerticesDown;
    int64_t * touchedVerticesUp = eAPT->touchedVerticesUp;
    queue_t * queue = eAPT->queue;              // multi-level queue (per-level linked lists)
    level_node_t * levelIndices = eAPT->levelIndices;
    Queue[0] = startVertex;
    int64_t tvDownEnd = 0, tvUpEnd = 0;
    int64_t stopLevel = tree->vArr[startVertex].level;
    // Current/next frontier cursors for the BFS queues (stored in eAPT).
    int64_t * qStart = &(eAPT->qStart);
    int64_t * qEnd = &(eAPT->qEnd);
    int64_t * qStart_nxt = &(eAPT->qStart_nxt);
    int64_t * qEnd_nxt = &(eAPT->qEnd_nxt);
    int64_t * tqStart = &(eAPT->tqStart);
    int64_t * tqEnd = &(eAPT->tqEnd);
    int64_t * tqStart_nxt = &(eAPT->tqStart_nxt);
    int64_t * tqEnd_nxt = &(eAPT->tqEnd_nxt);
    int64_t qDownBIndex = 0, tqBIndex = 0;
    *qEnd = 1;
    *qStart_nxt = 1;
    *qEnd_nxt = 1;
    *tqStart = 0;
    *tqEnd = 0;
    *tqStart_nxt = 0;
    *tqEnd_nxt = 0;
    // startVertex loses its old attachment: level/sigma/edgesAbove become
    // "unknown" (INFINITY_MY) until the recomputation below finds new values.
    eAPT->sV[startVertex].newLevel = INFINITY_MY;
    eAPT->sV[startVertex].newSigma = INFINITY_MY;
    eAPT->sV[startVertex].newDelta = 0.0;
    eAPT->sV[startVertex].newEdgesAbove = INFINITY_MY;
    eAPT->sV[startVertex].touched = 1;
    touchedVerticesDown[tvDownEnd++] = startVertex;
    int64_t deepestLevel = stopLevel;
    *qStart = 0;
    // ---- Stage 1: BFS descent over the subtree below startVertex. ----
    while (*qStart != *qEnd) {
        int64_t thread_nums = cores;
        if (*qEnd - *qStart < cores) {
            thread_nums = *qEnd - *qStart;
        }
        #pragma omp parallel num_threads(thread_nums)
        {
            #pragma omp for
            for (int64_t i = *qStart; i < *qEnd; i++) {
                int64_t currElement = Queue[i];
                // Touch all children (one level deeper) exactly once (CAS on
                // touched 0 -> -1) and invalidate their cached values; they
                // will be recomputed in stage 2.
                STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
                    int64_t k = STINGER_EDGE_DEST;
                    if (tree->vArr[k].level == tree->vArr[currElement].level + 1 && __sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
                        __atomic_fetch_add(&(eAPT->sV[k].newEdgesAbove), INFINITY_MY, __ATOMIC_RELAXED);
                        __atomic_fetch_add(&(eAPT->sV[k].newLevel), INFINITY_MY, __ATOMIC_RELAXED);
                        __atomic_fetch_add(&(eAPT->sV[k].newSigma), INFINITY_MY, __ATOMIC_RELAXED);
                        touchedVerticesDown[__atomic_fetch_add(&tvDownEnd, 1, __ATOMIC_RELAXED)] = k;
                        Queue[__atomic_fetch_add(qEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
                        eAPT->sV[k].newDelta = 0.0;
                    }
                }
                STINGER_FORALL_EDGES_OF_VTX_END();
                // Look one level up/sideways: does currElement still have a
                // parent or a same-level sibling outside the moved subtree?
                // NOTE(review): the *PathsToRoot / *EdgesAbove accumulators
                // are computed here but never read later in this function.
                int64_t parentOutsideSubtree = 0;
                int64_t siblingOutsideSubtree = 0;
                int64_t parentPathsToRoot = 0;
                int64_t siblingPathsToRoot = 0;
                int64_t parentEdgesAbove = 0;
                int64_t siblingEdgesAbove = 0;
                STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
                    int64_t l = STINGER_EDGE_DEST;
                    if (tree->vArr[l].level == tree->vArr[currElement].level - 1) {
                        if (eAPT->sV[l].touched == 0) {
                            parentOutsideSubtree = l;
                            parentPathsToRoot += tree->vArr[l].sigma;
                        } else {
                            parentPathsToRoot += tree->vArr[l].sigma - 1;
                        }
                        parentEdgesAbove += tree->vArr[l].edgesAbove + 1;
                    } else if (tree->vArr[l].level == tree->vArr[currElement].level) {
                        if (eAPT->sV[l].touched == 0) {
                            siblingOutsideSubtree = l;
                            siblingPathsToRoot += tree->vArr[l].sigma;
                        } else {
                            siblingPathsToRoot += tree->vArr[l].sigma - 1;
                        }
                        siblingEdgesAbove += tree->vArr[l].edgesAbove + 1;
                    }
                }
                STINGER_FORALL_EDGES_OF_VTX_END();
                if (parentOutsideSubtree) {
                    // Still reachable through an untouched parent: re-anchor one
                    // level below that parent and seed the top queue with it.
                    if (eAPT->sV[currElement].touched == 1 || eAPT->sV[currElement].touched == SIBLING_ANCHORED) {
                        topQueue[__atomic_fetch_add(tqEnd_nxt, 1, __ATOMIC_RELAXED)] = currElement;
                        eAPT->sV[currElement].newLevel = tree->vArr[parentOutsideSubtree].level + 1;
                    }
                    eAPT->sV[currElement].touched = PARENT_ANCHORED;
                    eAPT->sV[currElement].newDelta = 0.0;
                } else if (siblingOutsideSubtree) {
                    // Only a same-level sibling connects it to the rest of the
                    // tree; a parent anchor takes precedence if already set.
                    if (eAPT->sV[currElement].touched == 1) {
                        topQueue[__atomic_fetch_add(tqEnd_nxt, 1, __ATOMIC_RELAXED)] = currElement;
                        eAPT->sV[currElement].newLevel = tree->vArr[siblingOutsideSubtree].level + 1;
                    }
                    if (eAPT->sV[currElement].touched != PARENT_ANCHORED) {
                        eAPT->sV[currElement].touched = SIBLING_ANCHORED;
                    }
                    eAPT->sV[currElement].newDelta = 0.0;
                }
                // Propagate the anchoring state to the children; -2 is a
                // transient "in transition" marker used inside the CAS chain,
                // and the fallback re-adds 3 to undo the -1/1 -> -2 step.
                STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
                    int64_t k = STINGER_EDGE_DEST;
                    if (tree->vArr[k].level == tree->vArr[currElement].level + 1 && (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), -1, -2)
                            || __sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 1, -2))) {
                        if (eAPT->sV[currElement].touched == PARENT_ANCHORED && (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), -2, PARENT_ANCHORED)
                                || __sync_bool_compare_and_swap(&(eAPT->sV[k].touched), SIBLING_ANCHORED, PARENT_ANCHORED))) {}
                        else if (eAPT->sV[currElement].touched == SIBLING_ANCHORED && __sync_bool_compare_and_swap(&(eAPT->sV[k].touched), -2, SIBLING_ANCHORED)) {}
                        else {
                            __atomic_fetch_add(&(eAPT->sV[k].touched), 3, __ATOMIC_RELAXED);
                        }
                    }
                }
                STINGER_FORALL_EDGES_OF_VTX_END();
            }
        }
        // Advance both the BFS frontier and the top-queue frontier.
        *qStart = *qStart_nxt;
        *qEnd = *qEnd_nxt;
        *qStart_nxt = *qEnd;
        *qEnd_nxt = *qStart_nxt;
        *tqStart = *tqStart_nxt;
        *tqEnd = *tqEnd_nxt;
        *tqStart_nxt = *tqEnd;
        *tqEnd_nxt = *tqStart_nxt;
    }
    *qEnd = 1;
    *qStart_nxt = 1;
    *qEnd_nxt = 1;
    *qStart = 0;
    *tqStart = 0;
    // Insertion sort of topQueue by tentative new level, so anchored vertices
    // are released into the stage-2 BFS in level order.
    int64_t key, j;
    for (int64_t i = 1; i < *tqEnd; i++) {
        key = topQueue[i];
        j = i - 1;
        while (j >= 0 && eAPT->sV[topQueue[j]].newLevel > eAPT->sV[key].newLevel) {
            topQueue[j + 1] = topQueue[j];
            j = j - 1;
        }
        topQueue[j + 1] = key;
    }
    // Record [lo,hi) runs of equal newLevel inside the sorted topQueue.
    int64_t lo = 0;
    int64_t hi = 0;
    while (lo < *tqEnd && hi < *tqEnd) {
        while (lo < *tqEnd && hi < *tqEnd && eAPT->sV[topQueue[lo]].newLevel == eAPT->sV[topQueue[hi]].newLevel) {
            hi++;
        }
        tqBorders[tqBIndex++] = lo;
        tqBorders[tqBIndex++] = hi;
        lo = hi;
    }
    // While queue is not empty
    int64_t tqLevel = 0;
    if (*tqEnd != 0) {
        // Seed the stage-2 BFS with the shallowest anchored vertex.
        appendDS(queue, levelIndices, eAPT->sV[topQueue[*tqStart]].newLevel, topQueue[*tqStart]);
        Queue[0] = topQueue[(*tqStart)++];
        tqBorders[0]++;
        if (tqBorders[0] == tqBorders[1])
            tqLevel += 2;
        eAPT->sV[Queue[0]].touched = 5;   // 5 == consumed by the level-recompute BFS
    } else {
        // No anchor found at all: restart from startVertex (the subtree may
        // have been disconnected; handled again after stage 3).
        Queue[0] = startVertex;
        eAPT->sV[Queue[0]].touched = 5;
        *qStart = 0;
        *qEnd = 1;
    }
    // ---- Stage 2: recompute levels, sigma and edgesAbove level by level. ----
    while (*qStart != *qEnd) {
        // Release every anchored vertex whose new level has been reached by
        // the current frontier.
        if (tqLevel < tqBIndex && tqBorders[tqLevel] < *tqEnd && eAPT->sV[topQueue[tqBorders[tqLevel]]].newLevel <= eAPT->sV[Queue[*qStart]].newLevel) {
            int64_t thread_nums = cores;
            if (tqBorders[tqLevel + 1] - tqBorders[tqLevel] < cores) {
                thread_nums = tqBorders[tqLevel + 1] - tqBorders[tqLevel];
            }
            #pragma omp parallel num_threads(thread_nums)
            {
                #pragma omp for
                for (int64_t i = tqBorders[tqLevel]; i < tqBorders[tqLevel + 1]; i++) {
                    if (__sync_bool_compare_and_swap(&(eAPT->sV[topQueue[i]].touched), 1, 5) ||
                            __sync_bool_compare_and_swap(&(eAPT->sV[topQueue[i]].touched), PARENT_ANCHORED, 5) ||
                            __sync_bool_compare_and_swap(&(eAPT->sV[topQueue[i]].touched), SIBLING_ANCHORED, 5)) {
                        Queue[__atomic_fetch_add(qEnd, 1, __ATOMIC_RELAXED)] = topQueue[i];
                        appendDS(queue, levelIndices, eAPT->sV[topQueue[i]].newLevel, topQueue[i]);
                    }
                }
            }
            tqLevel += 2;
        }
        *qStart_nxt = *qEnd;
        *qEnd_nxt = *qStart_nxt;
        // Remember this frontier's bounds for the stage-3 ascent.
        QueueDownBorders[qDownBIndex++] = *qStart;
        QueueDownBorders[qDownBIndex++] = *qEnd;
        int64_t thread_nums = cores;
        if (*qEnd - *qStart < cores) {
            thread_nums = *qEnd - *qStart;
        }
        #pragma omp parallel num_threads(thread_nums)
        {
            #pragma omp for
            for (int64_t i = *qStart; i < *qEnd; i++) {
                int64_t currElement = Queue[i];
                eAPT->sV[currElement].newEdgesAbove = 0;
                if (deepestLevel < eAPT->sV[currElement].newLevel) {
                    deepestLevel = eAPT->sV[currElement].newLevel;
                }
                STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
                    int64_t k = STINGER_EDGE_DEST;
                    if (eAPT->sV[k].newLevel > eAPT->sV[currElement].newLevel) {
                        // Checking if "k" has been found.
                        __sync_bool_compare_and_swap(&(eAPT->sV[k].newLevel), INFINITY_MY, eAPT->sV[currElement].newLevel + 1);
                        if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 1, 5) ||
                                __sync_bool_compare_and_swap(&(eAPT->sV[k].touched), PARENT_ANCHORED, 5) ||
                                __sync_bool_compare_and_swap(&(eAPT->sV[k].touched), SIBLING_ANCHORED, 5)) {
                            Queue[__atomic_fetch_add(qEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
                            eAPT->sV[k].newDelta = 0.0;
                            if (deepestLevel < eAPT->sV[k].newLevel)
                                deepestLevel = eAPT->sV[k].newLevel;
                            appendDS(queue, levelIndices, eAPT->sV[k].newLevel, k);
                        }
                    }
                    // Accumulate sigma/edgesAbove from parents; the CAS swaps
                    // the first contribution in place of the INFINITY_MY marker.
                    if (eAPT->sV[currElement].newLevel == tree->vArr[k].level + 1 && eAPT->sV[k].touched == 0) {
                        // Untouched parent: its old values are still valid.
                        if (__sync_bool_compare_and_swap(&(eAPT->sV[currElement].newSigma), INFINITY_MY, tree->vArr[k].sigma)) {}
                        else {
                            __atomic_fetch_add(&(eAPT->sV[currElement].newSigma), tree->vArr[k].sigma, __ATOMIC_RELAXED);
                        }
                        if (__sync_bool_compare_and_swap(&(eAPT->sV[currElement].newEdgesAbove), INFINITY_MY, tree->vArr[k].edgesAbove + 1)) {}
                        else {
                            __atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), tree->vArr[k].edgesAbove + 1, __ATOMIC_RELAXED);
                        }
                    } else if (eAPT->sV[currElement].newLevel == eAPT->sV[k].newLevel + 1 && eAPT->sV[k].touched != 0) {
                        // Touched parent: use its recomputed values.
                        if (__sync_bool_compare_and_swap(&(eAPT->sV[currElement].newSigma), INFINITY_MY, eAPT->sV[k].newSigma)) {
                        } else {
                            __atomic_fetch_add(&(eAPT->sV[currElement].newSigma), eAPT->sV[k].newSigma, __ATOMIC_RELAXED);
                        }
                        // NOTE(review): the CAS arm stores newEdgesAbove WITHOUT
                        // the +1 that the add arm applies — looks inconsistent
                        // with the untouched-parent branch above; verify intent.
                        if (__sync_bool_compare_and_swap(&(eAPT->sV[currElement].newEdgesAbove), INFINITY_MY, eAPT->sV[k].newEdgesAbove)) {
                        } else {
                            __atomic_fetch_add(&(eAPT->sV[currElement].newEdgesAbove), eAPT->sV[k].newEdgesAbove + 1, __ATOMIC_RELAXED);
                        }
                    }
                }
                STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
                eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
                eAPT->dynamicTraverseVerticeCounter++;
#endif
            }
        }
        *qStart = *qStart_nxt;
        *qEnd = *qEnd_nxt;
        *qStart_nxt = *qEnd;
        *qEnd_nxt = *qStart_nxt;
    }
    *qEnd = 0;
    // If it is not a case 4 (complete detachment), drain the multi-level queue
    // back into Queue in level order for the ascent stage.
    if (deepestLevel != INFINITY_MY) {
        for (int64_t lev = stopLevel; lev <= deepestLevel; lev++) {
            int64_t index = levelIndices[lev].front;
            int64_t levelEmpty = 1;   // NOTE(review): computed but never read.
            while (index != -1) {
                levelEmpty = 0;
                queue_node_t * temp_node = queue->nodes + index;
                Queue[(*qEnd)++] = temp_node->data;
                index = temp_node->next;
            }
            levelIndices[lev].front = -1;
            levelIndices[lev].back = -1;
        }
        queue->size = 0;
    }
    int64_t * qUpStart = &(eAPT->qStartSame);
    int64_t * qUpEnd = &(eAPT->qEndSame);
    int64_t * qUpStart_nxt = &(eAPT->qStartSame_nxt);
    int64_t * qUpEnd_nxt = &(eAPT->qEndSame_nxt);
    *qUpStart = 0;
    *qUpEnd = 0;
    *qUpStart_nxt = 0;
    *qUpEnd_nxt = 0;
    (*qEnd)--;
    int case4 = 0;
    // Case 4: the subtree never reattached (startVertex still has no level).
    // parentVertex then loses startVertex as a child and must shed the old
    // dependency contribution it received through it.
    if (eAPT->sV[startVertex].newLevel == INFINITY_MY) {
        if (eAPT->sV[parentVertex].touched == 0) {
            eAPT->sV[parentVertex].newDelta = tree->vArr[parentVertex].delta -
                    ((bc_t)tree->vArr[parentVertex].sigma / (bc_t)tree->vArr[startVertex].sigma) *
                    (bc_t)(tree->vArr[startVertex].delta + 1);
            eAPT->sV[parentVertex].newSigma = tree->vArr[parentVertex].sigma;
            eAPT->sV[parentVertex].touched = -1;
            QueueUp[(*qUpEnd_nxt)++] = parentVertex;
            case4 = 1;
        }
    }
    // ---- Stage 3: Multi-Level "BFS" ascent. ----
    // The ascent continues going up as long as the root has not been reached and there
    // are elements in the current level of the ascent. The ascent starts in the deepest
    // level of the graph.
    // It is worth noting that in the ascent stage:
    // 1) All previously untouched elements that are touched are marked with "-1".
    // 2) On the way up, it is possible that elements that were touched in the BFS descent
    //    will touch elements that were not touched in the descent and that are below
    //    "vertex". These are elements that do not have shortest paths going through
    //    "vertex"; however, their BC values have changed due to the changes occurring
    //    below them. Because of this, they are placed in the multi-level queue.
    while (!(qDownBIndex <= 0 && *qUpStart >= *qUpEnd && *qUpStart_nxt >= *qUpEnd_nxt)) {
        // First: process the deepest remaining stage-2 frontier (skipped in case 4).
        if (qDownBIndex >= 2 && !case4) {
            int64_t thread_nums = cores;
            if (QueueDownBorders[qDownBIndex - 1] - QueueDownBorders[qDownBIndex - 2] < cores) {
                thread_nums = QueueDownBorders[qDownBIndex - 1] - QueueDownBorders[qDownBIndex - 2];
            }
            #pragma omp parallel num_threads(thread_nums)
            {
                #pragma omp for
                for (int64_t i = QueueDownBorders[qDownBIndex - 2]; i < QueueDownBorders[qDownBIndex - 1]; i++) {
                    int64_t currElement = Queue[i];
                    eAPT->sV[currElement].newEdgesBelow = 0;
                    touchedVerticesUp[__atomic_fetch_add(&tvUpEnd, 1, __ATOMIC_RELAXED)] = currElement;
                    // Effective levels: fall back to the old tree level when no
                    // new level was assigned (newLevel == 0).
                    // NOTE(review): these CAS calls operate on stack locals, so
                    // they are just conditional assignments — atomicity buys
                    // nothing here.
                    int64_t currElementLevel = eAPT->sV[currElement].newLevel;
                    __sync_bool_compare_and_swap(&currElementLevel, 0, tree->vArr[currElement].level);
                    int64_t parentVertexLevel = eAPT->sV[parentVertex].newLevel;
                    __sync_bool_compare_and_swap(&parentVertexLevel, 0, tree->vArr[parentVertex].level);
                    STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
                        int64_t k = STINGER_EDGE_DEST;
                        int64_t kLevel = eAPT->sV[k].newLevel;
                        __sync_bool_compare_and_swap(&kLevel, 0, tree->vArr[k].level);
                        // Count edges to children (one level deeper), using the
                        // recomputed count for touched children.
                        if (kLevel == currElementLevel + 1) {
                            if (eAPT->sV[k].touched != 0) {
                                __atomic_fetch_add(&(eAPT->sV[currElement].newEdgesBelow), eAPT->sV[k].newEdgesBelow + 1, __ATOMIC_RELAXED);
                            } else if (eAPT->sV[k].touched == 0) {
                                __atomic_fetch_add(&(eAPT->sV[currElement].newEdgesBelow), tree->vArr[k].edgesBelow + 1, __ATOMIC_RELAXED);
                            }
                        }
                        // First touch of parentVertex in the ascent: remove the
                        // dependency it used to receive through startVertex.
                        if (kLevel == parentVertexLevel) {
                            if (__sync_bool_compare_and_swap(&(eAPT->sV[parentVertex].touched), 0, -1)) {
                                eAPT->sV[parentVertex].newDelta = tree->vArr[parentVertex].delta -
                                        ((bc_t)tree->vArr[parentVertex].sigma / (bc_t)tree->vArr[startVertex].sigma) *
                                        (bc_t)(tree->vArr[startVertex].delta + 1);
                                __atomic_fetch_add(&(eAPT->sV[parentVertex].newSigma), tree->vArr[parentVertex].sigma, __ATOMIC_RELAXED);
                                QueueUp[__atomic_fetch_add(qUpEnd_nxt, 1, __ATOMIC_RELAXED)] = parentVertex;
                            }
                        }
                        // Checking that the vertices are in different levels.
                        if (kLevel == currElementLevel - 1) {
                            // Checking to see if "k" has been touched before.
                            if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
                                eAPT->sV[k].newDelta = tree->vArr[k].delta;
                                // Marking element as touched in the ascent stage.
                                __atomic_fetch_add(&(eAPT->sV[k].newSigma), tree->vArr[k].sigma, __ATOMIC_RELAXED);
                                QueueUp[__atomic_fetch_add(qUpEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
                            }
                        }
                        // Standard Brandes dependency accumulation from child k.
                        if (kLevel == currElementLevel + 1 && eAPT->sV[k].touched != 0) {
                            eAPT->sV[currElement].newDelta +=
                                ((bc_t)eAPT->sV[currElement].newSigma / (bc_t)eAPT->sV[k].newSigma) *
                                (bc_t)(eAPT->sV[k].newDelta + 1);
                            // For the elements that are touched in the ascent stage it is
                            // necessary to reduce the values that they previously had.
                            // In addition to this, the "parentVertex" that is connected to
                            // "vertex", i.e. the vertices of the new edge, needs to increase
                            // its betweenness centrality following the new connection,
                            // without removing the old delta value.
                            if (eAPT->sV[currElement].touched < 0 && tree->vArr[currElement].level < tree->vArr[k].level) {
                                eAPT->sV[currElement].newDelta -=
                                    ((bc_t)tree->vArr[currElement].sigma / (bc_t)tree->vArr[k].sigma) *
                                    (bc_t)(tree->vArr[k].delta + 1);
                            }
                        }
                    }
                    STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
                    eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
                    eAPT->dynamicTraverseVerticeCounter++;
#endif
                    if (currElement != currRoot) {
                        eAPT->sV[currElement].totalBC += eAPT->sV[currElement].newDelta - tree->vArr[currElement].delta;
                    }
                }
            }
        }
        qDownBIndex -= 2;
        *qUpStart = *qUpStart_nxt;
        *qUpEnd = *qUpEnd_nxt;
        *qUpStart_nxt = *qUpEnd;
        *qUpEnd_nxt = *qUpStart_nxt;
        // Second: the same accumulation for vertices discovered on the way up
        // (QueueUp frontier); the loop body mirrors the one above.
        int64_t thread_nums = cores;
        if (*qUpEnd - *qUpStart < cores) {
            thread_nums = *qUpEnd - *qUpStart;
        }
        #pragma omp parallel num_threads(thread_nums)
        {
            #pragma omp for
            for (int64_t i = *qUpStart; i < *qUpEnd; i++) {
                int64_t currElement = QueueUp[i];
                eAPT->sV[currElement].newEdgesBelow = 0;
                touchedVerticesUp[__atomic_fetch_add(&tvUpEnd, 1, __ATOMIC_RELAXED)] = currElement;
                // Effective levels (see note above: CAS on locals == plain assignment).
                int64_t currElementLevel = eAPT->sV[currElement].newLevel;
                __sync_bool_compare_and_swap(&currElementLevel, 0, tree->vArr[currElement].level);
                int64_t parentVertexLevel = eAPT->sV[parentVertex].newLevel;
                __sync_bool_compare_and_swap(&parentVertexLevel, 0, tree->vArr[parentVertex].level);
                STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
                    int64_t k = STINGER_EDGE_DEST;
                    int64_t kLevel = eAPT->sV[k].newLevel;
                    __sync_bool_compare_and_swap(&kLevel, 0, tree->vArr[k].level);
                    // Count edges to children (one level deeper).
                    if (kLevel == currElementLevel + 1) {
                        if (eAPT->sV[k].touched != 0) {
                            __atomic_fetch_add(&(eAPT->sV[currElement].newEdgesBelow), eAPT->sV[k].newEdgesBelow + 1, __ATOMIC_RELAXED);
                        } else if (eAPT->sV[k].touched == 0) {
                            __atomic_fetch_add(&(eAPT->sV[currElement].newEdgesBelow), tree->vArr[k].edgesBelow + 1, __ATOMIC_RELAXED);
                        }
                    }
                    // First touch of parentVertex in the ascent.
                    if (kLevel == parentVertexLevel) {
                        if (__sync_bool_compare_and_swap(&(eAPT->sV[parentVertex].touched), 0, -1)) {
                            eAPT->sV[parentVertex].newDelta = tree->vArr[parentVertex].delta -
                                    ((bc_t)tree->vArr[parentVertex].sigma / (bc_t)tree->vArr[startVertex].sigma) *
                                    (bc_t)(tree->vArr[startVertex].delta + 1);
                            __atomic_fetch_add(&(eAPT->sV[parentVertex].newSigma), tree->vArr[parentVertex].sigma, __ATOMIC_RELAXED);
                            QueueUp[__atomic_fetch_add(qUpEnd_nxt, 1, __ATOMIC_RELAXED)] = parentVertex;
                        }
                    }
                    // Checking that the vertices are in different levels.
                    if (kLevel == currElementLevel - 1) {
                        // Checking to see if "k" has been touched before.
                        if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 0, -1)) {
                            eAPT->sV[k].newDelta = tree->vArr[k].delta;
                            // Marking element as touched in the ascent stage.
                            __atomic_fetch_add(&(eAPT->sV[k].newSigma), tree->vArr[k].sigma, __ATOMIC_RELAXED);
                            QueueUp[__atomic_fetch_add(qUpEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
                        }
                    }
                    // Standard Brandes dependency accumulation from child k.
                    if (kLevel == currElementLevel + 1 && eAPT->sV[k].touched != 0) {
                        eAPT->sV[currElement].newDelta +=
                            ((bc_t)eAPT->sV[currElement].newSigma / (bc_t)eAPT->sV[k].newSigma) *
                            (bc_t)(eAPT->sV[k].newDelta + 1);
                        // For the elements that are touched in the ascent stage it is
                        // necessary to reduce the values that they previously had.
                        // In addition to this, the "parentVertex" that is connected to
                        // "vertex", i.e. the vertices of the new edge, needs to increase
                        // its betweenness centrality following the new connection,
                        // without removing the old delta value.
                        if (eAPT->sV[currElement].touched < 0 && tree->vArr[currElement].level < tree->vArr[k].level) {
                            eAPT->sV[currElement].newDelta -=
                                ((bc_t)tree->vArr[currElement].sigma / (bc_t)tree->vArr[k].sigma) *
                                (bc_t)(tree->vArr[k].delta + 1);
                        }
                    }
                }
                STINGER_FORALL_EDGES_OF_VTX_END();
#if COUNT_TRAVERSALS==1
                eAPT->dynamicTraverseEdgeCounter += stinger_typed_outdegree(sStinger, currElement, 0);
                eAPT->dynamicTraverseVerticeCounter++;
#endif
                if (currElement != currRoot) {
                    eAPT->sV[currElement].totalBC += eAPT->sV[currElement].newDelta - tree->vArr[currElement].delta;
                }
            }
        }
        *qUpStart = *qUpEnd;
    }
    // ---- Stage 4: commit new values and reset scratch state. ----
    // Commit the recomputed levels.
    for (int64_t k = 0; k < tvDownEnd; k++) {
        int64_t vertex = touchedVerticesDown[k];
        tree->vArr[vertex].level = eAPT->sV[vertex].newLevel;
    }
    // Handles case where edge deletion creates new connected component:
    // everything reachable from startVertex is now cut off from currRoot, so
    // wipe its contribution and mark its sigma infinite.
    if (tree->vArr[startVertex].level == INFINITY_MY) {
        *qStart = 0;
        *qEnd = 1;
        *qStart_nxt = 1;
        *qEnd_nxt = 1;
        Queue[0] = startVertex;
        eAPT->sV[startVertex].touched = -2;
        while (*qStart != *qEnd) {
            int64_t thread_nums = cores;
            if (*qEnd - *qStart < cores) {
                thread_nums = *qEnd - *qStart;
            }
            #pragma omp parallel num_threads(thread_nums)
            {
                #pragma omp for
                for (int64_t i = *qStart; i < *qEnd; i++) {
                    int64_t currElement = Queue[i];
                    eAPT->sV[currElement].totalBC -= tree->vArr[currElement].delta;
                    tree->vArr[currElement].edgesBelow = 0;
                    tree->vArr[currElement].edgesAbove = 0;
                    eAPT->sV[currElement].newEdgesAbove = 0;
                    eAPT->sV[currElement].newEdgesBelow = 0;
                    tree->vArr[currElement].sigma = INFINITY_MY;
                    eAPT->sV[currElement].newSigma = 0;
                    STINGER_FORALL_EDGES_OF_VTX_BEGIN(sStinger, currElement) {
                        int64_t k = STINGER_EDGE_DEST;
                        if (__sync_bool_compare_and_swap(&(eAPT->sV[k].touched), 1, -2)) {
                            touchedVerticesUp[__atomic_fetch_add(&tvUpEnd, 1, __ATOMIC_RELAXED)] = k;
                            Queue[__atomic_fetch_add(qEnd_nxt, 1, __ATOMIC_RELAXED)] = k;
                        }
                    }
                    STINGER_FORALL_EDGES_OF_VTX_END();
                }
            }
            *qStart = *qStart_nxt;
            *qEnd = *qEnd_nxt;
            *qStart_nxt = *qEnd;
            *qEnd_nxt = *qStart_nxt;
        }
    }
    // Commit sigma/delta/edgesAbove for descent-touched vertices and clear
    // their scratch fields for the next update.
    for (int64_t q = 0; q < tvDownEnd; q++) {
        int64_t k = touchedVerticesDown[q];
        if (eAPT->sV[k].touched > 0) {
            tree->vArr[k].sigma = eAPT->sV[k].newSigma;
        }
        if (eAPT->sV[k].touched != 0) {
            tree->vArr[k].delta = eAPT->sV[k].newDelta;
        }
        tree->vArr[k].edgesAbove = eAPT->sV[k].newEdgesAbove;
        eAPT->sV[k].diffPath = 0;
        eAPT->sV[k].touched = 0;
        eAPT->sV[k].newDelta = 0.0;
        eAPT->sV[k].newSigma = 0;
        eAPT->sV[k].newLevel = 0;
        eAPT->sV[k].newEdgesAbove = 0;
    }
    // Same commit/clear for ascent-touched vertices (edgesBelow side).
    for (int64_t q = 0; q < tvUpEnd; q++) {
        int64_t k = touchedVerticesUp[q];
        if (eAPT->sV[k].touched > 0) {
            tree->vArr[k].sigma = eAPT->sV[k].newSigma;
        }
        if (eAPT->sV[k].touched != 0) {
            tree->vArr[k].delta = eAPT->sV[k].newDelta;
        }
        tree->vArr[k].edgesBelow = eAPT->sV[k].newEdgesBelow;
        eAPT->sV[k].diffPath = 0;
        eAPT->sV[k].touched = 0;
        eAPT->sV[k].newDelta = 0.0;
        eAPT->sV[k].newSigma = 0;
        eAPT->sV[k].newLevel = 0;
        eAPT->sV[k].newEdgesAbove = 0;
        eAPT->sV[k].newEdgesBelow = 0;
    }
    // Reset the shared queue cursors for the next invocation.
    eAPT->qStart = 0;
    eAPT->qEnd = 0;
    eAPT->qStart_nxt = 0;
    eAPT->qEnd_nxt = 0;
    eAPT->tqStart = 0;
    eAPT->tqEnd = 0;
    eAPT->tqStart_nxt = 0;
    eAPT->tqEnd_nxt = 0;
    eAPT->qStartSame = 0;
    eAPT->qEndSame = 0;
    eAPT->qStartSame_nxt = 0;
    eAPT->qEndSame_nxt = 0;
}
|
texture.c | #include "mana/graphics/utilities/texture.h"
#define STB_IMAGE_IMPLEMENTATION
#include <stb_image.h>
int texture_init(struct Texture *texture, struct GPUAPI *gpu_api, struct TextureSettings texture_settings) {
VkFilter filter = (texture_settings.filter_type == FILTER_NEAREST) ? VK_FILTER_NEAREST : VK_FILTER_LINEAR;
VkSamplerAddressMode mode;
switch (texture_settings.mode_type) {
case (MODE_REPEAT):
mode = VK_SAMPLER_ADDRESS_MODE_REPEAT;
break;
case (MODE_MIRRORED_REPEAT):
mode = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
break;
case (MODE_CLAMP_TO_EDGE):
mode = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
break;
case (MODE_CLAMP_TO_BORDER):
mode = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
break;
}
texture->path = strdup(texture_settings.path);
char *name_location = strrchr(texture_settings.path, '/');
if (!name_location)
texture->name = strdup(name_location);
else
texture->name = strdup(name_location + 1);
char *type_location = strrchr(texture_settings.path, '.');
if (!type_location)
texture->type = strdup(type_location);
else
texture->type = strdup(type_location + 1);
// Todo: Detect pixel bit
// Note: Something like this could be useful for optimizing but not needed as stbi will correctly convert up/down bits
//int pixel_bit = 16;
//int tex_width, tex_height, tex_channels;
//void *pixels;
//VkDeviceSize image_size;
//if (pixel_bit == 8) {
// pixels = (void *)stbi_load(texture->path, &tex_width, &tex_height, &tex_channels, STBI_rgb_alpha);
// image_size = tex_width * tex_height * 4;
//} else if (pixel_bit == 16) {
// pixels = (void *)stbi_load_16(texture->path, &tex_width, &tex_height, &tex_channels, STBI_rgb_alpha);
// image_size = tex_width * tex_height * 4 * 2;
//} else if (pixel_bit == 32) {
// pixels = (void *)stbi_load_32(texture->path, &tex_width, &tex_height, &tex_channels, STBI_rgb_alpha);
// image_size = tex_width * tex_height * 4 * 2 * 2;
//} else
// return -1;
int tex_width, tex_height, tex_channels;
stbi_us *pixels = stbi_load_16(texture->path, &tex_width, &tex_height, &tex_channels, STBI_rgb_alpha);
VkDeviceSize image_size = tex_width * tex_height * 4 * 2;
texture->width = tex_width;
texture->height = tex_height;
if (!pixels) {
printf("Failed to load texture image!\n");
return 1;
}
// Guaranteed to be multiple of 4
// ifndef avx512 -> ifndef avx2 -> ifndef avx -> ifndef sse2 -> ifndef neon -> fallback
if (texture_settings.premultiplied_alpha == 0) {
// Note: Let openmp take care of simd for now, outside of texture uses parallel for inside used parallel simd
//https://stackoverflow.com/questions/14674049/parallel-for-vs-omp-simd-when-to-use-each
#pragma omp simd
for (int pixel_group_num = 0; pixel_group_num < tex_width * tex_height * tex_channels; pixel_group_num += 4) {
unsigned short alpha_value = pixels[pixel_group_num + 3];
pixels[pixel_group_num] *= ((float)alpha_value / USHRT_MAX);
pixels[pixel_group_num + 1] *= ((float)alpha_value / USHRT_MAX);
pixels[pixel_group_num + 2] *= ((float)alpha_value / USHRT_MAX);
}
}
// int total_pixels = tex_width * tex_height * tex_channels;
//#ifdef __AVX__
// for (int iterations = total_pixels / 4;;)
// ;
// if (total_pixels % 4 != 0) {
// unsigned short alpha_value = pixels[total_pixels - 1];
// pixels[total_pixels - 4] *= ((float)alpha_value / USHRT_MAX);
// pixels[total_pixels - 3] *= ((float)alpha_value / USHRT_MAX);
// pixels[total_pixels - 2] *= ((float)alpha_value / USHRT_MAX);
// }
//#else
// if (texture_settings.premultiplied_alpha == 0) {
//#pragma omp parallel for
// for (int pixel_group_num = 0; pixel_group_num < tex_width * tex_height * tex_channels; pixel_group_num += 4) {
// unsigned short alpha_value = pixels[pixel_group_num + 3];
// pixels[pixel_group_num] *= ((float)alpha_value / USHRT_MAX);
// pixels[pixel_group_num + 1] *= ((float)alpha_value / USHRT_MAX);
// pixels[pixel_group_num + 2] *= ((float)alpha_value / USHRT_MAX);
// }
// }
//#endif
VkBuffer staging_buffer = {0};
VkDeviceMemory staging_buffer_memory = {0};
graphics_utils_create_buffer(gpu_api->vulkan_state->device, gpu_api->vulkan_state->physical_device, image_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &staging_buffer, &staging_buffer_memory);
void *data;
vkMapMemory(gpu_api->vulkan_state->device, staging_buffer_memory, 0, image_size, 0, &data);
memcpy(data, pixels, image_size);
vkUnmapMemory(gpu_api->vulkan_state->device, staging_buffer_memory);
stbi_image_free(pixels);
uint32_t mip_levels = (uint32_t)(floor(log2(MAX(tex_width, tex_height))));
if (texture_settings.mip_maps_enabled == 0)
mip_levels = 1;
graphics_utils_create_image(gpu_api->vulkan_state->device, gpu_api->vulkan_state->physical_device, tex_width, tex_height, mip_levels, VK_SAMPLE_COUNT_1_BIT, VK_FORMAT_R16G16B16A16_UNORM, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &texture->texture_image, &texture->texture_image_memory);
graphics_utils_transition_image_layout(gpu_api->vulkan_state->device, gpu_api->vulkan_state->graphics_queue, gpu_api->vulkan_state->command_pool, texture->texture_image, VK_FORMAT_R16G16B16A16_UNORM, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, mip_levels);
graphics_utils_copy_buffer_to_image(gpu_api->vulkan_state->device, gpu_api->vulkan_state->graphics_queue, gpu_api->vulkan_state->command_pool, &staging_buffer, &texture->texture_image, tex_width, tex_height);
vkDestroyBuffer(gpu_api->vulkan_state->device, staging_buffer, NULL);
vkFreeMemory(gpu_api->vulkan_state->device, staging_buffer_memory, NULL);
graphics_utils_generate_mipmaps(gpu_api->vulkan_state->device, gpu_api->vulkan_state->physical_device, gpu_api->vulkan_state->graphics_queue, gpu_api->vulkan_state->command_pool, texture->texture_image, VK_FORMAT_R16G16B16A16_UNORM, tex_width, tex_height, mip_levels);
graphics_utils_create_image_view(gpu_api->vulkan_state->device, texture->texture_image, VK_FORMAT_R16G16B16A16_UNORM, VK_IMAGE_ASPECT_COLOR_BIT, mip_levels, &texture->texture_image_view);
graphics_utils_create_sampler(gpu_api->vulkan_state->device, &texture->texture_sampler, (struct SamplerSettings){.mip_levels = mip_levels, .filter = filter, .address_mode = mode});
return 0;
}
void texture_delete(struct Texture *texture, struct GPUAPI *gpu_api) {
vkDestroySampler(gpu_api->vulkan_state->device, texture->texture_sampler, NULL);
vkDestroyImageView(gpu_api->vulkan_state->device, texture->texture_image_view, NULL);
vkDestroyImage(gpu_api->vulkan_state->device, texture->texture_image, NULL);
vkFreeMemory(gpu_api->vulkan_state->device, texture->texture_image_memory, NULL);
free(texture->path);
free(texture->name);
free(texture->type);
}
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// use the id(...) matcher around the match expressions that match the nodes
// you want to access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(id("child", recordDecl())))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the id(...) calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
  /// Returns the AST node bound to \c ID.
  ///
  /// Returns NULL if there was no node bound to \c ID or if there is a node but
  /// it cannot be converted to the specified type.
  template <typename T>
  const T *getNodeAs(StringRef ID) const {
    return MyBoundNodes.getNodeAs<T>(ID);
  }
  /// Type of mapping from binding identifiers to bound nodes. This type
  /// is an associative container with a key type of \c std::string and a value
  /// type of \c clang::ast_type_traits::DynTypedNode
  using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;
  /// Retrieve mapping from binding identifiers to bound nodes.
  const IDToNodeMap &getMap() const {
    return MyBoundNodes.getMap();
  }
private:
  // Only the tree builder may construct instances; users receive BoundNodes
  // through MatchFinder callbacks (see class comment above).
  friend class internal::BoundNodesTreeBuilder;
  /// Create BoundNodes from a pre-filled map of bindings.
  BoundNodes(internal::BoundNodesMap &MyBoundNodes)
      : MyBoundNodes(MyBoundNodes) {}
  // Backing storage; all accessors above delegate to this map.
  internal::BoundNodesMap MyBoundNodes;
};
/// If the provided matcher matches a node, binds the node to \c ID.
///
/// FIXME: Do we want to support this now that we have bind()?
template <typename T>
internal::Matcher<T> id(StringRef ID,
                        const internal::BindableMatcher<T> &InnerMatcher) {
  // Pure delegation: bind() already produces a matcher that records the
  // matched node under \p ID.
  internal::Matcher<T> BoundMatcher = InnerMatcher.bind(ID);
  return BoundMatcher;
}
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() {
  // TrueMatcher accepts every node; no state to configure.
  internal::TrueMatcher Matcher;
  return Matcher;
}
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile())
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Resolve the node's begin location through macro expansions first, then
  // test that expanded location against the main file.
  const SourceManager &SM = Finder->getASTContext().getSourceManager();
  SourceLocation Expansion = SM.getExpansionLoc(Node.getBeginLoc());
  return SM.isInMainFile(Expansion);
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader())
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  const SourceManager &SM = Finder->getASTContext().getSourceManager();
  SourceLocation Expansion = SM.getExpansionLoc(Node.getBeginLoc());
  // An invalid location can never lie inside a system header.
  return Expansion.isValid() && SM.isInSystemHeader(Expansion);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*"))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
                          std::string, RegExp) {
  const SourceManager &SM = Finder->getASTContext().getSourceManager();
  SourceLocation Expansion = SM.getExpansionLoc(Node.getBeginLoc());
  if (Expansion.isInvalid())
    return false;
  // Locations without a backing file (e.g. built from memory buffers) have no
  // name to match against.
  const auto *Entry = SM.getFileEntryForID(SM.getFileID(Expansion));
  if (!Entry)
    return false;
  return llvm::Regex(RegExp).match(Entry->getName());
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches public C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPublic())
/// matches 'int a;'
AST_MATCHER(Decl, isPublic) {
  // Compare the declaration's access specifier against 'public'.
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_public;
}
/// Matches protected C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isProtected())
/// matches 'int b;'
AST_MATCHER(Decl, isProtected) {
  // Compare the declaration's access specifier against 'protected'.
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_protected;
}
/// Matches private C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPrivate())
/// matches 'int c;'
AST_MATCHER(Decl, isPrivate) {
  // Compare the declaration's access specifier against 'private'.
  const AccessSpecifier Access = Node.getAccess();
  return Access == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  // Delegates directly to the FieldDecl query.
  const bool IsBitField = Node.isBitField();
  return IsBitField;
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Non-bit-field members have no width to compare.
  if (!Node.isBitField())
    return false;
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Fields without an in-class initializer never match.
  const Expr *Initializer = Node.getInClassInitializer();
  if (!Initializer)
    return false;
  return InnerMatcher.matches(*Initializer, Finder, Builder);
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
AST_MATCHER(FunctionDecl, isMain) {
  // Delegates directly to the FunctionDecl query.
  const bool IsMain = Node.isMain();
  return IsMain;
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  // Guard against a null specialized template before dereferencing.
  if (const ClassTemplateDecl *Template = Node.getSpecializedTemplate())
    return InnerMatcher.matches(*Template, Finder, Builder);
  return false;
}
/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) {
  // Delegates directly to the Decl query.
  const bool IsImplicit = Node.isImplicit();
  return IsImplicit;
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
///   template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Succeeds as soon as any argument of the specialization matches.
  const ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, Args.begin(), Args.end(), Finder,
                             Builder);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip implicit AST nodes, then run the inner matcher on what remains.
  const Expr *Stripped = Node.IgnoreImplicit();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Strip implicit casts, then run the inner matcher on what remains.
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and casts, then run the inner matcher on what remains.
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Strip parentheses and implicit casts, then match what remains.
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  // Strip any ParenType sugar before matching the underlying type.
  const QualType Stripped = Node.IgnoreParens();
  return InnerMatcher.matches(Stripped, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  // Strip surrounding parentheses, then match the inner expression.
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
///   void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  // Delegates directly to the Expr dependence query.
  const bool Dependent = Node.isInstantiationDependent();
  return Dependent;
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) {
  // Delegates directly to the Expr dependence query.
  const bool Dependent = Node.isTypeDependent();
  return Dependent;
}
/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) {
  // Delegates directly to the Expr dependence query.
  const bool Dependent = Node.isValueDependent();
  return Dependent;
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  const ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  // Out-of-range index: nothing to match against.
  if (N >= Args.size())
    return false;
  return InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  // Compare the specialization's argument count against the expected N.
  const ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return Args.size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only arguments of kind Type carry a QualType to match.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template-template arguments carry a TemplateName to match against.
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  // Anything other than a Declaration argument has no Decl to hand on.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Only Expression-kind arguments have an Expr for the inner matcher.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  // Only the argument's kind is inspected; use refersToIntegralType or
  // equalsIntegralValue to additionally constrain the type or value.
  return Node.getKind() == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Non-integral arguments have no integral type to match.
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  // Compare the canonical base-10 rendering of the APSInt value.
  return Node.getAsIntegral().toString(10) == Value;
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
/// }
/// \endcode
AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); }
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if expressions have one).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Semantic-only init lists report no syntactic form and never match.
  if (const Expr *SyntacticForm = Node.getSyntacticForm())
    return InnerMatcher.matches(*SyntacticForm, Finder, Builder);
  return false;
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // A for loop may omit its increment, in which case nothing can match.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // The init statement is optional; absent means no match.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the loop variable of a range-based for loop.
///
/// Example:
/// cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  const VarDecl *const Var = Node.getLoopVariable();
  return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder));
}
/// Matches the range initialization statement of a range-based for loop.
///
/// Example:
/// cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *const Init = Node.getRangeInit();
  return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *bar;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is an IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference another expressions and can be met
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
///   staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
/// matches '{ [2].y = 1.0, [0].x = 1.0 }',
/// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // Compare the number of designators in this initializer against N.
  const unsigned NumDesignators = Node.size();
  return NumDesignators == N;
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int"))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Delegate directly to the inner matcher on the operand's type.
  return InnerMatcher.matches(Node.getTypeOfArgument(), Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
/// int x;
/// int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
///
/// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  // True iff this expression is the requested trait (sizeof, alignof, ...).
  return Kind == Node.getKind();
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::Matcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Both plain alignof and the "preferred" alignment variant count as
  // alignof; the caller's matcher must hold in addition.
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
            InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict the kind to UETT_SizeOf and additionally require InnerMatcher.
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(const std::string &Name) {
  // HasNameMatcher accepts a list of names; wrap the single name in one.
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher({Name}));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  // Prepend "::" so patterns anchored like "::X" can match fully
  // qualified names.
  const std::string QualifiedName = "::" + Node.getQualifiedNameAsString();
  return llvm::Regex(RegExp).match(QualifiedName);
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
/// class A { int operator*(); };
/// const A &operator<<(const A &a, const A &b);
/// A a;
/// a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, StringRef,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  // Name is the spelling without the "operator" prefix, e.g. "<<" or "*".
  return internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, StringRef,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name);
}
/// Matches C++ classes that are directly or indirectly derived from
/// a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_MATCHER_P(CXXRecordDecl, isDerivedFrom,
              internal::Matcher<NamedDecl>, Base) {
  // Walking the base-class graph is delegated to the match finder.
  return Finder->classIsDerivedFrom(&Node, Base, Builder);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isDerivedFrom, std::string, BaseName, 1) {
  assert(!BaseName.empty());
  // Convenience wrapper: forwards to the matcher-based overload via hasName().
  return isDerivedFrom(hasName(BaseName)).matches(Node, Finder, Builder);
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isSameOrDerivedFrom,
                       internal::Matcher<NamedDecl>, Base, 0) {
  // "Same or derived": the class itself may satisfy Base directly.
  return Matcher<CXXRecordDecl>(anyOf(Base, isDerivedFrom(Base)))
      .matches(Node, Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isSameOrDerivedFrom, std::string,
                       BaseName, 1) {
  assert(!BaseName.empty());
  // Convenience wrapper around the matcher-based overload, mirroring
  // the string overload of isDerivedFrom.
  return isSameOrDerivedFrom(hasName(BaseName)).matches(Node, Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Stops at the first method satisfying InnerMatcher; it does not
  // produce a result per matching method.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  // True only for the implicit closure class generated for a lambda.
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // eachOf yields one result for the node itself and one for every
  // matching descendant, giving "find all" semantics.
  return eachOf(Matcher, forEachDescendant(Matcher));
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // The supported node kinds are enumerated by HasDeclarationSupportedTypes;
  // the polymorphic wrapper dispatches on the concrete node type.
  return internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // No underlying declaration means no match.
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // The original dereferenced getImplicitObjectArgument() (via
  // ->IgnoreParenImpCasts()) BEFORE its null check, making the check dead
  // and the dereference UB on a null argument. Guard first, then strip
  // parentheses and implicit casts.
  const Expr *Object = Node.getImplicitObjectArgument();
  if (!Object)
    return false;
  return InnerMatcher.matches(*Object->IgnoreParenImpCasts(), Finder, Builder);
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
/// [webView stringByEvaluatingJavaScriptFromString:webViewJavaScript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // The value is a QualType (the old local was misleadingly named TypeDecl).
  const QualType ReceiverType = Node.getReceiverType();
  return InnerMatcher.matches(ReceiverType, Finder, Builder);
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
///   NSString *x = @"hello";
///   [x containsString:@"h"];
/// \endcode
/// but not
/// \code
///   [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // A null instance receiver never matches.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (!Receiver)
    return false;
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // Idiomatic string equality instead of compare(...) == 0; behavior is
  // identical (exact, case-sensitive match of the printed selector).
  return Node.getSelector().getAsString() == BaseName;
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
StringRef,
internal::hasAnySelectorFunc>
hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) {
  assert(!RegExp.empty());
  // Match the regular expression against the selector's textual form.
  return llvm::Regex(RegExp).match(Node.getSelector().getAsString());
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  // A null selector generally signals an error condition in the AST.
  return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  // Unary selectors take no arguments (no ':' in the selector).
  return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  // Keyword selectors carry one or more ':'-terminated argument slots.
  return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  // Compare the selector's argument count against the expected N.
  const unsigned NumArgs = Node.getSelector().getNumArgs();
  return NumArgs == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  // Guard against a null callee, then delegate to the inner matcher.
  if (const Expr *Callee = Node.getCallee())
    return InnerMatcher.matches(*Callee, Finder, Builder);
  return false;
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  // Matching the callee's declaration reduces to hasDeclaration on the call.
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // A null type means there is nothing to match against.
  const QualType QT = internal::getUnderlyingType(Node);
  if (QT.isNull())
    return false;
  return InnerMatcher.matches(QT, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // Lift the Decl matcher to a QualType matcher via hasDeclaration;
  // a null type never matches.
  const QualType QT = internal::getUnderlyingType(Node);
  if (QT.isNull())
    return false;
  return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder);
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
  // Implicitly-generated declarations (e.g. implicit destructors) carry no
  // type source info and therefore cannot match.
  const auto *TSI = Node.getTypeSourceInfo();
  if (!TSI)
    return false;
  return Inner.matches(TSI->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  // Exact textual comparison against the type's printed representation.
  return Node.getAsString() == Name;
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo
/// cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  // Guard clauses replace the original single boolean chain.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Lifts the Decl matcher to a QualType matcher via hasDeclaration.
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  // Strip all sugar (typedefs, using-aliases, ...) before matching.
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Reject null QualTypes and non-reference types before dereferencing.
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Delegate to the QualType overload via hasDeclaration.
  const auto Delegate = references(qualType(hasDeclaration(InnerMatcher)));
  return Delegate.matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *ImplicitObject = Node.getImplicitObjectArgument())
    return InnerMatcher.matches(*ImplicitObject, Finder, Builder);
  return false;
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept either the type itself or a pointer to it.
  const auto TypeMatcher =
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)));
  return onImplicitObjectArgument(TypeMatcher).matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  // Accept either the declared type itself or a pointer to it.
  const auto TypeMatcher =
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)));
  return onImplicitObjectArgument(TypeMatcher).matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // The found declaration is a UsingShadowDecl only when the reference
  // goes through a using-declaration.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
 InnerMatcher) {
 // Succeeds on the first overload candidate the inner matcher accepts.
 return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
 Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  // Multi-declaration statements never match.
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // getAnyInitializer finds the initializer on any redeclaration.
  if (const Expr *Init = Node.getAnyInitializer())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
 // File-scope statics (like z above) have static storage but are not
 // local, so they do not match.
 return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
 return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
 // Complement of hasLocalStorage: statics and globals.
 return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
 return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the variable declarations y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
 return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
 return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
 return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
 AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
 CXXConstructExpr,
 ObjCMessageExpr),
 unsigned, N) {
 // getNumArgs counts defaulted arguments too, per the doc above.
 return Node.getNumArgs() == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr,
                                                           ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  if (N >= Node.getNumArgs())
    return false;
  // Parens and implicit casts around the argument are skipped.
  const Expr *Arg = Node.getArg(N)->IgnoreParenImpCasts();
  return InnerMatcher.matches(*Arg, Finder, Builder);
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
/// int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  if (N >= Node.getNumInits())
    return false;
  return InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  const ptrdiff_t DeclCount =
      std::distance(Node.decl_begin(), Node.decl_end());
  return DeclCount == static_cast<ptrdiff_t>(N);
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Walk to the N'th declaration; fail if the statement has fewer.
  unsigned Remaining = N;
  for (DeclStmt::const_decl_iterator I = Node.decl_begin(),
                                     E = Node.decl_end();
       I != E; ++I) {
    if (Remaining == 0)
      return InnerMatcher.matches(**I, Finder, Builder);
    --Remaining;
  }
  return false;
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
 // A catch-all handler carries no exception declaration.
 return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
 internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
 // Succeeds on the first initializer the inner matcher accepts.
 return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
 Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // getAnyMember is null for base-class initializers.
  if (const FieldDecl *Member = Node.getAnyMember())
    return InnerMatcher.matches(*Member, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
 return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
 return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
 // Exact complement of isBaseInitializer for well-formed initializers.
 return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Try each argument against a scratch builder; only commit the
  // bindings of the first argument that matches.
  for (const Expr *Argument : Node.arguments()) {
    BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(*Argument, Finder, &Candidate)) {
      *Builder = std::move(Candidate);
      return true;
    }
  }
  return false;
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
 return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
 return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
 AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
 ObjCMethodDecl,
 BlockDecl),
 unsigned, N, internal::Matcher<ParmVarDecl>,
 InnerMatcher) {
 // Out-of-range indices simply fail to match.
 return (N < Node.parameters().size()
 && InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
 AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
 CXXConstructExpr),
 internal::Matcher<Expr>, ArgMatcher,
 internal::Matcher<ParmVarDecl>, ParamMatcher) {
 // Accumulates one bound-node set per (argument, parameter) pair that
 // matched; all of them are reported to the caller at the end.
 BoundNodesTreeBuilder Result;
 // The first argument of an overloaded member operator is the implicit object
 // argument of the method which should not be matched against a parameter, so
 // we skip over it here.
 BoundNodesTreeBuilder Matches;
 unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
 .matches(Node, Finder, &Matches)
 ? 1
 : 0;
 // ArgIndex walks the call's arguments while ParamIndex walks the callee's
 // parameters; they stay offset by one for member operator calls (above).
 int ParamIndex = 0;
 bool Matched = false;
 for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
 BoundNodesTreeBuilder ArgMatches(*Builder);
 if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
 Finder, &ArgMatches)) {
 // The argument matched; now require the corresponding parameter of the
 // callee (constructor or function) to satisfy ParamMatcher as well.
 BoundNodesTreeBuilder ParamMatches(ArgMatches);
 if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
 hasParameter(ParamIndex, ParamMatcher)))),
 callExpr(callee(functionDecl(
 hasParameter(ParamIndex, ParamMatcher))))))
 .matches(Node, Finder, &ParamMatches)) {
 Result.addMatch(ParamMatches);
 Matched = true;
 }
 }
 ++ParamIndex;
 }
 *Builder = std::move(Result);
 return Matched;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
 AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
 ObjCMethodDecl,
 BlockDecl),
 internal::Matcher<ParmVarDecl>,
 InnerMatcher) {
 // Succeeds on the first parameter the inner matcher accepts.
 return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
 Node.param_end(), Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
 AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
 FunctionProtoType),
 unsigned, N) {
 // Variadic ellipsis is not counted as a parameter (see \c k above).
 return Node.getNumParams() == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); }
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  const QualType ReturnType = Node.getReturnType();
  return InnerMatcher.matches(ReturnType, Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
 VarDecl)) {
 return Node.isExternC();
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
 AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
 VarDecl)) {
 // Checks the written storage class, not the effective storage duration.
 return Node.getStorageClass() == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
 return Node.isDeleted();
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
 return Node.isDefaulted();
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Declarations without a prototype cannot carry an exception spec.
  const FunctionProtoType *Proto = internal::getFunctionProtoType(Node);
  return Proto != nullptr && Proto->hasDynamicExceptionSpec();
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
 AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
 FunctionProtoType)) {
 const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
 // If the function does not have a prototype, then it is assumed to be a
 // throwing function (as it would if the function did not have any exception
 // specification).
 if (!FnTy)
 return false;
 // Assume the best for any unresolved exception specification.
 // NOTE: this check must precede isNothrow(), which would otherwise
 // evaluate an unresolved specification as throwing.
 if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
 return true;
 return FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
 AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
 FunctionDecl,
 IfStmt)) {
 // All three node kinds expose the same isConstexpr() accessor.
 return Node.isConstexpr();
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // The condition may be absent, e.g. in 'for (;;)'.
  if (const Expr *Condition = Node.getCond())
    return InnerMatcher.matches(*Condition, Finder, Builder);
  return false;
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  if (const Stmt *Then = Node.getThen())
    return InnerMatcher.matches(*Then, Finder, Builder);
  return false;
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  if (const Stmt *Else = Node.getElse())
    return InnerMatcher.matches(*Else, Finder, Builder);
  return false;
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
 AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
 QualType),
 std::string, ID) {
 // FIXME: Figure out whether it makes sense to allow this
 // on any other node types.
 // For *Loc it probably does not make sense, as those seem
 // unique. For NestedNameSepcifier it might make sense, as
 // those also have pointer identity, but I'm not sure whether
 // they're ever reused.
 // Implemented as a filter: drop every binding set in which the node
 // bound to ID differs from the current node; the match succeeds if
 // at least one binding set survives.
 internal::NotEqualsBoundNodePredicate Predicate;
 Predicate.ID = ID;
 Predicate.Node = ast_type_traits::DynTypedNode::create(Node);
 return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Null unless the condition declares a variable.
  if (const DeclStmt *CondVar = Node.getConditionVariableDeclStmt())
    return InnerMatcher.matches(*CondVar, Finder, Builder);
  return false;
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Index = Node.getIdx();
  return Index != nullptr && InnerMatcher.matches(*Index, Finder, Builder);
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpression(hasBase(implicitCastExpr(
/// hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Base = Node.getBase();
  return Base != nullptr && InnerMatcher.matches(*Base, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // GetBodyMatcher abstracts over the different body accessors per node kind.
  if (const Stmt *Body = internal::GetBodyMatcher<NodeType>::get(Node))
    return InnerMatcher.matches(*Body, Finder, Builder);
  return false;
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const CompoundStmt *Compound = CompoundStmtMatcher<NodeType>::get(Node);
  if (!Compound)
    return false;
  return matchesFirstInPointerRange(InnerMatcher, Compound->body_begin(),
                                    Compound->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0)))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
 // CompoundStmt::size() is the number of direct child statements.
 return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
 return internal::PolymorphicMatcherWithParam1<
 internal::ValueEqualsMatcher,
 ValueT>(Value);
}
// The following overloads let the dynamic-matcher registry pick concrete
// parameter types (bool / unsigned / double); FloatingLiteral is only
// supported by the double overload.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
 AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
 CXXBoolLiteralExpr,
 IntegerLiteral),
 bool, Value, 0) {
 return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
 .matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
 AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
 CXXBoolLiteralExpr,
 IntegerLiteral),
 unsigned, Value, 1) {
 return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
 .matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
 AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
 CXXBoolLiteralExpr,
 FloatingLiteral,
 IntegerLiteral),
 double, Value, 2) {
 return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
 .matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compare against the spelling of the node's opcode (e.g. "||", "!").
  return Name == Node.getOpcodeStr(Node.getOpcode());
}
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; })
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  // Both supported node kinds expose isAssignmentOp(), covering plain and
  // compound assignments as well as overloaded operator= calls.
  return Node.isAssignmentOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Guard against a missing operand, then defer to the inner matcher.
  if (const Expr *LHS = Node.getLHS())
    return InnerMatcher.matches(*LHS, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Guard against a missing operand, then defer to the inner matcher.
  if (const Expr *RHS = Node.getRHS())
    return InnerMatcher.matches(*RHS, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
    const internal::Matcher<Expr> &InnerMatcher) {
  // Equivalent to matching the LHS or the RHS with the same inner matcher.
  const auto MatchesLeft = hasLHS(InnerMatcher);
  const auto MatchesRight = hasRHS(InnerMatcher);
  return anyOf(MatchesLeft, MatchesRight);
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
/// cxxBoolLiteral(equals(true))))
/// \code
/// !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match the unary operator's sub-expression, if one is present.
  if (const Expr *SubExpr = Node.getSubExpr())
    return InnerMatcher.matches(*SubExpr, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
/// int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // GetSourceExpressionMatcher dispatches on the node kind (cast vs. opaque
  // value) to retrieve the underlying source expression.
  if (const Expr *SubExpr =
          internal::GetSourceExpressionMatcher<NodeType>::get(Node))
    return InnerMatcher.matches(*SubExpr, Finder, Builder);
  return false;
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
/// int *p = 0;
/// \endcode
///
/// If the matcher is used from clang-query, the CastKind parameter
/// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  // Direct comparison against the cast's CastKind enumerator.
  return Node.getCastKind() == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Use the destination type as written in the cast expression, not the
  // expression's semantic result type.
  const QualType NodeType = Node.getTypeAsWritten();
  return InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // An implicit cast's destination type is simply the expression's type.
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches RecordDecl object that are spelled with "struct."
///
/// Example matches S, but not C or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// \endcode
AST_MATCHER(RecordDecl, isStruct) {
  // Thin wrapper over RecordDecl::isStruct().
  return Node.isStruct();
}
/// Matches RecordDecl object that are spelled with "union."
///
/// Example matches U, but not C or S.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// \endcode
AST_MATCHER(RecordDecl, isUnion) {
  // Thin wrapper over RecordDecl::isUnion().
  return Node.isUnion();
}
/// Matches RecordDecl object that are spelled with "class."
///
/// Example matches C, but not S or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// \endcode
AST_MATCHER(RecordDecl, isClass) {
  // Thin wrapper over RecordDecl::isClass().
  return Node.isClass();
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
/// condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match the "true" branch when it exists; otherwise the matcher fails.
  if (const Expr *TrueBranch = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueBranch, Finder, Builder);
  return false;
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
/// condition ? a : b
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  // Match the "false" branch when it exists; otherwise the matcher fails.
  if (const Expr *FalseBranch = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseBranch, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
/// @interface X
/// - (void)ma; // Doesn't match, interface is declaration.
/// @end
/// @implementation X
/// - (void)ma {}
/// @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
/// Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  // Each supported declaration kind reports whether this particular
  // redeclaration is the one that provides the definition.
  return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
/// void f(...);
/// void g(int);
/// template <typename... Ts> void h(Ts...);
/// void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  // Thin wrapper over FunctionDecl::isVariadic() (C-style '...').
  return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
/// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
/// ofClass(hasName("A"))))))
/// \code
/// class A {
/// public:
/// A();
/// };
/// A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  // The parent of a CXXMethodDecl is the class it is declared in.
  if (const CXXRecordDecl *Class = Node.getParent())
    return InnerMatcher.matches(*Class, Finder, Builder);
  return false;
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
/// class A { virtual void f(); };
/// class B : public A { void f(); };
/// class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
/// that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
/// class A1 { virtual void f(); };
/// class A2 { virtual void f(); };
/// class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
/// once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  // Accumulate one bound-nodes entry per matching overridden method, so a
  // single node can produce multiple match results.
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    // Each candidate gets its own copy of the current bindings so a failed
    // match cannot pollute the caller's builder.
    BoundNodesTreeBuilder OverriddenBuilder(*Builder);
    const bool OverriddenMatched =
        InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
    if (OverriddenMatched) {
      Matched = true;
      Result.addMatch(OverriddenBuilder);
    }
  }
  // Replace the caller's bindings with the accumulated per-match results.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches if the given method declaration is virtual.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isVirtual) {
  // Thin wrapper over CXXMethodDecl::isVirtual().
  return Node.isVirtual();
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// void x();
/// };
/// \endcode
/// matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  // Only matches when the 'virtual' keyword was spelled on this declaration,
  // as opposed to virtual-ness inherited from an overridden method.
  return Node.isVirtualAsWritten();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
/// class A final {};
///
/// struct B {
/// virtual void f();
/// };
///
/// struct C : B {
/// void f() final;
/// };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  // 'final' is modeled as FinalAttr on both classes and methods; 'template'
  // disambiguates the dependent member template in this polymorphic context.
  return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x() = 0;
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
  // Thin wrapper over CXXMethodDecl::isPure() (pure virtual, '= 0').
  return Node.isPure();
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
/// void foo() const;
/// void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
  // Thin wrapper over CXXMethodDecl::isConst() (const-qualified member fn).
  return Node.isConst();
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
  // Thin wrapper over CXXMethodDecl::isCopyAssignmentOperator().
  return Node.isCopyAssignmentOperator();
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  // Thin wrapper over CXXMethodDecl::isMoveAssignmentOperator().
  return Node.isMoveAssignmentOperator();
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // Matches if the method actually overrides something, or if it carries an
  // explicit 'override' attribute (which may be present even on errors).
  return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &) = default; // #2
/// S(S &&) = delete; // #3
/// };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  // Thin wrapper over CXXMethodDecl::isUserProvided() (neither defaulted
  // nor deleted on its first declaration).
  return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  // All three supported member-access node kinds expose isArrow().
  return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
  // Delegates to Type::isIntegerType() on the underlying type.
  return Node->isIntegerType();
}
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
  // Delegates to Type::isUnsignedIntegerType() on the underlying type.
  return Node->isUnsignedIntegerType();
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
  // Delegates to Type::isSignedIntegerType() on the underlying type.
  return Node->isSignedIntegerType();
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
/// void a(char);
/// void b(wchar_t);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
  // Delegates to Type::isAnyCharacterType() (char, wchar_t, char16_t, ...).
  return Node->isAnyCharacterType();
}
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
/// int *i = nullptr;
///
/// @interface Foo
/// @end
/// Foo *f;
///
/// int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
/// matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  // isAnyPointerType() covers ordinary pointers and ObjC object pointers.
  return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
/// void a(int);
/// void b(int const);
/// void c(const int);
/// void d(const int*);
/// void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
/// matches "void b(int const)", "void c(const int)" and
/// "void e(int const) {}". It does not match d as there
/// is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  // Note: checks the QualType itself (top-level const), not the pointee.
  return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
/// void a(int);
/// void b(int volatile);
/// void c(volatile int);
/// void d(volatile int*);
/// void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
/// matches "void b(int volatile)", "void c(volatile int)" and
/// "void e(int volatile) {}". It does not match d as there
/// is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  // Note: checks the QualType itself (top-level volatile), not the pointee.
  return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
/// typedef const int const_int;
/// const_int i;
/// int *const j;
/// int *volatile k;
/// int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  // "Local" qualifiers are attached directly to this QualType node, not
  // hidden behind a typedef.
  return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  // Apply the inner matcher to the declaration the member expression refers
  // to; getMemberDecl() is dereferenced unchecked here.
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
/// struct X {
/// int m;
/// int f(X x) { x.m; return m; }
/// };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
/// matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // For dependent member accesses with no written base (implicit 'this'),
  // there is no object expression to hand to the inner matcher.
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  // Plain MemberExpr reaches here unconditionally, so implicit 'this'
  // accesses on MemberExpr are still matched (see the doc comment above).
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
/// namespace X { void b(); }
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
/// matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Succeeds if any shadow declaration introduced by this using-decl matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
/// namespace X { int a; void b(); }
/// using X::a;
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
/// matches \code using X::b \endcode
/// but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  // Apply the inner matcher to the declaration the shadow decl refers to;
  // getTargetDecl() is dereferenced unchecked here.
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
/// template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// matches the template instantiation of X<A>.
///
/// But given
/// \code
/// template <typename T> class X {}; class A {};
/// template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Query the specialization kind once instead of three times. Any of the
  // three instantiation kinds counts (implicit, explicit-instantiation
  // definition, explicit-instantiation declaration); explicit template
  // specializations deliberately do not (see isExplicitTemplateSpecialization).
  const auto Kind = Node.getTemplateSpecializationKind();
  return Kind == TSK_ImplicitInstantiation ||
         Kind == TSK_ExplicitInstantiationDefinition ||
         Kind == TSK_ExplicitInstantiationDeclaration;
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { T i; }
/// A(0);
/// A(0U);
/// \endcode
/// functionDecl(isInstantiated())
/// matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A declaration qualifies if it is itself a class/function template
  // instantiation, or has an ancestor that is one.
  auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                    functionDecl(isTemplateInstantiation())));
  return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
/// int j;
/// template<typename T> void A(T t) { T i; j += 42;}
/// A(0);
/// A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
/// matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
/// will NOT match j += 42; as it's shared between the template definition and
/// instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  // A statement is "in" an instantiation when some ancestor declaration is a
  // class or function template instantiation.
  return stmt(
      hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                             functionDecl(isTemplateInstantiation())))));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { }
/// template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
/// matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Only full explicit specializations qualify; instantiation kinds are
  // handled by isTemplateInstantiation() instead.
  return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
                                internal::Matcher<QualType>, InnerMatcher, 0) {
  // Adapt a QualType matcher so it can be applied to TypeLoc nodes.
  return internal::BindableMatcher<TypeLoc>(
      new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
/// struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
/// matches "bool func();"
AST_MATCHER(Type, booleanType) {
  // Thin wrapper over Type::isBooleanType().
  return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
/// struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
/// matches "void func();"
AST_MATCHER(Type, voidType) {
  // Thin wrapper over Type::isVoidType().
  return Node.isVoidType();
}
// Convenience alias used by the type matchers below: a variadic matcher that
// dyn_casts a Type to the given node type.
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
/// struct A {};
/// A a;
/// int b;
/// float c;
/// bool d;
/// \endcode
/// builtinType()
/// matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[4];
/// void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
/// matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
/// _Complex float f;
/// \endcode
/// complexType()
/// matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
/// int i;
/// float f;
/// \endcode
/// realFloatingPointType()
/// matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
  // Thin wrapper over Type::isRealFloatingType() (excludes complex types).
  return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
/// struct A {};
/// A a[7];
/// int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
/// matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
/// void f() {
/// int a[2];
/// int b[] = { 2, 3 };
/// int c[b[0]];
/// }
/// \endcode
/// constantArrayType()
/// matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
/// int a[42];
/// int b[2 * 21];
/// int c[41], d[43];
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
/// matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
/// matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
                                                          StringLiteral),
                          unsigned, N) {
  // HasSizeMatcher dispatches on the node kind (array type vs. string
  // literal) to extract the size being compared against N.
  return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
/// template<typename T, int Size>
/// class array {
/// T data[Size];
/// };
/// \endcode
/// dependentSizedArrayType
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[42];
/// void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
/// matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
/// void f() {
/// int a[] = { 2, 3 }
/// int b[42];
/// int c[a[0]];
/// }
/// \endcode
/// variableArrayType()
/// matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  // Apply the inner matcher to the VLA's size expression; getSizeExpr() is
  // dereferenced unchecked here.
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 }
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
/// struct A { int i; }
/// A::* ptr = A::i;
/// \endcode
/// memberPointerType()
/// matches "A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // A type written without any qualifier carries no NestedNameSpecifier at
  // all; such nodes never match.
  const NestedNameSpecifier *Qualifier = Node.getQualifier();
  return Qualifier != nullptr &&
         InnerMatcher.matches(*Qualifier, Finder, Builder);
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Every ElaboratedType names an underlying type; simply forward it.
  const QualType Named = Node.getNamedType();
  return InnerMatcher.matches(Named, Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  // Forward the adjusted (decayed) type to the inner matcher.
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
///   namespace M {
///     class D {};
///   }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  // A declaration with no enclosing context can never match.
  if (const DeclContext *Context = Node.getDeclContext())
    return InnerMatcher.matches(*Decl::castFromDeclContext(Context), Finder,
                                Builder);
  return false;
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Wrap the NestedNameSpecifier matcher so that it can be applied to the
  // source-location-carrying NestedNameSpecifierLoc node.
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type-specifying qualifiers can match; namespace qualifiers etc.
  // have no associated type.
  const Type *SpecifiedType = Node.getAsType();
  if (SpecifiedType == nullptr)
    return false;
  // Rebuild an unqualified QualType so the comparison ignores qualifiers.
  return InnerMatcher.matches(QualType(SpecifiedType, 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // Reject invalid locs and qualifiers that do not name a type, in that
  // order, before consulting the inner TypeLoc matcher.
  if (!Node)
    return false;
  if (!Node.getNestedNameSpecifier()->getAsType())
    return false;
  return InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // The outermost specifier has no prefix and therefore never matches.
  const NestedNameSpecifier *Prefix = Node.getPrefix();
  return Prefix != nullptr && InnerMatcher.matches(*Prefix, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // An invalid (null) prefix loc means there is nothing left to match.
  if (NestedNameSpecifierLoc Prefix = Node.getPrefix())
    return InnerMatcher.matches(Prefix, Finder, Builder);
  return false;
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Only qualifiers that actually name a namespace can match.
  const NamespaceDecl *Namespace = Node.getAsNamespace();
  return Namespace != nullptr &&
         InnerMatcher.matches(*Namespace, Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
  // Decl nodes have pointer identity, so address comparison suffices.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
  // Stmt nodes have pointer identity, so address comparison suffices.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
  // Type nodes have pointer identity, so address comparison suffices.
  return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  BoundNodesTreeBuilder Result;
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  bool Matched = false;
  // Walk the singly-linked list of case/default labels hanging off the switch.
  for (const SwitchCase *SC = Node.getSwitchCaseList(); SC;
       SC = SC->getNextSwitchCase()) {
    // Give each candidate its own builder so a failed match does not pollute
    // the bindings accumulated by successful ones.
    BoundNodesTreeBuilder CaseBuilder(*Builder);
    bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder);
    if (CaseMatched) {
      Matched = true;
      Result.addMatch(CaseBuilder);
    }
  }
  // Publish the accumulated per-case bindings to the caller.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Collect one binding set per matching initializer.
  BoundNodesTreeBuilder Matches;
  bool AnyMatched = false;
  for (const CXXCtorInitializer *Init : Node.inits()) {
    // Use a scratch builder so a failed candidate leaves no bindings behind.
    BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(*Init, Finder, &Candidate)) {
      AnyMatched = true;
      Matches.addMatch(Candidate);
    }
  }
  *Builder = std::move(Matches);
  return AnyMatched;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
  // Defer to Clang's own classification of this constructor.
  return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
  // Defer to Clang's own classification of this constructor.
  return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
  // Defer to Clang's own classification of this constructor.
  return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
  // Defer to Clang's own classification of this constructor.
  return Node.isDelegatingConstructor();
}
/// Matches constructor and conversion declarations that are marked with
/// the explicit keyword.
///
/// Given
/// \code
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// };
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2, but not #1.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
AST_POLYMORPHIC_MATCHER(isExplicit,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXConstructorDecl,
                                                        CXXConversionDecl)) {
  // Both supported node types expose the same isExplicit() accessor, so no
  // per-type dispatch is needed here.
  return Node.isExplicit();
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // This is required because the spelling of the function used to determine
  // whether inline is specified or not differs between the polymorphic types.
  // A node is exactly one of the two supported kinds, so the check order is
  // irrelevant.
  if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
    return NSD->isInline();
  if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
    return FD->isInlineSpecified();
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
  // Matches 'namespace { ... }' declarations, including nested ones.
  return Node.isAnonymousNamespace();
}
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A non-null RHS means the GNU "case lo ... hi:" range extension was used;
  // such statements are deliberately not matched.
  return Node.getRHS() == nullptr &&
         InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // Scan every attribute attached to the declaration for the requested kind.
  // (The loop variable is named 'A' to avoid shadowing the 'Attr' type.)
  for (const Attr *A : Node.attrs()) {
    if (A->getKind() == AttrKind)
      return true;
  }
  return false;
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare "return;" has a null return value and never matches.
  const auto *RetValue = Node.getRetValue();
  return RetValue != nullptr &&
         InnerMatcher.matches(*RetValue, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) {
  // A null pointer constant is GNU __null, a C++11 nullptr literal, or the
  // integer literal 0 appearing directly in a pointer-typed context.
  return anyOf(
      gnuNullExpr(), cxxNullPtrLiteralExpr(),
      integerLiteral(equals(0), hasParent(expr(hasType(pointerType())))));
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
///   std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
///   return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
///   matches 'return *this'
///   but does not match 'return v > 0' inside the lambda.
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Walk upwards through the statement's parents (a node may have more than
  // one parent chain) until a FunctionDecl or a LambdaExpr is found.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<ast_type_traits::DynTypedNode, 8> Stack(Parents.begin(),
                                                            Parents.end());
  while(!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A lambda body belongs to the closure's call operator, not to the
      // enclosing function, so match against that call operator here.
      if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
                              Finder, Builder)) {
        return true;
      }
    } else {
      // Neither a function nor a lambda: keep climbing towards the root.
      for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  // Defer to the declaration's own linkage computation.
  return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  // Defer to the parameter declaration's default-argument flag.
  return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  // True for array forms of new, e.g. 'new T[n]'.
  return Node.isArray();
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
///   MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
///   matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // Non-array news have no size expression, so require isArray() first.
  return Node.isArray() &&
         InnerMatcher.matches(*Node.getArraySize(), Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  // Defer to the record's own definition check.
  return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  // True for C++11 scoped enumerations ('enum class' / 'enum struct').
  return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Only prototyped function types record trailing-return syntax; functions
  // whose type is not a FunctionProtoType never match.
  const auto *Proto = Node.getType()->getAs<FunctionProtoType>();
  return Proto != nullptr && Proto->hasTrailingReturn();
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective())`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  // Standalone directives are those that cannot carry a structured block.
  return Node.isStandaloneDirective();
}
/// Matches the Stmt AST node that is marked as being the structured-block
/// of an OpenMP executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``stmt(isOMPStructuredBlock())`` matches ``{}``.
AST_MATCHER(Stmt, isOMPStructuredBlock) {
  // Defer to the statement's own OpenMP structured-block flag.
  return Node.isOMPStructuredBlock();
}
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  // Guard first: standalone directives must be rejected before asking for
  // the structured block.
  if (Node.isStandaloneDirective())
    return false; // Standalone directives have no structured blocks.
  return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  // Succeed as soon as any clause of the directive satisfies the matcher.
  const ArrayRef<OMPClause *> AllClauses = Node.clauses();
  return matchesFirstInPointerRange(InnerMatcher, AllClauses.begin(),
                                    AllClauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  // Compare the clause's kind against the 'none' enumerator.
  return Node.getDefaultKind() == OMPC_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  // Compare the clause's kind against the 'shared' enumerator.
  return Node.getDefaultKind() == OMPC_DEFAULT_shared;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel for
///   #pragma omp          for
/// \endcode
///
/// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))``
/// matches ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, the ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // Delegate to Clang's clause/directive compatibility table.
  return isAllowedClauseForDirective(Node.getDirectiveKind(), CKind);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
SparseMatrix.h | #pragma once
#include <Matrix.h>
#include <algorithm>
#include <utility>
namespace freeaml
{
/**
* @brief @c SparseMatrix<T> is a class suited for representing sparse matrices.
*
* This class stores a matrix of elements of type @c T. It overloads the
* addition (+), subtraction (-), multiplication (*) and division (/) operators
* for supporting common matrix operations such as matrix addition, matrix
* multiplication by scalar and matrix multiplication with another matrix or a
* vector.
*
* Whenever possible, this class avoids storing elements equal to @c T{} because
* @c T{} is zero for all primitive numeric types in C++ (e.g. <tt>int{} ==
* 0</tt>, <tt>double{} == 0.0</tt> and so on). Therefore, when an element
* accessed by the user is not explicitly stored in the matrix, it is known to
* be equal to @c T{}.
*
* If the matrix is a @c const object, accessing an element which is not
* explicitly stored in it incurs no penalties, but if the matrix is not a
* @c const object, accessing such an element will cause it to actually be
* introduced in the matrix with value initially set to @c T{}. For that reason,
* do not access an element of a @c SparseMatrix<T> object @c M unless you are
* about to set its value to something other than @c T{} or @c M is @c const.
* Therefore, if @c M is not @c const, only use expressions such as @c M(i,j)
* if you are setting this element to something not equal to @c T{} (e.g.
* <tt>M(i,j) = 2.5;</tt>), otherwise you will cause @c M(i,j) to be explicitly
* stored as @c T{} in the matrix, which is not only wasteful in terms of memory
* consumption but will also slow down element access considerably.
*
* Some commonly used mathematical operations are also provided in the class
* (e.g. determining the transpose of the matrix).
*
* Support for OpenMP was added to the functions and operators which showed a
* significant speedup when implemented using multiple threads.
*/
template<typename T>
class SparseMatrix
{
public:
    using value_type = typename Vector<T>::value_type;
    using size_type = typename Vector<T>::size_type;

    /** @brief One stored element: an (index, value) pair within a row; the
     *         index is the element's column. */
    using Element = std::pair<size_type, T>;

    /** @brief The explicitly-stored elements of a single matrix row. */
    using SparseRow = Vector<Element>;

    /** @brief Constructs a sparse matrix with no elements. */
    SparseMatrix();

    /**
     * @brief Constructs a sparse matrix with the contents of an initializer
     *        list of equally-sized initializer lists, each one representing a
     *        row of the matrix.
     * @param init An initializer list holding initializer lists of elements of
     *        type @c T.
     */
    SparseMatrix(std::initializer_list<std::initializer_list<T>> init);

    /**
     * @brief Constructs a sparse matrix with specified dimensions.
     * @param rows The number of matrix rows.
     * @param cols The number of matrix columns.
     */
    SparseMatrix(size_type rows, size_type cols);

    /**
     * @brief Constructs a sparse matrix with the elements of a vector.
     * @param rows The number of matrix rows.
     * @param cols The number of matrix columns.
     * @param elements A vector holding the <tt>rows × cols</tt> matrix elements
     *        in row-major order (i.e., the elements on the first matrix row
     *        followed by the elements on the second matrix row and so on).
     */
    SparseMatrix(size_type rows, size_type cols, const Vector<T>& elements);

    /**
     * @brief Copy constructor.
     * @param M The matrix from which all elements will be copied.
     */
    SparseMatrix(const SparseMatrix& M) = default;

    /**
     * @brief Move constructor.
     * @param M The matrix from which all elements will be moved.
     */
    SparseMatrix(SparseMatrix&& M) = default;

    /**
     * @brief Returns a reference to a matrix element.
     * @param i The row of the matrix element.
     * @param j The column of the matrix element.
     * @return A reference to the element <tt>(i,j)</tt> of the matrix.
     * @note When an element <tt>(i,j)</tt> which is not explicitly stored in
     *       the matrix is accessed, it will be stored and initially set to
     *       @c T{}.
     */
    T& operator()(size_type i, size_type j);

    /**
     * @brief Returns a const reference to a matrix element.
     * @param i The row of the matrix element.
     * @param j The column of the matrix element.
     * @return A const reference to the element <tt>(i,j)</tt> of the matrix.
     * @note When an element <tt>(i,j)</tt> which is not explicitly stored in
     *       the matrix is accessed, a reference to a statically-stored @c T{}
     *       object will be returned, i.e., the element will not be stored in
     *       the matrix due to the fact that it was accessed.
     */
    const T& operator()(size_type i, size_type j) const;

    /**
     * @brief Copy-assignment operator.
     * @param M The matrix from which all elements will be copied.
     * @return A reference to @c *this.
     */
    SparseMatrix& operator=(const SparseMatrix& M) = default;

    /**
     * @brief Move-assignment operator.
     * @param M The matrix from which all elements will be moved.
     * @return A reference to @c *this.
     */
    SparseMatrix& operator=(SparseMatrix&& M) = default;

    /**
     * @brief Equality-comparison operator.
     * @param M A matrix to compare against.
     * @return @c true if the matrix is equal to @c M, @c false otherwise.
     */
    bool operator==(const SparseMatrix& M) const;

    /**
     * @brief Inequality-comparison operator.
     * @param M A matrix to compare against.
     * @return @c true if the matrix is not equal to @c M, @c false otherwise.
     */
    bool operator!=(const SparseMatrix& M) const;

    /**
     * @brief Multiplies all elements of the matrix by a scalar.
     * @param c A scalar.
     * @return A reference to @c *this.
     */
    SparseMatrix& operator*=(const T& c);

    /**
     * @brief Divides all elements of the matrix by a scalar.
     * @param c A scalar.
     * @return A reference to @c *this.
     */
    SparseMatrix& operator/=(const T& c);

    /**
     * @brief Performs element-wise addition-assignment with another matrix.
     * @param M A sparse matrix.
     * @return A reference to @c *this.
     */
    SparseMatrix& operator+=(const SparseMatrix& M);

    /**
     * @brief Performs element-wise subtraction-assignment with another matrix.
     * @param M A sparse matrix.
     * @return A reference to @c *this.
     */
    SparseMatrix& operator-=(const SparseMatrix& M);

    /**
     * @brief Computes the transpose of the matrix.
     * @return A copy of the transpose of the matrix.
     */
    SparseMatrix transpose() const;

    /**
     * @brief Computes the max-norm of the matrix.
     * @return The magnitude of the largest-in-magnitude matrix element.
     */
    T max_norm() const;

    /**
     * @brief Checks if the matrix has the same number of rows and columns.
     * @return @c true if the matrix is square, @c false otherwise.
     */
    bool is_square() const;

    /**
     * @brief Checks if the matrix is symmetric.
     * @return @c true if the matrix is symmetric, @c false otherwise.
     */
    bool is_symmetric() const;

    /**
     * @brief Gets the number of rows in the matrix.
     * @return The number of rows in the matrix.
     */
    size_type num_rows() const;

    /**
     * @brief Gets the number of columns in the matrix.
     * @return The number of columns in the matrix.
     */
    size_type num_cols() const;

    /**
     * @brief Checks if the matrix is empty.
     * @return @c true if the matrix is empty, @c false otherwise.
     */
    bool empty() const;

    /**
     * @brief Resizes the matrix.
     * @param rows The new number of matrix rows.
     * @param cols The new number of matrix columns.
     */
    void resize(size_type rows, size_type cols);

    /**
     * @brief Clears the matrix.
     */
    void clear();

    /**
     * @brief Returns the matrix elements as a vector (in row-major order).
     * @return The elements of the matrix stored on a vector, with the first
     *         row elements appearing first, then the second row elements and
     *         so on.
     */
    Vector<T> flatten() const;

    /**
     * @brief Returns the number of elements stored explicitly in the matrix.
     * @return The number of elements stored explicitly in the matrix.
     */
    size_type num_stored() const;

    /**
     * @brief Returns a const reference to the representation of a matrix row.
     * @param i The row number.
     * @return The elements stored on the <tt>i</tt>-th matrix row.
     */
    const SparseRow& row(size_type i) const;

private:
    size_type cols_;             /* number of matrix columns */
    Vector<SparseRow> elements_; /* matrix elements (vector of sparse rows) */
    /* NOTE(review): the row count is presumably elements_.size() — there is
     * no separate rows_ member; confirm against the implementation file. */

}; /* class SparseMatrix<T> */
/**
* @brief Computes the multiplication of a sparse matrix by a scalar on the
* right.
* @param M A sparse matrix.
* @param c A scalar.
* @return A copy of @c M with all elements multiplied by @c c.
*/
template<typename T>
SparseMatrix<T> operator*(const SparseMatrix<T>& M, const T& c);
/**
* @brief Computes the multiplication of a sparse matrix by a scalar on the
* left.
* @param c A scalar.
* @param M A sparse matrix.
* @return A copy of @c M with all elements multiplied by @c c.
*/
template<typename T>
SparseMatrix<T> operator*(const T& c, const SparseMatrix<T>& M);
/**
* @brief Computes the multiplication of two sparse matrices.
* @param M1 a sparse matrix.
* @param M2 a sparse matrix.
 * @return A sparse matrix which is the result of multiplying @c M1 and @c M2.
*/
template<typename T>
SparseMatrix<T> operator*(const SparseMatrix<T>& M1, const SparseMatrix<T>& M2);
/**
* @brief Computes the multiplication of a sparse matrix and a dense matrix.
* @param M1 a sparse matrix.
* @param M2 a dense matrix.
 * @return A dense matrix which is the result of multiplying @c M1 and @c M2.
*/
template<typename T>
Matrix<T> operator*(const SparseMatrix<T>& M1, const Matrix<T>& M2);
/**
* @brief Computes the multiplication of a dense matrix and a sparse matrix.
* @param M1 a dense matrix.
* @param M2 a sparse matrix.
 * @return A dense matrix which is the result of multiplying @c M1 and @c M2.
*/
template<typename T>
Matrix<T> operator*(const Matrix<T>& M1, const SparseMatrix<T>& M2);
/**
* @brief Computes the multiplication of a sparse matrix and a vector.
* @param M a sparse matrix.
* @param v a vector (interpreted as a column vector).
 * @return A vector which is the result of multiplying @c M and @c v.
*/
template<typename T>
Vector<T> operator*(const SparseMatrix<T>& M, const Vector<T>& v);
/**
* @brief Computes the multiplication of a vector and a sparse matrix.
* @param v a vector (interpreted as a row vector).
* @param M a sparse matrix.
 * @return A vector which is the result of multiplying @c v and @c M.
*/
template<typename T>
Vector<T> operator*(const Vector<T>& v, const SparseMatrix<T>& M);
/**
* @brief Computes the division of a sparse matrix by a scalar.
* @param M A sparse matrix.
* @param c A scalar.
* @return A copy of @c M with all elements divided by @c c.
*/
template<typename T>
SparseMatrix<T> operator/(const SparseMatrix<T>& M, const T& c);
/**
* @brief Computes the matrix addition of two equally-sized sparse matrices.
* @param M1 A sparse matrix.
* @param M2 A sparse matrix.
* @return The element-wise sum of @c M1 and @c M2.
*/
template<typename T>
SparseMatrix<T> operator+(const SparseMatrix<T>& M1, const SparseMatrix<T>& M2);
/**
* @brief Computes the matrix addition of a dense matrix and an equally-sized
* sparse matrix.
* @param M1 A dense matrix.
* @param M2 A sparse matrix.
* @return The element-wise sum of @c M1 and @c M2.
*/
template<typename T>
Matrix<T> operator+(const Matrix<T>& M1, const SparseMatrix<T>& M2);
/**
* @brief Computes the matrix addition of a sparse matrix and an equally-sized
* dense matrix.
* @param M1 A sparse matrix.
* @param M2 A dense matrix.
* @return The element-wise sum of @c M1 and @c M2.
*/
template<typename T>
Matrix<T> operator+(const SparseMatrix<T>& M1, const Matrix<T>& M2);
/**
* @brief Computes the matrix difference of two equally-sized sparse matrices.
* @param M1 A sparse matrix.
* @param M2 A sparse matrix.
* @return The element-wise difference between @c M1 and @c M2.
*/
template<typename T>
SparseMatrix<T> operator-(const SparseMatrix<T>& M1, const SparseMatrix<T>& M2);
/**
* @brief Computes the matrix difference of a dense matrix and an equally-sized
* sparse matrix.
* @param M1 A dense matrix.
* @param M2 A sparse matrix.
* @return The element-wise difference of @c M1 and @c M2.
*/
template<typename T>
Matrix<T> operator-(const Matrix<T>& M1, const SparseMatrix<T>& M2);
/**
* @brief Computes the matrix difference of a sparse matrix and an equally-sized
* dense matrix.
* @param M1 A sparse matrix.
* @param M2 A dense matrix.
* @return The element-wise difference of @c M1 and @c M2.
*/
template<typename T>
Matrix<T> operator-(const SparseMatrix<T>& M1, const Matrix<T>& M2);
/**
* @brief Computes the element-wise negation of a sparse matrix.
* @param M A sparse matrix.
* @return The element-wise negation of @c M.
*/
template<typename T>
SparseMatrix<T> operator-(const SparseMatrix<T>& M);
/**
* @brief Prints the elements of a sparse matrix to an output stream.
* @param stream An output stream.
* @param M A sparse matrix.
* @return A reference to @c stream.
*/
template<typename T>
std::ostream& operator<<(std::ostream& stream, const SparseMatrix<T>& M);
/**
* @brief Generates a random sparse matrix with elements within a given range.
* @param rows The number of matrix rows.
* @param cols The number of matrix columns.
* @param nonzero The number of nonzero elements.
* @param lower_bound The lower bound for the sample interval.
* @param upper_bound The upper bound for the sample interval.
* @return A <tt>rows × cols</tt> matrix with @c nonzero elements sampled
* uniformly from <tt>[lower_bound, upper_bound]</tt>.
* @note This function was designed to work only with primitive integer and
* floating-point types (e.g. @c int, @c float, @c double etc.).
*/
template<typename T>
SparseMatrix<T> random_sparse_matrix(
const typename SparseMatrix<T>::size_type rows,
const typename SparseMatrix<T>::size_type cols,
const typename SparseMatrix<T>::size_type nonzero,
const T& lower_bound = T{0},
const T& upper_bound = T{1});
/**
* @brief Generates an identity matrix stored as a sparse matrix.
* @param rows The number of matrix rows.
* @return A @c rows × @c rows identity matrix stored as a sparse matrix.
*/
template<typename T>
SparseMatrix<T> identity_sparse_matrix(
const typename SparseMatrix<T>::size_type rows);
/*******************************************************************************
*
* FUNCTION DEFINITIONS
*
******************************************************************************/
/**
 * @brief Constructs an empty (0 x 0) sparse matrix.
 */
template<typename T>
SparseMatrix<T>::SparseMatrix() : cols_{0}, elements_{}
{
    /* the member initializers above do all the work */
}
/**
 * @brief Constructs the matrix from a nested initializer list, storing
 *        only the nonzero elements.
 * @note Every row of @c init must have the same number of elements.
 */
template<typename T>
SparseMatrix<T>::SparseMatrix(
    std::initializer_list<std::initializer_list<T>> init)
    : cols_(init.size() > 0 ? init.begin()->size() : 0), elements_(init.size())
{
    if (cols_ == 0)
    {
        /* no columns: canonicalize to an empty (0 x 0) matrix */
        clear();
        return;
    }
    size_type i = 0;
    for (const auto& source_row : init)
    {
        FREEAML_ASSERT(source_row.size() == cols_);
        size_type j = 0;
        for (const T& value : source_row)
        {
            /* implicit zeros are never stored */
            if (value != T{})
            {
                elements_[i].push_back(Element(j, value));
            }
            ++j;
        }
        ++i;
    }
}
/**
 * @brief Constructs a rows x cols matrix with no stored (i.e. all-zero)
 *        elements.
 */
template<typename T>
SparseMatrix<T>::SparseMatrix(const size_type rows, const size_type cols)
    : cols_(cols), elements_(rows)
{
    /* a matrix with no rows or no columns is canonicalized to 0 x 0 */
    if (rows == 0 || cols == 0)
    {
        clear();
    }
}
/**
 * @brief Constructs a rows x cols matrix from a flat row-major element
 *        vector, storing only the nonzero values.
 * @note @c elements must hold exactly rows*cols values.
 */
template<typename T>
SparseMatrix<T>::SparseMatrix(const size_type rows,
                              const size_type cols,
                              const Vector<T>& elements)
    : cols_(cols), elements_(rows)
{
    if (rows == 0 || cols == 0)
    {
        clear();
        return;
    }
    FREEAML_ASSERT(rows * cols == elements.size());
    /* single pass over the flat row-major input; index = i*cols + j, so
     * column indices within each row arrive in increasing order and
     * push_back keeps the rows sorted */
    for (size_type index = 0; index < elements.size(); ++index)
    {
        const T& value = elements[index];
        if (value != T{})
        {
            elements_[index / cols].push_back(Element(index % cols, value));
        }
    }
}
/**
 * Mutable access to element (i, j).
 *
 * Each row keeps its stored elements sorted by column index, so the slot
 * is located with a binary search.  If (i, j) is not stored yet, a
 * value-initialized element is inserted at the sorted position and a
 * reference to it is returned; note this can leave an explicitly stored
 * zero in the row if the caller never assigns a nonzero value.
 */
template<typename T>
T& SparseMatrix<T>::operator()(const size_type i, const size_type j)
{
    FREEAML_ASSERT(i < num_rows() && j < num_cols());
    auto iterator = std::lower_bound(
        elements_[i].begin(), elements_[i].end(), Element(j, T{}),
        [](const Element& element1, const Element& element2) {
            return element1.first < element2.first;
        });
    if (iterator == elements_[i].end() || iterator->first != j)
    {
        /* not stored yet: insert a default (zero) element in sorted order */
        iterator = elements_[i].insert(iterator, Element(j, T{}));
    }
    return iterator->second;
}
/**
 * Read-only access to element (i, j).
 *
 * Performs a binary search on the (column-sorted) row.  If the element is
 * not stored, a reference to a function-local static zero is returned;
 * that object is shared by all instantiations of this operator for a given
 * T and must never be modified (e.g. via const_cast).
 */
template<typename T>
const T& SparseMatrix<T>::operator()(const size_type i, const size_type j) const
{
    FREEAML_ASSERT(i < num_rows() && j < num_cols());
    static const T zero = T{};
    auto iterator = std::lower_bound(
        elements_[i].begin(), elements_[i].end(), Element(j, T{}),
        [](const Element& element1, const Element& element2) {
            return element1.first < element2.first;
        });
    if (iterator == elements_[i].end() || iterator->first != j)
    {
        /* implicit zero: not stored explicitly */
        return zero;
    }
    return iterator->second;
}
/**
 * @brief Equality comparison.
 *
 * Two matrices are equal when they have the same dimensions and every
 * element (stored or implicit zero) compares equal.
 *
 * Bug fix: the previous implementation only compared the elements stored
 * on *this against M, so a nonzero stored only on M (at a position where
 * *this keeps an implicit zero) was never inspected and the matrices were
 * wrongly reported equal.  Both directions are now checked.
 */
template<typename T>
bool SparseMatrix<T>::operator==(const SparseMatrix<T>& M) const
{
    if (num_rows() != M.num_rows() || num_cols() != M.num_cols())
    {
        return false;
    }
    for (size_type i = 0; i < num_rows(); ++i)
    {
        /* every element stored on this matrix must match M */
        for (const Element& element : elements_[i])
        {
            if (element.second != M(i, element.first))
            {
                return false;
            }
        }
        /* every element stored only on M must match the (possibly
         * implicit-zero) value of this matrix */
        for (const Element& element : M.elements_[i])
        {
            if (element.second != (*this)(i, element.first))
            {
                return false;
            }
        }
    }
    return true;
}
/**
 * @brief Inequality comparison: the negation of operator==.
 */
template<typename T>
bool SparseMatrix<T>::operator!=(const SparseMatrix<T>& M) const
{
    return !(*this == M);
}
/**
 * @brief Multiplies every stored element by the scalar @c c (implicit
 *        zeros stay zero, so only stored elements need touching).
 */
template<typename T>
SparseMatrix<T>& SparseMatrix<T>::operator*=(const T& c)
{
    for (SparseRow& sparse_row : elements_)
    {
        for (Element& entry : sparse_row)
        {
            entry.second *= c;
        }
    }
    return *this;
}
/**
 * @brief Divides every stored element by the scalar @c c (implicit zeros
 *        stay zero, so only stored elements need touching).
 */
template<typename T>
SparseMatrix<T>& SparseMatrix<T>::operator/=(const T& c)
{
    for (SparseRow& sparse_row : elements_)
    {
        for (Element& entry : sparse_row)
        {
            entry.second /= c;
        }
    }
    return *this;
}
/**
 * Element-wise addition-assignment: *this += M.
 *
 * Parallelism: each OpenMP iteration i reads only M.elements_[i] and
 * inserts/updates only elements_[i] of this matrix, so no two threads ever
 * touch the same row and the loop is data-race free even though
 * operator() itself is not thread safe within a shared row.
 */
template<typename T>
SparseMatrix<T>& SparseMatrix<T>::operator+=(const SparseMatrix<T>& M)
{
    FREEAML_ASSERT(num_rows() == M.num_rows());
    FREEAML_ASSERT(num_cols() == M.num_cols());
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
    for (size_type i = 0; i < num_rows(); ++i)
    {
        /* only elements stored explicitly on M can change this row */
        for (const Element& element : M.elements_[i])
        {
            (*this)(i, element.first) += element.second;
        }
    }
    return *this;
}
/**
 * Element-wise subtraction-assignment: *this -= M.
 *
 * Parallelism: each OpenMP iteration i reads only M.elements_[i] and
 * inserts/updates only elements_[i] of this matrix, so rows are handled
 * by distinct threads without data races.
 */
template<typename T>
SparseMatrix<T>& SparseMatrix<T>::operator-=(const SparseMatrix<T>& M)
{
    FREEAML_ASSERT(num_rows() == M.num_rows());
    FREEAML_ASSERT(num_cols() == M.num_cols());
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
    for (size_type i = 0; i < num_rows(); ++i)
    {
        /* only elements stored explicitly on M can change this row */
        for (const Element& element : M.elements_[i])
        {
            (*this)(i, element.first) -= element.second;
        }
    }
    return *this;
}
/**
 * @brief Builds and returns the transpose: result(j, i) = (*this)(i, j).
 */
template<typename T>
SparseMatrix<T> SparseMatrix<T>::transpose() const
{
    SparseMatrix<T> transposed(num_cols(), num_rows());
    /* several source rows may insert into the same destination row, and
     * operator() is not thread safe, so this loop stays serial */
    for (size_type i = 0; i < num_rows(); ++i)
    {
        for (const Element& element : elements_[i])
        {
            transposed(element.first, i) = element.second;
        }
    }
    return transposed;
}
/**
 * Max-norm: the magnitude of the largest-in-magnitude element.
 *
 * Only explicitly stored elements are inspected; implicit zeros can never
 * exceed the zero-initialized starting value of @c norm.
 *
 * Parallel version: each thread accumulates a private local maximum over
 * its share of the rows (nowait: no barrier needed before the merge), then
 * merges it into the shared result under a critical section.
 */
template<typename T>
T SparseMatrix<T>::max_norm() const
{
    T norm{};
#ifdef _OPENMP
#pragma omp parallel
    {
        /* per-thread maximum, merged below */
        T local_norm{};
#pragma omp for nowait
        for (size_type i = 0; i < num_rows(); ++i)
        {
            for (const Element& element : elements_[i])
            {
                local_norm = std::max(local_norm, std::abs(element.second));
            }
        }
#pragma omp critical
        {
            norm = std::max(local_norm, norm);
        }
    }
#else
    /* serial implementation */
    for (size_type i = 0; i < num_rows(); ++i)
    {
        for (const Element& element : elements_[i])
        {
            norm = std::max(norm, std::abs(element.second));
        }
    }
#endif /* #ifdef _OPENMP */
    return norm;
}
/**
 * @brief Tells whether the matrix has as many rows as columns.
 */
template<typename T>
bool SparseMatrix<T>::is_square() const
{
    return num_rows() == num_cols();
}
/**
 * @brief Tells whether the matrix equals its own transpose.
 *
 * Every stored element (i, j) is compared against its mirror (j, i); a
 * nonzero whose mirror is an implicit zero is caught when the row holding
 * the nonzero is visited.
 */
template<typename T>
bool SparseMatrix<T>::is_symmetric() const
{
    if (!is_square())
    {
        return false;
    }
    for (size_type i = 0; i < num_rows(); ++i)
    {
        for (const Element& element : elements_[i])
        {
            /* compare stored (i, j) against its mirror (j, i) */
            if ((*this)(element.first, i) != element.second)
            {
                return false;
            }
        }
    }
    return true;
}
/**
 * @brief Returns the number of matrix rows.
 */
template<typename T>
typename SparseMatrix<T>::size_type SparseMatrix<T>::num_rows() const
{
    /* one SparseRow is kept per matrix row */
    return elements_.size();
}
/**
 * @brief Returns the number of matrix columns.
 */
template<typename T>
typename SparseMatrix<T>::size_type SparseMatrix<T>::num_cols() const
{
    return cols_;
}
/**
 * @brief Tells whether the matrix has no rows (a cleared/0 x 0 matrix).
 */
template<typename T>
bool SparseMatrix<T>::empty() const
{
    return elements_.empty();
}
/**
 * @brief Resizes the matrix to rows x cols.
 *
 * Stored elements whose column index falls outside the new width are
 * dropped; resizing either dimension to zero clears the whole matrix.
 */
template<typename T>
void SparseMatrix<T>::resize(const size_type rows, const size_type cols)
{
    if (rows == num_rows() && cols == num_cols())
    {
        return; /* dimensions unchanged: nothing to do */
    }
    if (rows == 0 || cols == 0)
    {
        clear();
        return;
    }
    if (cols < cols_)
    {
        /* shrink rows first: drop elements past the new column count */
        const auto out_of_range = [cols](const Element& element) {
            return element.first >= cols;
        };
        for (SparseRow& sparse_row : elements_)
        {
            sparse_row.erase(std::remove_if(sparse_row.begin(),
                                            sparse_row.end(),
                                            out_of_range),
                             sparse_row.end());
        }
    }
    cols_ = cols;
    elements_.resize(rows);
}
/**
 * @brief Empties the matrix, leaving it 0 x 0.
 */
template<typename T>
void SparseMatrix<T>::clear()
{
    cols_ = 0;
    elements_.clear();
}
/**
 * @brief Returns all matrix elements (including implicit zeros) as a flat
 *        row-major vector.
 */
template<typename T>
Vector<T> SparseMatrix<T>::flatten() const
{
    Vector<T> flat(num_rows() * num_cols(), T{});
    for (size_type i = 0; i < num_rows(); ++i)
    {
        const size_type offset = i * num_cols();
        for (const Element& element : elements_[i])
        {
            flat[offset + element.first] = element.second;
        }
    }
    return flat;
}
/**
 * @brief Returns how many elements are stored explicitly (this may include
 *        zeros inserted through the mutable operator()).
 */
template<typename T>
typename SparseMatrix<T>::size_type SparseMatrix<T>::num_stored() const
{
    size_type total = 0;
    for (const SparseRow& sparse_row : elements_)
    {
        total += sparse_row.size();
    }
    return total;
}
/**
 * @brief Read-only access to the representation of row @c i (its stored
 *        elements, sorted by column index).
 */
template<typename T>
const typename SparseMatrix<T>::SparseRow& SparseMatrix<T>::row(
    const size_type i) const
{
    FREEAML_ASSERT(i < num_rows());
    return elements_[i];
}
/**
 * @brief Sparse matrix times scalar (scalar on the right).
 */
template<typename T>
SparseMatrix<T> operator*(const SparseMatrix<T>& M, const T& c)
{
    SparseMatrix<T> scaled = M;
    scaled *= c;
    return scaled;
}
/**
 * @brief Scalar times sparse matrix (scalar on the left); delegates to
 *        the right-scalar overload since the product commutes here.
 */
template<typename T>
SparseMatrix<T> operator*(const T& c, const SparseMatrix<T>& M)
{
    return M * c;
}
/**
 * Sparse * sparse product (row-by-row expansion).
 *
 * For every stored M1(i, k), each stored M2(k, j) contributes
 * M1(i,k) * M2(k,j) to result(i, j), so only nonzero pairs are visited.
 *
 * Parallelism: iteration i writes exclusively into row i of result, so
 * each thread owns a disjoint set of rows and the non-thread-safe
 * inserting operator() is used safely.
 */
template<typename T>
SparseMatrix<T> operator*(const SparseMatrix<T>& M1, const SparseMatrix<T>& M2)
{
    FREEAML_ASSERT(M1.num_cols() == M2.num_rows());
    using size_type = typename SparseMatrix<T>::size_type;
    using Element = typename SparseMatrix<T>::Element;
    SparseMatrix<T> result(M1.num_rows(), M2.num_cols());
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
    for (size_type i = 0; i < M1.num_rows(); ++i)
    {
        for (const Element& element1 : M1.row(i))
        {
            /* element1 = M1(i, k); scan row k of M2 */
            for (const Element& element2 : M2.row(element1.first))
            {
                result(i, element2.first) += element1.second * element2.second;
            }
        }
    }
    return result;
}
/**
 * Sparse * dense product.
 *
 * Each stored M1(i, k) scales row k of the dense M2 into row i of the
 * dense result; implicit zeros of M1 contribute nothing and are skipped.
 * Parallelism: iteration i writes only row i of result, so rows are
 * processed by distinct threads without races.
 */
template<typename T>
Matrix<T> operator*(const SparseMatrix<T>& M1, const Matrix<T>& M2)
{
    FREEAML_ASSERT(M1.num_cols() == M2.num_rows());
    using size_type = typename SparseMatrix<T>::size_type;
    using Element = typename SparseMatrix<T>::Element;
    Matrix<T> result(M1.num_rows(), M2.num_cols(), T{});
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
    for (size_type i = 0; i < M1.num_rows(); ++i)
    {
        for (const Element& element : M1.row(i))
        {
            /* element = M1(i, k); add element * (row k of M2) to row i */
            for (size_type j = 0; j < M2.num_cols(); ++j)
            {
                result(i, j) += element.second * M2(element.first, j);
            }
        }
    }
    return result;
}
/**
 * Dense * sparse product.
 *
 * For each dense element M1(i, k), only the stored elements of row k of
 * M2 generate contributions to row i of the result.
 * Parallelism: iteration i writes only row i of result, so rows are
 * processed by distinct threads without races.
 */
template<typename T>
Matrix<T> operator*(const Matrix<T>& M1, const SparseMatrix<T>& M2)
{
    FREEAML_ASSERT(M1.num_cols() == M2.num_rows());
    using size_type = typename SparseMatrix<T>::size_type;
    using Element = typename SparseMatrix<T>::Element;
    Matrix<T> result(M1.num_rows(), M2.num_cols(), T{});
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
    for (size_type i = 0; i < M1.num_rows(); ++i)
    {
        for (size_type k = 0; k < M1.num_cols(); ++k)
        {
            /* only the stored elements of row k of M2 contribute */
            for (const Element& element : M2.row(k))
            {
                result(i, element.first) += M1(i, k) * element.second;
            }
        }
    }
    return result;
}
/**
 * Sparse matrix times (column) vector.
 *
 * Each result entry i is the dot product of the stored elements of row i
 * with the matching entries of v.  Parallelism: iteration i writes only
 * result[i], so the loop is race free.
 */
template<typename T>
Vector<T> operator*(const SparseMatrix<T>& M, const Vector<T>& v)
{
    FREEAML_ASSERT(M.num_cols() == v.size());
    using size_type = typename SparseMatrix<T>::size_type;
    using Element = typename SparseMatrix<T>::Element;
    Vector<T> result(M.num_rows(), T{});
#ifdef _OPENMP
#pragma omp parallel for
#endif /* _OPENMP */
    for (size_type i = 0; i < M.num_rows(); ++i)
    {
        for (const Element& element : M.row(i))
        {
            result[i] += element.second * v[element.first];
        }
    }
    return result;
}
/**
 * (Row) vector times sparse matrix.
 *
 * Unlike M * v, each matrix row scatters into many result entries, so in
 * the parallel version every thread accumulates into a private vector
 * over its share of the rows (nowait: no barrier needed before the merge)
 * and the partial results are summed under a critical section.
 */
template<typename T>
Vector<T> operator*(const Vector<T>& v, const SparseMatrix<T>& M)
{
    FREEAML_ASSERT(v.size() == M.num_rows());
    using size_type = typename SparseMatrix<T>::size_type;
    using Element = typename SparseMatrix<T>::Element;
    Vector<T> result(M.num_cols(), T{});
#ifdef _OPENMP
#pragma omp parallel
    {
        /* per-thread partial result, merged below */
        Vector<T> local_result(M.num_cols(), T{});
#pragma omp for nowait
        for (size_type i = 0; i < M.num_rows(); ++i)
        {
            for (const Element& element : M.row(i))
            {
                local_result[element.first] += v[i] * element.second;
            }
        }
#pragma omp critical
        {
            result += local_result;
        }
    }
#else
    /* serial implementation */
    for (size_type i = 0; i < M.num_rows(); ++i)
    {
        for (const Element& element : M.row(i))
        {
            result[element.first] += v[i] * element.second;
        }
    }
#endif /* _OPENMP */
    return result;
}
/**
 * @brief Sparse matrix divided by a scalar.
 */
template<typename T>
SparseMatrix<T> operator/(const SparseMatrix<T>& M, const T& c)
{
    SparseMatrix<T> quotient = M;
    quotient /= c;
    return quotient;
}
/**
 * @brief Element-wise sum of two equally-sized sparse matrices.
 */
template<typename T>
SparseMatrix<T> operator+(const SparseMatrix<T>& M1, const SparseMatrix<T>& M2)
{
    SparseMatrix<T> sum = M1;
    sum += M2;
    return sum;
}
/**
 * @brief Element-wise sum of a dense matrix and an equally-sized sparse
 *        matrix; only the stored sparse elements need folding in.
 */
template<typename T>
Matrix<T> operator+(const Matrix<T>& M1, const SparseMatrix<T>& M2)
{
    FREEAML_ASSERT(M1.num_rows() == M2.num_rows());
    FREEAML_ASSERT(M1.num_cols() == M2.num_cols());
    using size_type = typename Matrix<T>::size_type;
    using Element = typename SparseMatrix<T>::Element;
    /* start from the dense matrix and add each stored sparse element */
    Matrix<T> result = M1;
    for (size_type i = 0; i < M2.num_rows(); ++i)
    {
        for (const Element& element : M2.row(i))
        {
            result(i, element.first) += element.second;
        }
    }
    return result;
}
/**
 * @brief Element-wise sum of a sparse matrix and an equally-sized dense
 *        matrix; addition commutes, so delegate to the mirrored overload.
 */
template<typename T>
Matrix<T> operator+(const SparseMatrix<T>& M1, const Matrix<T>& M2)
{
    return M2 + M1;
}
/**
 * @brief Element-wise difference of two equally-sized sparse matrices.
 */
template<typename T>
SparseMatrix<T> operator-(const SparseMatrix<T>& M1, const SparseMatrix<T>& M2)
{
    SparseMatrix<T> difference = M1;
    difference -= M2;
    return difference;
}
/**
 * @brief Element-wise difference of a dense matrix and an equally-sized
 *        sparse matrix; only the stored sparse elements are subtracted.
 */
template<typename T>
Matrix<T> operator-(const Matrix<T>& M1, const SparseMatrix<T>& M2)
{
    FREEAML_ASSERT(M1.num_rows() == M2.num_rows());
    FREEAML_ASSERT(M1.num_cols() == M2.num_cols());
    using size_type = typename Matrix<T>::size_type;
    using Element = typename SparseMatrix<T>::Element;
    /* start from the dense matrix and subtract each stored sparse element */
    Matrix<T> result = M1;
    for (size_type i = 0; i < M2.num_rows(); ++i)
    {
        for (const Element& element : M2.row(i))
        {
            result(i, element.first) -= element.second;
        }
    }
    return result;
}
/**
 * @brief Element-wise difference of a sparse matrix and an equally-sized
 *        dense matrix: negate the dense side, then add the stored sparse
 *        elements on top.
 */
template<typename T>
Matrix<T> operator-(const SparseMatrix<T>& M1, const Matrix<T>& M2)
{
    FREEAML_ASSERT(M1.num_rows() == M2.num_rows());
    FREEAML_ASSERT(M1.num_cols() == M2.num_cols());
    using size_type = typename Matrix<T>::size_type;
    using Element = typename SparseMatrix<T>::Element;
    Matrix<T> result = -M2;
    for (size_type i = 0; i < M1.num_rows(); ++i)
    {
        for (const Element& element : M1.row(i))
        {
            result(i, element.first) += element.second;
        }
    }
    return result;
}
/**
 * @brief Element-wise negation: only the stored elements of @c M need to
 *        be written into the (all-zero) result.
 */
template<typename T>
SparseMatrix<T> operator-(const SparseMatrix<T>& M)
{
    using size_type = typename SparseMatrix<T>::size_type;
    using Element = typename SparseMatrix<T>::Element;
    SparseMatrix<T> negated(M.num_rows(), M.num_cols());
    for (size_type i = 0; i < M.num_rows(); ++i)
    {
        for (const Element& element : M.row(i))
        {
            negated(i, element.first) = -element.second;
        }
    }
    return negated;
}
/**
 * @brief Prints the matrix in dense form, e.g. "[[1, 0], [0, 2]]"
 *        (rows are separated by "], [", with no newlines).
 */
template<typename T>
std::ostream& operator<<(std::ostream& stream, const SparseMatrix<T>& M)
{
    using size_type = typename SparseMatrix<T>::size_type;
    for (size_type i = 0; i < M.num_rows(); ++i)
    {
        stream << (i == 0 ? "[[" : " [");
        for (size_type j = 0; j < M.num_cols(); ++j)
        {
            stream << M(i, j);
            if (j + 1 != M.num_cols())
            {
                stream << ", ";
            }
        }
        stream << (i + 1 == M.num_rows() ? "]]" : "],");
    }
    return stream;
}
/**
 * Generates a rows x cols sparse matrix with exactly @c nonzero nonzero
 * elements sampled uniformly from [lower_bound, upper_bound].
 *
 * Uses rejection sampling: random (i, j) positions and values are drawn
 * until @c nonzero distinct positions hold a nonzero value; a draw is
 * rejected when the value is zero or the position is already occupied.
 * NOTE(review): when @c nonzero is close to rows*cols, or zero is a likely
 * draw for an integral T, this loop can iterate many times — presumably
 * acceptable for the intended test-fixture use; confirm with callers.
 */
template<typename T>
SparseMatrix<T> random_sparse_matrix(
    const typename SparseMatrix<T>::size_type rows,
    const typename SparseMatrix<T>::size_type cols,
    const typename SparseMatrix<T>::size_type nonzero,
    const T& lower_bound /* = T{0} */,
    const T& upper_bound /* = T{1} */)
{
    FREEAML_ASSERT(nonzero <= rows * cols);
    FREEAML_ASSERT(lower_bound < upper_bound);
    if (rows == 0 || cols == 0)
    {
        return {};
    }
    /* integral T samples integers, floating-point T samples reals */
    using DistributionType =
        typename std::conditional<std::is_integral<T>::value,
                                  std::uniform_int_distribution<T>,
                                  std::uniform_real_distribution<T>>::type;
    using size_type = typename SparseMatrix<T>::size_type;
    std::random_device device;
    std::mt19937_64 generator(device());
    std::uniform_int_distribution<size_type> row_chooser(0, rows - 1);
    std::uniform_int_distribution<size_type> col_chooser(0, cols - 1);
    DistributionType distribution(lower_bound, upper_bound);
    SparseMatrix<T> result(rows, cols);
    size_type count = 0;
    while (count < nonzero)
    {
        /* choose an element (i,j) */
        size_type i = row_chooser(generator);
        size_type j = col_chooser(generator);
        T x = distribution(generator);
        /* accept only a nonzero value landing on a still-empty position */
        if (x != T{} && result(i, j) == T{})
        {
            result(i, j) = x;
            ++count;
        }
    }
    return result;
}
/**
 * @brief Builds a rows x rows identity matrix in sparse storage (only the
 *        diagonal is stored).
 */
template<typename T>
SparseMatrix<T> identity_sparse_matrix(
    const typename SparseMatrix<T>::size_type rows)
{
    using size_type = typename SparseMatrix<T>::size_type;
    SparseMatrix<T> identity(rows, rows);
    for (size_type i = 0; i < rows; ++i)
    {
        identity(i, i) = T{1};
    }
    return identity;
}
} /* namespace freeaml */
|
dgemm_2_save.c | #define max(a,b) (((a) < (b))? (b) : (a))
#define min(a,b) (((a) < (b))? (a) : (b))
#define _TH_1 2
#include <omp.h>
/*
 * Auto-generated, tiled and unrolled DGEMM kernel (column-major):
 *     C = alpha * A * B + beta * C
 * with leading dimensions lda/ldb/ldc.
 *
 * Blocking scheme: N is blocked by 256 (j_bk_1, distributed across OpenMP
 * threads) and then by 32 (j_bk_2); M is blocked by 32 (i_bk_3); K is
 * blocked by 32 (l_bk_4).  Full j-tiles are unrolled 4-way over j; the
 * leftover j/i/K ranges are handled by dedicated cleanup loops below.
 * beta * C is applied exactly once, inside the very first K-block
 * (l_bk_4 == 0, l == 0) of each C tile.
 *
 * NOTE(review): the first-K-block guard `(l_bk_4=0)<-31+K` (i.e. K > 31)
 * means that for K <= 31 neither the beta scaling nor any accumulation is
 * ever executed in any path — the kernel appears to assume K > 31 (and the
 * generator's tile-size constraints in general); confirm against the
 * driver that produced this code.
 *
 * The `@;BEGIN(...)` comments are annotations consumed by the
 * source-to-source transformation tool and must be preserved.
 */
void dgemm_test(const int M,const int N,const int K,const double alpha,const double* A,const int lda,const double* B,const int ldb,const double beta,double* C,const int ldc) {
  /* plain loop indices */
  int i;
  int j;
  int l;
  /* blocking (tile-origin) indices */
  int j_bk_1;
  int j_bk_2;
  int i_bk_3;
  int l_bk_4;
  /* scalar copies of C / A / B operands (4-way unroll over j) */
  double _C_cp_0_0;
  double _C_cp_1_0;
  double _C_cp_2_0;
  double _C_cp_3_0;
  double _A_cp_0_0;
  double _B_cp_0_0;
  double _B_cp_1_0;
  double _B_cp_2_0;
  double _B_cp_3_0;
  omp_set_num_threads(_TH_1);
#pragma omp parallel
  {
    /* outer 256-wide N blocks are distributed across the threads */
    /*@;BEGIN(nest1_group3=Nest)@*/#pragma omp for private(l,i,j,j_bk_1,j_bk_2,i_bk_3,l_bk_4,_C_cp_0_0,_C_cp_1_0,_C_cp_2_0,_C_cp_3_0,_A_cp_0_0,_B_cp_0_0,_B_cp_1_0,_B_cp_2_0,_B_cp_3_0)
    for (j_bk_1=0; j_bk_1<N; j_bk_1+=256)
    {
      /* full 32-wide j tiles of this N block */
      /*@;BEGIN(nest1_group2=Nest)@*/for (j_bk_2=0; j_bk_2<-31+min(256,N-j_bk_1); j_bk_2+=32)
      {
        /* full 32-tall i tiles */
        for (i_bk_3=0; i_bk_3<-31+M; i_bk_3+=32)
        {
          /* first K block: applies beta*C before accumulating (K > 31 only) */
          if ((l_bk_4=0)<-31+K)
          {
            for (j=0; j<32; j+=4)
            {
              for (i=0; i<32; i+=1)
              {
                _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))];
                _C_cp_1_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))];
                _C_cp_2_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))];
                _C_cp_3_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))];
                /* peeled l == 0 iteration: fuses the beta scaling */
                l = 0;
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))];
                  _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))];
                  _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = beta*_C_cp_0_0;
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                  _C_cp_1_0 = beta*_C_cp_1_0;
                  _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0;
                  _C_cp_2_0 = beta*_C_cp_2_0;
                  _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0;
                  _C_cp_3_0 = beta*_C_cp_3_0;
                  _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0;
                }
                for (l=1; l<32; l+=1)
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))];
                  _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))];
                  _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                  _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0;
                  _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0;
                  _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0;
                }
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))] = _C_cp_1_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))] = _C_cp_2_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))] = _C_cp_3_0;
              }
            }
          }
          /* remaining full K blocks: pure accumulation (no beta) */
          for (l_bk_4=32; l_bk_4<-31+K; l_bk_4+=32)
          {
            /*@;BEGIN(nest1=Nest)@*/for (j=0; j<32; j+=4)
            {
              /*@;BEGIN(nest3=Nest)@*/for (i=0; i<32; i+=1)
              {
                _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))];
                _C_cp_1_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))];
                _C_cp_2_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))];
                _C_cp_3_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))];
                /*@;BEGIN(nest2=Nest)@*/for (l=0; l<32; l+=1)
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))];
                  _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))];
                  _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                  _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0;
                  _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0;
                  _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0;
                }
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))] = _C_cp_1_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))] = _C_cp_2_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))] = _C_cp_3_0;
              }
            }
          }
          /* K remainder (< 32 columns of A / rows of B left) */
          if (l_bk_4<K)
          {
            for (j=0; j<32; j+=4)
            {
              for (i=0; i<32; i+=1)
              {
                _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))];
                _C_cp_1_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))];
                _C_cp_2_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))];
                _C_cp_3_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))];
                for (l=0; l<K-l_bk_4; l+=1)
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))];
                  _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))];
                  _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                  _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0;
                  _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0;
                  _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0;
                }
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))] = _C_cp_1_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))] = _C_cp_2_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))] = _C_cp_3_0;
              }
            }
          }
        }
        /* i remainder (< 32 rows of C left): same three K phases as above */
        if (i_bk_3<M)
        {
          if ((l_bk_4=0)<-31+K)
          {
            for (j=0; j<32; j+=4)
            {
              for (i=0; i<M-i_bk_3; i+=1)
              {
                _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))];
                _C_cp_1_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))];
                _C_cp_2_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))];
                _C_cp_3_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))];
                /* peeled l == 0 iteration: fuses the beta scaling */
                l = 0;
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))];
                  _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))];
                  _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = beta*_C_cp_0_0;
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                  _C_cp_1_0 = beta*_C_cp_1_0;
                  _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0;
                  _C_cp_2_0 = beta*_C_cp_2_0;
                  _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0;
                  _C_cp_3_0 = beta*_C_cp_3_0;
                  _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0;
                }
                for (l=1; l<32; l+=1)
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))];
                  _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))];
                  _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                  _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0;
                  _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0;
                  _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0;
                }
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))] = _C_cp_1_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))] = _C_cp_2_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))] = _C_cp_3_0;
              }
            }
          }
          for (l_bk_4=32; l_bk_4<-31+K; l_bk_4+=32)
          {
            for (j=0; j<32; j+=4)
            {
              for (i=0; i<M-i_bk_3; i+=1)
              {
                _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))];
                _C_cp_1_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))];
                _C_cp_2_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))];
                _C_cp_3_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))];
                for (l=0; l<32; l+=1)
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))];
                  _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))];
                  _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                  _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0;
                  _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0;
                  _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0;
                }
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))] = _C_cp_1_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))] = _C_cp_2_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))] = _C_cp_3_0;
              }
            }
          }
          if (l_bk_4<K)
          {
            for (j=0; j<32; j+=4)
            {
              for (i=0; i<M-i_bk_3; i+=1)
              {
                _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))];
                _C_cp_1_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))];
                _C_cp_2_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))];
                _C_cp_3_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))];
                for (l=0; l<K-l_bk_4; l+=1)
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _B_cp_1_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+ldb))))];
                  _B_cp_2_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+2*ldb))))];
                  _B_cp_3_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+(j*ldb+3*ldb))))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                  _C_cp_1_0 = _C_cp_1_0+alpha*_A_cp_0_0*_B_cp_1_0;
                  _C_cp_2_0 = _C_cp_2_0+alpha*_A_cp_0_0*_B_cp_2_0;
                  _C_cp_3_0 = _C_cp_3_0+alpha*_A_cp_0_0*_B_cp_3_0;
                }
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+ldc))))] = _C_cp_1_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+2*ldc))))] = _C_cp_2_0;
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+(j*ldc+3*ldc))))] = _C_cp_3_0;
              }
            }
          }
        }
      }
      /* j remainder (< 32 columns of C left in this N block): scalar over j,
       * otherwise the same structure (full i tiles, i remainder; three K
       * phases each) without the 4-way j unroll */
      if (j_bk_2<min(256,N-j_bk_1))
      {
        for (i_bk_3=0; i_bk_3<-31+M; i_bk_3+=32)
        {
          if ((l_bk_4=0)<-31+K)
          {
            for (j=0; j<min(256-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1)
            {
              for (i=0; i<32; i+=1)
              {
                _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))];
                /* peeled l == 0 iteration: fuses the beta scaling */
                l = 0;
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = beta*_C_cp_0_0;
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                }
                for (l=1; l<32; l+=1)
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                }
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0;
              }
            }
          }
          for (l_bk_4=32; l_bk_4<-31+K; l_bk_4+=32)
          {
            for (j=0; j<min(256-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1)
            {
              for (i=0; i<32; i+=1)
              {
                _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))];
                for (l=0; l<32; l+=1)
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                }
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0;
              }
            }
          }
          if (l_bk_4<K)
          {
            for (j=0; j<min(256-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1)
            {
              for (i=0; i<32; i+=1)
              {
                _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))];
                for (l=0; l<K-l_bk_4; l+=1)
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                }
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0;
              }
            }
          }
        }
        /* i remainder of the j-remainder region */
        if (i_bk_3<M)
        {
          if ((l_bk_4=0)<-31+K)
          {
            for (j=0; j<min(256-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1)
            {
              for (i=0; i<M-i_bk_3; i+=1)
              {
                _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))];
                /* peeled l == 0 iteration: fuses the beta scaling */
                l = 0;
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = beta*_C_cp_0_0;
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                }
                for (l=1; l<32; l+=1)
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                }
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0;
              }
            }
          }
          for (l_bk_4=32; l_bk_4<-31+K; l_bk_4+=32)
          {
            for (j=0; j<min(256-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1)
            {
              for (i=0; i<M-i_bk_3; i+=1)
              {
                _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))];
                for (l=0; l<32; l+=1)
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                }
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0;
              }
            }
          }
          if (l_bk_4<K)
          {
            for (j=0; j<min(256-j_bk_2,-j_bk_2+(N-j_bk_1)); j+=1)
            {
              for (i=0; i<M-i_bk_3; i+=1)
              {
                _C_cp_0_0 = C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))];
                for (l=0; l<K-l_bk_4; l+=1)
                {
                  _B_cp_0_0 = B[l+(l_bk_4+(j_bk_1*ldb+(j_bk_2*ldb+j*ldb)))];
                  _A_cp_0_0 = A[i+(i_bk_3+(l_bk_4*lda+l*lda))];
                  _C_cp_0_0 = _C_cp_0_0+alpha*_A_cp_0_0*_B_cp_0_0;
                }
                C[i+(i_bk_3+(j_bk_1*ldc+(j_bk_2*ldc+j*ldc)))] = _C_cp_0_0;
              }
            }
          }
        }
      }
    }
  }
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values (GNU libc idiom).
 * Normalizes *y in place, so *y may be modified by the call.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow from the seconds field so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec  += borrow;
  }
  /* Carry in the other direction when the usec gap exceeds one second. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec  -= carry;
  }
  /* tv_usec of the result is now guaranteed non-negative. */
  result->tv_sec  = x->tv_sec  - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Order-1 3D 7-point variable-coefficient stencil benchmark driver.
 * Usage: prog [Nx Ny Nz [Nt]]; sizes get a +2 halo added. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults (interior 32^3, 10 steps) so the sizes are never read
   * uninitialized when command-line arguments are absent. */
  int Nx = 34, Ny = 34, Nz = 34;
  int Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A holds the two time levels, coef the 7 coefficients
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 24;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  // Initialize BOTH time levels of A and all coefficients, including the
  // index-0 boundary planes. The original loops started at 1 and never
  // touched A[1], so the stencil read uninitialized memory at the low
  // boundaries and on the second time step.
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    // Use the MIN macro defined at the top of this file; the original
    // called an undefined lowercase min().
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays, including the outer pointer arrays and the
  // tile-size list, which the original leaked.
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for (m = 0; m < 7; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
blake2bp-ref.c | /*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 4
/*
blake2b_init_param defaults to setting the expecting output length
from the digest_length parameter block field.
In some cases, however, we do not want this, as the output length
of these instances is given by inner_length instead.
*/
/* Initialize a leaf hashing state from parameter block P, but advertise
 * the output length given by P->inner_length rather than the root
 * digest_length (see the comment above: leaves emit inner hashes). */
static int blake2bp_init_leaf_param( blake2b_state *S, const blake2b_param *P )
{
  const int rc = blake2b_init_param( S, P );
  S->outlen = P->inner_length;
  return rc;
}
/* Set up the parameter block for leaf number `offset` of the blake2bp
 * tree (fanout = PARALLELISM_DEGREE, depth = 2, node_depth = 0) and
 * initialize S from it. The leaf's advertised output size is
 * inner_length = BLAKE2B_OUTBYTES, not `outlen` (handled by
 * blake2bp_init_leaf_param above).
 * NOTE(review): `offset` is uint64_t but only its low 32 bits are
 * stored via store32 into node_offset; the adjacent xof_length field is
 * zeroed separately -- confirm the field layout against blake2.h. */
static int blake2bp_init_leaf( blake2b_state *S, size_t outlen, size_t keylen, uint64_t offset )
{
blake2b_param P[1];
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, offset );
store32( &P->xof_length, 0 );
P->node_depth = 0;
P->inner_length = BLAKE2B_OUTBYTES;
/* Unused parameter-block fields must be zero. */
memset( P->reserved, 0, sizeof( P->reserved ) );
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2bp_init_leaf_param( S, P );
}
/* Set up the parameter block for the root node of the blake2bp tree
 * (node_depth = 1, node_offset = 0) and initialize S from it. Unlike
 * the leaves, the root's output length is the caller's `outlen`. */
static int blake2bp_init_root( blake2b_state *S, size_t outlen, size_t keylen )
{
blake2b_param P[1];
P->digest_length = (uint8_t)outlen;
P->key_length = (uint8_t)keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
store32( &P->leaf_length, 0 );
store32( &P->node_offset, 0 );
store32( &P->xof_length, 0 );
P->node_depth = 1;
P->inner_length = BLAKE2B_OUTBYTES;
/* Unused parameter-block fields must be zero. */
memset( P->reserved, 0, sizeof( P->reserved ) );
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2b_init_param( S, P );
}
/* Initialize an unkeyed blake2bp state producing `outlen` bytes.
 * Sets up the root node and PARALLELISM_DEGREE leaf nodes and clears
 * the input staging buffer. Returns 0 on success, -1 on bad outlen. */
int blake2bp_init( blake2bp_state *S, size_t outlen )
{
  size_t leaf;

  if( outlen == 0 || outlen > BLAKE2B_OUTBYTES ) return -1;

  S->outlen = outlen;
  S->buflen = 0;
  memset( S->buf, 0, sizeof( S->buf ) );

  if( blake2bp_init_root( S->R, outlen, 0 ) < 0 ) return -1;

  for( leaf = 0; leaf < PARALLELISM_DEGREE; ++leaf )
  {
    if( blake2bp_init_leaf( S->S[leaf], outlen, 0, leaf ) < 0 ) return -1;
  }

  /* Both the root and the final leaf carry the last-node flag. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}
/* Keyed initialization: set up the root and leaves as in blake2bp_init,
 * then feed one zero-padded key block to every leaf, as the keyed
 * BLAKE2 construction requires. Returns 0 on success, -1 on bad
 * outlen/key parameters. */
int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen )
{
size_t i;
if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
S->outlen = outlen;
if( blake2bp_init_root( S->R, outlen, keylen ) < 0 )
return -1;
for( i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
/* Both the root and the final leaf carry the last-node flag. */
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
{
/* Each leaf absorbs the key padded to a full block. */
uint8_t block[BLAKE2B_BLOCKBYTES];
memset( block, 0, BLAKE2B_BLOCKBYTES );
memcpy( block, key, keylen );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );
secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
}
return 0;
}
/* Absorb `inlen` bytes of input. Input is sliced round-robin into
 * BLAKE2B_BLOCKBYTES blocks: leaf i hashes blocks i, i+P, i+2P, ...
 * (P = PARALLELISM_DEGREE). A partial group of blocks is staged in
 * S->buf until a full P*BLAKE2B_BLOCKBYTES group is available.
 * Always returns 0. */
int blake2bp_update( blake2bp_state *S, const void *pin, size_t inlen )
{
const unsigned char * in = (const unsigned char *)pin;
size_t left = S->buflen; /* bytes already staged from earlier calls */
size_t fill = sizeof( S->buf ) - left;
size_t i;
/* Complete and flush the staging buffer first, one block per leaf. */
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );
in += fill;
inlen -= fill;
left = 0;
}
/* With OpenMP each thread services one leaf; otherwise iterate. */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
{
#if defined(_OPENMP)
size_t i = omp_get_thread_num();
#endif
size_t inlen__ = inlen;
const unsigned char *in__ = ( const unsigned char * )in;
in__ += i * BLAKE2B_BLOCKBYTES;
/* Consume whole interleaved groups; leaf i takes one block per group. */
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
{
blake2b_update( S->S[i], in__, BLAKE2B_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
}
}
/* Stage the trailing partial group for the next call / finalization. */
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
/* Finalize: flush each leaf's share of the staged buffer, collect the
 * leaf digests, hash them through the root node, and write S->outlen
 * bytes to `out`. Returns blake2b_final's result, or -1 if `out` is
 * NULL or `outlen` is smaller than the configured output length. */
int blake2bp_final( blake2bp_state *S, void *out, size_t outlen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
size_t i;
if(out == NULL || outlen < S->outlen) {
return -1;
}
for( i = 0; i < PARALLELISM_DEGREE; ++i )
{
/* Leaf i owns at most one block starting at offset i*BLOCKBYTES. */
if( S->buflen > i * BLAKE2B_BLOCKBYTES )
{
size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;
if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;
blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
}
blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
}
/* Root hashes the concatenation of the leaf digests. */
for( i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );
return blake2b_final( S->R, out, S->outlen );
}
/* One-shot parallel BLAKE2bp hash: hash `in` (optionally keyed) and
 * write `outlen` bytes to `out`. PARALLELISM_DEGREE leaf states hash
 * interleaved blocks (leaf i takes blocks i, i+4, i+8, ...) and a root
 * node hashes the concatenated leaf digests.
 * Returns 0 on success, -1 on invalid parameters. */
int blake2bp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];
  size_t i;

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;
  if ( NULL == out ) return -1;
  if( NULL == key && keylen > 0 ) return -1;
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */

  if( keylen > 0 )
  {
    /* Keyed mode: every leaf absorbs one zero-padded key block first. */
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2B_BLOCKBYTES;

    /* Whole interleaved groups: leaf i hashes one block per group. */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[i], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    /* Tail: this leaf's share of the final partial group, if any. */
    if( inlen__ > i * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - i * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[i], in__, len );
    }

    blake2b_final( S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  FS->last_node = 1; /* Mark as last node */

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( FS, out, outlen ); /* stray ";;" removed */
}
#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Known-answer self-test (compiled only with BLAKE2BP_SELFTEST):
 * checks both the one-shot blake2bp() API and the streaming
 * init/update/final API against the keyed KAT vectors from
 * blake2-kat.h, for every message length and every chunk size up to
 * one block. Prints "ok" and returns 0 on success. */
int main( void )
{
uint8_t key[BLAKE2B_KEYBYTES];
uint8_t buf[BLAKE2_KAT_LENGTH];
size_t i, step;
/* Deterministic test key and message: bytes 0, 1, 2, ... */
for( i = 0; i < BLAKE2B_KEYBYTES; ++i )
key[i] = ( uint8_t )i;
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
buf[i] = ( uint8_t )i;
/* Test simple API */
for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
{
uint8_t hash[BLAKE2B_OUTBYTES];
blake2bp( hash, BLAKE2B_OUTBYTES, buf, i, key, BLAKE2B_KEYBYTES );
if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) )
{
goto fail;
}
}
/* Test streaming API: feed the message in chunks of `step` bytes. */
for(step = 1; step < BLAKE2B_BLOCKBYTES; ++step) {
for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
uint8_t hash[BLAKE2B_OUTBYTES];
blake2bp_state S;
uint8_t * p = buf;
size_t mlen = i;
int err = 0;
if( (err = blake2bp_init_key(&S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES)) < 0 ) {
goto fail;
}
while (mlen >= step) {
if ( (err = blake2bp_update(&S, p, step)) < 0 ) {
goto fail;
}
mlen -= step;
p += step;
}
if ( (err = blake2bp_update(&S, p, mlen)) < 0) {
goto fail;
}
if ( (err = blake2bp_final(&S, hash, BLAKE2B_OUTBYTES)) < 0) {
goto fail;
}
if (0 != memcmp(hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES)) {
goto fail;
}
}
}
puts( "ok" );
return 0;
fail:
puts("error");
return -1;
}
#endif
|
GB_binop__gt_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_bool)
// A.*B function (eWiseMult): GB (_AemultB_01__gt_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_bool)
// A.*B function (eWiseMult): GB (_AemultB_03__gt_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_bool)
// A*D function (colscale): GB (_AxD__gt_bool)
// D*A function (rowscale): GB (_DxB__gt_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_bool)
// C=scalar+B GB (_bind1st__gt_bool)
// C=scalar+B' GB (_bind1st_tran__gt_bool)
// C=A+scalar GB (_bind2nd__gt_bool)
// C=A'+scalar GB (_bind2nd_tran__gt_bool)
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
bool bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_BOOL || GxB_NO_GT_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the GT_BOOL operator where C, A, B are all dense; no
// accumulator. Auto-generated: the body is the shared template. Returns
// GrB_NO_VALUE when this operator is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_ewise3_noaccum__gt_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (GT_BOOL accumulator): accumulate sparse B into dense C using
// the parallel slicing in B_ek_slicing. Body is the shared template.
GrB_Info GB (_Cdense_accumB__gt_bool)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar accumulate into dense C, GT_BOOL). Body is the shared
// template. Note the second `return (GrB_SUCCESS)` after the inner
// block is unreachable generated boilerplate.
GrB_Info GB (_Cdense_accumb__gt_bool)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by diagonal matrix D with the GT_BOOL
// operator; writes into C->x via the shared colscale template.
GrB_Info GB (_AxD__gt_bool)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by diagonal matrix D with the GT_BOOL operator;
// writes into C->x via the shared rowscale template.
GrB_Info GB (_DxB__gt_bool)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the GT_BOOL operator. The
// workspaces declared here are consumed/freed by the shared template.
GrB_Info GB (_AaddB__gt_bool)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the GT_BOOL
// operator; body is the shared emult meta-template.
GrB_Info GB (_AemultB_01__gt_bool)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full, GT_BOOL operator. GB_BINOP_FLIP is 0 for this operator
// (see the macro configuration above), so only the non-flipped branch
// is compiled; flipxy has already been resolved by the caller.
GrB_Info GB (_AemultB_02__gt_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full, GT_BOOL operator.
GrB_Info GB (_AemultB_03__gt_bool)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult into a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B with the
// GT_BOOL operator.
GrB_Info GB (_AemultB_bitmap__gt_bool)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op(x, Bx): apply GT_BOOL with the scalar bound as the first
// argument, i.e. Cx[p] = (x > Bx[p]) for every present entry of B.
GrB_Info GB (_bind1st__gt_bool)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap Bb
if (!GBB (Bb, p)) continue ;
bool bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op(Ax, y): apply GT_BOOL with the scalar bound as the second
// argument, i.e. Cx[p] = (Ax[p] > y) for every present entry of A.
GrB_Info GB (_bind2nd__gt_bool)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap Ab
if (!GBB (Ab, p)) continue ;
bool aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
// C = op(x, A'): transpose A and apply GT_BOOL with the scalar as the
// first argument, via GB_CAST_OP defined just above and the shared
// transpose template.
GrB_Info GB (_bind1st_tran__gt_bool)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent generated code
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
// C = op(A', y): transpose A and apply GT_BOOL with the scalar as the
// second argument, via GB_CAST_OP defined just above and the shared
// transpose template.
GrB_Info GB (_bind2nd_tran__gt_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
field_utility.h | #ifndef KRATOS_FIELD_UTILITY_H
#define KRATOS_FIELD_UTILITY_H
// /* External includes */
// System includes
// Project includes
#include "includes/variables.h"
/* System includes */
#include <limits>
#include <iostream>
#include <iomanip>
/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "includes/define.h"
#include "utilities/openmp_utils.h"
#include "real_field.h"
#include "vector_field.h"
#include "sets/space_time_set.h"
namespace Kratos
{
/// Utility that evaluates scalar (RealField) and vector (VectorField<3>)
/// fields over the nodes of a ModelPart, restricted to a space-time
/// domain (SpaceTimeSet). Nodes outside the domain receive a caller
/// supplied default value.
class KRATOS_API(SWIMMING_DEM_APPLICATION) FieldUtility
{
public:
KRATOS_CLASS_POINTER_DEFINITION(FieldUtility);
/// Default constructor.
FieldUtility(): mDomain(), mpVectorField(){}
/// Construct from a space-time domain and the vector field to impose.
FieldUtility(SpaceTimeSet::Pointer p_sts, VectorField<3>::Pointer p_vector_field):
mDomain(p_sts), mpVectorField(p_vector_field){}
/// Destructor.
virtual ~FieldUtility(){}
//***************************************************************************************************************
//***************************************************************************************************************
/// Flag every node of r_model_part as INSIDE/outside mDomain at the
/// current TIME of the process info, and cache the per-node result in
/// mIsInArray (indexed in NodesBegin() + i order).
/// NOTE(review): the OpenMP loop uses NodesBegin() + i, which assumes
/// random-access node iterators -- confirm for the container in use.
void MarkNodesInside(ModelPart& r_model_part, const ProcessInfo& r_current_process_info)
{
const int nnodes = r_model_part.Nodes().size();
const double time = r_current_process_info[TIME];
mIsInArray.resize(nnodes);
#pragma omp parallel for
for (int i = 0; i < nnodes; ++i){
ModelPart::NodeIterator node_it = r_model_part.NodesBegin() + i;
double coor_x = node_it->X();
double coor_y = node_it->Y();
double coor_z = node_it->Z();
bool is_in = mDomain->IsIn(time, coor_x, coor_y, coor_z);
node_it->Set(INSIDE, is_in);
mIsInArray[i] = is_in;
}
}
//***************************************************************************************************************
//***************************************************************************************************************
/// Evaluate a scalar field at a point; returns 0.0 outside mDomain.
double EvaluateFieldAtPoint(const double& time,
const array_1d<double, 3>& coor,
RealField::Pointer formula)
{
if (mDomain->IsIn(time, coor[0], coor[1], coor[2])){
return(formula->Evaluate(time, coor));
}
return(0.0);
}
//***************************************************************************************************************
//***************************************************************************************************************
/// Evaluate a vector field at a point; returns the zero vector outside
/// mDomain.
array_1d<double, 3> EvaluateFieldAtPoint(const double& time,
const array_1d<double, 3>& coor,
VectorField<3>::Pointer formula)
{
if (mDomain->IsIn(time, coor[0], coor[1], coor[2])){
array_1d<double, 3> value;
formula->Evaluate(time, coor, value);
return(value);
}
return(ZeroVector(3));
}
//***************************************************************************************************************
//***************************************************************************************************************
/// Write `formula(time, coor)` into `destination_variable` for nodes
/// inside the domain, and `default_value` for the rest. The inside
/// flags are recomputed when requested or when the cached array size no
/// longer matches the node count.
virtual void ImposeFieldOnNodes(Variable<double>& destination_variable,
const double default_value,
RealField::Pointer formula,
ModelPart& r_model_part,
const ProcessInfo& r_current_process_info,
const bool recalculate_domain)
{
const unsigned int nnodes = r_model_part.Nodes().size();
const double time = r_current_process_info[TIME];
if (recalculate_domain || nnodes != mIsInArray.size()){
MarkNodesInside(r_model_part, r_current_process_info);
}
#pragma omp parallel for
for (int i = 0; i < (int)nnodes; ++i){
ModelPart::NodeIterator node_it = r_model_part.NodesBegin() + i;
double& destination_value = node_it->FastGetSolutionStepValue(destination_variable);
destination_value = default_value;
if (mIsInArray[i]){
array_1d<double, 3> coor;
coor[0] = node_it->X();
coor[1] = node_it->Y();
coor[2] = node_it->Z();
destination_value = formula->Evaluate(time, coor);
}
}
}
//***************************************************************************************************************
//***************************************************************************************************************
/// Vector-field overload of the above: evaluates the 3-component field
/// in place for nodes inside the domain, default elsewhere.
virtual void ImposeFieldOnNodes(Variable<array_1d<double, 3> >& destination_variable,
const array_1d<double, 3> default_value,
VectorField<3>::Pointer formula,
ModelPart& r_model_part,
const ProcessInfo& r_current_process_info,
const bool recalculate_domain)
{
const unsigned int nnodes = r_model_part.Nodes().size();
const double time = r_current_process_info[TIME];
if (recalculate_domain || nnodes != mIsInArray.size()){
MarkNodesInside(r_model_part, r_current_process_info);
}
#pragma omp parallel for
for (int i = 0; i < (int)nnodes; ++i){
ModelPart::NodeIterator node_it = r_model_part.NodesBegin() + i;
array_1d<double, 3>& destination_value = node_it->FastGetSolutionStepValue(destination_variable);
destination_value[0] = default_value[0];
destination_value[1] = default_value[1];
destination_value[2] = default_value[2];
if (mIsInArray[i]){
array_1d<double, 3> coor;
coor[0] = node_it->X();
coor[1] = node_it->Y();
coor[2] = node_it->Z();
formula->Evaluate(time, coor, destination_value);
}
}
}
//***************************************************************************************************************
//***************************************************************************************************************
/// Overloads implemented out of line (see the corresponding .cpp).
virtual void ImposeFieldOnNodes(ModelPart& r_model_part, const VariablesList& variables_to_be_imposed);
virtual void ImposeFieldOnNodes(ModelPart& r_model_part, const Variable<array_1d<double, 3> >& variable_to_be_imposed);
//***************************************************************************************************************
//***************************************************************************************************************
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a stemplate<class T, std::size_t dim> tring.
virtual std::string Info() const
{
return "";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member r_variables
///@{
///@}
///@name Protected member r_variables
///@{ template<class T, std::size_t dim>
// NOTE(review): mFormula is never referenced in this header -- presumably
// used by derived classes; verify before removing.
RealField::Pointer mFormula;
// Space-time domain restricting where fields are imposed.
SpaceTimeSet::Pointer mDomain;
// Vector field passed at construction (used by derived/out-of-line code).
VectorField<3>::Pointer mpVectorField;
// Cached inside/outside flags, one per node, filled by MarkNodesInside.
std::vector<bool> mIsInArray;
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member r_variables
///@{
///@}
///@name Member r_variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator (intentionally declared but not defined).
FieldUtility & operator=(FieldUtility const& rOther);
///@}
}; // Class FieldUtility
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
} // namespace Kratos.
#endif // KRATOS_FIELD_UTILITY_H
|
GB_binop__lt_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__lt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__lt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_fp32)
// A*D function (colscale): GB (_AxD__lt_fp32)
// D*A function (rowscale): GB (_DxB__lt_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_fp32)
// C=scalar+B GB (_bind1st__lt_fp32)
// C=scalar+B' GB (_bind1st_tran__lt_fp32)
// C=A+scalar GB (_bind2nd__lt_fp32)
// C=A'+scalar GB (_bind2nd_tran__lt_fp32)
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_FP32 || GxB_NO_LT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Dead code by construction: the dense C += A+B kernel exists only for
// accumulating operators; the code generator emitted "(none)" for LT and
// disabled the function with #if 0.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, specialized for cij = (aij < bij).
// The loop itself lives in the included template; the GB_* macros defined at
// the top of this file specialize it for bool = (float < float).
GrB_Info GB (_Cdense_ewise3_noaccum__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via GxB_NO_* flags; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix into a dense matrix.  The generator
// disabled the update (#if 0) for this operator (bool C cannot accumulate
// with LT), so the function is a no-op returning success -- presumably
// never selected for this operator; confirm against the dispatch logic.
GrB_Info GB (_Cdense_accumB__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense matrix.  As with accumB above,
// the generator disabled the actual update for LT (#if 0), leaving a no-op
// that reports success.
GrB_Info GB (_Cdense_accumb__lt_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scaling by a diagonal matrix D, with cij = (aij < djj).
GrB_Info GB (_AxD__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as A; only its (bool) values are written here
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scaling by a diagonal matrix D, with cij = (dii < bij).
GrB_Info GB (_DxB__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as B; only its (bool) values are written here
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (set union of patterns), with the LT
// operator applied where both entries are present.  All slicing/task data
// is precomputed by the caller; workspaces are declared here and released
// by GB_FREE_WORK after the template runs.
GrB_Info GB (_AaddB__lt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection of patterns),
// general case; the included meta file dispatches on sparsity formats.
GrB_Info GB (_AemultB_01__lt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for LT (a flipped variant, GT, exists and is chosen
// upstream), so only the non-flipped preprocessor branch compiles here;
// the flipxy argument is then unused.
GrB_Info GB (_AemultB_02__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; C inherits M's pattern restricted to present entries.
GrB_Info GB (_AemultB_03__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__lt_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for all bnz entries: apply the LT operator with the
// scalar bound to the first argument.  Cx and Bx may alias.  Entries absent
// from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__lt_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Bx = (float *) Bx_input ;
    bool *Cx = (bool *) Cx_output ;
    float x = (*((float *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        float bval = GBX (Bx, p, false) ;
        Cx [p] = (x < bval) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for all anz entries: apply the LT operator with the
// scalar bound to the second argument.  Cx and Ax may alias.  Entries absent
// from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__lt_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        float aval = GBX (Ax, p, false) ;
        Cx [p] = (aval < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply LT with the scalar as first argument.
GrB_Info GB (_bind1st_tran__lt_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE (generated boilerplate; A and B types coincide for
// LT_FP32, so the definition is unchanged here)
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply LT with the scalar as second argument.
GrB_Info GB (_bind2nd_tran__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
par_relax_more.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.4 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* a few more relaxation schemes: Chebychev, FCF-Jacobi, CG and Steepest
* Descent
*
*****************************************************************************/
#include "headers.h"
#include "float.h"
/* float.h normally defines DBL_EPSILON already; guard the fallback to avoid
   a macro-redefinition warning/clash with the standard header */
#ifndef DBL_EPSILON
#define DBL_EPSILON 2.2204460492503131e-16
#endif
int hypre_LINPACKcgtql1(int*,double *,double *,int *);
/******************************************************************************
*
*use max norm to estimate largest eigenvalue
*
*****************************************************************************/
/*
 * Estimate the largest eigenvalue of A using the inf-norm of A (adequate
 * for SPD matrices).  If scale is nonzero, the estimate applies to the
 * diagonally scaled matrix: each row sum is divided by the magnitude of the
 * row's first stored entry (assumed to be the diagonal -- TODO confirm this
 * storage convention for all callers).
 *
 * A        - matrix to relax with
 * scale    - scale by diagonal?
 * max_eig  - output: eigenvalue estimate
 *
 * Returns hypre_error_flag.
 */
int hypre_ParCSRMaxEigEstimate(hypre_ParCSRMatrix *A, /* matrix to relax with */
                               int scale, /* scale by diagonal?*/
                               double *max_eig)
{
   double e_max;
   double row_sum, max_norm;
   double *col_val;
   double temp;
   double diag_value;
   int pos_diag, neg_diag;
   HYPRE_BigInt start_row, end_row;
   int row_length;
   HYPRE_BigInt *col_ind;
   int j;
   HYPRE_BigInt i;

   /* estimate with the inf-norm of A - should be ok for SPD matrices */
   start_row = hypre_ParCSRMatrixFirstRowIndex(A);
   end_row   = hypre_ParCSRMatrixLastRowIndex(A);

   max_norm = 0.0;
   pos_diag = neg_diag = 0;

   for ( i = start_row; i <= end_row; i++ )
   {
      HYPRE_ParCSRMatrixGetRow((HYPRE_ParCSRMatrix) A, i, &row_length, &col_ind, &col_val);
      row_sum = 0.0;
      /* bug fix: reset per row.  Previously an empty row reused the stale
         diag_value of the preceding row (or read it uninitialized on the
         first row), corrupting the scaled row sum below. */
      diag_value = 0.0;
      for (j = 0; j < row_length; j++)
      {
         /* first stored entry of the row is taken to be the diagonal */
         if (j==0) diag_value = fabs(col_val[j]);
         row_sum += fabs(col_val[j]);
         /* track the diagonal sign to pick the sign of the estimate below */
         if ( col_ind[j] == i && col_val[j] > 0.0 ) pos_diag++;
         if ( col_ind[j] == i && col_val[j] < 0.0 ) neg_diag++;
      }
      if (scale)
      {
         if (diag_value != 0.0)
            row_sum = row_sum/diag_value;
      }
      if ( row_sum > max_norm ) max_norm = row_sum;
      HYPRE_ParCSRMatrixRestoreRow((HYPRE_ParCSRMatrix) A, i, &row_length, &col_ind, &col_val);
   }

   /* get max across procs */
   MPI_Allreduce(&max_norm, &temp, 1, MPI_DOUBLE, MPI_MAX, hypre_ParCSRMatrixComm(A));
   max_norm = temp;

   /* from Charles: an all-non-positive diagonal flips the estimate's sign */
   if ( pos_diag == 0 && neg_diag > 0 ) max_norm = - max_norm;

   /* eig estimates */
   e_max = max_norm;

   /* return */
   *max_eig = e_max;

   return hypre_error_flag;
}
/******************************************************************************
use CG to get the eigenvalue estimate
scale means get eig est of (D^{-1/2} A D^{-1/2}
******************************************************************************/
/*
 * Estimate the extreme eigenvalues of A (or D^{-1/2} A D^{-1/2} if scale is
 * nonzero) by running up to max_iter CG/Lanczos steps from a random residual
 * and taking the eigenvalues of the resulting tridiagonal matrix.
 * Outputs: *max_eig, *min_eig.
 */
int hypre_ParCSRMaxEigEstimateCG(hypre_ParCSRMatrix *A, /* matrix to relax with */
int scale, /* scale by diagonal?*/
int max_iter,
double *max_eig,
double *min_eig)
{
int i, j, err;
hypre_ParVector *p;
hypre_ParVector *s;
hypre_ParVector *r;
hypre_ParVector *ds;
hypre_ParVector *u;
double *tridiag;
double *trioffd;
double lambda_max, max_row_sum;
double beta, gamma = 0.0, alpha, sdotp, gamma_old, alphainv;
double diag;
double lambda_min;
double *s_data, *p_data, *ds_data, *u_data;
int local_size = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
int *A_diag_i = hypre_CSRMatrixI(A_diag);
/* check the size of A - don't iterate more than the size */
HYPRE_BigInt size = hypre_ParCSRMatrixGlobalNumRows(A);
if (size < (HYPRE_BigInt) max_iter)
max_iter = (int) size;
/* create some temp vectors: p, s, r , ds, u*/
r = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(r);
hypre_ParVectorSetPartitioningOwner(r,0);
p = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(p);
hypre_ParVectorSetPartitioningOwner(p,0);
s = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(s);
hypre_ParVectorSetPartitioningOwner(s,0);
ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(ds);
hypre_ParVectorSetPartitioningOwner(ds,0);
u = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(u);
hypre_ParVectorSetPartitioningOwner(u,0);
/* point to local data */
s_data = hypre_VectorData(hypre_ParVectorLocalVector(s));
p_data = hypre_VectorData(hypre_ParVectorLocalVector(p));
ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
/* make room for tri-diag matrix */
tridiag = hypre_CTAlloc(double, max_iter+1);
trioffd = hypre_CTAlloc(double, max_iter+1);
for (i=0; i < max_iter + 1; i++)
{
tridiag[i] = 0;
trioffd[i] = 0;
}
/* set residual to random */
hypre_ParVectorSetRandomValues(r,1);
if (scale)
{
/* ds = D^{-1/2}, built from the first stored entry of each local row
   (assumed to be the diagonal -- TODO confirm) */
for (i = 0; i < local_size; i++)
{
diag = A_diag_data[A_diag_i[i]];
ds_data[i] = 1/sqrt(diag);
}
}
else
{
/* set ds to 1 */
hypre_ParVectorSetConstantValues(ds,1.0);
}
/* gamma = <r,Cr> */
/* NOTE(review): p has only just been initialized here, so this value only
   seeds gamma_old on the first iteration, where it is unused -- gamma is
   recomputed as <r,s> inside the loop before any real use; confirm intent. */
gamma = hypre_ParVectorInnerProd(r,p);
/* for the initial filling of the tridiag matrix */
beta = 1.0;
max_row_sum = 0.0;
i = 0;
while (i < max_iter)
{
/* s = C*r */
/* TO DO: C = diag scale */
hypre_ParVectorCopy(r, s);
/*gamma = <r,Cr> */
gamma_old = gamma;
gamma = hypre_ParVectorInnerProd(r,s);
if (i==0)
{
beta = 1.0;
/* p_0 = C*r */
hypre_ParVectorCopy(s, p);
}
else
{
/* beta = gamma / gamma_old */
beta = gamma / gamma_old;
/* p = s + beta p */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) schedule(static)
#endif
for (j=0; j < local_size; j++)
{
p_data[j] = s_data[j] + beta*p_data[j];
}
}
if (scale)
{
/* s = D^{-1/2}A*D^{-1/2}*p */
for (j = 0; j < local_size; j++)
{
u_data[j] = ds_data[j] * p_data[j];
}
hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, s);
for (j = 0; j < local_size; j++)
{
s_data[j] = ds_data[j] * s_data[j];
}
}
else
{
/* s = A*p */
hypre_ParCSRMatrixMatvec(1.0, A, p, 0.0, s);
}
/* <s,p> */
sdotp = hypre_ParVectorInnerProd(s,p);
/* alpha = gamma / <s,p> */
alpha = gamma/sdotp;
/* get tridiagonal matrix: standard CG-to-Lanczos coefficient mapping */
alphainv = 1.0/alpha;
tridiag[i+1] = alphainv;
tridiag[i] *= beta;
tridiag[i] += alphainv;
trioffd[i+1] = alphainv;
trioffd[i] *= sqrt(beta);
/* x = x + alpha*p */
/* don't need */
/* r = r - alpha*s */
hypre_ParVectorAxpy( -alpha, s, r);
i++;
}
/* eispack routine - eigenvalues return in tridiag and ordered*/
hypre_LINPACKcgtql1(&i,tridiag,trioffd,&err);
lambda_max = tridiag[i-1];
lambda_min = tridiag[0];
/* printf("linpack max eig est = %g\n", lambda_max);*/
/* printf("linpack min eig est = %g\n", lambda_min);*/
hypre_ParVectorDestroy(r);
hypre_ParVectorDestroy(s);
hypre_ParVectorDestroy(p);
hypre_ParVectorDestroy(ds);
hypre_ParVectorDestroy(u);
/* return */
*max_eig = lambda_max;
*min_eig = lambda_min;
return hypre_error_flag;
}
/******************************************************************************
Chebyshev relaxation - iterative implementation
(See Saad "Iterative Methods for Sparse Systems", Alg. 12.1
plus we can scale residual by inv(M) = 1/diag(A) so that we have Chebyshev
accelerated jacobi)
NOT USED CURRENTLY
******************************************************************************/
/*
 * Chebyshev relaxation, iterative form (Saad, "Iterative Methods for Sparse
 * Linear Systems", Alg. 12.1), with the residual scaled by inv(diag(A)) so
 * it acts as Chebyshev-accelerated Jacobi.  Updates u in place; v and v2 are
 * scratch.  NOT USED CURRENTLY.
 */
int hypre_ParCSRRelax_Cheby3(hypre_ParCSRMatrix *A, /* matrix to relax with */
hypre_ParVector *f, /* right-hand side */
double max_eig, /* u.b = max. e-val est.*1.1 */
double eig_ratio, /* l.b = max_eig/eig ratio */
int order, /* polynomial order */
hypre_ParVector *u, /* initial/updated approximation */
hypre_ParVector *v /* temporary vector */,
hypre_ParVector *v2 /*another temp vector */ )
{
/* See Saad "Iterative Methods for Sparse Systems", Alg. 12.1 */
/* plus we can scale residual by inv(M) = 1/diag(A) so that we have Chebyshev
accelerated jacobi */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
int *A_diag_i = hypre_CSRMatrixI(A_diag);
double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
double *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
double *dk = hypre_VectorData(hypre_ParVectorLocalVector(v2));
double theta, delta, sigma;
double p_k, p_kp1, temp1, temp2, diag, scale;
double upper_bound, lower_bound;
int i, j;
int num_rows = hypre_CSRMatrixNumRows(A_diag);
/* make sure we are large enough - Adams et al. 2003 */
upper_bound = max_eig * 1.1;
lower_bound = max_eig/eig_ratio;
/* parameters */
theta = (upper_bound + lower_bound)/2;
delta = (upper_bound - lower_bound)/2;
sigma = theta/delta;
/* set v = f */
hypre_ParVectorCopy(f, v);
/* get residual: v = f-A*u */
hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, v);
/* p_0*/
p_k = 1/sigma;
/*first order */
temp1 = 1/theta;
/*d_0* = 1/theta * inv(M)r_0 - M is Jacobi*/
/* x_1 = x_0 + d_0 */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,scale) schedule(static)
#endif
for (i = 0; i < num_rows; i++)
{
/* first stored entry of the row is taken to be the diagonal */
diag = A_diag_data[A_diag_i[i]];
scale = temp1/diag;
dk[i] = scale*v_data[i];
u_data[i] += dk[i];
}
/* higher order: standard three-term Chebyshev recurrence on d_k */
for (j = 1; j < order; j++)
{
/* get residual: v = f-A*u */
hypre_ParVectorCopy(f, v);
hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, v);
p_kp1 = 1.0/(2.0*sigma - p_k);
temp1 = p_kp1*p_k;
temp2 = 2.0*p_kp1/delta;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,scale) schedule(static)
#endif
for (i = 0; i < num_rows; i++)
{
diag = A_diag_data[A_diag_i[i]];
scale = temp2/diag;
dk[i] = temp1*dk[i] + scale*v_data[i];
u_data[i] += dk[i];
}
p_k = p_kp1;
}
return hypre_error_flag;
}
/******************************************************************************
Chebyshev relaxation
Can specify order 1-4 (this is the order of the resid polynomial)- here we
explicitly code the coefficients (instead of
iteratively determining)
variant 0: standard chebyshev
this is rlx 11 if scale = 0, and 16 if scale == 1
variant 1: modified cheby: T(t)* f(t) where f(t) = (1-b/t)
this is rlx 15 if scale = 0, and 17 if scale == 1
ratio indicates the percentage of the whole spectrum to use (so .5
means half, and .1 means 10percent)
*******************************************************************************/
/*
 * Chebyshev relaxation with explicitly tabulated polynomial coefficients for
 * orders 1-4 (order is clamped to that range).  variant 0 is standard
 * Chebyshev; variant 1 is the modified polynomial T(t)*(1-b/t).  If scale is
 * nonzero the iteration is applied to D^{-1/2} A D^{-1/2}.  eig_ratio sets
 * the fraction of the spectrum targeted.  u is updated in place; v and r
 * are scratch vectors.
 */
int hypre_ParCSRRelax_Cheby(hypre_ParCSRMatrix *A, /* matrix to relax with */
hypre_ParVector *f, /* right-hand side */
double max_eig,
double min_eig,
double eig_ratio,
int order, /* polynomial order */
int scale, /* scale by diagonal?*/
int variant,
hypre_ParVector *u, /* initial/updated approximation */
hypre_ParVector *v /* temporary vector */,
hypre_ParVector *r /*another temp vector */ )
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
int *A_diag_i = hypre_CSRMatrixI(A_diag);
double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
double *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
double *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
double *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));
double theta, delta;
double den;
double upper_bound, lower_bound;
int i, j;
int num_rows = hypre_CSRMatrixNumRows(A_diag);
double coefs[5];
double mult;
double *orig_u;
double tmp_d;
int cheby_order;
double *ds_data, *tmp_data;
double diag;
hypre_ParVector *ds;
hypre_ParVector *tmp_vec;
/* u = u + p(A)r */
if (order > 4)
order = 4;
if (order < 1)
order = 1;
/* we are using the order of p(A) */
cheby_order = order -1;
/* make sure we are large enough - Adams et al. 2003 */
upper_bound = max_eig * 1.1;
/* lower_bound = max_eig/eig_ratio; */
lower_bound = (upper_bound - min_eig)* eig_ratio + min_eig;
/* theta and delta */
theta = (upper_bound + lower_bound)/2;
delta = (upper_bound - lower_bound)/2;
if (variant == 1 )
{
switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0 - so order is
one less that resid poly: r(t) = 1 - t*s(t) */
{
case 0:
coefs[0] = 1.0/theta;
break;
case 1: /* (del - t + 2*th)/(th^2 + del*th) */
den = (theta*theta + delta*theta);
coefs[0] = (delta + 2*theta)/den;
coefs[1] = -1.0/den;
break;
case 2: /* (4*del*th - del^2 - t*(2*del + 6*th) + 2*t^2 + 6*th^2)/(2*del*th^2 - del^2*th - del^3 + 2*th^3)*/
den = 2*delta*theta*theta - delta*delta*theta - pow(delta,3) + 2*pow(theta,3);
coefs[0] = (4*delta*theta - pow(delta,2) + 6*pow(theta,2))/den;
coefs[1] = -(2*delta + 6*theta)/den;
coefs[2] = 2/den;
break;
case 3: /* -(6*del^2*th - 12*del*th^2 - t^2*(4*del + 16*th) + t*(12*del*th - 3*del^2 + 24*th^2) + 3*del^3 + 4*t^3 - 16*th^3)/(4*del*th^3 - 3*del^2*th^2 - 3*del^3*th + 4*th^4)*/
den = - (4*delta*pow(theta,3) - 3*pow(delta,2)*pow(theta,2) - 3*pow(delta,3)*theta + 4*pow(theta,4) );
coefs[0] = (6*pow(delta,2)*theta - 12*delta*pow(theta,2) + 3*pow(delta,3) - 16*pow(theta,3) )/den;
coefs[1] = (12*delta*theta - 3*pow(delta,2) + 24*pow(theta,2))/den;
coefs[2] = -( 4*delta + 16*theta)/den;
coefs[3] = 4/den;
break;
}
}
else /* standard chebyshev */
{
switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0 - so order is
one less thatn resid poly: r(t) = 1 - t*s(t) */
{
case 0:
coefs[0] = 1.0/theta;
break;
case 1: /* ( 2*t - 4*th)/(del^2 - 2*th^2) */
den = delta*delta - 2*theta*theta;
coefs[0] = -4*theta/den;
coefs[1] = 2/den;
break;
case 2: /* (3*del^2 - 4*t^2 + 12*t*th - 12*th^2)/(3*del^2*th - 4*th^3)*/
den = 3*(delta*delta)*theta - 4*(theta*theta*theta);
coefs[0] = (3*delta*delta - 12 *theta*theta)/den;
coefs[1] = 12*theta/den;
coefs[2] = -4/den;
break;
case 3: /*(t*(8*del^2 - 48*th^2) - 16*del^2*th + 32*t^2*th - 8*t^3 + 32*th^3)/(del^4 - 8*del^2*th^2 + 8*th^4)*/
den = pow(delta,4) - 8*delta*delta*theta*theta + 8*pow(theta,4);
coefs[0] = (32*pow(theta,3)- 16*delta*delta*theta)/den;
coefs[1] = (8*delta*delta - 48*theta*theta)/den;
coefs[2] = 32*theta/den;
coefs[3] = -8/den;
break;
}
}
/* workspace to preserve the incoming u while the polynomial is evaluated */
orig_u = hypre_CTAlloc(double, num_rows);
if (!scale)
{
/* get residual: r = f - A*u */
hypre_ParVectorCopy(f, r);
hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);
/* Horner-style evaluation of s(A)r, highest coefficient first */
for ( i = 0; i < num_rows; i++ )
{
orig_u[i] = u_data[i];
u_data[i] = r_data[i] * coefs[cheby_order];
}
for (i = cheby_order - 1; i >= 0; i-- )
{
hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);
mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) schedule(static)
#endif
for ( j = 0; j < num_rows; j++ )
{
u_data[j] = mult * r_data[j] + v_data[j];
}
}
/* u = u_orig + s(A)r */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
for ( i = 0; i < num_rows; i++ )
{
u_data[i] = orig_u[i] + u_data[i];
}
}
else /* scaling! */
{
/*grab 1/sqrt(diagonal) */
ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(ds);
hypre_ParVectorSetPartitioningOwner(ds,0);
ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
tmp_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(tmp_vec);
hypre_ParVectorSetPartitioningOwner(tmp_vec,0);
tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(tmp_vec));
/* get ds_data and get scaled residual: r = D^(-1/2)f -
* D^(-1/2)A*u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,diag) schedule(static)
#endif
for (j = 0; j < num_rows; j++)
{
/* first stored entry of the row is taken to be the diagonal */
diag = A_diag_data[A_diag_i[j]];
ds_data[j] = 1/sqrt(diag);
r_data[j] = ds_data[j] * f_data[j];
}
/* tmp_vec = -A*u, so the addition below completes r = D^(-1/2)(f - A*u) */
hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) schedule(static)
#endif
for ( j = 0; j < num_rows; j++ )
{
r_data[j] += ds_data[j] * tmp_data[j];
}
/* save original u, then start
the iteration by multiplying r by the cheby coef.*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) schedule(static)
#endif
for ( j = 0; j < num_rows; j++ )
{
orig_u[j] = u_data[j]; /* orig, unscaled u */
u_data[j] = r_data[j] * coefs[cheby_order];
}
/* now do the other coefficients */
for (i = cheby_order - 1; i >= 0; i-- )
{
/* v = D^(-1/2)AD^(-1/2)u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) schedule(static)
#endif
for ( j = 0; j < num_rows; j++ )
{
tmp_data[j] = ds_data[j] * u_data[j];
}
hypre_ParCSRMatrixMatvec(1.0, A, tmp_vec, 0.0, v);
/* u_new = coef*r + v*/
mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,tmp_d) schedule(static)
#endif
for ( j = 0; j < num_rows; j++ )
{
tmp_d = ds_data[j]* v_data[j];
u_data[j] = mult * r_data[j] + tmp_d;
}
} /* end of cheby_order loop */
/* now we have to scale u_data before adding it to u_orig*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) schedule(static)
#endif
for ( j = 0; j < num_rows; j++ )
{
u_data[j] = orig_u[j] + ds_data[j]*u_data[j];
}
hypre_ParVectorDestroy(ds);
hypre_ParVectorDestroy(tmp_vec);
}/* end of scaling code */
hypre_TFree(orig_u);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGRelax_FCFJacobi
*--------------------------------------------------------------------------*/
/*
 * FCF-Jacobi relaxation: one F-point sweep, one C-point sweep, then another
 * F-point sweep of weighted Jacobi.  On the coarsest level cf_marker is NULL
 * and a single plain Jacobi sweep (relax_points = 0) is done instead.
 */
int hypre_BoomerAMGRelax_FCFJacobi( hypre_ParCSRMatrix *A,
                                    hypre_ParVector *f,
                                    int *cf_marker,
                                    double relax_weight,
                                    hypre_ParVector *u,
                                    hypre_ParVector *Vtemp)
{
   int sweep;
   const int relax_type = 0;                 /* weighted Jacobi */
   const int fcf_points[3] = {-1, 1, -1};    /* F, C, F */
   hypre_ParVector *Ztemp = NULL;

   /* no CF splitting available (coarsest level): single plain sweep */
   const int num_sweeps = (cf_marker == NULL) ? 1 : 3;

   for (sweep = 0; sweep < num_sweeps; sweep++)
   {
      const int point = (cf_marker == NULL) ? 0 : fcf_points[sweep];
      hypre_BoomerAMGRelax(A,
                           f,
                           cf_marker,
                           relax_type,
                           point,
                           relax_weight,
                           0.0,
                           NULL,
                           u,
                           Vtemp, Ztemp);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* CG Smoother - if the CG setup is cheap, we can just do it here - for
* now we are doing it in the setup, so this function is a
* bit unnecessary ...
*
*--------------------------------------------------------------------------*/
/*
 * One application of the CG smoother: run num_its PCG iterations on
 * A u = f using an already set-up solver.  No convergence check is
 * made; the iteration count and final relative residual norm are
 * queried (and can be printed by enabling the debug block below).
 *
 * Returns hypre_error_flag.
 */
int hypre_ParCSRRelax_CG( HYPRE_Solver solver,
                          hypre_ParCSRMatrix *A,
                          hypre_ParVector *f,
                          hypre_ParVector *u,
                          int num_its)
{
   int its_taken;
   double rel_res_norm;

   HYPRE_PCGSetMaxIter(solver, num_its); /* do exactly num_its iterations */
   HYPRE_ParCSRPCGSolve(solver, (HYPRE_ParCSRMatrix)A, (HYPRE_ParVector)f, (HYPRE_ParVector)u);
   HYPRE_PCGGetNumIterations(solver, &its_taken);
   HYPRE_PCGGetFinalRelativeResidualNorm(solver, &rel_res_norm);

#if 0
   /* debug output, disabled by default */
   {
      int myid;
      MPI_Comm_rank(MPI_COMM_WORLD, &myid);
      if (myid ==0)
      {
         printf(" -----CG PCG Iterations = %d\n", its_taken);
         printf(" -----CG PCG Final Relative Residual Norm = %e\n", rel_res_norm);
      }
   }
#endif

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Steepest Descent (Smoother) (Not used)
*
* We don't check for convergence - just do a fixed number of iterations
*--------------------------------------------------------------------------*/
/*
 * Steepest-descent smoother (currently unused).  Performs a fixed
 * number of SD iterations on A u = f; no convergence test is made.
 * r and p are caller-supplied scratch vectors.
 *
 * Returns hypre_error_flag.
 */
int hypre_ParCSRRelax_SD( hypre_ParCSRMatrix *A,/* matrix to relax with */
                          hypre_ParVector *f, /* right-hand side */
                          hypre_ParVector *u,/* initial/updated approximation */
                          hypre_ParVector *r, /* temporary vector */
                          hypre_ParVector *p, /*another temp vector */
                          int num_its)
{
   int it;
   double step, rr, pr;

   /* initial residual: r = f - A*u */
   hypre_ParVectorCopy(f, r);
   hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);

   for (it = 0; it < num_its; it++)
   {
      /* p = A*r */
      hypre_ParCSRMatrixMatvec(1.0, A, r, 0.0, p);

      /* step length: (r,r)/(p,r); bail out on a zero denominator */
      rr = hypre_ParVectorInnerProd( r, r);
      pr = hypre_ParVectorInnerProd( p, r);
      if (pr == 0.0)
         break;
      step = rr/pr;

      /* u <- u + step*r,  r <- r - step*p */
      hypre_ParVectorAxpy( step, r, u);
      hypre_ParVectorAxpy( -step, p, r);
   }
   return hypre_error_flag;
}
/* tql1.f --
this is the eispack translation - from Barry Smith in Petsc
Note that this routine always uses real numbers (not complex) even
if the underlying matrix is Hermitian. This is because the Lanczos
process applied to Hermitian matrices always produces a real,
symmetric tridiagonal matrix.
*/
double hypre_LINPACKcgpthy(double*,double*);
/* Eigenvalues of a symmetric tridiagonal matrix by the QL method.
   f2c-translated EISPACK TQL1 — the goto structure and exact
   statement order mirror the Fortran original; do not "clean up". */
int hypre_LINPACKcgtql1(int *n,double *d,double *e,int *ierr)
{
   /* System generated locals */
   int i__1,i__2;
   double d__1,d__2,c_b10 = 1.0;
   /* Local variables */
   double c,f,g,h;
   int i,j,l,m;
   double p,r,s,c2,c3 = 0.0;
   int l1,l2;
   double s2 = 0.0;
   int ii;
   double dl1,el1;
   int mml;
   double tst1,tst2;
/* THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL1, */
/* NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND */
/* WILKINSON. */
/* HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). */
/* THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC */
/* TRIDIAGONAL MATRIX BY THE QL METHOD. */
/* ON INPUT */
/* N IS THE ORDER OF THE MATRIX. */
/* D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. */
/* E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX */
/* IN ITS LAST N-1 POSITIONS. E(1) IS ARBITRARY. */
/* ON OUTPUT */
/* D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN */
/* ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND */
/* ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE */
/* THE SMALLEST EIGENVALUES. */
/* E HAS BEEN DESTROYED. */
/* IERR IS SET TO */
/* ZERO FOR NORMAL RETURN, */
/* J IF THE J-TH EIGENVALUE HAS NOT BEEN */
/* DETERMINED AFTER 30 ITERATIONS. */
/* CALLS CGPTHY FOR DSQRT(A*A + B*B) . */
/* QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, */
/* MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY
*/
/* THIS VERSION DATED AUGUST 1983. */
/* ------------------------------------------------------------------
*/
   double ds;

   /* shift pointers so the arrays are 1-based, as in the Fortran original */
   --e;
   --d;

   *ierr = 0;
   if (*n == 1) {
      goto L1001;   /* 1x1 matrix: d[1] already is the eigenvalue */
   }
   /* move the subdiagonal down one slot: e[1..n-1] holds it, e[n] := 0 */
   i__1 = *n;
   for (i = 2; i <= i__1; ++i) {
      e[i - 1] = e[i];
   }
   f = 0.;
   tst1 = 0.;
   e[*n] = 0.;
   i__1 = *n;
   for (l = 1; l <= i__1; ++l) {
      j = 0;   /* per-eigenvalue iteration counter (limit 30) */
      h = (d__1 = d[l],fabs(d__1)) + (d__2 = e[l],fabs(d__2));
      if (tst1 < h) {
         tst1 = h;
      }
      /* .......... LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... */
      i__2 = *n;
      for (m = l; m <= i__2; ++m) {
         /* e[m] is negligible when adding it to tst1 changes nothing */
         tst2 = tst1 + (d__1 = e[m],fabs(d__1));
         if (tst2 == tst1) {
            goto L120;
         }
         /* .......... E(N) IS ALWAYS ZERO,SO THERE IS NO EXIT */
         /* THROUGH THE BOTTOM OF THE LOOP .......... */
      }
L120:
      if (m == l) {
         goto L210;   /* already isolated: d[l]+f is an eigenvalue */
      }
L130:
      if (j == 30) {
         goto L1000;  /* no convergence after 30 QL iterations */
      }
      ++j;
      /* .......... FORM SHIFT .......... */
      l1 = l + 1;
      l2 = l1 + 1;
      g = d[l];
      p = (d[l1] - g) / (e[l] * 2.);
      r = hypre_LINPACKcgpthy(&p,&c_b10);
      /* ds = sign(p), used to pick the root that avoids cancellation */
      ds = 1.0; if (p < 0.0) ds = -1.0;
      d[l] = e[l] / (p + ds*r);
      d[l1] = e[l] * (p + ds*r);
      dl1 = d[l1];
      h = g - d[l];
      if (l2 > *n) {
         goto L145;
      }
      i__2 = *n;
      for (i = l2; i <= i__2; ++i) {
         d[i] -= h;
      }
L145:
      f += h;
      /* .......... QL TRANSFORMATION .......... */
      p = d[m];
      c = 1.;
      c2 = c;
      el1 = e[l1];
      s = 0.;
      mml = m - l;
      /* .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... */
      i__2 = mml;
      for (ii = 1; ii <= i__2; ++ii) {
         c3 = c2;
         c2 = c;
         s2 = s;
         i = m - ii;
         g = c * e[i];
         h = c * p;
         r = hypre_LINPACKcgpthy(&p,&e[i]);
         e[i + 1] = s * r;
         s = e[i] / r;
         c = p / r;
         p = c * d[i] - s * g;
         d[i + 1] = h + s * (c * g + s * d[i]);
      }
      p = -s * s2 * c3 * el1 * e[l] / dl1;
      e[l] = s * p;
      d[l] = c * p;
      /* iterate until the off-diagonal e[l] becomes negligible */
      tst2 = tst1 + (d__1 = e[l],fabs(d__1));
      if (tst2 > tst1) {
         goto L130;
      }
L210:
      p = d[l] + f;
      /* .......... ORDER EIGENVALUES .......... */
      if (l == 1) {
         goto L250;
      }
      /* .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... */
      /* insertion sort: shift larger eigenvalues up, insert p */
      i__2 = l;
      for (ii = 2; ii <= i__2; ++ii) {
         i = l + 2 - ii;
         if (p >= d[i - 1]) {
            goto L270;
         }
         d[i] = d[i - 1];
      }
L250:
      i = 1;
L270:
      d[i] = p;
   }
   goto L1001;
   /* .......... SET ERROR -- NO CONVERGENCE TO AN */
   /* EIGENVALUE AFTER 30 ITERATIONS .......... */
L1000:
   *ierr = l;
L1001:
   return 0;
} /* cgtql1_ */
/*
 * Computes sqrt(a*a + b*b) without overflow or destructive underflow,
 * using the Moler-Morrison iteration from LINPACK's pythag.  Both
 * arguments are passed by pointer, Fortran-translation style.
 */
double hypre_LINPACKcgpthy(double *a,double *b)
{
   double big, small, ratio2, t, s, u;

   const double aa = fabs(*a);
   const double bb = fabs(*b);

   big   = (aa < bb) ? bb : aa;
   small = (aa < bb) ? aa : bb;

   if (big == 0.0)
   {
      return big;   /* both inputs are zero */
   }

   /* squared ratio of the smaller to the larger magnitude (<= 1) */
   ratio2 = (small / big) * (small / big);

   for (;;)
   {
      t = ratio2 + 4.;
      if (t == 4.)
      {
         /* ratio2 is now negligible: big holds the result */
         break;
      }
      s = ratio2 / t;
      u = s * 2. + 1.;
      big = u * big;
      ratio2 = (s / u) * (s / u) * ratio2;
   }

   return big;
} /* cgpthy_ */
/* NOTE: the entire function below is compiled out (#if 0) — an
   abandoned second variant of the Chebyshev smoother, kept for
   reference only. */
#if 0
/* Chebyshev polynomial smoother, variant 2: relaxes u on A u = f with
   a degree-`order` polynomial built from an upper eigenvalue estimate.
   The residual is additionally scaled by (I - A/u.b.) each step
   (the "NEW PART" sections). */
int hypre_ParCSRRelax_Cheby2(hypre_ParCSRMatrix *A, /* matrix to relax with */
                             hypre_ParVector *f, /* right-hand side */
                             double max_eig, /* u.b = max. e-val est.*1.1 */
                             double eig_ratio, /* l.b = max_eig/eig ratio */
                             int order, /* polynomial order */
                             hypre_ParVector *u, /* initial/updated approximation */
                             hypre_ParVector *v /* temporary vector */,
                             hypre_ParVector *v2 /*another temp vector */ )
{
/* See Saad "Iterative Methods for Sparse Systems", Alg. 12.1 */
/* r_m = Tm(r_0) - plus we scale residual by SCALE = (1-A/u.b.) */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   int *A_diag_i = hypre_CSRMatrixI(A_diag);
   double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   double *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   double *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
   /* v2's local data is reused as the Chebyshev direction vector d_k */
   double *dk = hypre_VectorData(hypre_ParVectorLocalVector(v2));
   double theta, delta, sigma;
   double p_k, p_kp1, temp1, temp2, diag, scale;
   double zero = 0.0;
   double upper_bound, lower_bound;
   int i, j;
   int num_rows = hypre_CSRMatrixNumRows(A_diag);
   hypre_ParVector *Ztemp;
   Ztemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                 hypre_ParCSRMatrixGlobalNumRows(A),
                                 hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(Ztemp);
   hypre_ParVectorSetPartitioningOwner(Ztemp,0);
/* make sure we are large enough - Adams et al. 2003 */
   upper_bound = max_eig * 1.1;
   lower_bound = max_eig/eig_ratio;
/* parameters */
   theta = (upper_bound + lower_bound)/2;
   delta = (upper_bound - lower_bound)/2;
   sigma = theta/delta;
/* set v = f */
   hypre_ParVectorCopy(f, v);
/* get residual: v = f-A*u */
   hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, v);
/* p_0*/
   p_k = 1/sigma;
/*first order */
   temp1 = 1/theta;
/*d_0* = 1/theta * SCALE*r_0 */
/* x_1 = x_0 + d_0 */
/* NEW PART*/
/* z = A*v */
   hypre_ParCSRMatrixMatvec(1.0, A, v, 0.0, Ztemp);
/* v = v - Ztemp/u.b. */
   scale = -1.0/upper_bound;
   hypre_ParVectorAxpy(scale, Ztemp, v);
/* END NEW */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,scale) schedule(static)
#endif
   for (i = 0; i < num_rows; i++)
   {
      /* diag hard-wired to 1 here — diagonal scaling disabled */
      diag = 1;
      scale = temp1/diag;
      dk[i] = scale*v_data[i];
      u_data[i] += dk[i];
   }
/* higher order */
   for (j = 1; j < order; j++)
   {
      /* get residual: v = f-A*u */
      hypre_ParVectorCopy(f, v);
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, v);
      /* three-term Chebyshev recurrence coefficients */
      p_kp1 = 1.0/(2.0*sigma - p_k);
      temp1 = p_kp1*p_k;
      temp2 = 2.0*p_kp1/delta;
/* NEW PART*/
/* still do jacobi */
/* z = A*v */
      hypre_ParCSRMatrixMatvec(1.0, A, v, 0.0, Ztemp);
/* v = v - Ztemp/u.b. */
      scale = -1.0/upper_bound;
      hypre_ParVectorAxpy(scale, Ztemp, v);
/* END NEW */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,scale) schedule(static)
#endif
      for (i = 0; i < num_rows; i++)
      {
         diag = 1;
         scale = temp2/diag;
         dk[i] = temp1*dk[i] + scale*v_data[i];
         u_data[i] += dk[i];
      }
      p_k = p_kp1;
   }
   hypre_ParVectorDestroy(Ztemp);
   return hypre_error_flag;
}
#endif
/*------------------------------------------------------------------------
theta = a_ii /sum off_d((a_ij))
we want the min.
*--------------------------------------------------------------------------*/
/*
 * Estimates theta = min over local rows of a_ii / sum_j |a_ij|, where the
 * sum runs over the (processor) off-diagonal part of row i.  Rows with
 * positive off-diagonal entries are printed as diagnostics, as is the
 * row achieving the minimum.  The estimate is returned in *theta_est.
 *
 * Fix: `diag` was read uninitialized (ratio = diag/offd_sum) for any
 * row whose diag block has no stored diagonal entry, and otherwise
 * carried a stale value from a previous row; it is now reset per row.
 *
 * Returns hypre_error_flag.
 */
int hypre_ParCSRComputeTheta(hypre_ParCSRMatrix *A,
                             double *theta_est)
{
   int i, j;
   int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   int *A_diag_I = hypre_CSRMatrixI(A_diag);
   int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   int *A_offd_I = hypre_CSRMatrixI(A_offd);
   int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   double *A_offd_data = hypre_CSRMatrixData(A_offd);
   int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   double diag, offd_sum;
   double theta, ratio;
   int min_row = 0;
   int my_id;

   MPI_Comm_rank(MPI_COMM_WORLD,&my_id);
   theta = 1e9;
   for (i = 0; i < num_rows; i++)
   {
      /* reset per row so a missing diagonal entry cannot leave diag
         uninitialized or stale from the previous row */
      diag = 0.0;
      /* get the diag element of the ith row */
      for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
      {
         if (A_diag_J[j] == i)
         {
            diag = A_diag_data[j];
            /* break; */ /* no break: keep scanning to report positive entries */
         }
         else
         {
            /* diagnostic: flag positive off-diagonal entries (diag block) */
            if (A_diag_data[j] > 0.0)
            {
               printf("MYID = %d, row = %d, DIAG_col = %d, val = %g \n", my_id, i, A_diag_J[j], A_diag_data[j]);
            }
         }
      }
      /* get the offd part of the ith row */
      offd_sum = 0.0;
      if (num_cols_offd )
      {
         for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
         {
            offd_sum += fabs(A_offd_data[j]);
            /* diagnostic: flag positive off-diagonal entries (offd block) */
            if (A_offd_data[j] > 0.0)
            {
               printf("MYID = %d, row = %d, OFFD_col = %d, val = %g \n", my_id, i, A_offd_J[j], A_offd_data[j]);
            }
         }
      }
      if (offd_sum > 0.0)
      {
         ratio = diag/offd_sum;
         theta = hypre_min(theta, ratio);
         if (theta == ratio)
            min_row = i;
      }
   }
   printf("MYID = %d, Min Row = %d\n",my_id, min_row);
   *theta_est = theta;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms Threads
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing" (with or without CF)
*--------------------------------------------------------------------------*/
/* Computes per-row l1 (or squared-l2) norms of A according to `option`
   (see banner above).  If cf_marker is given, options 1/2/4 restrict the
   sums to entries whose column has the same C/F flag as the row.
   The newly allocated norm array is returned via *l1_norm_ptr; caller
   frees it.  Returns hypre_error_flag (set to an arg error if any norm
   comes out (near) zero). */
int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
                               int option,
                               int *cf_marker,
                               double **l1_norm_ptr)
{
   int i, j;
   int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   int *A_diag_I = hypre_CSRMatrixI(A_diag);
   int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   int *A_offd_I = hypre_CSRMatrixI(A_offd);
   int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   double *A_offd_data = hypre_CSRMatrixData(A_offd);
   int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   double *l1_norm = hypre_CTAlloc(double, num_rows);
   int *cf_marker_offd = NULL;
   int cf_diag;
   double diag;
   if (cf_marker != NULL)
   {
      /*-------------------------------------------------------------------
       * Get the CF_marker data for the off-processor columns of A
       *-------------------------------------------------------------------*/
      int index;
      int num_sends;
      int start;
      int *int_buf_data = NULL;
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;
      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(int, num_cols_offd);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
      /* pack the local CF flags this proc must send to its neighbors */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data);
   }
   if (option == 1)
   {
      /* full l1 norm of each row (optionally CF-restricted) */
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               if (cf_diag == cf_marker[A_diag_J[j]])
                  l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 2)
   {
      /* |a_ii| plus the l1 norm of the off-processor part of the row.
         NOTE(review): assumes the diagonal entry is stored first in
         each diag-block row (standard hypre layout) — A_diag_I[i]
         indexes it directly. */
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row */
         l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 3)
   {
      /* squared l2 norm of each row (cf_marker ignored) */
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
            l1_norm[i] += A_diag_data[j] * A_diag_data[j];
         if (num_cols_offd)
            for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
               l1_norm[i] += A_offd_data[j] * A_offd_data[j];
      }
   }
   else if (option == 4)
   {
      /* like option 2 but off-diagonal contributions are halved, then
         truncated to |a_ii| per Remark 6.2 of the referenced paper */
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row */
         diag = l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the scaled l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the scaled CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }
         /* Truncate according to Remark 6.2 */
         if (l1_norm[i] <= 4.0/3.0*diag)
            l1_norm[i] = diag;
      }
   }
   /* Handle negative definite matrices: flip the sign when a_ii < 0 so
      the smoother divides by a value with the diagonal's sign */
   for (i = 0; i < num_rows; i++)
      if (A_diag_data[A_diag_I[i]] < 0)
         l1_norm[i] = -l1_norm[i];
   /* a (near-)zero norm would make the relaxation divide by zero */
   for (i = 0; i < num_rows; i++)
      if (fabs(l1_norm[i]) < DBL_EPSILON)
      {
         hypre_error_in_arg(1);
         break;
      }
   hypre_TFree(cf_marker_offd);
   *l1_norm_ptr = l1_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms Threads
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing" (with or without CF)
*--------------------------------------------------------------------------*/
/* Threaded variant of hypre_ParCSRComputeL1Norms: rows are blocked over
   num_threads via the hypre SMP for-loop include, and for options 2/4
   entries outside the thread's own row block are treated like
   off-processor entries.  *l1_norm_ptr receives a newly allocated
   array; caller frees it.  Returns hypre_error_flag. */
int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
                                      int option,
                                      int num_threads,
                                      int *cf_marker,
                                      double **l1_norm_ptr)
{
   int i, j, k;
   int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   int *A_diag_I = hypre_CSRMatrixI(A_diag);
   int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   int *A_offd_I = hypre_CSRMatrixI(A_offd);
   int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   double *A_offd_data = hypre_CSRMatrixData(A_offd);
   int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   double *l1_norm = hypre_CTAlloc(double, num_rows);
   int ii, ns, ne, rest, size;
   double res;
   int *cf_marker_offd = NULL;
   int cf_diag;
   double diag;
   if (cf_marker != NULL)
   {
      /*-------------------------------------------------------------------
       * Get the CF_marker data for the off-processor columns of A
       *-------------------------------------------------------------------*/
      int index;
      int num_sends;
      int start;
      int *int_buf_data = NULL;
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;
      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(int, num_cols_offd);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
      /* pack the local CF flags this proc must send to its neighbors */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data);
   }
/* the include below expands into the parallel-for header for the k loop;
   all listed variables are thread-private */
#define HYPRE_SMP_PRIVATE i,ii,j,k,ns,ne,res,rest,size,cf_diag,diag
#include "../utilities/hypre_smp_forloop.h"
   for (k = 0; k < num_threads; k++)
   {
      /* [ns, ne) is this thread's contiguous block of rows; the first
         `rest` threads get one extra row */
      size = num_rows/num_threads;
      rest = num_rows - size*num_threads;
      if (k < rest)
      {
         ns = k*size+k;
         ne = (k+1)*size+k+1;
      }
      else
      {
         ns = k*size+rest;
         ne = (k+1)*size+rest;
      }
      if (option == 1)
      {
         /* full l1 norm of each row (optionally CF-restricted) */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the CF l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  if (cf_diag == cf_marker[A_diag_J[j]])
                     l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 2)
      {
         /* diagonal plus entries outside this thread's row block plus
            the off-processor part */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 3)
      {
         /* squared l2 norm of each row (cf_marker ignored) */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += A_diag_data[j] * A_diag_data[j];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += A_offd_data[j] * A_offd_data[j];
         }
      }
      else if (option == 4)
      {
         /* like option 2 but off-diagonal contributions halved, then
            truncated to the diagonal per Remark 6.2.
            NOTE(review): `diag` is only assigned when the row's stored
            diagonal entry (ii == i) is visited — if a row lacked one (or
            the CF filter excluded it) the truncation below would read a
            stale/uninitialized value; verify the matrix always stores
            its diagonal first. */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            /* Truncate according to Remark 6.2 */
            if (l1_norm[i] <= 4.0/3.0*diag)
               l1_norm[i] = diag;
         }
      }
      /* Handle negative definite matrices: flip sign when a_ii < 0 */
      for (i = ns; i < ne; i++)
         if (A_diag_data[A_diag_I[i]] < 0)
            l1_norm[i] = -l1_norm[i];
      /* a (near-)zero norm would make the relaxation divide by zero */
      for (i = ns; i < ne; i++)
         if (fabs(l1_norm[i]) < DBL_EPSILON)
         {
            hypre_error_in_arg(1);
            break;
         }
   }
   hypre_TFree(cf_marker_offd);
   *l1_norm_ptr = l1_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax_L1 (Symm GS / SSOR)
*--------------------------------------------------------------------------*/
/* l1-scaled symmetric hybrid Gauss-Seidel / SSOR smoother: each thread
   does a forward then a backward GS sweep over its own row block,
   treating off-block and off-processor entries Jacobi-style (using the
   pre-sweep copy of u held in Ztemp).  l1_norms[i] replaces the
   diagonal in the update.  With relax_weight == omega == 1 the plain
   sweeps are used; otherwise the weighted SSOR form with
   c1 = omega*relax_weight, c2 = omega*(1-relax_weight) is applied.
   Returns the relaxation error flag (always 0 here). */
int hypre_ParCSRRelax_L1( hypre_ParCSRMatrix *A,
                          hypre_ParVector *f,
                          double relax_weight,
                          double omega,
                          double *l1_norms,
                          hypre_ParVector *u,
                          hypre_ParVector *Vtemp,
                          hypre_ParVector *Ztemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   int *A_diag_i = hypre_CSRMatrixI(A_diag);
   int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   int *A_offd_i = hypre_CSRMatrixI(A_offd);
   double *A_offd_data = hypre_CSRMatrixData(A_offd);
   int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   int n = hypre_CSRMatrixNumRows(A_diag);
   int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   double *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   double *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   double *Vtemp_data = hypre_VectorData(Vtemp_local);
   /* Vext_data/v_buf_data are only allocated (and only read) when
      num_procs > 1; with one proc the offd part is assumed empty */
   double *Vext_data;
   double *v_buf_data;
   double *tmp_data;
   int i, j;
   int ii, jj;
   int ns, ne, size, rest;
   int relax_error = 0;
   int num_sends;
   int index, start;
   int num_procs, num_threads, my_id ;
   double zero = 0.0;
   double res, res2;
   hypre_Vector *Ztemp_local;
   double *Ztemp_data;
   MPI_Comm_size(comm,&num_procs);
   MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();
   /*-----------------------------------------------------------------
    * Exchange boundary values of u with neighboring processors;
    * Vext_data receives the off-processor entries of u.
    *-----------------------------------------------------------------*/
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      v_buf_data = hypre_CTAlloc(double,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
      Vext_data = hypre_CTAlloc(double,num_cols_offd);
      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }
      /* pack the u entries this proc must send */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
            v_buf_data[index++]
               = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
                                                  Vext_data);
      /*-----------------------------------------------------------------
       * Wait for the exchange to complete before relaxing.
       *-----------------------------------------------------------------*/
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }
   /*-----------------------------------------------------------------
    * Relax all points.
    *-----------------------------------------------------------------*/
   Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
   Ztemp_data = hypre_VectorData(Ztemp_local);
   if (relax_weight == 1 && omega == 1)
   {
      /* unweighted case: plain forward + backward l1-GS sweeps */
      /*tmp_data = hypre_CTAlloc(double,n);*/
      tmp_data = Ztemp_data;
/* snapshot u so off-thread-block entries use pre-sweep values */
#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"
      for (i = 0; i < n; i++)
         tmp_data[i] = u_data[i];
#define HYPRE_SMP_PRIVATE i,ii,j,jj,ns,ne,res,rest,size
#include "../utilities/hypre_smp_forloop.h"
      for (j = 0; j < num_threads; j++)
      {
         /* [ns, ne) is this thread's contiguous block of rows */
         size = n/num_threads;
         rest = n - size*num_threads;
         if (j < rest)
         {
            ns = j*size+j;
            ne = (j+1)*size+j+1;
         }
         else
         {
            ns = j*size+rest;
            ne = (j+1)*size+rest;
         }
         for (i = ns; i < ne; i++)	/* interior points first */
         {
            /*-----------------------------------------------------------
             * If diagonal is nonzero, relax point i; otherwise, skip it.
             *-----------------------------------------------------------*/
            if ( A_diag_data[A_diag_i[i]] != zero)
            {
               res = f_data[i];
               for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
               {
                  ii = A_diag_j[jj];
                  if (ii >= ns && ii < ne)
                  {
                     /* in-block: Gauss-Seidel, use latest u */
                     res -= A_diag_data[jj] * u_data[ii];
                  }
                  else
                     /* off-block: Jacobi, use pre-sweep snapshot */
                     res -= A_diag_data[jj] * tmp_data[ii];
               }
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  ii = A_offd_j[jj];
                  res -= A_offd_data[jj] * Vext_data[ii];
               }
               u_data[i] += res / l1_norms[i];
            }
         }
         /* backward sweep over the same block */
         for (i = ne-1; i > ns-1; i--)	/* interior points first */
         {
            /*-----------------------------------------------------------
             * If diagonal is nonzero, relax point i; otherwise, skip it.
             *-----------------------------------------------------------*/
            if ( A_diag_data[A_diag_i[i]] != zero)
            {
               res = f_data[i];
               for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
               {
                  ii = A_diag_j[jj];
                  if (ii >= ns && ii < ne)
                  {
                     res -= A_diag_data[jj] * u_data[ii];
                  }
                  else
                     res -= A_diag_data[jj] * tmp_data[ii];
               }
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  ii = A_offd_j[jj];
                  res -= A_offd_data[jj] * Vext_data[ii];
               }
               u_data[i] += res / l1_norms[i];
            }
         }
      }
   }
   else
   {
      /* weighted SSOR form: res gets the full residual, res2 the
         already-updated-neighbor correction term */
      double c1 = omega*relax_weight;
      double c2 = omega*(1.0-relax_weight);
      /* tmp_data = hypre_CTAlloc(double,n); */
      tmp_data = Ztemp_data;
#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"
      for (i = 0; i < n; i++)
      {
         tmp_data[i] = u_data[i];
      }
#define HYPRE_SMP_PRIVATE i,ii,j,jj,ns,ne,res,rest,size
#include "../utilities/hypre_smp_forloop.h"
      for (j = 0; j < num_threads; j++)
      {
         size = n/num_threads;
         rest = n - size*num_threads;
         if (j < rest)
         {
            ns = j*size+j;
            ne = (j+1)*size+j+1;
         }
         else
         {
            ns = j*size+rest;
            ne = (j+1)*size+rest;
         }
         for (i = ns; i < ne; i++)	/* interior points first */
         {
            /*-----------------------------------------------------------
             * If diagonal is nonzero, relax point i; otherwise, skip it.
             *-----------------------------------------------------------*/
            if ( A_diag_data[A_diag_i[i]] != zero)
            {
               res2 = 0.0;
               res = f_data[i];
               /* Vtemp keeps the value of u at sweep start for res2 */
               Vtemp_data[i] = u_data[i];
               for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
               {
                  ii = A_diag_j[jj];
                  if (ii >= ns && ii < ne)
                  {
                     res -= A_diag_data[jj] * u_data[ii];
                     if (ii < i)
                        /* correction from already-updated points */
                        res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
                  }
                  else
                     res -= A_diag_data[jj] * tmp_data[ii];
               }
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  ii = A_offd_j[jj];
                  res -= A_offd_data[jj] * Vext_data[ii];
               }
               u_data[i] += (c1*res + c2*res2) / l1_norms[i];
            }
         }
         /* backward sweep; res2 now collects ii > i corrections */
         for (i = ne-1; i > ns-1; i--)	/* interior points first */
         {
            /*-----------------------------------------------------------
             * If diagonal is nonzero, relax point i; otherwise, skip it.
             *-----------------------------------------------------------*/
            if ( A_diag_data[A_diag_i[i]] != zero)
            {
               res2 = 0.0;
               res = f_data[i];
               for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
               {
                  ii = A_diag_j[jj];
                  if (ii >= ns && ii < ne)
                  {
                     res -= A_diag_data[jj] * u_data[ii];
                     if (ii > i)
                        res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
                  }
                  else
                     res -= A_diag_data[jj] * tmp_data[ii];
               }
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  ii = A_offd_j[jj];
                  res -= A_offd_data[jj] * Vext_data[ii];
               }
               u_data[i] += (c1*res + c2*res2) / l1_norms[i];
            }
         }
      }
   }
   if (num_procs > 1)
   {
      hypre_TFree(Vext_data);
      hypre_TFree(v_buf_data);
   }
   return(relax_error);
}
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax_L1_GS (GS / SOR) (NOT SYM)
*--------------------------------------------------------------------------*/
int hypre_ParCSRRelax_L1_GS( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
double relax_weight,
double omega,
double *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
int *A_diag_i = hypre_CSRMatrixI(A_diag);
int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
int *A_offd_i = hypre_CSRMatrixI(A_offd);
double *A_offd_data = hypre_CSRMatrixData(A_offd);
int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
int n = hypre_CSRMatrixNumRows(A_diag);
int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
double *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
double *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
double *Vtemp_data = hypre_VectorData(Vtemp_local);
double *Vext_data;
double *v_buf_data;
double *tmp_data;
int i, j;
int ii, jj;
int ns, ne, size, rest;
int relax_error = 0;
int num_sends;
int index, start;
int num_procs, num_threads, my_id ;
double zero = 0.0;
double res, res2;
hypre_Vector *Ztemp_local;
double *Ztemp_data;
MPI_Comm_size(comm,&num_procs);
MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(double,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
Vext_data = hypre_CTAlloc(double,num_cols_offd);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax all points.
*-----------------------------------------------------------------*/
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
if (relax_weight == 1 && omega == 1)
{
/*tmp_data = hypre_CTAlloc(double,n);*/
tmp_data = Ztemp_data;
#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#define HYPRE_SMP_PRIVATE i,ii,j,jj,ns,ne,res,rest,size
#include "../utilities/hypre_smp_forloop.h"
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
double c1 = omega*relax_weight;
double c2 = omega*(1.0-relax_weight);
/* tmp_data = hypre_CTAlloc(double,n); */
tmp_data = Ztemp_data;
#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"
for (i = 0; i < n; i++)
{
tmp_data[i] = u_data[i];
}
#define HYPRE_SMP_PRIVATE i,ii,j,jj,ns,ne,res,rest,size
#include "../utilities/hypre_smp_forloop.h"
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
* If diagonal is nonzero, relax point i; otherwise, skip it.
*-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res2 = 0.0;
res = f_data[i];
Vtemp_data[i] = u_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
if (ii < i)
res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (c1*res + c2*res2) / l1_norms[i];
}
}
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data);
hypre_TFree(v_buf_data);
}
return(relax_error);
}
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax_L1_Jacobi (allows CF)
u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1
*--------------------------------------------------------------------------*/
/*
 * L1-scaled Jacobi relaxation, optionally restricted to C or F points:
 *
 *    u_i += relax_weight * (f - A u)_i / l1_norms[i]
 *
 * A            - parallel CSR matrix
 * f            - right-hand side vector
 * cf_marker    - C/F marker per local row, or NULL to relax every row
 * relax_points - 0 (or cf_marker == NULL) relaxes all rows; otherwise only
 *                rows with cf_marker[i] == relax_points are relaxed
 * relax_weight - Jacobi damping factor
 * l1_norms     - per-row scaling supplied by the caller (per the file
 *                comment, D_ii = ||A(i,:)||_1); must be nonzero for any
 *                row that is relaxed
 * u            - current approximation, updated in place
 * Vtemp        - workspace; receives a copy of u (the Jacobi snapshot)
 *
 * Always returns 0.
 */
int hypre_ParCSRRelax_L1_Jacobi( hypre_ParCSRMatrix *A,
                                 hypre_ParVector *f,
                                 int *cf_marker,
                                 int relax_points,
                                 double relax_weight,
                                 double *l1_norms,
                                 hypre_ParVector *u,
                                 hypre_ParVector *Vtemp )
{
   /* Local views of the matrix: on-process (diag) and off-process (offd)
      parts, both in CSR form. */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   int *A_diag_i = hypre_CSRMatrixI(A_diag);
   int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   int *A_offd_i = hypre_CSRMatrixI(A_offd);
   double *A_offd_data = hypre_CSRMatrixData(A_offd);
   int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   int n = hypre_CSRMatrixNumRows(A_diag);
   int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   /* Raw data pointers of the distributed vectors' local parts. */
   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   double *u_data = hypre_VectorData(u_local);
   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   double *f_data = hypre_VectorData(f_local);
   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   double *Vtemp_data = hypre_VectorData(Vtemp_local);
   double *Vext_data;   /* received exterior (off-process) values of u */
   double *v_buf_data;  /* send buffer for the halo exchange */
   int i, j;
   int ii, jj;
   int num_sends;
   int index, start;
   int num_procs, my_id ;
   double zero = 0.0;
   double res;
   MPI_Comm_size(comm,&num_procs);
   MPI_Comm_rank(comm,&my_id);
   /* Start the halo exchange: pack the owned entries of u that neighbor
      ranks need and begin the non-blocking exchange into Vext_data. */
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      /* SendMapStart(comm_pkg, num_sends) is the total number of entries
         to be sent across all neighbors. */
      v_buf_data = hypre_CTAlloc(double,
                        hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
      Vext_data = hypre_CTAlloc(double,num_cols_offd);
      if (num_cols_offd)
      {
         /* Re-fetch the off-diagonal arrays (defensive; already set above). */
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            v_buf_data[index++]
               = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
                                                  Vext_data);
   }
   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/
#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
   }
   /* Complete the halo exchange (overlapped with the copy above). */
   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }
   /*-----------------------------------------------------------------
    * Relax all points.
    *-----------------------------------------------------------------*/
   if (relax_points == 0 || cf_marker == NULL)
   {
#define HYPRE_SMP_PRIVATE i,ii,jj,res
#include "../utilities/hypre_smp_forloop.h"
      for (i = 0; i < n; i++)
      {
         /*-----------------------------------------------------------
          * If diagonal is nonzero, relax point i; otherwise, skip it.
          *-----------------------------------------------------------*/
         if (A_diag_data[A_diag_i[i]] != zero)
         {
            /* res = f_i - (A u)_i, read entirely from the Vtemp/Vext
               snapshot so the sweep is a true (simultaneous) Jacobi update. */
            res = f_data[i];
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               ii = A_diag_j[jj];
               res -= A_diag_data[jj] * Vtemp_data[ii];
            }
            for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               ii = A_offd_j[jj];
               res -= A_offd_data[jj] * Vext_data[ii];
            }
            u_data[i] += (relax_weight*res)/l1_norms[i];
         }
      }
   }
   /*-----------------------------------------------------------------
    * Relax only C or F points as determined by relax_points.
    *-----------------------------------------------------------------*/
   else
   {
#define HYPRE_SMP_PRIVATE i,ii,jj,res
#include "../utilities/hypre_smp_forloop.h"
      for (i = 0; i < n; i++)
      {
         /*-----------------------------------------------------------
          * If i is of the right type ( C or F ) and diagonal is
          * nonzero, relax point i; otherwise, skip it.
          *-----------------------------------------------------------*/
         if (cf_marker[i] == relax_points
               && A_diag_data[A_diag_i[i]] != zero)
         {
            res = f_data[i];
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               ii = A_diag_j[jj];
               res -= A_diag_data[jj] * Vtemp_data[ii];
            }
            for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               ii = A_offd_j[jj];
               res -= A_offd_data[jj] * Vext_data[ii];
            }
            u_data[i] += (relax_weight * res)/l1_norms[i];
         }
      }
   }
   /* Free the halo-exchange buffers. */
   if (num_procs > 1)
   {
      hypre_TFree(Vext_data);
      hypre_TFree(v_buf_data);
   }
   return 0;
}
|
mathutil.h | /*-
* Copyright (c) 2012-2017 Ilya Kaliman
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef LIBEFP_MATH_UTIL_H
#define LIBEFP_MATH_UTIL_H
#include <math.h>
#include <string.h>
#define PI 3.14159265358979323846
#define EPSILON 1.0e-8
#define VEC(x) ((vec_t *)(&(x)))
#define CVEC(x) ((const vec_t *)(&(x)))
typedef struct {
double x, y, z;
} vec_t;
typedef struct {
double x, y, z, a, b, c;
} six_t;
typedef struct {
double xx, xy, xz, yx, yy, yz, zx, zy, zz;
} mat_t;
typedef struct {
double xxx, xxy, xxz, xyx, xyy, xyz, xzx, xzy, xzz,
yxx, yxy, yxz, yyx, yyy, yyz, yzx, yzy, yzz,
zxx, zxy, zxz, zyx, zyy, zyz, zzx, zzy, zzz;
} t3_t;
static const vec_t vec_zero = { 0.0, 0.0, 0.0 };
static const six_t six_zero = { 0.0, 0.0, 0.0,
0.0, 0.0, 0.0 };
static const mat_t mat_zero = { 0.0, 0.0, 0.0,
0.0, 0.0, 0.0,
0.0, 0.0, 0.0 };
static const mat_t mat_identity = { 1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0 };
/* Approximate floating-point equality: true when |a - b| < EPSILON. */
static inline int
eq(double a, double b)
{
	const double diff = a - b;
	return fabs(diff) < EPSILON;
}
/* Component idx of vec (0 = x, 1 = y, 2 = z).
 * Relies on vec_t being laid out as three consecutive doubles. */
static inline double
vec_get(const vec_t *vec, size_t idx)
{
	return ((const double *)vec)[idx];
}
/* Set component idx of vec (0 = x, 1 = y, 2 = z) to val. */
static inline void
vec_set(vec_t *vec, size_t idx, double val)
{
	((double *)vec)[idx] = val;
}
/* Negate vec in place: vec <- -vec. */
static inline void
vec_negate(vec_t *vec)
{
	double *comp = (double *)vec;
	for (size_t k = 0; k < 3; k++)
		comp[k] = -comp[k];
}
/* Scale vec in place by the scalar s. */
static inline void
vec_scale(vec_t *vec, double s)
{
	double *comp = (double *)vec;
	for (size_t k = 0; k < 3; k++)
		comp[k] *= s;
}
/* Dot product a . b. */
static inline double
vec_dot(const vec_t *a, const vec_t *b)
{
	return a->x * b->x + a->y * b->y + a->z * b->z;
}
/* Right-handed cross product a x b. */
static inline vec_t
vec_cross(const vec_t *a, const vec_t *b)
{
	vec_t c = {
		a->y * b->z - a->z * b->y,
		a->z * b->x - a->x * b->z,
		a->x * b->y - a->y * b->x
	};
	return c;
}
/* a += b with each component updated under its own OpenMP atomic.
 * Note: the three updates are individually atomic, not atomic as a group;
 * a concurrent reader may observe a partially applied addition. */
static inline void
vec_atomic_add(vec_t *a, const vec_t *b)
{
#ifdef _OPENMP
#pragma omp atomic
#endif
	a->x += b->x;
#ifdef _OPENMP
#pragma omp atomic
#endif
	a->y += b->y;
#ifdef _OPENMP
#pragma omp atomic
#endif
	a->z += b->z;
}
/* a -= b, component-wise OpenMP atomics; same partial-visibility caveat
 * as vec_atomic_add. */
static inline void
vec_atomic_sub(vec_t *a, const vec_t *b)
{
#ifdef _OPENMP
#pragma omp atomic
#endif
	a->x -= b->x;
#ifdef _OPENMP
#pragma omp atomic
#endif
	a->y -= b->y;
#ifdef _OPENMP
#pragma omp atomic
#endif
	a->z -= b->z;
}
/* Atomically add a to the (x,y,z) half of six.
 * Relies on six_t being laid out as two consecutive vec_t-sized triples. */
static inline void
six_atomic_add_xyz(six_t *six, const vec_t *a)
{
	vec_atomic_add((vec_t *)six, a);
}
/* Atomically add a to the (a,b,c) half of six; ((vec_t *)six) + 1
 * addresses the second double-triple of six_t. */
static inline void
six_atomic_add_abc(six_t *six, const vec_t *a)
{
	vec_atomic_add(((vec_t *)six) + 1, a);
}
/* Atomically subtract a from the (x,y,z) half of six. */
static inline void
six_atomic_sub_xyz(six_t *six, const vec_t *a)
{
	vec_atomic_sub((vec_t *)six, a);
}
/* Atomically subtract a from the (a,b,c) half of six. */
static inline void
six_atomic_sub_abc(six_t *six, const vec_t *a)
{
	vec_atomic_sub(((vec_t *)six) + 1, a);
}
/* Component-wise sum a + b. */
static inline vec_t
vec_add(const vec_t *a, const vec_t *b)
{
	vec_t sum = {
		a->x + b->x,
		a->y + b->y,
		a->z + b->z
	};
	return sum;
}
/* Component-wise difference a - b. */
static inline vec_t
vec_sub(const vec_t *a, const vec_t *b)
{
	vec_t diff = {
		a->x - b->x,
		a->y - b->y,
		a->z - b->z
	};
	return diff;
}
/* Squared Euclidean length of a. */
static inline double
vec_len_2(const vec_t *a)
{
	return vec_dot(a, a);
}
/* Euclidean length of a. */
static inline double
vec_len(const vec_t *a)
{
	return sqrt(vec_len_2(a));
}
/* Scale vec to unit length.
 * No guard for a zero-length input: components become inf/NaN. */
static inline void
vec_normalize(vec_t *vec)
{
	const double norm = vec_len(vec);
	double *comp = (double *)vec;
	for (size_t k = 0; k < 3; k++)
		comp[k] /= norm;
}
/* Squared distance between points a and b. */
static inline double
vec_dist_2(const vec_t *a, const vec_t *b)
{
	vec_t dr = vec_sub(a, b);
	return vec_len_2(&dr);
}
/* Euclidean distance between points a and b. */
static inline double
vec_dist(const vec_t *a, const vec_t *b)
{
	return sqrt(vec_dist_2(a, b));
}
/* Angle in [0, pi] between a and b.  atan2(|a x b|, a . b) is more robust
 * near 0 and pi than acos of the normalized dot product. */
static inline double
vec_angle(const vec_t *a, const vec_t *b)
{
	double dot = vec_dot(a, b);
	vec_t cross = vec_cross(a, b);
	return atan2(vec_len(&cross), dot);
}
/* Element (a1, a2) of mat, 0-based; mat_t is nine row-major doubles. */
static inline double
mat_get(const mat_t *mat, size_t a1, size_t a2)
{
	return ((const double *)mat)[3 * a1 + a2];
}
/* Set element (a1, a2) of mat, 0-based row-major. */
static inline void
mat_set(mat_t *mat, size_t a1, size_t a2, double val)
{
	((double *)mat)[3 * a1 + a2] = val;
}
/* Matrix-vector product mat * vec. */
static inline vec_t
mat_vec(const mat_t *mat, const vec_t *vec)
{
	vec_t out = { mat->xx * vec->x + mat->xy * vec->y + mat->xz * vec->z,
		      mat->yx * vec->x + mat->yy * vec->y + mat->yz * vec->z,
		      mat->zx * vec->x + mat->zy * vec->y + mat->zz * vec->z };
	return out;
}
/* Returns transpose(mat) * vec without forming the transpose. */
static inline vec_t
mat_trans_vec(const mat_t *mat, const vec_t *vec)
{
	vec_t out = { mat->xx * vec->x + mat->yx * vec->y + mat->zx * vec->z,
		      mat->xy * vec->x + mat->yy * vec->y + mat->zy * vec->z,
		      mat->xz * vec->x + mat->yz * vec->y + mat->zz * vec->z };
	return out;
}
/* Matrix product m1 * m2. */
static inline mat_t
mat_mat(const mat_t *m1, const mat_t *m2)
{
	mat_t out = { m1->xx * m2->xx + m1->xy * m2->yx + m1->xz * m2->zx,
		      m1->xx * m2->xy + m1->xy * m2->yy + m1->xz * m2->zy,
		      m1->xx * m2->xz + m1->xy * m2->yz + m1->xz * m2->zz,
		      m1->yx * m2->xx + m1->yy * m2->yx + m1->yz * m2->zx,
		      m1->yx * m2->xy + m1->yy * m2->yy + m1->yz * m2->zy,
		      m1->yx * m2->xz + m1->yy * m2->yz + m1->yz * m2->zz,
		      m1->zx * m2->xx + m1->zy * m2->yx + m1->zz * m2->zx,
		      m1->zx * m2->xy + m1->zy * m2->yy + m1->zz * m2->zy,
		      m1->zx * m2->xz + m1->zy * m2->yz + m1->zz * m2->zz };
	return out;
}
/* Returns transpose(m1) * m2 without forming the transpose. */
static inline mat_t
mat_trans_mat(const mat_t *m1, const mat_t *m2)
{
	mat_t out = { m1->xx * m2->xx + m1->yx * m2->yx + m1->zx * m2->zx,
		      m1->xx * m2->xy + m1->yx * m2->yy + m1->zx * m2->zy,
		      m1->xx * m2->xz + m1->yx * m2->yz + m1->zx * m2->zz,
		      m1->xy * m2->xx + m1->yy * m2->yx + m1->zy * m2->zx,
		      m1->xy * m2->xy + m1->yy * m2->yy + m1->zy * m2->zy,
		      m1->xy * m2->xz + m1->yy * m2->yz + m1->zy * m2->zz,
		      m1->xz * m2->xx + m1->yz * m2->yx + m1->zz * m2->zx,
		      m1->xz * m2->xy + m1->yz * m2->yy + m1->zz * m2->zy,
		      m1->xz * m2->xz + m1->yz * m2->yz + m1->zz * m2->zz };
	return out;
}
/* Negate every element of mat in place. */
static inline void
mat_negate(mat_t *mat)
{
	double *elem = (double *)mat;
	for (size_t k = 0; k < 9; k++)
		elem[k] = -elem[k];
}
/* Determinant of mat (rule of Sarrus expansion). */
static inline double
mat_det(const mat_t *mat)
{
	return mat->xx * mat->yy * mat->zz +
	       mat->xy * mat->zx * mat->yz +
	       mat->yx * mat->xz * mat->zy -
	       mat->xz * mat->yy * mat->zx -
	       mat->zy * mat->yz * mat->xx -
	       mat->yx * mat->xy * mat->zz;
}
/* Transpose of mat. */
static inline mat_t
mat_transpose(const mat_t *mat)
{
	mat_t out = { mat->xx, mat->yx, mat->zx,
		      mat->xy, mat->yy, mat->zy,
		      mat->xz, mat->yz, mat->zz };
	return out;
}
/* Inverse of mat via the adjugate scaled by 1/det.
 * No singularity check: a (near-)singular input yields inf/NaN entries. */
static inline mat_t
mat_inv(const mat_t *mat)
{
	double inv_det = 1.0 / mat_det(mat);
	mat_t out = { inv_det * (mat->yy * mat->zz - mat->yz * mat->zy),
		      inv_det * (mat->zy * mat->xz - mat->zz * mat->xy),
		      inv_det * (mat->xy * mat->yz - mat->xz * mat->yy),
		      inv_det * (mat->yz * mat->zx - mat->yx * mat->zz),
		      inv_det * (mat->zz * mat->xx - mat->zx * mat->xz),
		      inv_det * (mat->xz * mat->yx - mat->xx * mat->yz),
		      inv_det * (mat->yx * mat->zy - mat->yy * mat->zx),
		      inv_det * (mat->zx * mat->xy - mat->zy * mat->xx),
		      inv_det * (mat->xx * mat->yy - mat->xy * mat->yx) };
	return out;
}
/* Build rotation matrix out from Euler angles a, b, c (radians).
 * NOTE(review): the zz = cos(b) / sin(b)-off-diagonal pattern matches the
 * classical z-x-z Euler convention -- confirm against callers. */
static inline void
euler_to_matrix(double a, double b, double c, mat_t *out)
{
	double sina = sin(a), cosa = cos(a);
	double sinb = sin(b), cosb = cos(b);
	double sinc = sin(c), cosc = cos(c);
	out->xx = cosa * cosc - sina * cosb * sinc;
	out->xy = -cosa * sinc - sina * cosb * cosc;
	out->xz = sinb * sina;
	out->yx = sina * cosc + cosa * cosb * sinc;
	out->yy = -sina * sinc + cosa * cosb * cosc;
	out->yz = -sinb * cosa;
	out->zx = sinb * sinc;
	out->zy = sinb * cosc;
	out->zz = cosb;
}
/* Recover Euler angles (ea, eb, ec) from rotation matrix rotmat;
 * inverse of euler_to_matrix.  When sin(b) is ~0 (gimbal lock) the a/c
 * split is degenerate, so c is fixed at 0 and a absorbs the rotation. */
static inline void
matrix_to_euler(const mat_t *rotmat, double *ea, double *eb, double *ec)
{
	double a, b, c, sinb;
	b = acos(rotmat->zz);
	sinb = sqrt(1.0 - rotmat->zz * rotmat->zz);
	if (fabs(sinb) < 1.0e-7) {
		a = atan2(-rotmat->xy, rotmat->xx);
		c = 0.0;
	}
	else {
		a = atan2(rotmat->xz, -rotmat->yz);
		c = atan2(rotmat->zx, rotmat->zy);
	}
	*ea = a;
	*eb = b;
	*ec = c;
}
/* Convert Cartesian displacement dr to fractional coordinates of the
 * triclinic box (lengths box.x/y/z, angles box.a/b/c in degrees).
 * NOTE(review): divides by sin(gamma) and by the derived gamma_term --
 * degenerate box angles produce inf/NaN; confirm inputs are validated
 * upstream. */
static inline void
cart_to_frac(const six_t box, vec_t *dr)
{
	double radian = 180.0/PI;
	double alpha_cos = cos(box.a / radian);
	double beta_sin = sin(box.b / radian);
	double beta_cos = cos(box.b / radian);
	double gamma_sin = sin(box.c / radian);
	double gamma_cos = cos(box.c / radian);
	double beta_term = (alpha_cos - beta_cos*gamma_cos) / gamma_sin;
	double gamma_term = sqrt(beta_sin*beta_sin - beta_term*beta_term);
	/* Back-substitute z, then y, then x (triangular transform). */
	dr->z = (dr->z/gamma_term) / box.z;
	dr->y = ((dr->y - dr->z * box.z * beta_term)/gamma_sin) / box.y;
	dr->x = (dr->x - dr->y * box.y * gamma_cos - dr->z * box.z * beta_cos) / box.x;
}
/* Convert fractional coordinates dr back to Cartesian for the triclinic
 * box (angles in degrees); inverse of cart_to_frac. */
static inline void
frac_to_cart(const six_t box, vec_t *dr) {
	double radian = 180.0/PI;
	double alpha_cos = cos(box.a / radian);
	double beta_sin = sin(box.b / radian);
	double beta_cos = cos(box.b / radian);
	double gamma_sin = sin(box.c / radian);
	double gamma_cos = cos(box.c / radian);
	double beta_term = (alpha_cos - beta_cos*gamma_cos) / gamma_sin;
	double gamma_term = sqrt(beta_sin*beta_sin - beta_term*beta_term);
	dr->x = dr->x * box.x + dr->y * box.y * gamma_cos + dr->z * box.z * beta_cos;
	dr->y = dr->y * box.y * gamma_sin + dr->z * box.z * beta_term;
	dr->z = dr->z * gamma_term * box.z;
}
#endif /* LIBEFP_MATH_UTIL_H */
|
CompositeKernels.h | #pragma once
#include "CSRMatrix.h"
#include "Parameters.h"
// Computes Lu = L * u (CSR sparse matrix-vector product) and returns the
// inner product <u, Lu>.
//
// laplacianMatrix - CSR matrix with mSize rows; assumed to match the
//                   flattened XDIM*YDIM*ZDIM grid size -- TODO confirm.
// u  - input grid, read as a flat vector of length mSize
// Lu - output grid receiving the product
//
// Returns (float) sum_i u[i] * Lu[i]; the reduction is accumulated in
// double to limit rounding error across rows.
float ComputeLaplacianAndInnerProduct(CSRMatrix& laplacianMatrix,
    const float (&u)[XDIM][YDIM][ZDIM], float (&Lu)[XDIM][YDIM][ZDIM])
{
    int N = laplacianMatrix.mSize;
    const auto rowOffsets = laplacianMatrix.GetRowOffsets();
    const auto columnIndices = laplacianMatrix.GetColumnIndices();
    const auto values = laplacianMatrix.GetValues();
    float *y = &Lu[0][0][0];
    const float *x = &u[0][0][0];
    double result = 0.;
#pragma omp parallel for reduction(+:result)
    for (int i = 0; i < N; i++)
    {
        // Accumulate the row sum in a local instead of read-modify-writing
        // y[i] on every term; the compiler cannot hoist that itself because
        // x and y could alias as far as it knows.  Bit-identical to the
        // original float accumulation order.
        float rowSum = 0.f;
        for (int k = rowOffsets[i]; k < rowOffsets[i+1]; k++) {
            const int j = columnIndices[k];
            rowSum += values[k] * x[j];
        }
        y[i] = rowSum;
        result += (double) x[i] * (double) rowSum;
    }
    return (float) result;
}
|
GB_binop__first_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__first_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__first_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__first_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_fp64)
// A*D function (colscale): GB (_AxD__first_fp64)
// D*A function (rowscale): GB (_DxB__first_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__first_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__first_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_fp64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = aij
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_FP64 || GxB_NO_FIRST_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Generated code (see file header): change the Generator/ source, not this file.
// C = A+B with first(x,y) = x; all three matrices dense.
GrB_Info GB (_Cdense_ewise3_noaccum__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Generated code: change the Generator/ source, not this file.
// C += B (dense accum).  The template is compiled out (#if 0) for FIRST,
// so when enabled this is a no-op that reports success.
GrB_Info GB (_Cdense_accumB__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Generated code: change the Generator/ source, not this file.
// C += b, scalar accumulated into a dense matrix; compiled out for FIRST.
GrB_Info GB (_Cdense_accumb__first_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated code: change the Generator/ source, not this file.
// C = A*D (column scale); with first(), cij = aij.
GrB_Info GB (_AxD__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Generated code: change the Generator/ source, not this file.
// C = D*B (row scale); with first(), cij = dii.
GrB_Info GB (_DxB__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Generated code: change the Generator/ source, not this file.
// eWiseAdd: C = A+B or C<M> = A+B with the FIRST_FP64 operator.
GrB_Info GB (_AaddB__first_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Generated code: change the Generator/ source, not this file.
// eWiseMult: C = A.*B or C<M> = A.*B (method 01) with FIRST_FP64.
GrB_Info GB (_AemultB_01__first_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Generated code: change the Generator/ source, not this file.
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for FIRST, so only the unflipped branch is compiled.
GrB_Info GB (_AemultB_02__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated code: change the Generator/ source, not this file.
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_03__first_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated code: change the Generator/ source, not this file.
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__first_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
sgemm.c | #include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
/*
* C = [n, q] = A[n, m] * B[m, q]
*/
enum {
N = 1000,
M = 1000,
Q = 1000,
NREPS = 5,
};
/* Wall-clock time in seconds with microsecond resolution. */
double wtime()
{
	struct timeval now;
	gettimeofday(&now, NULL);
	return (double)now.tv_sec + 1E-6 * (double)now.tv_usec;
}
/* Matrix multiplication C[n, q] = A[n, m] * B[m, q] */
/* Matrix multiplication C[n, q] = A[n, m] * B[m, q], offloaded to a
 * Xeon Phi coprocessor when built with offload support (the pragma is
 * ignored otherwise). */
void sgemm_phi(float *a, float *b, float *c, int n, int m, int q)
{
#pragma offload target(mic) in(a:length(n * m)) in(b:length(m * q)) out(c:length(n * q))
	{
#pragma omp parallel
		{
			/* Zero the result by absolute index.  The previous version
			 * advanced a per-thread counter from 0 (c[k++] with k private
			 * to each thread), so with more than one thread every thread
			 * cleared the *start* of c and most of c stayed
			 * uninitialized. */
#pragma omp for
			for (int i = 0; i < n; i++)
				for (int j = 0; j < q; j++)
					c[i * q + j] = 0.0;
			/* ikj loop order: the inner loop streams contiguously through
			 * rows of b and c, which vectorizes well. */
#pragma omp for
			for (int i = 0; i < n; i++) {
				for (int k = 0; k < m; k++) {
					for (int j = 0; j < q; j++)
						c[i * q + j] += a[i * m + k] * b[k * q + j];
				}
			}
		}
	}
}
/* Benchmark sgemm_fun on N x M x Q matrices: one warmup run plus NREPS
 * timed runs.  Prints GFLOPS and timing statistics tagged with msg and
 * returns the average timed run in seconds. */
double run_phi(const char *msg, void (*sgemm_fun)(float *, float *, float *, int, int, int))
{
	double gflop = 2.0 * N * Q * M * 1E-9;	/* 2 flops (mul+add) per inner iteration */
	float *a, *b, *c;
	a = malloc(sizeof(*a) * N * M);
	b = malloc(sizeof(*b) * M * Q);
	c = malloc(sizeof(*c) * N * Q);
	if (a == NULL || b == NULL || c == NULL) {
		/* Was "No enough memory"; also release whatever was allocated. */
		fprintf(stderr, "Not enough memory\n");
		free(c);
		free(b);
		free(a);
		exit(EXIT_FAILURE);
	}
	/* Deterministic pseudo-random inputs so runs are comparable. */
	srand(0);
	for (int i = 0; i < N; i++) {
		for (int j = 0; j < M; j++)
			a[i * M + j] = rand() % 100; // 1.0;
	}
	for (int i = 0; i < M; i++) {
		for (int j = 0; j < Q; j++)
			b[i * Q + j] = rand() % 100; // 2.0;
	}
	/* Warmup (excluded from the statistics; absorbs offload/thread setup). */
	double twarmup = wtime();
	sgemm_fun(a, b, c, N, M, Q);
	twarmup = wtime() - twarmup;
	/* Measures */
	double tavg = 0.0;
	double tmin = 1E6;
	double tmax = 0.0;
	for (int i = 0; i < NREPS; i++) {
		double t = wtime();
		sgemm_fun(a, b, c, N, M, Q);
		t = wtime() - t;
		tavg += t;
		tmin = (tmin > t) ? t : tmin;
		tmax = (tmax < t) ? t : tmax;
	}
	tavg /= NREPS;
	printf("%s (%d runs): perf %.2f GFLOPS; time: tavg %.6f, tmin %.6f, tmax %.6f, twarmup %.6f\n",
		msg, NREPS, gflop / tavg, tavg, tmin, tmax, twarmup);
	free(c);
	free(b);
	free(a);
	return tavg;
}
/* Benchmark driver: prints the problem size and runs the Phi kernel once. */
int main(int argc, char **argv)
{
printf("SGEMM N = %d, M = %d, Q = %d\n", N, M, Q);
/* getenv returns NULL when the variable is unset, and passing NULL to a
 * "%s" conversion is undefined behavior. Substitute a placeholder, and
 * use snprintf so the fixed-size buffer can never overflow. */
const char *nthreads = getenv("MIC_OMP_NUM_THREADS");
char buf[256];
snprintf(buf, sizeof(buf), "Phi OMP %s", nthreads ? nthreads : "(default)");
run_phi(buf, &sgemm_phi);
return 0;
}
|
tasks.h | /*~-------------------------------------------------------------------------~~*
* Copyright (c) 2016 Los Alamos National Laboratory, LLC
* All rights reserved
*~-------------------------------------------------------------------------~~*/
////////////////////////////////////////////////////////////////////////////////
/// \file
/// \brief Simple tasks related to solving full hydro solutions.
////////////////////////////////////////////////////////////////////////////////
#pragma once
// hydro includes
#include "types.h"
// flecsi includes
#include <flecsi-sp/io/io_exodus.h>
#include <flecsi/execution/context.h>
#include <flecsi/execution/execution.h>
#include <ristra/utils/string_utils.h>
// system includes
#include <iomanip>
namespace apps {
namespace hydro {
////////////////////////////////////////////////////////////////////////////////
//! \brief Update mesh geometry
//!
//! \param [in] mesh the mesh object
////////////////////////////////////////////////////////////////////////////////
void update_geometry(
client_handle_r<mesh_t> mesh
) {
// Recompute the mesh's derived geometric quantities; this task is a thin
// wrapper so the recomputation can be launched through the FleCSI runtime.
mesh.update_geometry();
}
////////////////////////////////////////////////////////////////////////////////
//! \brief The main task for setting initial conditions
//!
//! \param [in,out] mesh the mesh object
//! \param [in] ics the initial conditions to set
//! \return 0 for success
////////////////////////////////////////////////////////////////////////////////
void initial_conditions(
client_handle_r<mesh_t> mesh,
eos_t eos, // equation of state used to complete the thermodynamic state
real_t soln_time, // solution time forwarded to the IC functor
dense_handle_w<real_t> d, // density (written)
dense_handle_w<vector_t> v, // velocity (written)
dense_handle_w<real_t> e, // internal energy (written)
dense_handle_w<real_t> p, // pressure (written)
dense_handle_w<real_t> T, // presumably temperature -- confirm with eqns_t
dense_handle_w<real_t> a // presumably sound speed -- confirm with eqns_t
) {
for ( auto c : mesh.cells( flecsi::owned ) ) {
// local cell id selects this cell's initial condition
auto lid = c.id();
// the inputs supply density, velocity, and pressure ...
std::tie( d(c), v(c), p(c) ) = inputs_t::initial_conditions(
mesh, lid, soln_time );
// ... and the EOS derives the remaining quantities from the pressure
eqns_t::update_state_from_pressure(
pack( c, d, v, p, e, T, a ),
eos
);
}
}
////////////////////////////////////////////////////////////////////////////////
//! \brief The main task for setting initial conditions
//!
//! \param [in,out] mesh the mesh object
//! \param [in] ics the initial conditions to set
//! \return 0 for success
////////////////////////////////////////////////////////////////////////////////
void initial_conditions_from_file(
client_handle_r<mesh_t> mesh,
eos_t eos, // equation of state used to complete the thermodynamic state
real_t soln_time, // solution time forwarded to the IC functor
char_array_t filename, // input file from which the IC functor is built
dense_handle_w<real_t> d, // density (written)
dense_handle_w<vector_t> v, // velocity (written)
dense_handle_w<real_t> e, // internal energy (written)
dense_handle_w<real_t> p, // pressure (written)
dense_handle_w<real_t> T, // presumably temperature -- confirm with eqns_t
dense_handle_w<real_t> a // presumably sound speed -- confirm with eqns_t
) {
// Build the initial-condition functor from the user's input file.
auto ics = inputs_t::get_initial_conditions(filename.str());
// This doesn't work with lua input
//#pragma omp parallel for
for ( auto c : mesh.cells( flecsi::owned ) ) {
// the ICs are evaluated at the cell centroid ...
std::tie( d(c), v(c), p(c) ) = ics( c->centroid(), soln_time );
// ... and the EOS derives the remaining quantities from the pressure
eqns_t::update_state_from_pressure(
pack( c, d, v, p, e, T, a ),
eos
);
}
}
////////////////////////////////////////////////////////////////////////////////
//! \brief The main task to compute the time step size.
//!
//! \tparam E The equation of state object to use.
//! \param [in,out] mesh the mesh object
//! \return 0 for success
////////////////////////////////////////////////////////////////////////////////
real_t evaluate_time_step(
client_handle_r<mesh_t> mesh,
dense_handle_r<real_t> d,
dense_handle_r<vector_t> v,
dense_handle_r<real_t> e,
dense_handle_r<real_t> p,
dense_handle_r<real_t> T,
dense_handle_r<real_t> a,
real_t CFL,
real_t max_dt
) {
// Track the fastest signal in the mesh as the maximum of 1/dt, so that a
// single max-reduction replaces a min over per-face time scales.
real_t max_inv_dt(0);
for ( auto c : mesh.cells( flecsi::owned ) ) {
// bundle this cell's solution state once for all of its faces
auto state = pack( c, d, v, p, e, T, a );
for ( auto f : mesh.faces(c) ) {
// length scale normal to this face, estimated as volume / area
auto h = c->volume() / f->area();
// inverse of the time scale for the fastest wave crossing that length
auto inv_dt = eqns_t::fastest_wavespeed( state, f->normal() ) / h;
max_inv_dt = std::max( inv_dt, max_inv_dt );
} // face
} // cell
// a non-positive maximum would yield an infinite (or negative) step
if ( max_inv_dt <= 0 )
THROW_RUNTIME_ERROR( "infinite delta t" );
// convert back to a time step and apply the CFL safety factor
real_t time_step = 1 / max_inv_dt;
time_step *= CFL;
// never exceed the user-supplied ceiling
time_step = std::min( time_step, max_dt );
return time_step;
}
////////////////////////////////////////////////////////////////////////////////
//! \brief The main task to evaluate fluxes at each face.
//!
//! \param [in,out] mesh the mesh object
//! \return 0 for success
////////////////////////////////////////////////////////////////////////////////
void evaluate_fluxes(
client_handle_r<mesh_t> mesh,
dense_handle_r<real_t> d, // density (read)
dense_handle_r<vector_t> v, // velocity (read)
dense_handle_r<real_t> e, // internal energy (read)
dense_handle_r<real_t> p, // pressure (read)
dense_handle_r<real_t> T, // presumably temperature -- confirm with eqns_t
dense_handle_r<real_t> a, // presumably sound speed -- confirm with eqns_t
dense_handle_w<flux_data_t> flux // per-face flux, scaled by face area (written)
) {
const auto & face_list = mesh.faces( flecsi::owned );
auto num_faces = face_list.size();
// Faces are independent of one another, so the loop parallelizes cleanly.
#pragma omp parallel for
for ( counter_t fit = 0; fit < num_faces; ++fit )
{
const auto & f = face_list[fit];
// get the cell neighbors
const auto & cells = mesh.cells(f);
auto num_cells = cells.size();
// get the left state
// NOTE(review): cells[0] is treated as the "left"/inside cell; this must
// agree with the sign convention used in apply_update (neigh[0] => -flux).
auto w_left = pack( cells[0], d, v, p, e, T, a );
// compute the face flux
//
// interior cell
if ( num_cells == 2 ) {
auto w_right = pack( cells[1], d, v, p, e, T, a );
flux(f) = flux_function<eqns_t>( w_left, w_right, f->normal() );
}
// boundary cell
else {
flux(f) = boundary_flux<eqns_t>( w_left, f->normal() );
}
// scale the flux by the face area
flux(f) *= f->area();
} // for
//----------------------------------------------------------------------------
}
// Future returned by a single-launch task; presumably used to forward the
// time step computed by evaluate_time_step into apply_update -- confirm at
// the call site.
template<typename T>
using handle_t =
flecsi::execution::flecsi_future<T, flecsi::execution::launch_type_t::single>;
////////////////////////////////////////////////////////////////////////////////
//! \brief The main task to update the solution in each cell.
//!
//! \param [in,out] mesh the mesh object
//! \return 0 for success
////////////////////////////////////////////////////////////////////////////////
void apply_update(
client_handle_r<mesh_t> mesh,
eos_t eos, // equation of state used to close the updated state
handle_t<real_t> future_delta_t, // time step as a future (see below)
dense_handle_r<flux_data_t> flux, // area-scaled face fluxes (read)
dense_handle_rw<real_t> d, // density (read/write)
dense_handle_rw<vector_t> v, // velocity (read/write)
dense_handle_rw<real_t> e, // internal energy (read/write)
dense_handle_rw<real_t> p, // pressure (read/write)
dense_handle_rw<real_t> T, // presumably temperature -- confirm with eqns_t
dense_handle_rw<real_t> a // presumably sound speed -- confirm with eqns_t
) {
//----------------------------------------------------------------------------
// Loop over each cell, scattering the fluxes to the cell
//auto delta_t = static_cast<real_t>( time_step );
// NOTE(review): this conversion presumably blocks until the future from the
// time-step task is ready -- confirm against flecsi_future's semantics.
real_t delta_t = future_delta_t;
const auto & cell_list = mesh.cells( flecsi::owned );
auto num_cells = cell_list.size();
// Cells only read face fluxes and write their own state, so no two
// iterations touch the same data.
#pragma omp parallel for
for ( counter_t cit = 0; cit < num_cells; ++cit )
{
const auto & c = cell_list[cit];
// initialize the update
flux_data_t delta_u( 0 );
// loop over each connected edge
for ( auto f : mesh.faces(c) ) {
// get the cell neighbors
auto neigh = mesh.cells(f);
auto num_neigh = neigh.size();
// add the contribution to this cell only
// sign convention: the stored flux points out of neigh[0], so it is
// subtracted there and added to the other neighbor
if ( neigh[0] == c )
delta_u -= flux(f);
else
delta_u += flux(f);
} // edge
// now compute the final update
delta_u *= delta_t/c->volume();
// apply the update
auto u = pack(c, d, v, p, e, T, a);
eqns_t::update_state_from_flux( u, delta_u );
// update the rest of the quantities
eqns_t::update_state_from_energy( u, eos );
// check the solution quantities
if ( eqns_t::internal_energy(u) < 0 || eqns_t::density(u) < 0 )
THROW_RUNTIME_ERROR( "Negative density or internal energy encountered!" );
} // for
//----------------------------------------------------------------------------
}
////////////////////////////////////////////////////////////////////////////////
/// \brief output the solution
////////////////////////////////////////////////////////////////////////////////
void output(
client_handle_r<mesh_t> mesh,
char_array_t prefix, // output file name prefix
char_array_t postfix, // output file extension
size_t iteration, // iteration number embedded in the file name
real_t time, // solution time recorded in the file
dense_handle_r<real_t> d, // density (written to file)
dense_handle_r<vector_t> v, // unused here; kept for a uniform task signature
dense_handle_r<real_t> e, // unused here; kept for a uniform task signature
dense_handle_r<real_t> p, // pressure (written to file)
dense_handle_r<real_t> T, // unused here; kept for a uniform task signature
dense_handle_r<real_t> a // unused here; kept for a uniform task signature
) {
clog(info) << "OUTPUT MESH TASK" << std::endl;
// get the context
auto & context = flecsi::execution::context_t::instance();
auto rank = context.color();
// figure out this ranks file name
auto output_filename =
prefix.str() + "_rank" + apps::common::zero_padded(rank) +
"." + apps::common::zero_padded(iteration) + "." + postfix.str();
// now output the mesh; only density and pressure are exported
using field_type = decltype(d);
std::vector<field_type*> var_ptrs{&d, &p};
std::vector<std::string> var_names{"density", "pressure"};
flecsi_sp::io::io_exodus<mesh_t>::write(
output_filename, mesh, time, var_ptrs, var_names
);
}
////////////////////////////////////////////////////////////////////////////////
/// \brief output the solution
////////////////////////////////////////////////////////////////////////////////
void print(
client_handle_r<mesh_t> mesh,
char_array_t filename
) {
// Write this rank's mesh connectivity to a rank-specific file.
auto & ctx = flecsi::execution::context_t::instance();
auto my_rank = ctx.color();
clog(info) << "PRINT MESH ON RANK " << my_rank << std::endl;
// Insert a per-rank tag between the file's base name and its extension.
auto parts = ristra::utils::split_extension( filename.str() );
auto out_name =
parts.first + "_rank" + apps::common::zero_padded(my_rank) +
"." + parts.second;
// Dump the connectivity and close the stream explicitly.
std::cout << "Dumping connectivity to: " << out_name << std::endl;
std::ofstream out( out_name );
mesh.dump( out );
out.close();
}
////////////////////////////////////////////////////////////////////////////////
/// \brief Dump solution to file for regression testing
////////////////////////////////////////////////////////////////////////////////
// Dump the full cell- and vertex-centered solution in a plain-text format
// suitable for regression comparison. Every rank writes its own file; only
// rank 0 announces the file name on stdout.
void dump(
client_handle_r<mesh_t> mesh,
size_t iteration, // iteration count recorded in the header
real_t time, // solution time recorded in the header
dense_handle_r<real_t> d, // density (read)
dense_handle_r<vector_t> v, // velocity (read)
dense_handle_r<real_t> e, // internal energy (read)
dense_handle_r<real_t> p, // pressure (read)
char_array_t filename) {
// get the context
auto & context = flecsi::execution::context_t::instance();
auto rank = context.color();
constexpr auto num_dims = mesh_t::num_dimensions;
// local-to-global id maps so the output is comparable across partitionings
const auto & vert_lid_to_gid = context.index_map( mesh_t::index_spaces_t::vertices );
const auto & cell_lid_to_gid = context.index_map( mesh_t::index_spaces_t::cells );
clog(info) << "DUMP SOLUTION ON RANK " << rank << std::endl;
// figure out this ranks file name (a stray unary '+' before the "." was
// removed here; it compiled as unary plus on the decayed pointer)
auto name_and_ext = ristra::utils::split_extension( filename.str() );
auto output_filename =
name_and_ext.first + "_rank" + apps::common::zero_padded(rank) +
"." + name_and_ext.second;
// dump to file
if (rank==0)
std::cout << "Dumping solution to: " << output_filename << std::endl;
std::ofstream file( output_filename );
// Dump cell centered quantities
file.precision(14);
file.setf( std::ios::scientific );
file << "# Solution time: " << time << std::endl;
file << "# Number iterations: " << iteration << std::endl;
file << "# BEGIN CELLS" << std::endl;
file << "# Total number: " << mesh.num_cells() << std::endl;
file << "# local_id global_id ";
for ( int dim=0; dim<num_dims; ++dim ) file << "centroid(" << dim << ") ";
file << "density internal_energy pressure ";
for ( int dim=0; dim<num_dims; ++dim ) file << "velocity(" << dim << ") ";
file << std::endl;
for ( auto c : mesh.cells() ) {
file << c.id() << " " << cell_lid_to_gid.at(c.id()) << " ";
for ( auto x : c->centroid() ) file << x << " ";
file << d(c) << " " << e(c) << " " << p(c) << " ";
for ( auto x : v(c) ) file << x << " ";
file << std::endl;
}
file << "# END CELLS" << std::endl;
// Dump vertex quantities
file << "# BEGIN VERTICES" << std::endl;
file << "# Total number: " << mesh.num_vertices() << std::endl;
file << "# local_id global_id ";
for ( int dim=0; dim<num_dims; ++dim ) file << "coordinate(" << dim << ") ";
file << std::endl;
for ( auto v : mesh.vertices() ) {
file << v.id() << " " << vert_lid_to_gid.at(v.id()) << " ";
for ( auto x : v->coordinates() ) file << x << " ";
file << std::endl;
}
file << "# END VERTICES" << std::endl;
// close file
file.close();
}
////////////////////////////////////////////////////////////////////////////////
// TASK REGISTRATION
////////////////////////////////////////////////////////////////////////////////
// Register each task defined above with the FleCSI runtime: executed on
// "loc" processors, launched as index launches, and flagged as leaf tasks
// (NOTE(review): presumably meaning they launch no subtasks -- confirm
// against the FleCSI execution documentation).
flecsi_register_task(update_geometry, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(initial_conditions, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(initial_conditions_from_file, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(evaluate_time_step, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(evaluate_fluxes, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(apply_update, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(output, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(print, apps::hydro, loc, index|flecsi::leaf);
flecsi_register_task(dump, apps::hydro, loc, index|flecsi::leaf);
} // namespace hydro
} // namespace apps
|
GB_unop__identity_int64_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int64_bool
// op(A') function: GB_unop_tran__identity_int64_bool
// C type: int64_t
// A type: bool
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
// Auto-generated macro glue consumed by the "GB_unop_transpose.c" template
// included from the functions below; do not edit by hand.
#define GB_ATYPE \
bool
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator (identity: the cast below does all the work)
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (int64_t) Ax: elementwise cast of a bool array to int64, in parallel.
// Aliasing Cx == Ax is safe because each element is read before it is written.
GrB_Info GB_unop_apply__identity_int64_bool
(
int64_t *Cx, // Cx and Ax may be aliased
const bool *Ax,
int64_t anz, // number of entries to process
int nthreads // OpenMP thread count
)
{
#if GB_DISABLE
// kernel compiled out; caller falls back to the generic apply path
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int64_t) A': transpose with typecast. The loop body is supplied by
// the textually-included "GB_unop_transpose.c" template, driven by the
// GB_* macros defined above.
GrB_Info GB_unop_tran__identity_int64_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts, // per-slice row counts from phase 1
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice, // how A is partitioned across slices
int naslice
)
{
#if GB_DISABLE
// kernel compiled out; caller falls back to the generic transpose path
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_uint32_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint32_uint8
// op(A') function: GB_tran__abs_uint32_uint8
// C type: uint32_t
// A type: uint8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
// Auto-generated macro glue for the transpose/apply templates below.
// ABS of an unsigned type is the identity, which is why GB_OP is a plain
// assignment here despite the "abs" in the operator name.
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = abs ((uint32_t) Ax): for unsigned inputs this is a pure widening
// cast (abs is the identity), performed elementwise in parallel.
GrB_Info GB_unop__abs_uint32_uint8
(
uint32_t *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz, // number of entries to process
int nthreads // OpenMP thread count
)
{
#if GB_DISABLE
// kernel compiled out; caller falls back to the generic apply path
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = abs ((uint32_t) A'): transpose with typecast. The loop body comes from
// the textually-included "GB_unaryop_transpose.c" template, driven by the
// GB_* macros defined above.
GrB_Info GB_tran__abs_uint32_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts, // per-slice row counts from phase 1
GBI_single_iterator Iter,
const int64_t *restrict A_slice, // how A is partitioned across slices
int naslice
)
{
#if GB_DISABLE
// kernel compiled out; caller falls back to the generic transpose path
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
struct OMPTraitProperty;
struct OMPTraitSelector;
struct OMPTraitSet;
class OMPTraitInfo;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
mutable IdentifierInfo *Ident_abstract;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector"
/// and "bool" fast comparison. Only present if AltiVec or ZVector are
/// enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
IdentifierInfo *Ident_Bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> MSFenvAccess;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFenvAccessHandler;
std::unique_ptr<PragmaHandler> STDCFenvRoundHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// Current kind of OpenMP clause
OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown;
/// RAII class that manages the template parameter depth.
/// Levels added through this object (via ++, addDepth, or setAddedDepth)
/// are automatically subtracted from the tracked depth on destruction.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
// undo every level this object added
Depth -= AddedLevels;
}
/// Add one template parameter level.
void operator++() {
++Depth;
++AddedLevels;
}
/// Add D template parameter levels at once.
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
/// Replace this object's contribution so that exactly D levels are added.
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
/// The depth as it was before this object added any levels.
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
void MaybeDestroyTemplateIds() {
if (!TemplateIds.empty() &&
(Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
DestroyTemplateIds();
}
void DestroyTemplateIds();
/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
struct DestroyTemplateIdAnnotationsRAIIObj {
Parser &Self;
DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope. Higher values win when candidates collide (see add()).
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space before the '<' token
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
/// One suspicious '<': the candidate template name, the '<' location, its
/// ranking, and the bracket nesting at which it was seen.
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
// True when the parser is at exactly the nesting level of this '<'.
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
// True when the parser is at or inside this '<'s nesting level.
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
// same nesting level: keep whichever candidate ranks at least as high
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
// Tracks '<' tokens that might have been intended as template-argument-list
// openers, for better diagnostics on a later suspicious '>'.
AngleBracketTracker AngleBrackets;
// Lazily-initialized identifier for the SEH '__except' keyword.
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
/// This is a bitmask enum; values below are combinable flags plus named
/// combinations for common contexts.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
// Simple accessors that forward to the preprocessor or Sema instance.
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
/// Convenience overload that discards the parsed declaration group.
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
// Remember where the consumed token started before advancing the lexer.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current token only if it has the expected kind; returns
/// true (and advances past it) on a match, false (no tokens consumed)
/// otherwise.
bool TryConsumeToken(tok::TokenKind Expected) {
  if (!Tok.is(Expected))
    return false;
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return true;
}
/// As TryConsumeToken, but additionally reports the consumed token's
/// location in \p Loc on success.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
/// Returns the location just past the end of the most recently consumed
/// token (useful for fix-it ranges).
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the current token is '(' or ')'.
bool isTokenParen() const {
  return Tok.is(tok::l_paren) || Tok.is(tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
// Push the current token back behind Consumed, then re-lex so that
// Consumed becomes the current token and the old current token is next.
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
/// Consume an annotation token. Returns the token's *start* location;
/// PrevTokLocation is set to the annotation's end location.
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
// A closing delimiter deactivates any pending missing-'template'
// candidates recorded inside this bracket scope.
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
//===--------------------------------------------------------------------===//
// Pragma handling. Each HandlePragma* method consumes the annotation token
// that the corresponding pragma handler pushed into the token stream.
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ROUND...
void HandlePragmaFEnvRound();
/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  // Never peek past an end-of-file token; just keep returning it.
  return (N == 0 || Tok.is(tok::eof)) ? Tok : PP.LookAhead(N - 1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
/// A null annotation value indicates a type error.
static TypeResult getTypeAnnotation(const Token &Tok) {
if (!Tok.getAnnotationValue())
return TypeError();
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
// Stores a type result in an annotation token; invalid results are
// represented by a null annotation value.
static void setTypeAnnotation(Token &Tok, TypeResult T) {
assert((T.isInvalid() || T.get()) &&
"produced a valid-but-null type annotation?");
Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
}
// Read/write a NamedDecl stashed in an annotation token.
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
Tok.setAnnotationValue(ND);
}
// Read/write an IdentifierInfo stashed in an annotation token.
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
/// Heuristically determine whether the current token could begin a C++
/// nested-name-specifier (and is therefore worth trying to annotate).
bool MightBeCXXScopeToken() {
  if (Tok.isOneOf(tok::identifier, tok::coloncolon, tok::kw_decltype,
                  tok::kw___super))
    return true;
  // A template-id annotation followed by '::' can also start a scope.
  return Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon);
}
// Annotate a scope token only if the current token looks like it could
// start one; avoids the cost of annotation on obviously-unrelated tokens.
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
// Fast path: not in an AltiVec/ZVector language mode.
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
// Only 'vector', 'bool', 'Bool' (and AltiVec-only 'pixel') are
// context-sensitive; everything else is left alone.
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
Tok.getIdentifierInfo() != Ident_Bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  // Only relevant in AltiVec or ZVector language modes.
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  if (Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
// Lazily look up and cache the 'instancetype' identifier.
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
// State captured at construction so Revert() can restore it exactly.
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
// True until Commit() or Revert() finishes the transaction.
bool isActive;
public:
explicit TentativeParsingAction(Parser &p)
: P(p), PrevPreferredType(P.PreferredType) {
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
// Ask the preprocessor to cache tokens so Revert() can backtrack.
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
// Drop identifiers tentatively declared during this transaction.
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
// Rewind the preprocessor first, then restore the parser-side state.
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
// The Objective-C container we temporarily exited, or null if we were
// not inside one (in which case this object is a no-op).
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
/// A '(' '{' beginning a statement-expression.
StmtExprBegin,
/// A '}' ')' ending a statement-expression.
StmtExprEnd,
/// A '[' '[' beginning a C++11 or C2x attribute.
AttrBegin,
/// A ']' ']' ending a C++11 or C2x attribute.
AttrEnd,
/// A '::' '*' forming a C++ pointer-to-member declaration.
MemberPtr,
};
/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
tok::TokenKind FirstTokKind, CompoundToken Op);
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
// A null Self marks this object as not owning any scope.
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
class MultiParseScope {
  Parser &Self;
  // Number of scopes entered through this object and not yet exited.
  unsigned NumScopes = 0;

  MultiParseScope(const MultiParseScope &) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}

  /// Enter one additional scope with the given flags.
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++NumScopes;
  }

  /// Exit every scope entered so far, innermost first.
  void Exit() {
    for (; NumScopes != 0; --NumScopes)
      Self.ExitScope();
  }

  ~MultiParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
// Flags to restore on destruction when ManageFlags was true.
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Emit a diagnostic at the current token's location.
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
// Default implementations do nothing; subclasses override the ones
// relevant to the kind of late-parsed entity they represent.
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
void ParseLexedPragmas() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
// Macro that expanded into this attribute, if any.
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
// Declarations this attribute applies to once it is finally parsed.
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
Parser *Self = nullptr;
AccessSpecifier AS = AS_none;
CachedTokens Toks;
public:
explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
: Self(P), AS(AS) {}
void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
const CachedTokens &toks() const { return Toks; }
AccessSpecifier getAccessSpecifier() const { return AS; }
void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser *Self;
/// Method - The method declaration.
Decl *Method;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must to be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
    : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
      TagOrTemplate(TagOrTemplate) {}

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Return the innermost class currently being parsed. Requires a
/// non-empty ClassStack.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition:
/// pushes a parsing-class state on construction and guarantees the
/// matching pop (explicitly via Pop(), or implicitly on destruction).
class ParsingClassDefinition {
  Parser &P;
  // Whether Pop() has already been called; guards the destructor's
  // implicit pop.
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
    : P(P), Popped(false),
      State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class off the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    if (!Popped)
      P.PopParsingClass(State);
  }
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  /// Default-construct as "not a template".
  ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr) {}

  /// Construct for a template declaration or an explicit specialization.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
    : Kind(isSpecialization? ExplicitSpecialization : Template),
      TemplateParams(TemplateParams),
      LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  /// Construct for an explicit instantiation.
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
    : Kind(ExplicitInstantiation), TemplateParams(nullptr),
      ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
      LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation.
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  /// Source range of the template-related syntax (defined out of line).
  SourceRange getSourceRange() const LLVM_READONLY;
};
// In ParseCXXInlineMethods.cpp.
struct ReenterTemplateScopeRAII;
struct ReenterClassScopeRAII;
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
/// The kind of cached initializer being late-parsed: a function
/// parameter's default argument, or a member's default initializer.
enum CachedInitKind {
  CIK_DefaultArgument,
  CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Single-kind convenience overload: forwards to the two-kind
/// overload with both target kinds equal to \p T1.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII state for parsing an Objective-C @implementation: registers
/// itself as the parser's current implementation context on
/// construction and collects method bodies to be parsed late.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  /// Method bodies whose parsing is deferred until the @implementation
  /// is complete.
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
    // Initialize all members in the init-list (the original assigned
    // Finished in the constructor body, inconsistently with
    // HasCFunction); behavior is unchanged.
    : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  /// Whether finish() has completed; queried via isFinished().
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-C context-sensitive keyword recognition.
enum ObjCTypeQual {
  objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
  objc_nonnull, objc_nullable, objc_null_unspecified,
  objc_NumQuals
};
/// IdentifierInfo for each Objective-C type qualifier keyword, indexed
/// by ObjCTypeQual.
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
  /// Definitely not a type cast.
  NotTypeCast = 0,
  /// Possibly a type cast.
  MaybeTypeCast,
  /// Definitely a type cast.
  IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
  /// Any cast-expression.
  AnyCastExpr = 0,
  /// Restrict to unary-expressions.
  UnaryExprOnly,
  /// Restrict to primary-expressions.
  PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
/// Convenience overload: consult the tracked angle-bracket state (if
/// any) and delegate to the two-argument overload.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  auto *Info = AngleBrackets.getCurrent(*this);
  return Info && checkPotentialAngleBracketDelimiter(*Info, OpToken);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseSYCLUniqueStableNameExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
/// Each successive enumerator permits everything the previous ones do.
enum ParenParseOption {
  SimpleExpr,      // Only parse '(' expression ')'
  FoldExpr,        // Also allow fold-expression <anything>
  CompoundStmt,    // Also allow '(' compound-statement ')'
  CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
  CastExpr         // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHasErrors,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer
/// (see ParseLambdaIntroducer's Tentative out-parameter).
enum class LambdaIntroducerTentativeParse {
  /// This appears to be a lambda-introducer, which has been fully parsed.
  Success,
  /// This is a lambda-introducer, but has not been fully parsed, and this
  /// function needs to be called again to parse it.
  Incomplete,
  /// This is definitely an Objective-C message send expression, rather than
  /// a lambda-introducer, attribute-specifier, or array designator.
  MessageSend,
  /// This is not a lambda-introducer.
  Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
bool MissingOK,
ForRangeInfo *FRI = nullptr,
bool EnterForConditionScope = false);
DeclGroupPtrTy
ParseAliasDeclarationInInitStatement(DeclaratorContext Context,
ParsedAttributesWithRange &Attrs);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
///       initializer: [C99 6.7.8]
///         assignment-expression
///         '{' ...
ExprResult ParseInitializer() {
  // A leading '{' begins a braced initializer-list; anything else is
  // an assignment-expression.
  if (Tok.is(tok::l_brace))
    return ParseBraceInitializer();
  return ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
/// Context threaded into ParseInitializerWithPotentialDesignator,
/// used when completing designators.
struct DesignatorCompletionInfo {
  /// The initializer expressions parsed so far.
  SmallVectorImpl<Expr *> &InitExprs;
  /// The type being initialized (assumption from the name — confirm
  /// against callers).
  QualType PreferredBaseType;
};
ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc, Sema::ConditionKind CK,
bool MissingOK, SourceLocation *LParenLoc,
SourceLocation *RParenLoc);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
  /// Parse the block; this code is always used.
  IEB_Parse,
  /// Skip the block entirely; this code is never used.
  IEB_Skip,
  /// Parse the block as a dependent block, which may be used in
  /// some template instantiations but not others.
  IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// The location of the initial keyword.
  SourceLocation KeywordLoc;

  /// Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;

  /// Nested-name-specifier preceding the name.
  CXXScopeSpec SS;

  /// The name we're looking for.
  UnqualifiedId Name;

  /// How the block should be handled: parsed, skipped, or treated as
  /// dependent (see IfExistsBehavior).
  IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
  DSC_normal,             // normal context
  DSC_class,              // class context, enables 'friend'
  DSC_type_specifier,     // C++ type-specifier-seq or C specifier-qualifier-list
  DSC_trailing,           // C++11 trailing-type-specifier in a trailing return type
  DSC_alias_declaration,  // C++11 type-specifier-seq in an alias-declaration
  DSC_top_level,          // top-level/namespace declaration context
  DSC_template_param,     // template parameter context
  DSC_template_type_arg,  // template type argument context
  DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
  DSC_condition           // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;

  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return true;
  }
  // Covered switch: every DeclSpecContext enumerator is handled above.
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Whether a defining-type-specifier is permitted in a given context
/// (see isDefiningTypeSpecifierContext).
enum class AllowDefiningTypeSpec {
  /// The grammar doesn't allow a defining-type-specifier here, and we must
  /// not parse one (eg, because a '{' could mean something else).
  No,
  /// The grammar doesn't allow a defining-type-specifier here, but we permit
  /// one for error recovery purposes. Sema will reject.
  NoButErrorRecovery,
  /// The grammar allows a defining-type-specifier here, even though it's
  /// always invalid. Sema will reject.
  YesButInvalid,
  /// The grammar allows a defining-type-specifier here, and one can be valid.
  Yes
};
/// Is this a context in which we are parsing defining-type-specifiers (and
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
    return AllowDefiningTypeSpec::Yes;

  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
    return AllowDefiningTypeSpec::YesButInvalid;

  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
    return AllowDefiningTypeSpec::NoButErrorRecovery;

  case DeclSpecContext::DSC_trailing:
    return AllowDefiningTypeSpec::No;
  }
  // Covered switch: every DeclSpecContext enumerator is handled above.
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which an opaque-enum-declaration can appear?
static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
    return true;

  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
    return false;
  }
  // Covered switch: every DeclSpecContext enumerator is handled above.
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;

  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  }
  // Covered switch: every DeclSpecContext enumerator is handled above.
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  /// Location of the ':' in the for-range-declaration; invalid when no
  /// for-range-declaration was parsed.
  SourceLocation ColonLoc;
  /// The for-range-initializer expression.
  ExprResult RangeExpr;

  /// Whether a for-range-declaration was parsed.
  // Idiom fix: use SourceLocation::isValid() directly rather than the
  // double-negative !isInvalid(); behavior is identical.
  bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
/// A ForRangeInit plus the already-acted-on loop variable statement,
/// used once the for-range-declaration itself has been processed.
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
RecordDecl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  // In C++ this requires full tentative-parsing disambiguation.
  return isCXXDeclarationSpecifier() == TPResult::True;
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXDeclarationStatement();
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // Let OpenMP know a loop is starting before any disambiguation happens.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  // An alias-declaration ('using') can only be a declaration here.
  if (Tok.is(tok::kw_using))
    return true;
  return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,           // Disambiguating inside parentheses (isTypeIdInParens).
  TypeIdUnambiguous,        // No surrounding parens assumed (isTypeIdUnambiguously).
  TypeIdAsTemplateArgument  // Disambiguating a template argument — presumably
                            // used by the template-argument parser; not
                            // referenced in this chunk.
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // C has no such ambiguity; a specifier-qualifier list decides it.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool Discarded;
  return isTypeIdInParens(Discarded);
}
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool IsAmbiguous; // Result discarded: TypeIdUnambiguous tolerates no ambiguity.
  return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Convenience overload of isCXXTypeId that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool Discarded;
  return isCXXTypeId(Context, Discarded);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
/// True/False mean the ambiguity is resolved; Ambiguous means more
/// tentative parsing is needed; Error means a parsing error was hit.
enum class TPResult {
  True, False, Ambiguous, Error
};
/// Determine whether we could have an enum-base.
///
/// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
/// only consider this to be an enum-base if the next token is a '{'.
///
/// \return \c false if this cannot possibly be an enum base; \c true
/// otherwise.
bool isEnumBase(bool AllowSemi);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
/// Diagnoses use of _ExtInt as being deprecated, and diagnoses use of
/// _BitInt as an extension when appropriate.
void DiagnoseBitIntUse(const Token &Tok);
public:
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context = DeclaratorContext::TypeName,
AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' with standard attributes enabled counts as an attribute start.
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// Diagnose a '[[...]]' or 'alignas' attribute appearing at the current
/// token when it belongs at \p CorrectLocation instead.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  bool AtCXX11Attr = Tok.is(tok::l_square) && NextToken().is(tok::l_square);
  if (AtCXX11Attr || Tok.is(tok::kw_alignas))
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
/// Diagnose and drop any attributes collected in \p Attrs; a no-op when the
/// attribute range is invalid (i.e. no attributes were parsed).
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }
}
/// View-based variant of ProhibitAttributes; clears only the list, not the
/// underlying attribute storage.
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but we don't supported yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID,
bool DiagnoseEmptyAttrs = false);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Emit warnings for C++11 and C2x attributes that are in a position that
/// clang accepts as an extension.
void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs);
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// Bitmask of attribute syntaxes for ParseAttributes/MaybeParseAttributes;
/// combine values with '|'.
enum ParseAttrKindMask {
  PAKM_GNU = 1 << 0,      // __attribute__((...))
  PAKM_Declspec = 1 << 1, // __declspec(...)
  PAKM_CXX11 = 1 << 2,    // [[...]]
};
/// \brief Parse attributes based on what syntaxes are desired, allowing for
/// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
/// __attribute__((...)) __declspec(...) __attribute__((...)))
/// Note that Microsoft attributes (spelled with single square brackets) are
/// not supported by this because of parsing ambiguities with other
/// constructs.
///
/// There are some attribute parse orderings that should not be allowed in
/// arbitrary order. e.g.,
///
/// [[]] __attribute__(()) int i; // OK
/// __attribute__(()) [[]] int i; // Not OK
///
/// Such situations should use the specific attribute parsing functionality.
void ParseAttributes(unsigned WhichAttrKinds,
ParsedAttributesWithRange &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr);
void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
ParsedAttributesWithRange AttrsWithRange(AttrFactory);
ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs);
Attrs.takeAllFrom(AttrsWithRange);
}
/// \brief Possibly parse attributes based on what syntaxes are desired,
/// allowing for the order to vary.
/// Returns true if any attribute syntax was recognized and parsed.
bool MaybeParseAttributes(unsigned WhichAttrKinds,
                          ParsedAttributesWithRange &Attrs,
                          SourceLocation *End = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr) {
  // Check the cheap token tests first; only fall back to the C++11
  // attribute-specifier lookahead when they fail.
  bool AtAttribute = Tok.isOneOf(tok::kw___attribute, tok::kw___declspec);
  if (!AtAttribute)
    AtAttribute = standardAttributesAllowed() && isCXX11AttributeSpecifier();
  if (!AtAttribute)
    return false;
  ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
  return true;
}
/// Range-discarding variant of MaybeParseAttributes.
/// Returns true if any attribute syntax was recognized and parsed.
bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                          SourceLocation *End = nullptr,
                          LateParsedAttrList *LateAttrs = nullptr) {
  bool AtAttribute = Tok.isOneOf(tok::kw___attribute, tok::kw___declspec);
  if (!AtAttribute)
    AtAttribute = standardAttributesAllowed() && isCXX11AttributeSpecifier();
  if (!AtAttribute)
    return false;
  ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
  return true;
}
/// If the current token begins a GNU __attribute__ list, parse it and
/// attach the attributes to declarator \p D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParsedAttributes ParsedAttrs(AttrFactory);
  SourceLocation EndLoc;
  ParseGNUAttributes(ParsedAttrs, &EndLoc, LateAttrs, &D);
  D.takeAttributes(ParsedAttrs, EndLoc);
}
/// Parses GNU-style attributes and returns them without source range
/// information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
///
/// \returns true if a GNU attribute list was present and parsed.
bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
                             SourceLocation *EndLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute)) {
    // Parse into the ranged list, then strip the range information by
    // moving the attributes into the caller's plain list. The previous code
    // parsed directly into 'Attrs' (via the range-discarding overload of
    // ParseGNUAttributes) and then called takeAllFrom on the never-used,
    // empty 'AttrsWithRange' — functionally a no-op plus a redundant
    // intermediate list. Parse into the ranged list once, as intended.
    ParsedAttributesWithRange AttrsWithRange(AttrFactory);
    ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs);
    Attrs.takeAllFrom(AttrsWithRange);
    return true;
  }
  return false;
}
/// Parse a GNU __attribute__ list into \p Attrs if one starts here.
/// \returns true if an attribute list was parsed.
bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs,
                             SourceLocation *EndLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return false;
  ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
  return true;
}
/// Parses GNU-style attributes and returns them without source range
/// information.
///
/// This API is discouraged. Use the version that takes a
/// ParsedAttributesWithRange instead.
void ParseGNUAttributes(ParsedAttributes &Attrs,
                        SourceLocation *EndLoc = nullptr,
                        LateParsedAttrList *LateAttrs = nullptr,
                        Declarator *D = nullptr) {
  // Delegate to the ranged parser, then drop the range by moving the
  // attributes into the caller's plain list.
  ParsedAttributesWithRange RangedAttrs(AttrFactory);
  ParseGNUAttributes(RangedAttrs, EndLoc, LateAttrs, D);
  Attrs.takeAllFrom(RangedAttrs);
}
void ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// Inject tokens captured from an OpenMP directive (found while parsing
/// attributes) back into the token stream so they are parsed next.
/// NOTE(review): the three calls below are order-sensitive — do not reorder.
void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) {
  // If parsing the attributes found an OpenMP directive, emit those tokens
  // to the parse stream now.
  if (!OpenMPTokens.empty()) {
    // Push the current token back first so it is seen again after the
    // injected OpenMP tokens have been consumed.
    PP.EnterToken(Tok, /*IsReinject*/ true);
    PP.EnterTokenStream(OpenMPTokens, /*DisableMacroExpansion*/ true,
                        /*IsReinject*/ true);
    // Prime Tok with the first token of the injected stream.
    ConsumeAnyToken(/*ConsumeCodeCompletionTok*/ true);
  }
}
/// If a C++11 attribute-specifier-seq starts here, parse it and attach the
/// attributes to declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange Attrs(AttrFactory);
  SourceLocation EndLoc;
  ParseCXX11Attributes(Attrs, &EndLoc);
  D.takeAttributes(Attrs, EndLoc);
}
/// Parse a C++11 attribute-specifier-seq into \p attrs if one starts here,
/// discarding the source range. \returns true if attributes were parsed.
bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return false;
  ParsedAttributesWithRange RangedAttrs(AttrFactory);
  ParseCXX11Attributes(RangedAttrs, endLoc);
  attrs.takeAllFrom(RangedAttrs);
  return true;
}
/// Ranged variant: parse a C++11 attribute-specifier-seq into \p attrs if
/// one starts here. \returns true if attributes were parsed.
bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!standardAttributesAllowed())
    return false;
  if (!isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    return false;
  ParseCXX11Attributes(attrs, endLoc);
  return true;
}
void ParseOpenMPAttributeArgs(IdentifierInfo *AttrName,
CachedTokens &OpenMPTokens);
void ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
CachedTokens &OpenMPTokens,
SourceLocation *EndLoc = nullptr);
/// Parse one C++11 attribute-specifier, replaying any OpenMP directive
/// tokens it produced back into the token stream afterwards.
void ParseCXX11AttributeSpecifier(ParsedAttributes &Attrs,
                                  SourceLocation *EndLoc = nullptr) {
  CachedTokens DeferredOpenMPToks;
  ParseCXX11AttributeSpecifierInternal(Attrs, DeferredOpenMPToks, EndLoc);
  ReplayOpenMPAttributeTokens(DeferredOpenMPToks);
}
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
CachedTokens &OpenMPTokens);
IdentifierInfo *TryParseCXX11AttributeIdentifier(
SourceLocation &Loc,
Sema::AttributeCompletion Completion = Sema::AttributeCompletion::None,
const IdentifierInfo *EnclosingScope = nullptr);
/// Parse Microsoft-style '[...]' attributes if Microsoft extensions are
/// enabled and the current token starts one.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt)
    return;
  if (Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// Parse a '__declspec(...)' sequence if the keyword is enabled and present.
/// \returns true if declspecs were parsed.
bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (!getLangOpts().DeclSpecKeyword || Tok.isNot(tok::kw___declspec))
    return false;
  ParseMicrosoftDeclSpecs(Attrs, End);
  return true;
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
void ParsePtrauthQualifier(ParsedAttributes &Attrs);
/// Classify \p Tok as a C++11 virt-specifier, if it is one.
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Convenience overload that examines the current token.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
bool isClassCompatibleKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;
  bool EnteredScope; // Set only when ActOnCXXEnterDeclaratorScope returned
                     // false (presumably its failure convention — confirm
                     // against Sema).
  bool CreatedScope; // Set once P.EnterScope has been called.
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
  /// Enter the declarator scope named by SS. May be called at most once.
  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");
    // Record scope creation BEFORE entering, so the destructor still exits
    // the parser scope even if the Sema call below fails.
    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }
  /// Exit the Sema declarator scope (if entered) and the parser scope (if
  /// created), in that order.
  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
/// Locations and name for one component of a nested namespace declaration
/// (consumed by ParseInnerNamespace) — presumably one 'inline? identifier'
/// step of 'namespace A::inline B::C'; confirm against the .cpp.
struct InnerNamespaceInfo {
  SourceLocation NamespaceLoc;
  SourceLocation InlineLoc;
  SourceLocation IdentLoc;
  IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// The parsed pieces of a single using-declarator:
/// 'typename'? nested-name-specifier unqualified-id '...'?
struct UsingDeclarator {
  SourceLocation TypenameLoc;
  CXXScopeSpec SS;
  UnqualifiedId Name;
  SourceLocation EllipsisLoc;
  /// Reset all members so the object can be reused for the next declarator.
  void clear() {
    SS.clear();
    Name.clear();
    TypenameLoc = SourceLocation();
    EllipsisLoc = SourceLocation();
  }
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &Attrs,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
ParsedType ObjectType,
bool ObjectHadErrors,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse an 'append_args' clause for '#pragma omp declare variant'.
bool parseOpenMPAppendArgs(
SmallVectorImpl<OMPDeclareVariantAttr::InteropType> &InterOpTypes);
/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
OMPTraitInfo *ParentTI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse 'omp [begin] assume[s]' directive.
void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parse 'omp end assumes' directive.
void ParseOpenMPEndAssumesDirective(SourceLocation Loc);
/// Parse clauses for '#pragma omp [begin] declare target'.
void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI);
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
OpenMPDirectiveKind EndDKind,
SourceLocation Loc);
/// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if
/// it is not the current token.
void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);
/// Check the \p FoundKind against the \p ExpectedKind, if not issue an error
/// that the "end" matching the "begin" directive of kind \p BeginKind was not
/// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd
/// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`.
void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
OpenMPDirectiveKind ExpectedKind,
OpenMPDirectiveKind FoundKind,
SourceLocation MatchingLoc,
SourceLocation FoundLoc,
bool SkipUntilOpenMPEnd);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Tries to parse cast part of OpenMP array shaping operation:
/// '[' expression ']' { '[' expression ']' } ')'.
bool tryParseOpenMPArrayShapingCastPart();
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses indirect clause
/// \param ParseOnly true to skip the clause's semantic actions and return
// false;
bool ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI,
bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param DKind Directive kind.
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses the 'sizes' clause of a '#pragma omp tile' directive.
OMPClause *ParseOpenMPSizesClause();
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
/// Parses and creates OpenMP 5.0 iterators expression:
/// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
/// <range-specification> }+ ')'
ExprResult ParseOpenMPIteratorsExpr();
/// Parses allocators and traits in the context of the uses_allocator clause.
/// Expected format:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
/// Parses clause with an interop variable of kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
//
OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *DepModOrTailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
MapTypeModifiersLoc;
SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
MotionModifiers;
SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
bool IsMapTypeImplicit = false;
SourceLocation ExtraModifierLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
bool AllowDeductionGuide,
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc,
TemplateTy NameHint = nullptr);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs,
TemplateTy Template, SourceLocation OpenLoc);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
/// Parse the given string as a type.
///
/// This is a dangerous utility function currently employed only by API notes.
/// It is not a general entry-point for safely parsing types from strings.
///
/// \param typeStr The string to be parsed as a type.
/// \param context The name of the context in which this string is being
/// parsed, which will be used in diagnostics.
/// \param includeLoc The location at which this parse was triggered.
TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
SourceLocation includeLoc);
//===--------------------------------------------------------------------===//
// Embarcadero: Arary and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
ExprResult ParseBuiltinPtrauthTypeDiscriminator();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
class GNUAsmQualifiers {
unsigned Qualifiers = AQ_unspecified;
public:
enum AQ {
AQ_unspecified = 0,
AQ_volatile = 1,
AQ_inline = 2,
AQ_goto = 4,
};
static const char *getQualifierName(AQ Qualifier);
bool setAsmQualifier(AQ Qualifier);
inline bool isVolatile() const { return Qualifiers & AQ_volatile; };
inline bool isInline() const { return Qualifiers & AQ_inline; };
inline bool isGoto() const { return Qualifiers & AQ_goto; }
};
bool isGCCAsmStatement(const Token &TokAfterAsm) const;
bool isGNUAsmQualifier(const Token &TokAfterAsm) const;
GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const;
bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ);
};
} // end namespace clang
#endif
|
facedist.c | /* Generated by Cython 0.24 */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
#error Cython requires Python 2.6+ or Python 3.2+.
#else
#define CYTHON_ABI "0_24"
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#endif
#if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define CYTHON_PEP393_ENABLED 0
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifndef CYTHON_INLINE
#if defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ \
__pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \
}
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__facedist
#define __PYX_HAVE_API__facedist
#include "string.h"
#include "stdio.h"
#include "stdlib.h"
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include "math.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER) && defined (_M_X64)
#define __Pyx_sst_abs(value) _abs64(value)
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
#if PY_MAJOR_VERSION < 3
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_COMPILING_IN_CPYTHON
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c));
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
/* None.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"facedist.pyx",
"__init__.pxd",
"type.pxd",
};
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
/* Describes one element type for buffer-format (PEP 3118) validation. */
typedef struct {
  const char* name;                      /* human-readable type name for error messages */
  struct __Pyx_StructField_* fields;     /* sub-fields if this is a struct type, else NULL */
  size_t size;                           /* sizeof() the C type */
  size_t arraysize[8];                   /* per-dimension extents for fixed-size C arrays */
  int ndim;                              /* number of array dimensions (0 for scalars) */
  char typegroup;                        /* type class code (e.g. int/uint/float/complex/struct) */
  char is_unsigned;                      /* nonzero for unsigned integer types */
  int flags;                             /* e.g. __PYX_BUF_FLAGS_PACKED_STRUCT */
} __Pyx_TypeInfo;
/* One named member inside a struct-typed __Pyx_TypeInfo. */
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;                  /* type of this member */
  const char* name;                      /* member name */
  size_t offset;                         /* byte offset within the enclosing struct */
} __Pyx_StructField;
/* Stack frame used while recursing into nested struct fields. */
typedef struct {
  __Pyx_StructField* field;              /* field currently being matched */
  size_t parent_offset;                  /* accumulated offset of the enclosing structs */
} __Pyx_BufFmt_StackElem;
/* Parser state while checking a buffer's format string against a type. */
typedef struct {
  __Pyx_StructField root;                /* synthetic root field for the expected type */
  __Pyx_BufFmt_StackElem* head;          /* top of the field-recursion stack */
  size_t fmt_offset;                     /* byte offset reached in the format string so far */
  size_t new_count, enc_count;           /* pending vs. consumed repeat counts */
  size_t struct_alignment;               /* largest alignment seen in the current struct */
  int is_complex;                        /* nonzero while inside a complex ('Z') specifier */
  char enc_type;                         /* last type character consumed from the format */
  char new_packmode;                     /* pending packing mode ('@', '=', '<', '>', ...) */
  char enc_packmode;                     /* packing mode in effect for consumed items */
  char is_valid_array;                   /* nonzero after a well-formed array spec '(...)' */
} __Pyx_BufFmt_Context;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":727
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":728
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":734
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":735
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":739
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":740
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":750
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":754
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":757
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":761
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* None.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
/* None.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
/*--- Type declarations ---*/
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":765
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_COMPILING_IN_CPYTHON
/* Fast attribute lookup: call the type's getattr slot directly when
 * available, falling back to the generic PyObject_GetAttr().  On Python 2
 * the legacy char*-based tp_getattr slot is also tried. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
    PyTypeObject* const type = Py_TYPE(obj);
    /* Preferred modern slot: takes the attribute name as a PyObject. */
    if (likely(type->tp_getattro != NULL))
        return type->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
    /* Legacy Python 2 slot: takes the attribute name as a C string. */
    if (likely(type->tp_getattr != NULL))
        return type->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
    /* Generic fallback for types exposing neither slot. */
    return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* ArgTypeTest.proto */
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact);
/* GetModuleGlobalName.proto */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* BufferFormatCheck.proto */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type); // PROTO
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1)
/* PyThreadStateGet.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET();
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* BufferFallbackError.proto */
static void __Pyx_RaiseBufferFallbackError(void);
/* PyObjectCallNoArg.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* DictGetItem.proto */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
/* dict[key] lookup returning a NEW reference.  On a missing key (with no
 * other error pending) a KeyError carrying the key is raised, matching the
 * behavior of PyObject_GetItem() on dicts. */
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
    PyObject *value = PyDict_GetItemWithError(d, key);
    if (likely(value != NULL)) {
        /* PyDict_GetItemWithError returns a borrowed reference; own it. */
        Py_INCREF(value);
        return value;
    }
    /* NULL with no error pending means the key was simply absent. */
    if (!PyErr_Occurred()) {
        PyObject* key_tuple = PyTuple_Pack(1, key);
        if (likely(key_tuple))
            PyErr_SetObject(PyExc_KeyError, key_tuple);
        Py_XDECREF(key_tuple);
    }
    return NULL;
}
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* None.proto */
static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Print.proto */
static int __Pyx_Print(PyObject*, PyObject *, int);
#if CYTHON_COMPILING_IN_PYPY || PY_MAJOR_VERSION >= 3
static PyObject* __pyx_print = 0;
static PyObject* __pyx_print_kwargs = 0;
#endif
/* None.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* None.proto */
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* None.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eqf(a, b) ((a)==(b))
#define __Pyx_c_sumf(a, b) ((a)+(b))
#define __Pyx_c_difff(a, b) ((a)-(b))
#define __Pyx_c_prodf(a, b) ((a)*(b))
#define __Pyx_c_quotf(a, b) ((a)/(b))
#define __Pyx_c_negf(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zerof(z) ((z)==(float)0)
#define __Pyx_c_conjf(z) (::std::conj(z))
#if 1
#define __Pyx_c_absf(z) (::std::abs(z))
#define __Pyx_c_powf(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zerof(z) ((z)==0)
#define __Pyx_c_conjf(z) (conjf(z))
#if 1
#define __Pyx_c_absf(z) (cabsf(z))
#define __Pyx_c_powf(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* None.proto */
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/* None.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq(a, b) ((a)==(b))
#define __Pyx_c_sum(a, b) ((a)+(b))
#define __Pyx_c_diff(a, b) ((a)-(b))
#define __Pyx_c_prod(a, b) ((a)*(b))
#define __Pyx_c_quot(a, b) ((a)/(b))
#define __Pyx_c_neg(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero(z) ((z)==(double)0)
#define __Pyx_c_conj(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs(z) (::std::abs(z))
#define __Pyx_c_pow(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero(z) ((z)==0)
#define __Pyx_c_conj(z) (conj(z))
#if 1
#define __Pyx_c_abs(z) (cabs(z))
#define __Pyx_c_pow(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
/* PrintOne.proto */
static int __Pyx_PrintOne(PyObject* stream, PyObject *o);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* PyIdentifierFromString.proto */
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
/* ModuleImport.proto */
static PyObject *__Pyx_ImportModule(const char *name);
/* TypeImport.proto */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'libc.stdlib' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
/* Module declarations from 'cython' */
/* Module declarations from 'libc.math' */
/* Module declarations from 'facedist' */
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t = { "float64_t", NULL, sizeof(__pyx_t_5numpy_float64_t), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "facedist"
int __pyx_module_is_main_facedist = 0;
/* Implementation of 'facedist' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_RuntimeError;
static const char __pyx_k_A[] = "A";
static const char __pyx_k_D[] = "D";
static const char __pyx_k_ii[] = "ii";
static const char __pyx_k_jj[] = "jj";
static const char __pyx_k_kk[] = "kk";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_rD[] = "rD";
static const char __pyx_k_rd[] = "rd";
static const char __pyx_k_end[] = "end";
static const char __pyx_k_max[] = "max";
static const char __pyx_k_min[] = "min";
static const char __pyx_k_axis[] = "axis";
static const char __pyx_k_file[] = "file";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mean[] = "mean";
static const char __pyx_k_mins[] = "mins";
static const char __pyx_k_ncol[] = "ncol";
static const char __pyx_k_nrow[] = "nrow";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_cdist[] = "cdist";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_print[] = "print";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_zeros[] = "zeros";
static const char __pyx_k_double[] = "double";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_metric[] = "metric";
static const char __pyx_k_facedist[] = "facedist";
static const char __pyx_k_max_dist[] = "max_dist";
static const char __pyx_k_min_dist[] = "min_dist";
static const char __pyx_k_euclidean[] = "euclidean";
static const char __pyx_k_mean_dist[] = "mean_dist";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_RuntimeError[] = "RuntimeError";
static const char __pyx_k_meanmin_dist[] = "meanmin_dist";
static const char __pyx_k_scipy_spatial_distance[] = "scipy.spatial.distance";
static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
static const char __pyx_k_home_mlode_BA_Moritz_Implementa[] = "/home/mlode/BA-Moritz/Implementation/src/face_recognition/cython_test/facedist.pyx";
static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
static PyObject *__pyx_n_s_A;
static PyObject *__pyx_n_s_D;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
static PyObject *__pyx_n_s_RuntimeError;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_axis;
static PyObject *__pyx_n_s_cdist;
static PyObject *__pyx_n_s_double;
static PyObject *__pyx_n_s_end;
static PyObject *__pyx_n_s_euclidean;
static PyObject *__pyx_n_s_facedist;
static PyObject *__pyx_n_s_file;
static PyObject *__pyx_kp_s_home_mlode_BA_Moritz_Implementa;
static PyObject *__pyx_n_s_ii;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_jj;
static PyObject *__pyx_n_s_kk;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_max;
static PyObject *__pyx_n_s_max_dist;
static PyObject *__pyx_n_s_mean;
static PyObject *__pyx_n_s_mean_dist;
static PyObject *__pyx_n_s_meanmin_dist;
static PyObject *__pyx_n_s_metric;
static PyObject *__pyx_n_s_min;
static PyObject *__pyx_n_s_min_dist;
static PyObject *__pyx_n_s_mins;
static PyObject *__pyx_n_s_ncol;
static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_nrow;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_print;
static PyObject *__pyx_n_s_rD;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_rd;
static PyObject *__pyx_n_s_scipy_spatial_distance;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
static PyObject *__pyx_n_s_zeros;
static PyObject *__pyx_pf_8facedist_max_dist(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A); /* proto */
static PyObject *__pyx_pf_8facedist_2min_dist(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A); /* proto */
static PyObject *__pyx_pf_8facedist_4mean_dist(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A); /* proto */
static PyObject *__pyx_pf_8facedist_6meanmin_dist(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A); /* proto */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static PyObject *__pyx_int_1;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_codeobj__8;
static PyObject *__pyx_codeobj__10;
static PyObject *__pyx_codeobj__12;
static PyObject *__pyx_codeobj__14;
/* "facedist.pyx":19
* # indices.
* @cython.wraparound(False)
* def max_dist(np.ndarray A): # <<<<<<<<<<<<<<
*
* # declare C types for as many of our variables as possible. note that we
*/
/* Python wrapper */
static PyObject *__pyx_pw_8facedist_1max_dist(PyObject *__pyx_self, PyObject *__pyx_v_A); /*proto*/
static PyMethodDef __pyx_mdef_8facedist_1max_dist = {"max_dist", (PyCFunction)__pyx_pw_8facedist_1max_dist, METH_O, 0};
static PyObject *__pyx_pw_8facedist_1max_dist(PyObject *__pyx_self, PyObject *__pyx_v_A) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("max_dist (wrapper)", 0);
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_A), __pyx_ptype_5numpy_ndarray, 1, "A", 0))) __PYX_ERR(0, 19, __pyx_L1_error)
__pyx_r = __pyx_pf_8facedist_max_dist(__pyx_self, ((PyArrayObject *)__pyx_v_A));
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of facedist.max_dist(A).
 *
 * For every row pair (ii, jj) with jj > ii it computes
 *   rd = np.max(cdist(A[ii], A[jj], metric='euclidean'))
 * and stores rd at both D[ii, jj] and D[jj, ii], returning the symmetric
 * nrow x nrow float64 matrix D (allocated via np.zeros, so the diagonal
 * stays 0). The outer ii loop is a Cython prange compiled to an OpenMP
 * "parallel for"; because np.max/cdist are Python calls, each inner
 * iteration reacquires the GIL (PyGILState_Ensure/Release), so the
 * parallelism mostly overlaps the non-Python bookkeeping.
 * NOTE(review): machine-generated by Cython from facedist.pyx — do not
 * hand-edit; change facedist.pyx and regenerate instead. */
static PyObject *__pyx_pf_8facedist_max_dist(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A) {
Py_ssize_t __pyx_v_nrow;
CYTHON_UNUSED Py_ssize_t __pyx_v_ncol;
Py_ssize_t __pyx_v_ii;
Py_ssize_t __pyx_v_jj;
PyArrayObject *__pyx_v_D = 0;
double __pyx_v_rd;
__Pyx_LocalBuf_ND __pyx_pybuffernd_D;
__Pyx_Buffer __pyx_pybuffer_D;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyArrayObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
double __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
__Pyx_RefNannySetupContext("max_dist", 0);
/* Initialize the buffer bookkeeping for D before anything can jump to the
 * error path, so SafeReleaseBuffer is always valid. */
__pyx_pybuffer_D.pybuffer.buf = NULL;
__pyx_pybuffer_D.refcount = 0;
__pyx_pybuffernd_D.data = NULL;
__pyx_pybuffernd_D.rcbuffer = &__pyx_pybuffer_D;
/* "facedist.pyx":25
 * cdef:
 * # Py_ssize_t is just a special platform-specific type for indices
 * Py_ssize_t nrow = A.shape[0] # <<<<<<<<<<<<<<
 * Py_ssize_t ncol = A.shape[1]
 * Py_ssize_t ii, jj, kk
 */
__pyx_v_nrow = (__pyx_v_A->dimensions[0]);
/* "facedist.pyx":26
 * # Py_ssize_t is just a special platform-specific type for indices
 * Py_ssize_t nrow = A.shape[0]
 * Py_ssize_t ncol = A.shape[1] # <<<<<<<<<<<<<<
 * Py_ssize_t ii, jj, kk
 *
 */
__pyx_v_ncol = (__pyx_v_A->dimensions[1]);
/* "facedist.pyx":31
 * # this line is particularly expensive, since creating a numpy array
 * # involves unavoidable Python API overhead
 * np.ndarray[np.float64_t, ndim=2] D = np.zeros((nrow, nrow), np.double) # <<<<<<<<<<<<<<
 *
 * double rd
 */
/* D = np.zeros((nrow, nrow), np.double): build the (nrow, nrow) tuple and
 * dtype argument, then call np.zeros via the generic-call machinery. */
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_nrow); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nrow); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_double); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = NULL;
__pyx_t_6 = 0;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
__pyx_t_6 = 1;
}
}
__pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (__pyx_t_4) {
__Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __pyx_t_4 = NULL;
}
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_t_2);
__pyx_t_5 = 0;
__pyx_t_2 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 31, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 31, __pyx_L1_error)
__pyx_t_8 = ((PyArrayObject *)__pyx_t_1);
/* Acquire a validated 2-D float64 writable buffer view on D and cache its
 * strides/shape for the fast element stores below. */
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_D.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
__pyx_v_D = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_D.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 31, __pyx_L1_error)
} else {__pyx_pybuffernd_D.diminfo[0].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_D.diminfo[0].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_D.diminfo[1].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_D.diminfo[1].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[1];
}
}
__pyx_t_8 = 0;
__pyx_v_D = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "facedist.pyx":39
 *
 * #with nogil:
 * for ii in prange(nrow, nogil=True, schedule='static'): # <<<<<<<<<<<<<<
 * for jj in range(ii + 1, nrow):
 *
 */
/* prange region: release the GIL for the whole loop, then (when compiled
 * with OpenMP) spawn a statically-scheduled parallel for over ii. */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
__pyx_t_6 = __pyx_v_nrow;
if (1 == 0) abort();
{
Py_ssize_t __pyx_parallel_temp0 = 0xbad0bad0;
Py_ssize_t __pyx_parallel_temp1 = 0xbad0bad0;
double __pyx_parallel_temp2 = __PYX_NAN();
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_10 = (__pyx_t_6 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_10 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17) firstprivate(__pyx_t_1, __pyx_t_2, __pyx_t_3, __pyx_t_4, __pyx_t_5, __pyx_t_7) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_ii) lastprivate(__pyx_v_ii) lastprivate(__pyx_v_jj) lastprivate(__pyx_v_rd) schedule(static)
#endif /* _OPENMP */
for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_10; __pyx_t_9++){
if (__pyx_parallel_why < 2)
{
__pyx_v_ii = (Py_ssize_t)(0 + 1 * __pyx_t_9);
/* Initialize private variables to invalid values */
__pyx_v_jj = ((Py_ssize_t)0xbad0bad0);
__pyx_v_rd = ((double)__PYX_NAN());
/* "facedist.pyx":40
 * #with nogil:
 * for ii in prange(nrow, nogil=True, schedule='static'):
 * for jj in range(ii + 1, nrow): # <<<<<<<<<<<<<<
 *
 * with gil:
 */
__pyx_t_11 = __pyx_v_nrow;
for (__pyx_t_12 = (__pyx_v_ii + 1); __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_jj = __pyx_t_12;
/* "facedist.pyx":42
 * for jj in range(ii + 1, nrow):
 *
 * with gil: # <<<<<<<<<<<<<<
 * rd = np.max(cdist(A[ii], A[jj], metric='euclidean'))
 *
 */
/* "with gil" block: each pair re-enters the interpreter to call
 * cdist and np.max; the GIL is released again at __pyx_L16. */
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
/*try:*/ {
/* "facedist.pyx":43
 *
 * with gil:
 * rd = np.max(cdist(A[ii], A[jj], metric='euclidean')) # <<<<<<<<<<<<<<
 *
 * D[ii, jj] = rd
 */
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 43, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_max); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 43, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_cdist); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 43, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = __Pyx_GetItemInt(((PyObject *)__pyx_v_A), __pyx_v_ii, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 43, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_5 = __Pyx_GetItemInt(((PyObject *)__pyx_v_A), __pyx_v_jj, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 43, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 43, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5);
__pyx_t_2 = 0;
__pyx_t_5 = 0;
__pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 43, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_5);
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_metric, __pyx_n_s_euclidean) < 0) __PYX_ERR(0, 43, __pyx_L15_error)
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 43, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
if (!__pyx_t_5) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 43, __pyx_L15_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 43, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __pyx_t_5 = NULL;
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 43, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_13 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_13 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 43, __pyx_L15_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_rd = __pyx_t_13;
}
/* "facedist.pyx":42
 * for jj in range(ii + 1, nrow):
 *
 * with gil: # <<<<<<<<<<<<<<
 * rd = np.max(cdist(A[ii], A[jj], metric='euclidean'))
 *
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L16;
}
__pyx_L15_error: {
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L8_error;
}
__pyx_L16:;
}
}
/* "facedist.pyx":45
 * rd = np.max(cdist(A[ii], A[jj], metric='euclidean'))
 *
 * D[ii, jj] = rd # <<<<<<<<<<<<<<
 * D[jj, ii] = rd # because D is symmetric
 *
 */
/* Direct strided stores into the acquired buffer (no Python overhead);
 * both (ii, jj) and (jj, ii) are written so D stays symmetric. */
__pyx_t_14 = __pyx_v_ii;
__pyx_t_15 = __pyx_v_jj;
*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_15, __pyx_pybuffernd_D.diminfo[1].strides) = __pyx_v_rd;
/* "facedist.pyx":46
 *
 * D[ii, jj] = rd
 * D[jj, ii] = rd # because D is symmetric # <<<<<<<<<<<<<<
 *
 * return D
 */
__pyx_t_16 = __pyx_v_jj;
__pyx_t_17 = __pyx_v_ii;
*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_D.diminfo[1].strides) = __pyx_v_rd;
}
goto __pyx_L18;
/* Error inside a worker iteration: capture the exception state into the
 * shared __pyx_parallel_* slots so it can be re-raised after the join. */
__pyx_L8_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L17;
__pyx_L17:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates0)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_ii;
__pyx_parallel_temp1 = __pyx_v_jj;
__pyx_parallel_temp2 = __pyx_v_rd;
}
__pyx_L18:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
__Pyx_XDECREF(__pyx_t_1);
__pyx_t_1 = NULL;
__Pyx_XDECREF(__pyx_t_2);
__pyx_t_2 = NULL;
__Pyx_XDECREF(__pyx_t_3);
__pyx_t_3 = NULL;
__Pyx_XDECREF(__pyx_t_4);
__pyx_t_4 = NULL;
__Pyx_XDECREF(__pyx_t_5);
__pyx_t_5 = NULL;
__Pyx_XDECREF(__pyx_t_7);
__pyx_t_7 = NULL;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_ii = __pyx_parallel_temp0;
__pyx_v_jj = __pyx_parallel_temp1;
__pyx_v_rd = __pyx_parallel_temp2;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L4_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "facedist.pyx":39
 *
 * #with nogil:
 * for ii in prange(nrow, nogil=True, schedule='static'): # <<<<<<<<<<<<<<
 * for jj in range(ii + 1, nrow):
 *
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L4_error: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L5:;
}
}
/* "facedist.pyx":48
 * D[jj, ii] = rd # because D is symmetric
 *
 * return D # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_D));
__pyx_r = ((PyObject *)__pyx_v_D);
goto __pyx_L0;
/* "facedist.pyx":19
 * # indices.
 * @cython.wraparound(False)
 * def max_dist(np.ndarray A): # <<<<<<<<<<<<<<
 *
 * # declare C types for as many of our variables as possible. note that we
 */
/* function exit code */
/* Error path: drop temporaries, release D's buffer while preserving the
 * pending exception, and record the traceback before returning NULL. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_7);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("facedist.max_dist", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_D);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "facedist.pyx":57
* # indices.
* @cython.wraparound(False)
* def min_dist(np.ndarray A): # <<<<<<<<<<<<<<
*
* # declare C types for as many of our variables as possible. note that we
*/
/* Python wrapper */
/* CPython-callable wrapper for facedist.min_dist (METH_O: exactly one positional
 * argument). Type-checks that the argument is a numpy.ndarray, then forwards to
 * the generated implementation __pyx_pf_8facedist_2min_dist.
 * NOTE(review): machine-generated by Cython from facedist.pyx — do not hand-edit;
 * change facedist.pyx and regenerate instead. */
static PyObject *__pyx_pw_8facedist_3min_dist(PyObject *__pyx_self, PyObject *__pyx_v_A); /*proto*/
static PyMethodDef __pyx_mdef_8facedist_3min_dist = {"min_dist", (PyCFunction)__pyx_pw_8facedist_3min_dist, METH_O, 0};
static PyObject *__pyx_pw_8facedist_3min_dist(PyObject *__pyx_self, PyObject *__pyx_v_A) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("min_dist (wrapper)", 0);
/* Reject non-ndarray arguments with a TypeError (raised inside ArgTypeTest). */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_A), __pyx_ptype_5numpy_ndarray, 1, "A", 0))) __PYX_ERR(0, 57, __pyx_L1_error)
__pyx_r = __pyx_pf_8facedist_2min_dist(__pyx_self, ((PyArrayObject *)__pyx_v_A));
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of facedist.min_dist(A).
 *
 * Structurally identical to __pyx_pf_8facedist_max_dist above, except that
 * each row pair is reduced with np.min instead of np.max:
 *   rd = np.min(cdist(A[ii], A[jj], metric='euclidean'))
 * The result is the symmetric nrow x nrow float64 matrix D (zero diagonal,
 * from np.zeros). The outer ii loop is a Cython prange compiled to an OpenMP
 * "parallel for"; each inner iteration reacquires the GIL to run the
 * cdist/np.min Python calls.
 * NOTE(review): machine-generated by Cython from facedist.pyx — do not
 * hand-edit; change facedist.pyx and regenerate instead. */
static PyObject *__pyx_pf_8facedist_2min_dist(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A) {
Py_ssize_t __pyx_v_nrow;
CYTHON_UNUSED Py_ssize_t __pyx_v_ncol;
Py_ssize_t __pyx_v_ii;
Py_ssize_t __pyx_v_jj;
PyArrayObject *__pyx_v_D = 0;
double __pyx_v_rd;
__Pyx_LocalBuf_ND __pyx_pybuffernd_D;
__Pyx_Buffer __pyx_pybuffer_D;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyArrayObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
double __pyx_t_13;
Py_ssize_t __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
Py_ssize_t __pyx_t_17;
__Pyx_RefNannySetupContext("min_dist", 0);
/* Initialize the buffer bookkeeping for D before anything can jump to the
 * error path, so SafeReleaseBuffer is always valid. */
__pyx_pybuffer_D.pybuffer.buf = NULL;
__pyx_pybuffer_D.refcount = 0;
__pyx_pybuffernd_D.data = NULL;
__pyx_pybuffernd_D.rcbuffer = &__pyx_pybuffer_D;
/* "facedist.pyx":63
 * cdef:
 * # Py_ssize_t is just a special platform-specific type for indices
 * Py_ssize_t nrow = A.shape[0] # <<<<<<<<<<<<<<
 * Py_ssize_t ncol = A.shape[1]
 * Py_ssize_t ii, jj, kk
 */
__pyx_v_nrow = (__pyx_v_A->dimensions[0]);
/* "facedist.pyx":64
 * # Py_ssize_t is just a special platform-specific type for indices
 * Py_ssize_t nrow = A.shape[0]
 * Py_ssize_t ncol = A.shape[1] # <<<<<<<<<<<<<<
 * Py_ssize_t ii, jj, kk
 *
 */
__pyx_v_ncol = (__pyx_v_A->dimensions[1]);
/* "facedist.pyx":69
 * # this line is particularly expensive, since creating a numpy array
 * # involves unavoidable Python API overhead
 * np.ndarray[np.float64_t, ndim=2] D = np.zeros((nrow, nrow), np.double) # <<<<<<<<<<<<<<
 *
 * double rd
 */
/* D = np.zeros((nrow, nrow), np.double): build the (nrow, nrow) tuple and
 * dtype argument, then call np.zeros via the generic-call machinery. */
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_nrow); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nrow); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_double); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = NULL;
__pyx_t_6 = 0;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
__pyx_t_6 = 1;
}
}
__pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (__pyx_t_4) {
__Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __pyx_t_4 = NULL;
}
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_t_2);
__pyx_t_5 = 0;
__pyx_t_2 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 69, __pyx_L1_error)
__pyx_t_8 = ((PyArrayObject *)__pyx_t_1);
/* Acquire a validated 2-D float64 writable buffer view on D and cache its
 * strides/shape for the fast element stores below. */
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_D.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
__pyx_v_D = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_D.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 69, __pyx_L1_error)
} else {__pyx_pybuffernd_D.diminfo[0].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_D.diminfo[0].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_D.diminfo[1].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_D.diminfo[1].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[1];
}
}
__pyx_t_8 = 0;
__pyx_v_D = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "facedist.pyx":77
 *
 * #with nogil:
 * for ii in prange(nrow, nogil=True, schedule='static'): # <<<<<<<<<<<<<<
 * for jj in range(ii + 1, nrow):
 *
 */
/* prange region: release the GIL for the whole loop, then (when compiled
 * with OpenMP) spawn a statically-scheduled parallel for over ii. */
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
__pyx_t_6 = __pyx_v_nrow;
if (1 == 0) abort();
{
Py_ssize_t __pyx_parallel_temp0 = 0xbad0bad0;
Py_ssize_t __pyx_parallel_temp1 = 0xbad0bad0;
double __pyx_parallel_temp2 = __PYX_NAN();
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_10 = (__pyx_t_6 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_10 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17) firstprivate(__pyx_t_1, __pyx_t_2, __pyx_t_3, __pyx_t_4, __pyx_t_5, __pyx_t_7) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_ii) lastprivate(__pyx_v_ii) lastprivate(__pyx_v_jj) lastprivate(__pyx_v_rd) schedule(static)
#endif /* _OPENMP */
for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_10; __pyx_t_9++){
if (__pyx_parallel_why < 2)
{
__pyx_v_ii = (Py_ssize_t)(0 + 1 * __pyx_t_9);
/* Initialize private variables to invalid values */
__pyx_v_jj = ((Py_ssize_t)0xbad0bad0);
__pyx_v_rd = ((double)__PYX_NAN());
/* "facedist.pyx":78
 * #with nogil:
 * for ii in prange(nrow, nogil=True, schedule='static'):
 * for jj in range(ii + 1, nrow): # <<<<<<<<<<<<<<
 *
 * with gil:
 */
__pyx_t_11 = __pyx_v_nrow;
for (__pyx_t_12 = (__pyx_v_ii + 1); __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_jj = __pyx_t_12;
/* "facedist.pyx":80
 * for jj in range(ii + 1, nrow):
 *
 * with gil: # <<<<<<<<<<<<<<
 * rd = np.min(cdist(A[ii], A[jj], metric='euclidean'))
 *
 */
/* "with gil" block: each pair re-enters the interpreter to call
 * cdist and np.min; the GIL is released again at __pyx_L16. */
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
/*try:*/ {
/* "facedist.pyx":81
 *
 * with gil:
 * rd = np.min(cdist(A[ii], A[jj], metric='euclidean')) # <<<<<<<<<<<<<<
 *
 * D[ii, jj] = rd
 */
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 81, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_min); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 81, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_cdist); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 81, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = __Pyx_GetItemInt(((PyObject *)__pyx_v_A), __pyx_v_ii, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 81, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_5 = __Pyx_GetItemInt(((PyObject *)__pyx_v_A), __pyx_v_jj, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 81, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 81, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5);
__pyx_t_2 = 0;
__pyx_t_5 = 0;
__pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 81, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_5);
if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_metric, __pyx_n_s_euclidean) < 0) __PYX_ERR(0, 81, __pyx_L15_error)
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 81, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
if (!__pyx_t_5) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L15_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 81, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __pyx_t_5 = NULL;
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_13 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_13 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 81, __pyx_L15_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_rd = __pyx_t_13;
}
/* "facedist.pyx":80
 * for jj in range(ii + 1, nrow):
 *
 * with gil: # <<<<<<<<<<<<<<
 * rd = np.min(cdist(A[ii], A[jj], metric='euclidean'))
 *
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L16;
}
__pyx_L15_error: {
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L8_error;
}
__pyx_L16:;
}
}
/* "facedist.pyx":83
 * rd = np.min(cdist(A[ii], A[jj], metric='euclidean'))
 *
 * D[ii, jj] = rd # <<<<<<<<<<<<<<
 * D[jj, ii] = rd # because D is symmetric
 *
 */
/* Direct strided stores into the acquired buffer (no Python overhead);
 * both (ii, jj) and (jj, ii) are written so D stays symmetric. */
__pyx_t_14 = __pyx_v_ii;
__pyx_t_15 = __pyx_v_jj;
*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_15, __pyx_pybuffernd_D.diminfo[1].strides) = __pyx_v_rd;
/* "facedist.pyx":84
 *
 * D[ii, jj] = rd
 * D[jj, ii] = rd # because D is symmetric # <<<<<<<<<<<<<<
 *
 * return D
 */
__pyx_t_16 = __pyx_v_jj;
__pyx_t_17 = __pyx_v_ii;
*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_D.diminfo[1].strides) = __pyx_v_rd;
}
goto __pyx_L18;
/* Error inside a worker iteration: capture the exception state into the
 * shared __pyx_parallel_* slots so it can be re-raised after the join. */
__pyx_L8_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L17;
__pyx_L17:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates1)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_ii;
__pyx_parallel_temp1 = __pyx_v_jj;
__pyx_parallel_temp2 = __pyx_v_rd;
}
__pyx_L18:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
__Pyx_XDECREF(__pyx_t_1);
__pyx_t_1 = NULL;
__Pyx_XDECREF(__pyx_t_2);
__pyx_t_2 = NULL;
__Pyx_XDECREF(__pyx_t_3);
__pyx_t_3 = NULL;
__Pyx_XDECREF(__pyx_t_4);
__pyx_t_4 = NULL;
__Pyx_XDECREF(__pyx_t_5);
__pyx_t_5 = NULL;
__Pyx_XDECREF(__pyx_t_7);
__pyx_t_7 = NULL;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_ii = __pyx_parallel_temp0;
__pyx_v_jj = __pyx_parallel_temp1;
__pyx_v_rd = __pyx_parallel_temp2;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L4_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "facedist.pyx":77
 *
 * #with nogil:
 * for ii in prange(nrow, nogil=True, schedule='static'): # <<<<<<<<<<<<<<
 * for jj in range(ii + 1, nrow):
 *
 */
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L4_error: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L5:;
}
}
/* "facedist.pyx":86
 * D[jj, ii] = rd # because D is symmetric
 *
 * return D # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_D));
__pyx_r = ((PyObject *)__pyx_v_D);
goto __pyx_L0;
/* "facedist.pyx":57
 * # indices.
 * @cython.wraparound(False)
 * def min_dist(np.ndarray A): # <<<<<<<<<<<<<<
 *
 * # declare C types for as many of our variables as possible. note that we
 */
/* function exit code */
/* Error path: drop temporaries, release D's buffer while preserving the
 * pending exception, and record the traceback before returning NULL. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_7);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("facedist.min_dist", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_D);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "facedist.pyx":95
* # indices.
* @cython.wraparound(False)
* def mean_dist(np.ndarray A): # <<<<<<<<<<<<<<
*
* # declare C types for as many of our variables as possible. note that we
*/
/* Python wrapper */
static PyObject *__pyx_pw_8facedist_5mean_dist(PyObject *__pyx_self, PyObject *__pyx_v_A); /*proto*/
static PyMethodDef __pyx_mdef_8facedist_5mean_dist = {"mean_dist", (PyCFunction)__pyx_pw_8facedist_5mean_dist, METH_O, 0};
/* Cython-generated Python-level wrapper for facedist.mean_dist(A).
 * Validates that the single METH_O argument is a numpy.ndarray, then
 * delegates to the C-level implementation __pyx_pf_8facedist_4mean_dist.
 * NOTE: generated code — do not hand-edit; regenerate from facedist.pyx. */
static PyObject *__pyx_pw_8facedist_5mean_dist(PyObject *__pyx_self, PyObject *__pyx_v_A) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("mean_dist (wrapper)", 0);
  /* Type-check A against numpy.ndarray; on failure __PYX_ERR records the
   * source position (facedist.pyx:95) and jumps to the error label. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_A), __pyx_ptype_5numpy_ndarray, 1, "A", 0))) __PYX_ERR(0, 95, __pyx_L1_error)
  __pyx_r = __pyx_pf_8facedist_4mean_dist(__pyx_self, ((PyArrayObject *)__pyx_v_A));
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;  /* NULL return signals the pending Python exception */
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Cython-generated implementation of facedist.mean_dist(A).
 * Allocates a symmetric (nrow x nrow) float64 matrix D and, for every
 * unordered pair (ii, jj) with jj > ii, stores
 *     rd = np.mean(cdist(A[ii], A[jj], metric='euclidean'))
 * at D[ii, jj] and D[jj, ii], then returns D.
 * The outer loop is a prange (OpenMP) loop run with the GIL released;
 * each inner iteration briefly re-acquires the GIL for the Python-level
 * cdist/np.mean calls.
 * NOTE: generated code — do not hand-edit; regenerate from facedist.pyx. */
static PyObject *__pyx_pf_8facedist_4mean_dist(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A) {
  /* C-typed locals for the cdef block in facedist.pyx */
  Py_ssize_t __pyx_v_nrow;
  Py_ssize_t __pyx_v_ncol;
  Py_ssize_t __pyx_v_ii;
  Py_ssize_t __pyx_v_jj;
  PyArrayObject *__pyx_v_D = 0;
  double __pyx_v_rd;
  /* Buffer-protocol bookkeeping for the typed ndarray view of D */
  __Pyx_LocalBuf_ND __pyx_pybuffernd_D;
  __Pyx_Buffer __pyx_pybuffer_D;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  /* Compiler-generated temporaries (object refs, loop bounds, indices) */
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  Py_ssize_t __pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  PyArrayObject *__pyx_t_8 = NULL;
  Py_ssize_t __pyx_t_9;
  Py_ssize_t __pyx_t_10;
  Py_ssize_t __pyx_t_11;
  Py_ssize_t __pyx_t_12;
  double __pyx_t_13;
  Py_ssize_t __pyx_t_14;
  Py_ssize_t __pyx_t_15;
  Py_ssize_t __pyx_t_16;
  Py_ssize_t __pyx_t_17;
  __Pyx_RefNannySetupContext("mean_dist", 0);
  /* D's buffer starts unacquired */
  __pyx_pybuffer_D.pybuffer.buf = NULL;
  __pyx_pybuffer_D.refcount = 0;
  __pyx_pybuffernd_D.data = NULL;
  __pyx_pybuffernd_D.rcbuffer = &__pyx_pybuffer_D;
  /* "facedist.pyx":101
 *     cdef:
 *         # Py_ssize_t is just a special platform-specific type for indices
 *         Py_ssize_t nrow = A.shape[0]             # <<<<<<<<<<<<<<
 *         Py_ssize_t ncol = A.shape[1]
 *         Py_ssize_t ii, jj, kk
 */
  __pyx_v_nrow = (__pyx_v_A->dimensions[0]);
  /* "facedist.pyx":102
 *         # Py_ssize_t is just a special platform-specific type for indices
 *         Py_ssize_t nrow = A.shape[0]
 *         Py_ssize_t ncol = A.shape[1]             # <<<<<<<<<<<<<<
 *         Py_ssize_t ii, jj, kk
 * 
 */
  __pyx_v_ncol = (__pyx_v_A->dimensions[1]);
  /* "facedist.pyx":107
 *         # this line is particularly expensive, since creating a numpy array
 *         # involves unavoidable Python API overhead
 *         np.ndarray[np.float64_t, ndim=2] D = np.zeros((nrow, nrow), np.double)             # <<<<<<<<<<<<<<
 * 
 *         double rd
 */
  /* Build and invoke np.zeros((nrow, nrow), np.double) */
  __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_nrow); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nrow); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
  __pyx_t_2 = 0;
  __pyx_t_4 = 0;
  __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_double); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = NULL;
  __pyx_t_6 = 0;
  /* If np.zeros resolved to a bound method, unpack self so the call can
   * go through the underlying function with self prepended. */
  if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
    if (likely(__pyx_t_4)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_4);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_3, function);
      __pyx_t_6 = 1;
    }
  }
  __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (__pyx_t_4) {
    __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __pyx_t_4 = NULL;
  }
  __Pyx_GIVEREF(__pyx_t_5);
  PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_t_2);
  __pyx_t_5 = 0;
  __pyx_t_2 = 0;
  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 107, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 107, __pyx_L1_error)
  __pyx_t_8 = ((PyArrayObject *)__pyx_t_1);
  /* Acquire a writable 2-D float64 buffer view over D; on failure D is
   * set to None with a NULL buffer before jumping to the error label. */
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_D.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
      __pyx_v_D = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_D.rcbuffer->pybuffer.buf = NULL;
      __PYX_ERR(0, 107, __pyx_L1_error)
    } else {__pyx_pybuffernd_D.diminfo[0].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_D.diminfo[0].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_D.diminfo[1].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_D.diminfo[1].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[1];
    }
  }
  __pyx_t_8 = 0;
  __pyx_v_D = ((PyArrayObject *)__pyx_t_1);
  __pyx_t_1 = 0;
  /* "facedist.pyx":114
 *     # exploit the symmetry of D by only looping over its upper triangle
 * 
 *     print nrow             # <<<<<<<<<<<<<<
 *     print ncol
 * 
 */
  __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_nrow); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 114, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (__Pyx_PrintOne(0, __pyx_t_1) < 0) __PYX_ERR(0, 114, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* "facedist.pyx":115
 * 
 *     print nrow
 *     print ncol             # <<<<<<<<<<<<<<
 * 
 *     #with nogil:
 */
  __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_ncol); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 115, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  if (__Pyx_PrintOne(0, __pyx_t_1) < 0) __PYX_ERR(0, 115, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* "facedist.pyx":118
 * 
 *     #with nogil:
 *     for ii in prange(nrow, nogil=True, schedule='static'):             # <<<<<<<<<<<<<<
 *         for jj in range(ii + 1, nrow):
 * 
 */
  /* Release the GIL for the duration of the prange loop (nogil=True). */
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      #endif
      /*try:*/ {
        __pyx_t_6 = __pyx_v_nrow;
        /* Step is the constant 1, so this zero-step guard never fires. */
        if (1 == 0) abort();
        {
            /* lastprivate staging slots for ii/jj/rd, initialized to
             * sentinel values; exception state captured by the first
             * failing thread. */
            Py_ssize_t __pyx_parallel_temp0 = 0xbad0bad0;
            Py_ssize_t __pyx_parallel_temp1 = 0xbad0bad0;
            double __pyx_parallel_temp2 = __PYX_NAN();
            const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
            PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
            int __pyx_parallel_why;
            __pyx_parallel_why = 0;
            /* presumably neutralizing __builtin_expect inside the OpenMP
             * region to avoid compiler issues — generated by Cython. */
            #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
                #undef likely
                #undef unlikely
                #define likely(x)   (x)
                #define unlikely(x) (x)
            #endif
            /* Total iteration count for ii in range(0, nrow, 1). */
            __pyx_t_10 = (__pyx_t_6 - 0 + 1 - 1/abs(1)) / 1;
            if (__pyx_t_10 > 0)
            {
                #ifdef _OPENMP
                #pragma omp parallel private(__pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17) firstprivate(__pyx_t_1, __pyx_t_2, __pyx_t_3, __pyx_t_4, __pyx_t_5, __pyx_t_7) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
                #endif /* _OPENMP */
                {
                    /* Each worker thread registers with the interpreter,
                     * then releases the GIL for the loop body. */
                    #ifdef _OPENMP
                    #ifdef WITH_THREAD
                    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
                    #endif
                    Py_BEGIN_ALLOW_THREADS
                    #endif /* _OPENMP */
                    #ifdef _OPENMP
                    #pragma omp for firstprivate(__pyx_v_ii) lastprivate(__pyx_v_ii) lastprivate(__pyx_v_jj) lastprivate(__pyx_v_rd) schedule(static)
                    #endif /* _OPENMP */
                    for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_10; __pyx_t_9++){
                        /* __pyx_parallel_why >= 2 means another iteration
                         * broke/returned/errored: skip remaining work. */
                        if (__pyx_parallel_why < 2)
                        {
                            __pyx_v_ii = (Py_ssize_t)(0 + 1 * __pyx_t_9);
                            /* Initialize private variables to invalid values */
                            __pyx_v_jj = ((Py_ssize_t)0xbad0bad0);
                            __pyx_v_rd = ((double)__PYX_NAN());
                            /* "facedist.pyx":119
 *     #with nogil:
 *     for ii in prange(nrow, nogil=True, schedule='static'):
 *         for jj in range(ii + 1, nrow):             # <<<<<<<<<<<<<<
 * 
 *             with gil:
 */
                            __pyx_t_11 = __pyx_v_nrow;
                            for (__pyx_t_12 = (__pyx_v_ii + 1); __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
                              __pyx_v_jj = __pyx_t_12;
                              /* "facedist.pyx":121
 *         for jj in range(ii + 1, nrow):
 * 
 *             with gil:             # <<<<<<<<<<<<<<
 *                 rd = np.mean(cdist(A[ii], A[jj], metric='euclidean'))
 * 
 */
                              /* Re-acquire the GIL: the body below makes
                               * Python-level calls (cdist, np.mean). */
                              {
                                  #ifdef WITH_THREAD
                                  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
                                  #endif
                                  /*try:*/ {
                                    /* "facedist.pyx":122
 * 
 *             with gil:
 *                 rd = np.mean(cdist(A[ii], A[jj], metric='euclidean'))             # <<<<<<<<<<<<<<
 * 
 *             D[ii, jj] = rd
 */
                                    __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 122, __pyx_L15_error)
                                    __Pyx_GOTREF(__pyx_t_3);
                                    __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_mean); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 122, __pyx_L15_error)
                                    __Pyx_GOTREF(__pyx_t_7);
                                    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
                                    __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_cdist); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 122, __pyx_L15_error)
                                    __Pyx_GOTREF(__pyx_t_3);
                                    __pyx_t_2 = __Pyx_GetItemInt(((PyObject *)__pyx_v_A), __pyx_v_ii, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 122, __pyx_L15_error)
                                    __Pyx_GOTREF(__pyx_t_2);
                                    __pyx_t_5 = __Pyx_GetItemInt(((PyObject *)__pyx_v_A), __pyx_v_jj, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 122, __pyx_L15_error)
                                    __Pyx_GOTREF(__pyx_t_5);
                                    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 122, __pyx_L15_error)
                                    __Pyx_GOTREF(__pyx_t_4);
                                    __Pyx_GIVEREF(__pyx_t_2);
                                    PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
                                    __Pyx_GIVEREF(__pyx_t_5);
                                    PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5);
                                    __pyx_t_2 = 0;
                                    __pyx_t_5 = 0;
                                    /* kwargs dict: metric='euclidean' */
                                    __pyx_t_5 = PyDict_New(); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 122, __pyx_L15_error)
                                    __Pyx_GOTREF(__pyx_t_5);
                                    if (PyDict_SetItem(__pyx_t_5, __pyx_n_s_metric, __pyx_n_s_euclidean) < 0) __PYX_ERR(0, 122, __pyx_L15_error)
                                    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 122, __pyx_L15_error)
                                    __Pyx_GOTREF(__pyx_t_2);
                                    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
                                    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
                                    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
                                    __pyx_t_5 = NULL;
                                    /* Bound-method unpack for np.mean, then call with the cdist result. */
                                    if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_7))) {
                                      __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_7);
                                      if (likely(__pyx_t_5)) {
                                        PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
                                        __Pyx_INCREF(__pyx_t_5);
                                        __Pyx_INCREF(function);
                                        __Pyx_DECREF_SET(__pyx_t_7, function);
                                      }
                                    }
                                    if (!__pyx_t_5) {
                                      __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 122, __pyx_L15_error)
                                      __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
                                      __Pyx_GOTREF(__pyx_t_1);
                                    } else {
                                      __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 122, __pyx_L15_error)
                                      __Pyx_GOTREF(__pyx_t_4);
                                      __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __pyx_t_5 = NULL;
                                      __Pyx_GIVEREF(__pyx_t_2);
                                      PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_2);
                                      __pyx_t_2 = 0;
                                      __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 122, __pyx_L15_error)
                                      __Pyx_GOTREF(__pyx_t_1);
                                      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
                                    }
                                    __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
                                    /* Coerce the Python float result to the C double rd. */
                                    __pyx_t_13 = __pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_13 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 122, __pyx_L15_error)
                                    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
                                    __pyx_v_rd = __pyx_t_13;
                                  }
                                  /* "facedist.pyx":121
 *         for jj in range(ii + 1, nrow):
 * 
 *             with gil:             # <<<<<<<<<<<<<<
 *                 rd = np.mean(cdist(A[ii], A[jj], metric='euclidean'))
 * 
 */
                                  /* Release the GIL again on both the normal
                                   * and the error path out of `with gil`. */
                                  /*finally:*/ {
                                    /*normal exit:*/{
                                      #ifdef WITH_THREAD
                                      PyGILState_Release(__pyx_gilstate_save);
                                      #endif
                                      goto __pyx_L16;
                                    }
                                    __pyx_L15_error: {
                                      #ifdef WITH_THREAD
                                      PyGILState_Release(__pyx_gilstate_save);
                                      #endif
                                      goto __pyx_L8_error;
                                    }
                                    __pyx_L16:;
                                  }
                              }
                              /* "facedist.pyx":124
 *                 rd = np.mean(cdist(A[ii], A[jj], metric='euclidean'))
 * 
 *             D[ii, jj] = rd             # <<<<<<<<<<<<<<
 *             D[jj, ii] = rd # because D is symmetric
 * 
 */
                              /* Direct strided store into D's buffer (no GIL needed). */
                              __pyx_t_14 = __pyx_v_ii;
                              __pyx_t_15 = __pyx_v_jj;
                              *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_15, __pyx_pybuffernd_D.diminfo[1].strides) = __pyx_v_rd;
                              /* "facedist.pyx":125
 * 
 *             D[ii, jj] = rd
 *             D[jj, ii] = rd # because D is symmetric             # <<<<<<<<<<<<<<
 * 
 *     return D
 */
                              __pyx_t_16 = __pyx_v_jj;
                              __pyx_t_17 = __pyx_v_ii;
                              *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_D.diminfo[1].strides) = __pyx_v_rd;
                            }
                            goto __pyx_L18;
                            __pyx_L8_error:;
                            /* Error inside this iteration: the first failing
                             * thread stashes the exception state for re-raise
                             * after the parallel region; why==4 marks error. */
                            {
                                #ifdef WITH_THREAD
                                PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
                                #endif
                                #ifdef _OPENMP
                                #pragma omp flush(__pyx_parallel_exc_type)
                                #endif /* _OPENMP */
                                if (!__pyx_parallel_exc_type) {
                                  __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
                                  __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
                                  __Pyx_GOTREF(__pyx_parallel_exc_type);
                                }
                                #ifdef WITH_THREAD
                                PyGILState_Release(__pyx_gilstate_save);
                                #endif
                            }
                            __pyx_parallel_why = 4;
                            goto __pyx_L17;
                            __pyx_L17:;
                            /* Stage the lastprivate values under a critical
                             * section so the post-loop code sees them. */
                            #ifdef _OPENMP
                            #pragma omp critical(__pyx_parallel_lastprivates1)
                            #endif /* _OPENMP */
                            {
                                __pyx_parallel_temp0 = __pyx_v_ii;
                                __pyx_parallel_temp1 = __pyx_v_jj;
                                __pyx_parallel_temp2 = __pyx_v_rd;
                            }
                        }
                        __pyx_L18:;
                        #ifdef _OPENMP
                        #pragma omp flush(__pyx_parallel_why)
                        #endif /* _OPENMP */
                    }
                    /* Per-thread teardown: drop object temporaries under the
                     * GIL, then unregister the thread. */
                    #ifdef _OPENMP
                    Py_END_ALLOW_THREADS
                    #else
{
#ifdef WITH_THREAD
                    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
                    #endif /* _OPENMP */
                    /* Clean up any temporaries */
                    __Pyx_XDECREF(__pyx_t_1);
                    __pyx_t_1 = NULL;
                    __Pyx_XDECREF(__pyx_t_2);
                    __pyx_t_2 = NULL;
                    __Pyx_XDECREF(__pyx_t_3);
                    __pyx_t_3 = NULL;
                    __Pyx_XDECREF(__pyx_t_4);
                    __pyx_t_4 = NULL;
                    __Pyx_XDECREF(__pyx_t_5);
                    __pyx_t_5 = NULL;
                    __Pyx_XDECREF(__pyx_t_7);
                    __pyx_t_7 = NULL;
                    #ifdef WITH_THREAD
                    PyGILState_Release(__pyx_gilstate_save);
                    #endif
                    #ifndef _OPENMP
}
                    #endif /* _OPENMP */
                }
            }
            if (__pyx_parallel_exc_type) {
              /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
              __pyx_parallel_why = 4;
            }
            if (__pyx_parallel_why) {
              /* Restore lastprivate values, then re-raise the captured
               * exception (why==4) on the main thread. */
              __pyx_v_ii = __pyx_parallel_temp0;
              __pyx_v_jj = __pyx_parallel_temp1;
              __pyx_v_rd = __pyx_parallel_temp2;
              switch (__pyx_parallel_why) {
                    case 4:
                {
                    #ifdef WITH_THREAD
                    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
                    #endif
                    __Pyx_GIVEREF(__pyx_parallel_exc_type);
                    __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
                    __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
                    #ifdef WITH_THREAD
                    PyGILState_Release(__pyx_gilstate_save);
                    #endif
                }
                goto __pyx_L4_error;
              }
            }
        }
        /* Restore the real branch-prediction macros after the region. */
        #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
            #undef likely
            #undef unlikely
            #define likely(x)   __builtin_expect(!!(x), 1)
            #define unlikely(x) __builtin_expect(!!(x), 0)
        #endif
      }
      /* "facedist.pyx":118
 * 
 *     #with nogil:
 *     for ii in prange(nrow, nogil=True, schedule='static'):             # <<<<<<<<<<<<<<
 *         for jj in range(ii + 1, nrow):
 * 
 */
      /* Re-acquire the GIL released by Py_UNBLOCK_THREADS above, on both
       * the normal and the error path. */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L4_error: {
          #ifdef WITH_THREAD
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L1_error;
        }
        __pyx_L5:;
      }
  }
  /* "facedist.pyx":127
 *             D[jj, ii] = rd # because D is symmetric
 * 
 *     return D             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(((PyObject *)__pyx_v_D));
  __pyx_r = ((PyObject *)__pyx_v_D);
  goto __pyx_L0;
  /* "facedist.pyx":95
 * # indices.
 * @cython.wraparound(False)
 * def mean_dist(np.ndarray A):             # <<<<<<<<<<<<<<
 * 
 *     # declare C types for as many of our variables as possible. note that we
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_7);
  /* Release D's buffer without clobbering the pending exception. */
  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
    __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer);
    __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
  __Pyx_AddTraceback("facedist.mean_dist", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  goto __pyx_L2;
  __pyx_L0:;
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer);
  __pyx_L2:;
  __Pyx_XDECREF((PyObject *)__pyx_v_D);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "facedist.pyx":136
* # indices.
* @cython.wraparound(False)
* def meanmin_dist(np.ndarray A): # <<<<<<<<<<<<<<
*
* # declare C types for as many of our variables as possible. note that we
*/
/* Python wrapper */
static PyObject *__pyx_pw_8facedist_7meanmin_dist(PyObject *__pyx_self, PyObject *__pyx_v_A); /*proto*/
static PyMethodDef __pyx_mdef_8facedist_7meanmin_dist = {"meanmin_dist", (PyCFunction)__pyx_pw_8facedist_7meanmin_dist, METH_O, 0};
/* Cython-generated Python-level wrapper for facedist.meanmin_dist(A).
 * Validates that the single METH_O argument is a numpy.ndarray, then
 * delegates to the C-level implementation __pyx_pf_8facedist_6meanmin_dist.
 * NOTE: generated code — do not hand-edit; regenerate from facedist.pyx. */
static PyObject *__pyx_pw_8facedist_7meanmin_dist(PyObject *__pyx_self, PyObject *__pyx_v_A) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("meanmin_dist (wrapper)", 0);
  /* Type-check A against numpy.ndarray; on failure __PYX_ERR records the
   * source position (facedist.pyx:136) and jumps to the error label. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_A), __pyx_ptype_5numpy_ndarray, 1, "A", 0))) __PYX_ERR(0, 136, __pyx_L1_error)
  __pyx_r = __pyx_pf_8facedist_6meanmin_dist(__pyx_self, ((PyArrayObject *)__pyx_v_A));
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;  /* NULL return signals the pending Python exception */
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject *__pyx_pf_8facedist_6meanmin_dist(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A) {
Py_ssize_t __pyx_v_nrow;
CYTHON_UNUSED Py_ssize_t __pyx_v_ncol;
Py_ssize_t __pyx_v_ii;
Py_ssize_t __pyx_v_jj;
PyArrayObject *__pyx_v_D = 0;
PyArrayObject *__pyx_v_rD = 0;
double __pyx_v_rd;
PyObject *__pyx_v_mins = NULL;
__Pyx_LocalBuf_ND __pyx_pybuffernd_D;
__Pyx_Buffer __pyx_pybuffer_D;
__Pyx_LocalBuf_ND __pyx_pybuffernd_rD;
__Pyx_Buffer __pyx_pybuffer_rD;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyArrayObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
PyArrayObject *__pyx_t_13 = NULL;
int __pyx_t_14;
PyObject *__pyx_t_15 = NULL;
PyObject *__pyx_t_16 = NULL;
PyObject *__pyx_t_17 = NULL;
double __pyx_t_18;
Py_ssize_t __pyx_t_19;
Py_ssize_t __pyx_t_20;
Py_ssize_t __pyx_t_21;
Py_ssize_t __pyx_t_22;
__Pyx_RefNannySetupContext("meanmin_dist", 0);
__pyx_pybuffer_D.pybuffer.buf = NULL;
__pyx_pybuffer_D.refcount = 0;
__pyx_pybuffernd_D.data = NULL;
__pyx_pybuffernd_D.rcbuffer = &__pyx_pybuffer_D;
__pyx_pybuffer_rD.pybuffer.buf = NULL;
__pyx_pybuffer_rD.refcount = 0;
__pyx_pybuffernd_rD.data = NULL;
__pyx_pybuffernd_rD.rcbuffer = &__pyx_pybuffer_rD;
/* "facedist.pyx":142
* cdef:
* # Py_ssize_t is just a special platform-specific type for indices
* Py_ssize_t nrow = A.shape[0] # <<<<<<<<<<<<<<
* Py_ssize_t ncol = A.shape[1]
* Py_ssize_t ii, jj, kk
*/
__pyx_v_nrow = (__pyx_v_A->dimensions[0]);
/* "facedist.pyx":143
* # Py_ssize_t is just a special platform-specific type for indices
* Py_ssize_t nrow = A.shape[0]
* Py_ssize_t ncol = A.shape[1] # <<<<<<<<<<<<<<
* Py_ssize_t ii, jj, kk
*
*/
__pyx_v_ncol = (__pyx_v_A->dimensions[1]);
/* "facedist.pyx":148
* # this line is particularly expensive, since creating a numpy array
* # involves unavoidable Python API overhead
* np.ndarray[np.float64_t, ndim=2] D = np.zeros((nrow, nrow), np.double) # <<<<<<<<<<<<<<
* np.ndarray[np.float64_t, ndim=2] rD
*
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_nrow); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nrow); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_double); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = NULL;
__pyx_t_6 = 0;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
__pyx_t_6 = 1;
}
}
__pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
if (__pyx_t_4) {
__Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __pyx_t_4 = NULL;
}
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_t_2);
__pyx_t_5 = 0;
__pyx_t_2 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 148, __pyx_L1_error)
__pyx_t_8 = ((PyArrayObject *)__pyx_t_1);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_D.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
__pyx_v_D = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_D.rcbuffer->pybuffer.buf = NULL;
__PYX_ERR(0, 148, __pyx_L1_error)
} else {__pyx_pybuffernd_D.diminfo[0].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_D.diminfo[0].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_D.diminfo[1].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_D.diminfo[1].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[1];
}
}
__pyx_t_8 = 0;
__pyx_v_D = ((PyArrayObject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "facedist.pyx":157
*
* #with nogil:
* for ii in prange(nrow, nogil=True, schedule='static'): # <<<<<<<<<<<<<<
* for jj in range(ii + 1, nrow):
*
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
__pyx_t_6 = __pyx_v_nrow;
if (1 == 0) abort();
{
Py_ssize_t __pyx_parallel_temp0 = 0xbad0bad0;
Py_ssize_t __pyx_parallel_temp1 = 0xbad0bad0;
double __pyx_parallel_temp2 = __PYX_NAN();
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_10 = (__pyx_t_6 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_10 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_11, __pyx_t_12, __pyx_t_14, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22) firstprivate(__pyx_t_1, __pyx_t_13, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_2, __pyx_t_3, __pyx_t_7) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_ii) lastprivate(__pyx_v_ii) lastprivate(__pyx_v_jj) lastprivate(__pyx_v_rd) schedule(static)
#endif /* _OPENMP */
for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_10; __pyx_t_9++){
if (__pyx_parallel_why < 2)
{
__pyx_v_ii = (Py_ssize_t)(0 + 1 * __pyx_t_9);
/* Initialize private variables to invalid values */
__pyx_v_jj = ((Py_ssize_t)0xbad0bad0);
__pyx_v_rd = ((double)__PYX_NAN());
/* "facedist.pyx":158
* #with nogil:
* for ii in prange(nrow, nogil=True, schedule='static'):
* for jj in range(ii + 1, nrow): # <<<<<<<<<<<<<<
*
* with gil:
*/
__pyx_t_11 = __pyx_v_nrow;
for (__pyx_t_12 = (__pyx_v_ii + 1); __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_jj = __pyx_t_12;
/* "facedist.pyx":160
* for jj in range(ii + 1, nrow):
*
* with gil: # <<<<<<<<<<<<<<
* rD = cdist(A[ii], A[jj], metric='euclidean')
* # meanmin:
*/
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
/*try:*/ {
/* "facedist.pyx":161
*
* with gil:
* rD = cdist(A[ii], A[jj], metric='euclidean') # <<<<<<<<<<<<<<
* # meanmin:
* # get min of every coloumn/row, mean from these
*/
__pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_cdist); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 161, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_GetItemInt(((PyObject *)__pyx_v_A), __pyx_v_ii, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 161, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_7 = __Pyx_GetItemInt(((PyObject *)__pyx_v_A), __pyx_v_jj, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 161, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 161, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_7);
__pyx_t_3 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 161, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_7);
if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_metric, __pyx_n_s_euclidean) < 0) __PYX_ERR(0, 161, __pyx_L15_error)
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 161, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 161, __pyx_L15_error)
__pyx_t_13 = ((PyArrayObject *)__pyx_t_3);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_rD.rcbuffer->pybuffer);
__pyx_t_14 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_rD.rcbuffer->pybuffer, (PyObject*)__pyx_t_13, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack);
if (unlikely(__pyx_t_14 < 0)) {
PyErr_Fetch(&__pyx_t_15, &__pyx_t_16, &__pyx_t_17);
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_rD.rcbuffer->pybuffer, (PyObject*)__pyx_v_rD, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {
Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16); Py_XDECREF(__pyx_t_17);
__Pyx_RaiseBufferFallbackError();
} else {
PyErr_Restore(__pyx_t_15, __pyx_t_16, __pyx_t_17);
}
}
__pyx_pybuffernd_rD.diminfo[0].strides = __pyx_pybuffernd_rD.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_rD.diminfo[0].shape = __pyx_pybuffernd_rD.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_rD.diminfo[1].strides = __pyx_pybuffernd_rD.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_rD.diminfo[1].shape = __pyx_pybuffernd_rD.rcbuffer->pybuffer.shape[1];
if (unlikely(__pyx_t_14 < 0)) __PYX_ERR(0, 161, __pyx_L15_error)
}
__pyx_t_13 = 0;
__Pyx_XDECREF_SET(__pyx_v_rD, ((PyArrayObject *)__pyx_t_3));
__pyx_t_3 = 0;
/* "facedist.pyx":164
* # meanmin:
* # get min of every coloumn/row, mean from these
* mins = rD.min(axis=1) # <<<<<<<<<<<<<<
* rd = mins.mean()
*
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_rD), __pyx_n_s_min); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 164, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_7 = PyDict_New(); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 164, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_7);
if (PyDict_SetItem(__pyx_t_7, __pyx_n_s_axis, __pyx_int_1) < 0) __PYX_ERR(0, 164, __pyx_L15_error)
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 164, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF_SET(__pyx_v_mins, __pyx_t_2);
__pyx_t_2 = 0;
/* "facedist.pyx":165
* # get min of every coloumn/row, mean from these
* mins = rD.min(axis=1)
* rd = mins.mean() # <<<<<<<<<<<<<<
*
* D[ii, jj] = rd
*/
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_mins, __pyx_n_s_mean); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 165, __pyx_L15_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_3 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
if (__pyx_t_3) {
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 165, __pyx_L15_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__pyx_t_2 = __Pyx_PyObject_CallNoArg(__pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 165, __pyx_L15_error)
}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_18 = __pyx_PyFloat_AsDouble(__pyx_t_2); if (unlikely((__pyx_t_18 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 165, __pyx_L15_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_rd = __pyx_t_18;
}
/* "facedist.pyx":160
* for jj in range(ii + 1, nrow):
*
* with gil: # <<<<<<<<<<<<<<
* rD = cdist(A[ii], A[jj], metric='euclidean')
* # meanmin:
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L16;
}
__pyx_L15_error: {
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L8_error;
}
__pyx_L16:;
}
}
/* "facedist.pyx":167
* rd = mins.mean()
*
* D[ii, jj] = rd # <<<<<<<<<<<<<<
* D[jj, ii] = rd # because D is symmetric
*
*/
__pyx_t_19 = __pyx_v_ii;
__pyx_t_20 = __pyx_v_jj;
*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_19, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_20, __pyx_pybuffernd_D.diminfo[1].strides) = __pyx_v_rd;
/* "facedist.pyx":168
*
* D[ii, jj] = rd
* D[jj, ii] = rd # because D is symmetric # <<<<<<<<<<<<<<
*
* return D
*/
__pyx_t_21 = __pyx_v_jj;
__pyx_t_22 = __pyx_v_ii;
*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_21, __pyx_pybuffernd_D.diminfo[0].strides, __pyx_t_22, __pyx_pybuffernd_D.diminfo[1].strides) = __pyx_v_rd;
}
goto __pyx_L18;
__pyx_L8_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L17;
__pyx_L17:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates3)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_ii;
__pyx_parallel_temp1 = __pyx_v_jj;
__pyx_parallel_temp2 = __pyx_v_rd;
}
__pyx_L18:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
__Pyx_XDECREF(__pyx_t_1);
__pyx_t_1 = NULL;
__Pyx_XDECREF(((PyObject *)__pyx_t_13));
__pyx_t_13 = NULL;
__Pyx_XDECREF(__pyx_t_15);
__pyx_t_15 = NULL;
__Pyx_XDECREF(__pyx_t_16);
__pyx_t_16 = NULL;
__Pyx_XDECREF(__pyx_t_17);
__pyx_t_17 = NULL;
__Pyx_XDECREF(__pyx_t_2);
__pyx_t_2 = NULL;
__Pyx_XDECREF(__pyx_t_3);
__pyx_t_3 = NULL;
__Pyx_XDECREF(__pyx_t_7);
__pyx_t_7 = NULL;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_ii = __pyx_parallel_temp0;
__pyx_v_jj = __pyx_parallel_temp1;
__pyx_v_rd = __pyx_parallel_temp2;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L4_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "facedist.pyx":157
*
* #with nogil:
* for ii in prange(nrow, nogil=True, schedule='static'): # <<<<<<<<<<<<<<
* for jj in range(ii + 1, nrow):
*
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L4_error: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L5:;
}
}
/* "facedist.pyx":170
* D[jj, ii] = rd # because D is symmetric
*
* return D # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_D));
__pyx_r = ((PyObject *)__pyx_v_D);
goto __pyx_L0;
/* "facedist.pyx":136
* # indices.
* @cython.wraparound(False)
* def meanmin_dist(np.ndarray A): # <<<<<<<<<<<<<<
*
* # declare C types for as many of our variables as possible. note that we
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_7);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_rD.rcbuffer->pybuffer);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("facedist.meanmin_dist", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_rD.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_D);
__Pyx_XDECREF((PyObject *)__pyx_v_rD);
__Pyx_XDECREF(__pyx_v_mins);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fullfill the PEP.
*/
/* Python wrapper */
/* Cython-generated Python-level wrapper for numpy.ndarray.__getbuffer__.
 * Casts the untyped PyObject*/Py_buffer* slot arguments to the typed form
 * and forwards to the implementation (__pyx_pf_...___getbuffer__) below.
 * Auto-generated code — do not hand-edit; regenerate from the .pyx source. */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
/* Delegate to the typed implementation; its int return is the buffer-fill status. */
__pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Cython-generated implementation of numpy.ndarray.__getbuffer__
 * (PEP 3118 buffer protocol), emitted from Cython's bundled numpy/__init__.pxd.
 * It fills *info (buf/ndim/shape/strides/itemsize/readonly/format) from the
 * ndarray, validating contiguity flags and byte order along the way.
 * Do not hand-edit: changes are lost on the next cython run. */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_copy_shape;
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_v_hasfields;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* Pre-set info->obj to None so the __pyx_L1_error cleanup path below can
 * safely GOTREF/DECREF it whether or not the body assigned it. */
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
 * # of flags
 *
 * if info == NULL: return             # <<<<<<<<<<<<<<
 *
 * cdef int copy_shape, i, ndim
 */
__pyx_t_1 = ((__pyx_v_info == NULL) != 0);
if (__pyx_t_1) {
__pyx_r = 0;
goto __pyx_L0;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
 *
 * cdef int copy_shape, i, ndim
 * cdef int endian_detector = 1             # <<<<<<<<<<<<<<
 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
 *
 */
__pyx_v_endian_detector = 1;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":207
 * cdef int copy_shape, i, ndim
 * cdef int endian_detector = 1
 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
 *
 * ndim = PyArray_NDIM(self)
 */
/* Runtime endianness probe: first byte of int 1 is nonzero only on little-endian hosts. */
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
 *
 * ndim = PyArray_NDIM(self)             # <<<<<<<<<<<<<<
 *
 * if sizeof(npy_intp) != sizeof(Py_ssize_t):
 */
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
 * ndim = PyArray_NDIM(self)
 *
 * if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
 *     copy_shape = 1
 * else:
 */
/* If npy_intp and Py_ssize_t differ in width, the ndarray's shape/strides
 * arrays cannot be aliased directly and must be copied element-wise below. */
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":212
 *
 * if sizeof(npy_intp) != sizeof(Py_ssize_t):
 *     copy_shape = 1             # <<<<<<<<<<<<<<
 * else:
 *     copy_shape = 0
 */
__pyx_v_copy_shape = 1;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
 * ndim = PyArray_NDIM(self)
 *
 * if sizeof(npy_intp) != sizeof(Py_ssize_t):             # <<<<<<<<<<<<<<
 *     copy_shape = 1
 * else:
 */
goto __pyx_L4;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
 *     copy_shape = 1
 * else:
 *     copy_shape = 0             # <<<<<<<<<<<<<<
 *
 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
 */
/*else*/ {
__pyx_v_copy_shape = 0;
}
__pyx_L4:;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216
 *     copy_shape = 0
 *
 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
 *     and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
 *     raise ValueError(u"ndarray is not C contiguous")
 */
/* Reject a C-contiguity request when the array is not actually C-contiguous. */
__pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L6_bool_binop_done;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
 *
 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
 *     and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):             # <<<<<<<<<<<<<<
 *     raise ValueError(u"ndarray is not C contiguous")
 *
 */
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L6_bool_binop_done:;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216
 *     copy_shape = 0
 *
 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
 *     and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
 *     raise ValueError(u"ndarray is not C contiguous")
 */
if (__pyx_t_1) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
 *     and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
 *     raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
 *
 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 218, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 218, __pyx_L1_error)
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216
 *     copy_shape = 0
 *
 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)             # <<<<<<<<<<<<<<
 *     and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
 *     raise ValueError(u"ndarray is not C contiguous")
 */
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220
 *     raise ValueError(u"ndarray is not C contiguous")
 *
 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
 *     and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
 *     raise ValueError(u"ndarray is not Fortran contiguous")
 */
/* Same validation for a Fortran-contiguity request. */
__pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L9_bool_binop_done;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
 *
 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 *     and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):             # <<<<<<<<<<<<<<
 *     raise ValueError(u"ndarray is not Fortran contiguous")
 *
 */
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L9_bool_binop_done:;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220
 *     raise ValueError(u"ndarray is not C contiguous")
 *
 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
 *     and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
 *     raise ValueError(u"ndarray is not Fortran contiguous")
 */
if (__pyx_t_1) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 *     and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
 *     raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
 *
 * info.buf = PyArray_DATA(self)
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 222, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 222, __pyx_L1_error)
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220
 *     raise ValueError(u"ndarray is not C contiguous")
 *
 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)             # <<<<<<<<<<<<<<
 *     and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
 *     raise ValueError(u"ndarray is not Fortran contiguous")
 */
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":224
 *     raise ValueError(u"ndarray is not Fortran contiguous")
 *
 * info.buf = PyArray_DATA(self)             # <<<<<<<<<<<<<<
 * info.ndim = ndim
 * if copy_shape:
 */
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":225
 *
 * info.buf = PyArray_DATA(self)
 * info.ndim = ndim             # <<<<<<<<<<<<<<
 * if copy_shape:
 *     # Allocate new buffer for strides and shape info.
 */
__pyx_v_info->ndim = __pyx_v_ndim;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
 * info.buf = PyArray_DATA(self)
 * info.ndim = ndim
 * if copy_shape:             # <<<<<<<<<<<<<<
 *     # Allocate new buffer for strides and shape info.
 *     # This is allocated as one block, strides first.
 */
__pyx_t_1 = (__pyx_v_copy_shape != 0);
if (__pyx_t_1) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
 *     # Allocate new buffer for strides and shape info.
 *     # This is allocated as one block, strides first.
 *     info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)             # <<<<<<<<<<<<<<
 *     info.shape = info.strides + ndim
 *     for i in range(ndim):
 */
/* One malloc'd block holds both arrays: [strides(ndim) | shape(ndim)].
 * NOTE(review): the corresponding free() lives in __releasebuffer__. */
__pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
 *     # This is allocated as one block, strides first.
 *     info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
 *     info.shape = info.strides + ndim             # <<<<<<<<<<<<<<
 *     for i in range(ndim):
 *         info.strides[i] = PyArray_STRIDES(self)[i]
 */
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":231
 *     info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
 *     info.shape = info.strides + ndim
 *     for i in range(ndim):             # <<<<<<<<<<<<<<
 *         info.strides[i] = PyArray_STRIDES(self)[i]
 *         info.shape[i] = PyArray_DIMS(self)[i]
 */
__pyx_t_4 = __pyx_v_ndim;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
 *     info.shape = info.strides + ndim
 *     for i in range(ndim):
 *         info.strides[i] = PyArray_STRIDES(self)[i]             # <<<<<<<<<<<<<<
 *         info.shape[i] = PyArray_DIMS(self)[i]
 * else:
 */
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
 *     for i in range(ndim):
 *         info.strides[i] = PyArray_STRIDES(self)[i]
 *         info.shape[i] = PyArray_DIMS(self)[i]             # <<<<<<<<<<<<<<
 * else:
 *     info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
 */
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
 * info.buf = PyArray_DATA(self)
 * info.ndim = ndim
 * if copy_shape:             # <<<<<<<<<<<<<<
 *     # Allocate new buffer for strides and shape info.
 *     # This is allocated as one block, strides first.
 */
goto __pyx_L11;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
 *         info.shape[i] = PyArray_DIMS(self)[i]
 * else:
 *     info.strides = <Py_ssize_t*>PyArray_STRIDES(self)             # <<<<<<<<<<<<<<
 *     info.shape = <Py_ssize_t*>PyArray_DIMS(self)
 *     info.suboffsets = NULL
 */
/*else*/ {
/* Same-width case: alias the ndarray's own strides/shape arrays (no copy, no malloc). */
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
 * else:
 *     info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
 *     info.shape = <Py_ssize_t*>PyArray_DIMS(self)             # <<<<<<<<<<<<<<
 *     info.suboffsets = NULL
 *     info.itemsize = PyArray_ITEMSIZE(self)
 */
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L11:;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":237
 *     info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
 *     info.shape = <Py_ssize_t*>PyArray_DIMS(self)
 * info.suboffsets = NULL             # <<<<<<<<<<<<<<
 * info.itemsize = PyArray_ITEMSIZE(self)
 * info.readonly = not PyArray_ISWRITEABLE(self)
 */
__pyx_v_info->suboffsets = NULL;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":238
 *     info.shape = <Py_ssize_t*>PyArray_DIMS(self)
 * info.suboffsets = NULL
 * info.itemsize = PyArray_ITEMSIZE(self)             # <<<<<<<<<<<<<<
 * info.readonly = not PyArray_ISWRITEABLE(self)
 *
 */
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
 * info.suboffsets = NULL
 * info.itemsize = PyArray_ITEMSIZE(self)
 * info.readonly = not PyArray_ISWRITEABLE(self)             # <<<<<<<<<<<<<<
 *
 * cdef int t
 */
__pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":242
 *
 * cdef int t
 * cdef char* f = NULL             # <<<<<<<<<<<<<<
 * cdef dtype descr = self.descr
 * cdef int offset
 */
__pyx_v_f = NULL;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":243
 * cdef int t
 * cdef char* f = NULL
 * cdef dtype descr = self.descr             # <<<<<<<<<<<<<<
 * cdef int offset
 *
 */
__pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246
 * cdef int offset
 *
 * cdef bint hasfields = PyDataType_HASFIELDS(descr)             # <<<<<<<<<<<<<<
 *
 * if not hasfields and not copy_shape:
 */
__pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248
 * cdef bint hasfields = PyDataType_HASFIELDS(descr)
 *
 * if not hasfields and not copy_shape:             # <<<<<<<<<<<<<<
 *     # do not call releasebuffer
 *     info.obj = None
 */
/* info->obj = None means nothing was allocated here, so __releasebuffer__
 * need not run; info->obj = self means it must (format/strides may be malloc'd). */
__pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L15_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L15_bool_binop_done:;
if (__pyx_t_1) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":250
 * if not hasfields and not copy_shape:
 *     # do not call releasebuffer
 *     info.obj = None             # <<<<<<<<<<<<<<
 * else:
 *     # need to call releasebuffer
 */
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = Py_None;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248
 * cdef bint hasfields = PyDataType_HASFIELDS(descr)
 *
 * if not hasfields and not copy_shape:             # <<<<<<<<<<<<<<
 *     # do not call releasebuffer
 *     info.obj = None
 */
goto __pyx_L14;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253
 * else:
 *     # need to call releasebuffer
 *     info.obj = self             # <<<<<<<<<<<<<<
 *
 * if not hasfields:
 */
/*else*/ {
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
}
__pyx_L14:;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255
 *     info.obj = self
 *
 * if not hasfields:             # <<<<<<<<<<<<<<
 *     t = descr.type_num
 *     if ((descr.byteorder == c'>' and little_endian) or
 */
__pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_1) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
 *
 * if not hasfields:
 *     t = descr.type_num             # <<<<<<<<<<<<<<
 *     if ((descr.byteorder == c'>' and little_endian) or
 *         (descr.byteorder == c'<' and not little_endian)):
 */
__pyx_t_4 = __pyx_v_descr->type_num;
__pyx_v_t = __pyx_t_4;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
 * if not hasfields:
 *     t = descr.type_num
 *     if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
 *         (descr.byteorder == c'<' and not little_endian)):
 *         raise ValueError(u"Non-native byte order not supported")
 */
/* Reject dtypes whose explicit byte order disagrees with the host's. */
__pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
if (!__pyx_t_2) {
goto __pyx_L20_next_or;
} else {
}
__pyx_t_2 = (__pyx_v_little_endian != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_L20_next_or:;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
 *     t = descr.type_num
 *     if ((descr.byteorder == c'>' and little_endian) or
 *         (descr.byteorder == c'<' and not little_endian)):             # <<<<<<<<<<<<<<
 *         raise ValueError(u"Non-native byte order not supported")
 *         if t == NPY_BYTE:        f = "b"
 */
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L19_bool_binop_done:;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
 * if not hasfields:
 *     t = descr.type_num
 *     if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
 *         (descr.byteorder == c'<' and not little_endian)):
 *         raise ValueError(u"Non-native byte order not supported")
 */
if (__pyx_t_1) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
 *     if ((descr.byteorder == c'>' and little_endian) or
 *         (descr.byteorder == c'<' and not little_endian)):
 *         raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
 *         if t == NPY_BYTE:        f = "b"
 *         elif t == NPY_UBYTE:     f = "B"
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 259, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 259, __pyx_L1_error)
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
 * if not hasfields:
 *     t = descr.type_num
 *     if ((descr.byteorder == c'>' and little_endian) or             # <<<<<<<<<<<<<<
 *         (descr.byteorder == c'<' and not little_endian)):
 *         raise ValueError(u"Non-native byte order not supported")
 */
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
 *         (descr.byteorder == c'<' and not little_endian)):
 *         raise ValueError(u"Non-native byte order not supported")
 *     if t == NPY_BYTE:        f = "b"             # <<<<<<<<<<<<<<
 *     elif t == NPY_UBYTE:     f = "B"
 *     elif t == NPY_SHORT:     f = "h"
 */
/* Map the scalar type number to its single PEP 3118 format character. */
switch (__pyx_v_t) {
case NPY_BYTE:
__pyx_v_f = ((char *)"b");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
 *         raise ValueError(u"Non-native byte order not supported")
 *     if t == NPY_BYTE:        f = "b"
 *     elif t == NPY_UBYTE:     f = "B"             # <<<<<<<<<<<<<<
 *     elif t == NPY_SHORT:     f = "h"
 *     elif t == NPY_USHORT:    f = "H"
 */
case NPY_UBYTE:
__pyx_v_f = ((char *)"B");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
 *     if t == NPY_BYTE:        f = "b"
 *     elif t == NPY_UBYTE:     f = "B"
 *     elif t == NPY_SHORT:     f = "h"             # <<<<<<<<<<<<<<
 *     elif t == NPY_USHORT:    f = "H"
 *     elif t == NPY_INT:       f = "i"
 */
case NPY_SHORT:
__pyx_v_f = ((char *)"h");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
 *     elif t == NPY_UBYTE:     f = "B"
 *     elif t == NPY_SHORT:     f = "h"
 *     elif t == NPY_USHORT:    f = "H"             # <<<<<<<<<<<<<<
 *     elif t == NPY_INT:       f = "i"
 *     elif t == NPY_UINT:      f = "I"
 */
case NPY_USHORT:
__pyx_v_f = ((char *)"H");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
 *     elif t == NPY_SHORT:     f = "h"
 *     elif t == NPY_USHORT:    f = "H"
 *     elif t == NPY_INT:       f = "i"             # <<<<<<<<<<<<<<
 *     elif t == NPY_UINT:      f = "I"
 *     elif t == NPY_LONG:      f = "l"
 */
case NPY_INT:
__pyx_v_f = ((char *)"i");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
 *     elif t == NPY_USHORT:    f = "H"
 *     elif t == NPY_INT:       f = "i"
 *     elif t == NPY_UINT:      f = "I"             # <<<<<<<<<<<<<<
 *     elif t == NPY_LONG:      f = "l"
 *     elif t == NPY_ULONG:     f = "L"
 */
case NPY_UINT:
__pyx_v_f = ((char *)"I");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
 *     elif t == NPY_INT:       f = "i"
 *     elif t == NPY_UINT:      f = "I"
 *     elif t == NPY_LONG:      f = "l"             # <<<<<<<<<<<<<<
 *     elif t == NPY_ULONG:     f = "L"
 *     elif t == NPY_LONGLONG:  f = "q"
 */
case NPY_LONG:
__pyx_v_f = ((char *)"l");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
 *     elif t == NPY_UINT:      f = "I"
 *     elif t == NPY_LONG:      f = "l"
 *     elif t == NPY_ULONG:     f = "L"             # <<<<<<<<<<<<<<
 *     elif t == NPY_LONGLONG:  f = "q"
 *     elif t == NPY_ULONGLONG: f = "Q"
 */
case NPY_ULONG:
__pyx_v_f = ((char *)"L");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
 *     elif t == NPY_LONG:      f = "l"
 *     elif t == NPY_ULONG:     f = "L"
 *     elif t == NPY_LONGLONG:  f = "q"             # <<<<<<<<<<<<<<
 *     elif t == NPY_ULONGLONG: f = "Q"
 *     elif t == NPY_FLOAT:     f = "f"
 */
case NPY_LONGLONG:
__pyx_v_f = ((char *)"q");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
 *     elif t == NPY_ULONG:     f = "L"
 *     elif t == NPY_LONGLONG:  f = "q"
 *     elif t == NPY_ULONGLONG: f = "Q"             # <<<<<<<<<<<<<<
 *     elif t == NPY_FLOAT:     f = "f"
 *     elif t == NPY_DOUBLE:    f = "d"
 */
case NPY_ULONGLONG:
__pyx_v_f = ((char *)"Q");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
 *     elif t == NPY_LONGLONG:  f = "q"
 *     elif t == NPY_ULONGLONG: f = "Q"
 *     elif t == NPY_FLOAT:     f = "f"             # <<<<<<<<<<<<<<
 *     elif t == NPY_DOUBLE:    f = "d"
 *     elif t == NPY_LONGDOUBLE: f = "g"
 */
case NPY_FLOAT:
__pyx_v_f = ((char *)"f");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
 *     elif t == NPY_ULONGLONG: f = "Q"
 *     elif t == NPY_FLOAT:     f = "f"
 *     elif t == NPY_DOUBLE:    f = "d"             # <<<<<<<<<<<<<<
 *     elif t == NPY_LONGDOUBLE: f = "g"
 *     elif t == NPY_CFLOAT:    f = "Zf"
 */
case NPY_DOUBLE:
__pyx_v_f = ((char *)"d");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
 *     elif t == NPY_FLOAT:     f = "f"
 *     elif t == NPY_DOUBLE:    f = "d"
 *     elif t == NPY_LONGDOUBLE: f = "g"             # <<<<<<<<<<<<<<
 *     elif t == NPY_CFLOAT:    f = "Zf"
 *     elif t == NPY_CDOUBLE:   f = "Zd"
 */
case NPY_LONGDOUBLE:
__pyx_v_f = ((char *)"g");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
 *     elif t == NPY_DOUBLE:    f = "d"
 *     elif t == NPY_LONGDOUBLE: f = "g"
 *     elif t == NPY_CFLOAT:    f = "Zf"             # <<<<<<<<<<<<<<
 *     elif t == NPY_CDOUBLE:   f = "Zd"
 *     elif t == NPY_CLONGDOUBLE: f = "Zg"
 */
case NPY_CFLOAT:
__pyx_v_f = ((char *)"Zf");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
 *     elif t == NPY_LONGDOUBLE: f = "g"
 *     elif t == NPY_CFLOAT:    f = "Zf"
 *     elif t == NPY_CDOUBLE:   f = "Zd"             # <<<<<<<<<<<<<<
 *     elif t == NPY_CLONGDOUBLE: f = "Zg"
 *     elif t == NPY_OBJECT:    f = "O"
 */
case NPY_CDOUBLE:
__pyx_v_f = ((char *)"Zd");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":275
 *     elif t == NPY_CFLOAT:    f = "Zf"
 *     elif t == NPY_CDOUBLE:   f = "Zd"
 *     elif t == NPY_CLONGDOUBLE: f = "Zg"             # <<<<<<<<<<<<<<
 *     elif t == NPY_OBJECT:    f = "O"
 *     else:
 */
case NPY_CLONGDOUBLE:
__pyx_v_f = ((char *)"Zg");
break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
 *     elif t == NPY_CDOUBLE:   f = "Zd"
 *     elif t == NPY_CLONGDOUBLE: f = "Zg"
 *     elif t == NPY_OBJECT:    f = "O"             # <<<<<<<<<<<<<<
 *     else:
 *         raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
 */
case NPY_OBJECT:
__pyx_v_f = ((char *)"O");
break;
default:
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278
 *     elif t == NPY_OBJECT:    f = "O"
 *     else:
 *         raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
 *     info.format = f
 *     return
 */
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 278, __pyx_L1_error)
break;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":279
 *     else:
 *         raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
 *     info.format = f             # <<<<<<<<<<<<<<
 *     return
 * else:
 */
/* Simple scalar dtype: f points at a static string literal, nothing to free later. */
__pyx_v_info->format = __pyx_v_f;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
 *         raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
 *     info.format = f
 *     return             # <<<<<<<<<<<<<<
 * else:
 *     info.format = <char*>stdlib.malloc(_buffer_format_string_len)
 */
__pyx_r = 0;
goto __pyx_L0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255
 *     info.obj = self
 *
 * if not hasfields:             # <<<<<<<<<<<<<<
 *     t = descr.type_num
 *     if ((descr.byteorder == c'>' and little_endian) or
 */
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282
 *     return
 * else:
 *     info.format = <char*>stdlib.malloc(_buffer_format_string_len)             # <<<<<<<<<<<<<<
 *     info.format[0] = c'^'  # Native data types, manual alignment
 *     offset = 0
 */
/*else*/ {
/* Structured dtype: build the format string dynamically.
 * 0xFF is the inlined value of _buffer_format_string_len; the buffer is
 * freed in __releasebuffer__ when the dtype has fields. */
__pyx_v_info->format = ((char *)malloc(0xFF));
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
 * else:
 *     info.format = <char*>stdlib.malloc(_buffer_format_string_len)
 *     info.format[0] = c'^'  # Native data types, manual alignment             # <<<<<<<<<<<<<<
 *     offset = 0
 *     f = _util_dtypestring(descr, info.format + 1,
 */
(__pyx_v_info->format[0]) = '^';
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":284
 *     info.format = <char*>stdlib.malloc(_buffer_format_string_len)
 *     info.format[0] = c'^'  # Native data types, manual alignment
 *     offset = 0             # <<<<<<<<<<<<<<
 *     f = _util_dtypestring(descr, info.format + 1,
 *                           info.format + _buffer_format_string_len,
 */
__pyx_v_offset = 0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":285
 *     info.format[0] = c'^'  # Native data types, manual alignment
 *     offset = 0
 *     f = _util_dtypestring(descr, info.format + 1,             # <<<<<<<<<<<<<<
 *                           info.format + _buffer_format_string_len,
 *                           &offset)
 */
__pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 285, __pyx_L1_error)
__pyx_v_f = __pyx_t_7;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
 *                           info.format + _buffer_format_string_len,
 *                           &offset)
 *     f[0] = c'\0' # Terminate format string             # <<<<<<<<<<<<<<
 *
 * def __releasebuffer__(ndarray self, Py_buffer* info):
 */
(__pyx_v_f[0]) = '\x00';
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
 * # experimental exception made for __getbuffer__ and __releasebuffer__
 * # -- the details of this may change.
 * def __getbuffer__(ndarray self, Py_buffer* info, int flags):             # <<<<<<<<<<<<<<
 *     # This implementation of getbuffer is geared towards Cython
 *     # requirements, and does not yet fullfill the PEP.
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
/* On error, drop the reference stored in info->obj so the caller does not
 * see a half-filled buffer with a live owner reference. */
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
/* On the early-return paths that left info->obj as the None placeholder,
 * clear it (None means "no releasebuffer needed"). */
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* Python wrapper */
/* NOTE(review): this file is Cython-generated C. Do not hand-edit; regenerate
 * from the corresponding .pyx/.pxd sources instead. */
/* Buffer-protocol "release" slot for numpy.ndarray: a thin wrapper that casts
 * the generic PyObject* arguments to the concrete types and delegates to the
 * implementation function. The __Pyx_RefNanny* calls are Cython's reference-
 * count debugging bookkeeping and carry no program logic. */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
/* Delegate to the actual implementation with the concrete argument types. */
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* __releasebuffer__ implementation: frees heap blocks that the matching
 * __getbuffer__ placed into the Py_buffer. Two conditional frees:
 *  - info->format, only when the dtype has fields (otherwise the format
 *    presumably points at static/shared data — per the generated logic above,
 *    only the HASFIELDS path allocated it);
 *  - info->strides, only when npy_intp and Py_ssize_t differ in size, i.e.
 *    when __getbuffer__ could not alias the array's own dimension data.
 *    Per the embedded .pxd comment, info->shape lives in the same allocated
 *    block as info->strides, so one free releases both.
 * Returns nothing and must not raise (buffer-release slots cannot fail). */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
if (__pyx_t_1) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format) # <<<<<<<<<<<<<<
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* stdlib.free(info.strides)
*/
/* Structured dtype: the format string was malloc'd by __getbuffer__. */
free(__pyx_v_info->format);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* stdlib.free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
/* Compile-time-constant condition kept in a runtime temp by the generator. */
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":294
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* stdlib.free(info.strides) # <<<<<<<<<<<<<<
* # info.shape was stored after info.strides in the same block
*
*/
/* Frees strides AND shape: shape was stored after strides in this block. */
free(__pyx_v_info->strides);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* stdlib.free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":770
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* Inlined helper for the .pxd's PyArray_MultiIterNew1: builds a numpy
 * multi-iterator (broadcast object) over a single object.
 * Returns a new reference on success; on failure returns NULL (0) with the
 * Python exception set and a traceback entry added. The goto/label error
 * structure is the generator's standard single-exit pattern. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 771, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
/* Transfer the new reference from the temp to the return slot. */
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":770
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":773
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* Inlined helper for the .pxd's PyArray_MultiIterNew2: builds a numpy
 * multi-iterator (broadcast object) over two objects.
 * Returns a new reference, or NULL with the exception set on failure.
 * Structurally identical to PyArray_MultiIterNew1 above. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 774, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":773
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":776
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* Inlined helper for the .pxd's PyArray_MultiIterNew3: builds a numpy
 * multi-iterator (broadcast object) over three objects.
 * Returns a new reference, or NULL with the exception set on failure.
 * Structurally identical to PyArray_MultiIterNew1 above. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":776
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":779
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* Inlined helper for the .pxd's PyArray_MultiIterNew4: builds a numpy
 * multi-iterator (broadcast object) over four objects.
 * Returns a new reference, or NULL with the exception set on failure.
 * Structurally identical to PyArray_MultiIterNew1 above. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 780, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":779
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":782
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* Inlined helper for the .pxd's PyArray_MultiIterNew5: builds a numpy
 * multi-iterator (broadcast object) over five objects.
 * Returns a new reference, or NULL with the exception set on failure.
 * Structurally identical to PyArray_MultiIterNew1 above. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 783, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":782
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":785
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
long __pyx_t_8;
char *__pyx_t_9;
__Pyx_RefNannySetupContext("_util_dtypestring", 0);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790
*
* cdef dtype child
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791
* cdef dtype child
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(__pyx_v_descr->names == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
__PYX_ERR(1, 794, __pyx_L1_error)
}
__pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 794, __pyx_L1_error)
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 794, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
__pyx_t_3 = 0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
if (unlikely(__pyx_v_descr->fields == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 795, __pyx_L1_error)
}
__pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 795, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 795, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
*/
if (likely(__pyx_v_fields != Py_None)) {
PyObject* sequence = __pyx_v_fields;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 796, __pyx_L1_error)
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 796, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 796, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 796, __pyx_L1_error)
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 796, __pyx_L1_error)
__Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
__pyx_t_3 = 0;
__Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 798, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
if (__pyx_t_6) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 799, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 799, __pyx_L1_error)
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
if (!__pyx_t_7) {
goto __pyx_L8_next_or;
} else {
}
__pyx_t_7 = (__pyx_v_little_endian != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_L8_next_or:;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
*
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
if (__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L7_bool_binop_done:;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
if (__pyx_t_6) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 803, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 803, __pyx_L1_error)
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 813, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 813, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 813, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!__pyx_t_6) break;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 0x78;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
if (__pyx_t_6) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 821, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
if (__pyx_t_6) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 823, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 823, __pyx_L1_error)
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 826, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 826, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 826, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 827, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 828, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x68;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 829, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 829, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 829, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 830, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 830, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 830, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x69;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 831, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 832, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x6C;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 833, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 833, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 833, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 834, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x71;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 835, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 835, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 835, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 836, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 836, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 836, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x66;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x64;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 838, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 0x67;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 839, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x66;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 840, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x64;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 841, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 0x67;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 842, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 79;
goto __pyx_L15;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
/*else*/ {
__pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 844, __pyx_L1_error)
}
__pyx_L15:;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
goto __pyx_L13;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
/*else*/ {
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) __PYX_ERR(1, 849, __pyx_L1_error)
__pyx_v_f = __pyx_t_9;
}
__pyx_L13:;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":785
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
PyObject *__pyx_v_baseptr;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968
* cdef inline void set_array_base(ndarray arr, object base):
* cdef PyObject* baseptr
* if base is None: # <<<<<<<<<<<<<<
* baseptr = NULL
* else:
*/
__pyx_t_1 = (__pyx_v_base == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
* cdef PyObject* baseptr
* if base is None:
* baseptr = NULL # <<<<<<<<<<<<<<
* else:
* Py_INCREF(base) # important to do this before decref below!
*/
__pyx_v_baseptr = NULL;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968
* cdef inline void set_array_base(ndarray arr, object base):
* cdef PyObject* baseptr
* if base is None: # <<<<<<<<<<<<<<
* baseptr = NULL
* else:
*/
goto __pyx_L3;
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
* baseptr = NULL
* else:
* Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<<
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base)
*/
/*else*/ {
Py_INCREF(__pyx_v_base);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
* else:
* Py_INCREF(base) # important to do this before decref below!
* baseptr = <PyObject*>base # <<<<<<<<<<<<<<
* Py_XDECREF(arr.base)
* arr.base = baseptr
*/
__pyx_v_baseptr = ((PyObject *)__pyx_v_base);
}
__pyx_L3:;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973
* Py_INCREF(base) # important to do this before decref below!
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base) # <<<<<<<<<<<<<<
* arr.base = baseptr
*
*/
Py_XDECREF(__pyx_v_arr->base);
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base)
* arr.base = baseptr # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
__pyx_v_arr->base = __pyx_v_baseptr;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/* NOTE(review): Cython-generated code — do not edit by hand.
 *
 * Counterpart of set_array_base above: returns a NEW reference to the object
 * stored in arr->base, or None when arr->base is NULL. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("get_array_base", 0);
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
 *
 * cdef inline object get_array_base(ndarray arr):
 *     if arr.base is NULL:             # <<<<<<<<<<<<<<
 *         return None
 *     else:
 */
  __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
  if (__pyx_t_1) {
    /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978
 * cdef inline object get_array_base(ndarray arr):
 *     if arr.base is NULL:
 *         return None             # <<<<<<<<<<<<<<
 *     else:
 *         return <object>arr.base
 */
    /* No base set: hand back an owned reference to None. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_None);
    __pyx_r = Py_None;
    goto __pyx_L0;
    /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
 *
 * cdef inline object get_array_base(ndarray arr):
 *     if arr.base is NULL:             # <<<<<<<<<<<<<<
 *         return None
 *     else:
 */
  }
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980
 *         return None
 *     else:
 *         return <object>arr.base             # <<<<<<<<<<<<<<
 */
  /*else*/ {
    /* Promote the borrowed arr->base pointer to a new strong reference. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
    __pyx_r = ((PyObject *)__pyx_v_arr->base);
    goto __pyx_L0;
  }
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
 *     arr.base = baseptr
 *
 * cdef inline object get_array_base(ndarray arr):             # <<<<<<<<<<<<<<
 *     if arr.base is NULL:
 *         return None
 */
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Module method table: empty (sentinel entry only) — this generated module
 * registers no C-level PyMethodDef methods here. */
static PyMethodDef __pyx_methods[] = {
  {0, 0, 0, 0}
};
/* Python 3 module definition for the "facedist" extension module.
 * Only compiled for PY_MAJOR_VERSION >= 3; Python 2 builds use
 * Py_InitModule4 instead.  m_size = -1 means no per-interpreter state. */
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
  #if PY_VERSION_HEX < 0x03020000
  /* Pre-3.2 layout lacked PyModuleDef_HEAD_INIT; spell the header out. */
  { PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
  #else
  PyModuleDef_HEAD_INIT,
  #endif
  "facedist",
  0, /* m_doc */
  -1, /* m_size */
  __pyx_methods /* m_methods */,
  NULL, /* m_reload */
  NULL, /* m_traverse */
  NULL, /* m_clear */
  NULL /* m_free */
};
#endif
/* Table of every string constant the generated module needs (identifiers,
 * error messages, the source-file path).  __Pyx_InitStrings() in
 * __Pyx_InitGlobals() walks this table at import time, creating/interning
 * each object and storing it through the first pointer.  The trailing flags
 * per entry encode encoding/intern/identifier properties; the all-zero row
 * terminates the table. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
  {&__pyx_n_s_A, __pyx_k_A, sizeof(__pyx_k_A), 0, 0, 1, 1},
  {&__pyx_n_s_D, __pyx_k_D, sizeof(__pyx_k_D), 0, 0, 1, 1},
  {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
  {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
  {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
  {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
  {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
  {&__pyx_n_s_axis, __pyx_k_axis, sizeof(__pyx_k_axis), 0, 0, 1, 1},
  {&__pyx_n_s_cdist, __pyx_k_cdist, sizeof(__pyx_k_cdist), 0, 0, 1, 1},
  {&__pyx_n_s_double, __pyx_k_double, sizeof(__pyx_k_double), 0, 0, 1, 1},
  {&__pyx_n_s_end, __pyx_k_end, sizeof(__pyx_k_end), 0, 0, 1, 1},
  {&__pyx_n_s_euclidean, __pyx_k_euclidean, sizeof(__pyx_k_euclidean), 0, 0, 1, 1},
  {&__pyx_n_s_facedist, __pyx_k_facedist, sizeof(__pyx_k_facedist), 0, 0, 1, 1},
  {&__pyx_n_s_file, __pyx_k_file, sizeof(__pyx_k_file), 0, 0, 1, 1},
  {&__pyx_kp_s_home_mlode_BA_Moritz_Implementa, __pyx_k_home_mlode_BA_Moritz_Implementa, sizeof(__pyx_k_home_mlode_BA_Moritz_Implementa), 0, 0, 1, 0},
  {&__pyx_n_s_ii, __pyx_k_ii, sizeof(__pyx_k_ii), 0, 0, 1, 1},
  {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
  {&__pyx_n_s_jj, __pyx_k_jj, sizeof(__pyx_k_jj), 0, 0, 1, 1},
  {&__pyx_n_s_kk, __pyx_k_kk, sizeof(__pyx_k_kk), 0, 0, 1, 1},
  {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
  {&__pyx_n_s_max, __pyx_k_max, sizeof(__pyx_k_max), 0, 0, 1, 1},
  {&__pyx_n_s_max_dist, __pyx_k_max_dist, sizeof(__pyx_k_max_dist), 0, 0, 1, 1},
  {&__pyx_n_s_mean, __pyx_k_mean, sizeof(__pyx_k_mean), 0, 0, 1, 1},
  {&__pyx_n_s_mean_dist, __pyx_k_mean_dist, sizeof(__pyx_k_mean_dist), 0, 0, 1, 1},
  {&__pyx_n_s_meanmin_dist, __pyx_k_meanmin_dist, sizeof(__pyx_k_meanmin_dist), 0, 0, 1, 1},
  {&__pyx_n_s_metric, __pyx_k_metric, sizeof(__pyx_k_metric), 0, 0, 1, 1},
  {&__pyx_n_s_min, __pyx_k_min, sizeof(__pyx_k_min), 0, 0, 1, 1},
  {&__pyx_n_s_min_dist, __pyx_k_min_dist, sizeof(__pyx_k_min_dist), 0, 0, 1, 1},
  {&__pyx_n_s_mins, __pyx_k_mins, sizeof(__pyx_k_mins), 0, 0, 1, 1},
  {&__pyx_n_s_ncol, __pyx_k_ncol, sizeof(__pyx_k_ncol), 0, 0, 1, 1},
  {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
  {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
  {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
  {&__pyx_n_s_nrow, __pyx_k_nrow, sizeof(__pyx_k_nrow), 0, 0, 1, 1},
  {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
  {&__pyx_n_s_print, __pyx_k_print, sizeof(__pyx_k_print), 0, 0, 1, 1},
  {&__pyx_n_s_rD, __pyx_k_rD, sizeof(__pyx_k_rD), 0, 0, 1, 1},
  {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
  {&__pyx_n_s_rd, __pyx_k_rd, sizeof(__pyx_k_rd), 0, 0, 1, 1},
  {&__pyx_n_s_scipy_spatial_distance, __pyx_k_scipy_spatial_distance, sizeof(__pyx_k_scipy_spatial_distance), 0, 0, 1, 1},
  {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
  {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
  {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
  {0, 0, 0, 0, 0, 0, 0}
};
/* Looks up and caches the builtins this module uses (range, ValueError,
 * RuntimeError) so later code can reference them without repeated dict
 * lookups.  Returns 0 on success, -1 with a Python exception set on
 * failure. */
static int __Pyx_InitCachedBuiltins(void) {
  __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 40, __pyx_L1_error)
  __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 218, __pyx_L1_error)
  __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 799, __pyx_L1_error)
  return 0;
  __pyx_L1_error:;
  return -1;
}
/* Builds the module's cached constant objects at import time:
 *   - one-element argument tuples for the buffer-protocol error messages
 *     raised from the numpy.pxd __getbuffer__/_util_dtypestring helpers;
 *   - argument-name tuples and code objects for the four Python-visible
 *     functions (max_dist, min_dist, mean_dist, meanmin_dist) defined in
 *     facedist.pyx.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __Pyx_InitCachedConstants(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
 *             if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
 *                 and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
 *                 raise ValueError(u"ndarray is not C contiguous")             # <<<<<<<<<<<<<<
 *
 *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 */
  __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 218, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple_);
  __Pyx_GIVEREF(__pyx_tuple_);
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
 *             if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 *                 and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
 *                 raise ValueError(u"ndarray is not Fortran contiguous")             # <<<<<<<<<<<<<<
 *
 *             info.buf = PyArray_DATA(self)
 */
  __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 222, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__2);
  __Pyx_GIVEREF(__pyx_tuple__2);
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
 *                 if ((descr.byteorder == c'>' and little_endian) or
 *                     (descr.byteorder == c'<' and not little_endian)):
 *                     raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
 *                 if   t == NPY_BYTE:        f = "b"
 *                 elif t == NPY_UBYTE:       f = "B"
 */
  __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 259, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__3);
  __Pyx_GIVEREF(__pyx_tuple__3);
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
 *
 *         if (end - f) - <int>(new_offset - offset[0]) < 15:
 *             raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
 *
 *         if ((child.byteorder == c'>' and little_endian) or
 */
  __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 799, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__4);
  __Pyx_GIVEREF(__pyx_tuple__4);
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
 *         if ((child.byteorder == c'>' and little_endian) or
 *             (child.byteorder == c'<' and not little_endian)):
 *             raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
 *             # One could encode it in the format string and have Cython
 *             # complain instead, BUT: < and > in format strings also imply
 */
  __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 803, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__5);
  __Pyx_GIVEREF(__pyx_tuple__5);
  /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
 *             t = child.type_num
 *             if end - f < 5:
 *                 raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
 *
 *             # Until ticket #99 is fixed, use integers to avoid warnings
 */
  __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 823, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__6);
  __Pyx_GIVEREF(__pyx_tuple__6);
  /* "facedist.pyx":19
 * #  indices.
 * @cython.wraparound(False)
 * def max_dist(np.ndarray A):             # <<<<<<<<<<<<<<
 *
 *     # declare C types for as many of our variables as possible. note that we
 */
  /* Local-variable-name tuple + code object for facedist.max_dist. */
  __pyx_tuple__7 = PyTuple_Pack(8, __pyx_n_s_A, __pyx_n_s_nrow, __pyx_n_s_ncol, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_kk, __pyx_n_s_D, __pyx_n_s_rd); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(0, 19, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__7);
  __Pyx_GIVEREF(__pyx_tuple__7);
  __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(1, 0, 8, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_mlode_BA_Moritz_Implementa, __pyx_n_s_max_dist, 19, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) __PYX_ERR(0, 19, __pyx_L1_error)
  /* "facedist.pyx":57
 * #  indices.
 * @cython.wraparound(False)
 * def min_dist(np.ndarray A):             # <<<<<<<<<<<<<<
 *
 *     # declare C types for as many of our variables as possible. note that we
 */
  /* Local-variable-name tuple + code object for facedist.min_dist. */
  __pyx_tuple__9 = PyTuple_Pack(8, __pyx_n_s_A, __pyx_n_s_nrow, __pyx_n_s_ncol, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_kk, __pyx_n_s_D, __pyx_n_s_rd); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(0, 57, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__9);
  __Pyx_GIVEREF(__pyx_tuple__9);
  __pyx_codeobj__10 = (PyObject*)__Pyx_PyCode_New(1, 0, 8, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__9, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_mlode_BA_Moritz_Implementa, __pyx_n_s_min_dist, 57, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__10)) __PYX_ERR(0, 57, __pyx_L1_error)
  /* "facedist.pyx":95
 * #  indices.
 * @cython.wraparound(False)
 * def mean_dist(np.ndarray A):             # <<<<<<<<<<<<<<
 *
 *     # declare C types for as many of our variables as possible. note that we
 */
  /* Local-variable-name tuple + code object for facedist.mean_dist. */
  __pyx_tuple__11 = PyTuple_Pack(8, __pyx_n_s_A, __pyx_n_s_nrow, __pyx_n_s_ncol, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_kk, __pyx_n_s_D, __pyx_n_s_rd); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 95, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__11);
  __Pyx_GIVEREF(__pyx_tuple__11);
  __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(1, 0, 8, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_mlode_BA_Moritz_Implementa, __pyx_n_s_mean_dist, 95, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) __PYX_ERR(0, 95, __pyx_L1_error)
  /* "facedist.pyx":136
 * #  indices.
 * @cython.wraparound(False)
 * def meanmin_dist(np.ndarray A):             # <<<<<<<<<<<<<<
 *
 *     # declare C types for as many of our variables as possible. note that we
 */
  /* Local-variable-name tuple + code object for facedist.meanmin_dist. */
  __pyx_tuple__13 = PyTuple_Pack(10, __pyx_n_s_A, __pyx_n_s_nrow, __pyx_n_s_ncol, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_kk, __pyx_n_s_D, __pyx_n_s_rD, __pyx_n_s_rd, __pyx_n_s_mins); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__13);
  __Pyx_GIVEREF(__pyx_tuple__13);
  __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(1, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_mlode_BA_Moritz_Implementa, __pyx_n_s_meanmin_dist, 136, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 136, __pyx_L1_error)
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_RefNannyFinishContext();
  return -1;
}
/* Import-time global setup: initializes the GIL/threading machinery (on
 * threaded builds), interns every entry of __pyx_string_tab, and creates the
 * cached small-int constant 1.  Returns 0 on success, -1 with a Python
 * exception set on failure. */
static int __Pyx_InitGlobals(void) {
  /* InitThreads.init */
  #ifdef WITH_THREAD
  PyEval_InitThreads();
  #endif
  if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
  if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
  __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  return 0;
  __pyx_L1_error:;
  return -1;
}
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initfacedist(void); /*proto*/
PyMODINIT_FUNC initfacedist(void)
#else
PyMODINIT_FUNC PyInit_facedist(void); /*proto*/
PyMODINIT_FUNC PyInit_facedist(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_facedist(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("facedist", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_facedist) {
if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "facedist")) {
if (unlikely(PyDict_SetItemString(modules, "facedist", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global init code ---*/
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
/*--- Type import code ---*/
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type",
#if CYTHON_COMPILING_IN_PYPY
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error)
__pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 155, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 168, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 172, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 181, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 861, __pyx_L1_error)
/*--- Variable import code ---*/
/*--- Function import code ---*/
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "facedist.pyx":1
* import numpy as np # <<<<<<<<<<<<<<
* from scipy.spatial.distance import cdist
* cimport numpy as np
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "facedist.pyx":2
* import numpy as np
* from scipy.spatial.distance import cdist # <<<<<<<<<<<<<<
* cimport numpy as np
* cimport cython
*/
__pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_n_s_cdist);
__Pyx_GIVEREF(__pyx_n_s_cdist);
PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_cdist);
__pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_spatial_distance, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_cdist); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_cdist, __pyx_t_1) < 0) __PYX_ERR(0, 2, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "facedist.pyx":19
* # indices.
* @cython.wraparound(False)
* def max_dist(np.ndarray A): # <<<<<<<<<<<<<<
*
* # declare C types for as many of our variables as possible. note that we
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8facedist_1max_dist, NULL, __pyx_n_s_facedist); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_max_dist, __pyx_t_2) < 0) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "facedist.pyx":57
* # indices.
* @cython.wraparound(False)
* def min_dist(np.ndarray A): # <<<<<<<<<<<<<<
*
* # declare C types for as many of our variables as possible. note that we
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8facedist_3min_dist, NULL, __pyx_n_s_facedist); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 57, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_min_dist, __pyx_t_2) < 0) __PYX_ERR(0, 57, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "facedist.pyx":95
* # indices.
* @cython.wraparound(False)
* def mean_dist(np.ndarray A): # <<<<<<<<<<<<<<
*
* # declare C types for as many of our variables as possible. note that we
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8facedist_5mean_dist, NULL, __pyx_n_s_facedist); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 95, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_mean_dist, __pyx_t_2) < 0) __PYX_ERR(0, 95, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "facedist.pyx":136
* # indices.
* @cython.wraparound(False)
* def meanmin_dist(np.ndarray A): # <<<<<<<<<<<<<<
*
* # declare C types for as many of our variables as possible. note that we
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8facedist_7meanmin_dist, NULL, __pyx_n_s_facedist); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_meanmin_dist, __pyx_t_2) < 0) __PYX_ERR(0, 136, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "facedist.pyx":1
* import numpy as np # <<<<<<<<<<<<<<
* from scipy.spatial.distance import cdist
* cimport numpy as np
*/
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init facedist", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init facedist");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
/* Refnanny: optional reference-count debugging support.
 * Imports `modname` and reads its "RefNannyAPI" attribute, a PyLong that
 * holds the address of a __Pyx_RefNannyAPIStruct.  Returns that pointer,
 * or NULL on any failure (import or attribute lookup error is left set). */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    PyObject *m = NULL, *p = NULL;
    void *r = NULL;
    m = PyImport_ImportModule((char *)modname);
    if (!m) goto end;
    p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
    if (!p) goto end;
    r = PyLong_AsVoidPtr(p);
end:
    /* release temporaries whether or not the lookup succeeded */
    Py_XDECREF(p);
    Py_XDECREF(m);
    return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* GetBuiltinName */
/* Look up `name` on the cached builtins module (__pyx_b).  Returns a new
 * reference, or NULL with NameError set when the builtin does not exist
 * (Py3 formats the unicode name with %U; Py2 uses the raw bytes). */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
    PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
    if (unlikely(!result)) {
        PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
            "name '%U' is not defined", name);
#else
            "name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
    }
    return result;
}
/* ArgTypeTest */
/* Raise TypeError describing an argument whose runtime type did not match
 * the declared type. */
static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) {
    PyErr_Format(PyExc_TypeError,
        "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
        name, type->tp_name, Py_TYPE(obj)->tp_name);
}
/* Check that `obj` matches `type`.  Returns 1 on success, 0 with an error
 * set otherwise.
 *   none_allowed - accept Py_None as a valid value
 *   exact        - require the exact type (no subclasses); on Py2 a
 *                  `basestring` expectation also accepts str/unicode */
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
    const char *name, int exact)
{
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (none_allowed && obj == Py_None) return 1;
    else if (exact) {
        if (likely(Py_TYPE(obj) == type)) return 1;
        #if PY_MAJOR_VERSION == 2
        else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
        #endif
    }
    else {
        /* subclass instances are accepted */
        if (likely(PyObject_TypeCheck(obj, type))) return 1;
    }
    __Pyx_RaiseArgumentTypeInvalid(name, obj, type);
    return 0;
}
/* GetModuleGlobalName */
/* Resolve `name` in the module globals (__pyx_d), falling back to builtins.
 * Returns a new reference or NULL with NameError set.
 * NOTE: the open brace of the `else`/`if (!result)` branch is deliberately
 * balanced across the #if/#else so both preprocessor paths share the
 * fallback block below -- do not reformat. */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
    PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON
    /* CPython: borrowed-ref dict lookup, then INCREF on hit */
    result = PyDict_GetItem(__pyx_d, name);
    if (likely(result)) {
        Py_INCREF(result);
    } else {
#else
    /* other implementations: generic mapping lookup (new reference) */
    result = PyObject_GetItem(__pyx_d, name);
    if (!result) {
        PyErr_Clear();
#endif
        result = __Pyx_GetBuiltinName(name);
    }
    return result;
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* ExtTypeTest */
/* Verify `obj` is an instance of `type` (subclasses allowed).
 * Returns 1 on success; 0 with TypeError (or SystemError if `type` is
 * missing) set otherwise.  Used for extension-type casts. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(PyObject_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}
/* BufferFormatCheck */
/* Runtime byte-order probe: returns nonzero (1) on a little-endian host,
 * 0 on big-endian.  Writes 1 into an unsigned int and inspects the byte at
 * the lowest address via a union overlay. */
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
  union {
    unsigned int word;
    unsigned char bytes[sizeof(unsigned int)];
  } probe;
  probe.word = 1u;
  return probe.bytes[0] == 1;
}
/* Initialize a buffer-format parsing context before checking a PEP-3118
 * format string against the expected dtype `type`.
 *   stack - caller-provided frame array; ctx->head points at the frame for
 *           the field currently being matched, ctx->root at the outermost.
 * Packing defaults to '@' (native alignment); counts/state are cleared. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
  stack[0].field = &ctx->root;
  stack[0].parent_offset = 0;
  ctx->root.type = type;
  ctx->root.name = "buffer dtype";
  ctx->root.offset = 0;
  ctx->head = stack;
  ctx->head->field = &ctx->root;
  ctx->fmt_offset = 0;
  ctx->head->parent_offset = 0;
  ctx->new_packmode = '@';
  ctx->enc_packmode = '@';
  ctx->new_count = 1;
  ctx->enc_count = 0;
  ctx->enc_type = 0;
  ctx->is_complex = 0;
  ctx->is_valid_array = 0;
  ctx->struct_alignment = 0;
  /* descend through single-field wrapper ('S') types so matching starts at
     the first concrete leaf field */
  while (type->typegroup == 'S') {
    ++ctx->head;
    ctx->head->field = type->fields;
    ctx->head->parent_offset = 0;
    type = type->fields->type;
  }
}
/* Parse a non-negative decimal number at *ts, advancing *ts past the digits
 * consumed.  Returns the parsed value, or -1 (with *ts unchanged) when the
 * first character is not a digit.
 *
 * Bug fix: the continuation loop previously tested `*t < '9'`, which made
 * any non-leading '9' terminate the scan -- "19" parsed as 1 and "99" as 9,
 * leaving the stray '9' to confuse the rest of the format-string parser.
 * The digit test must be inclusive: '0' <= *t <= '9'. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
      return -1;  /* caller raises ValueError via __Pyx_BufFmt_ExpectNumber */
    } else {
        count = *t++ - '0';
        while (*t >= '0' && *t <= '9') {  /* was `*t < '9'`: dropped '9' digits */
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
/* Like __Pyx_BufFmt_ParseNumber, but sets ValueError naming the offending
 * character when no number is present.  Returns the number or -1. */
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)
        PyErr_Format(PyExc_ValueError,\
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
/* Set ValueError for a format character the parser cannot handle. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
  PyErr_Format(PyExc_ValueError,
               "Unexpected format string character: '%c'", ch);
}
/* Translate a PEP-3118/struct-module format character into a human-readable
 * type name for error messages.  The floating codes ('f','d','g') describe
 * the complex variant when `is_complex` is set (a 'Z' prefix was seen).
 * The NUL character reports "end"; anything unrecognized reports
 * "unparseable format string". */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
  if (ch == 'c') return "'char'";
  if (ch == 'b') return "'signed char'";
  if (ch == 'B') return "'unsigned char'";
  if (ch == 'h') return "'short'";
  if (ch == 'H') return "'unsigned short'";
  if (ch == 'i') return "'int'";
  if (ch == 'I') return "'unsigned int'";
  if (ch == 'l') return "'long'";
  if (ch == 'L') return "'unsigned long'";
  if (ch == 'q') return "'long long'";
  if (ch == 'Q') return "'unsigned long long'";
  if (ch == 'f') return is_complex ? "'complex float'" : "'float'";
  if (ch == 'd') return is_complex ? "'complex double'" : "'double'";
  if (ch == 'g') return is_complex ? "'complex long double'" : "'long double'";
  if (ch == 'T') return "a struct";
  if (ch == 'O') return "Python object";
  if (ch == 'P') return "a pointer";
  if (ch == 's' || ch == 'p') return "a string";
  if (ch == 0)   return "end";
  return "unparseable format string";
}
/* Size in bytes of a format character under *standard* packing
 * ('=', '<', '>', '!'), per the struct-module table.  Complex floats are
 * twice the scalar size.  Returns 0 with ValueError set for 'g' (no
 * standard size exists) or an unknown character. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return 2;
    case 'i': case 'I': case 'l': case 'L': return 4;
    case 'q': case 'Q': return 8;
    case 'f': return (is_complex ? 8 : 4);
    case 'd': return (is_complex ? 16 : 8);
    case 'g': {
      PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
      return 0;
    }
    case 'O': case 'P': return sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
  }
}
/* Size in bytes of a format character under *native* packing ('@'/'^'),
 * i.e. the compiler's sizeof for the corresponding C type.  Complex floats
 * are twice the scalar size.  Returns 0 with ValueError set on unknowns. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
  switch (ch) {
    case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(short);
    case 'i': case 'I': return sizeof(int);
    case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(float) * (is_complex ? 2 : 1);
    case 'd': return sizeof(double) * (is_complex ? 2 : 1);
    case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
    case 'O': case 'P': return sizeof(void*);
    default: {
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
    }
  }
}
/* Helper structs for measuring the native alignment of each C type:
 * sizeof(struct{char; T;}) - sizeof(T) equals T's alignment requirement. */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
/* Native alignment (in bytes) required by the C type behind a format
 * character.  Returns 0 with ValueError set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
  }
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably the same as above,
   but we don't have any guarantees.
 */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
/* Trailing-padding contribution of a format character's C type, measured as
 * sizeof(struct{T; char;}) - sizeof(T).  Returns 0 with ValueError set for
 * unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
  switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
    default:
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
  }
}
/* Map a format character to its type-group code (the letters match the
 * `typegroup` field of __Pyx_TypeInfo -- presumably H=char-like, I=signed
 * int, U=unsigned int, R=real, C=complex, O=object, P=pointer; confirm
 * against the __Pyx_TypeInfo declaration).  Returns 0 with ValueError set
 * for unknown characters. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
  switch (ch) {
    case 'c':
        return 'H';
    case 'b': case 'h': case 'i':
    case 'l': case 'q': case 's': case 'p':
        return 'I';
    case 'B': case 'H': case 'I': case 'L': case 'Q':
        return 'U';
    case 'f': case 'd': case 'g':
        return (is_complex ? 'C' : 'R');
    case 'O':
        return 'O';
    case 'P':
        return 'P';
    default: {
      __Pyx_BufFmt_RaiseUnexpectedChar(ch);
      return 0;
    }
  }
}
/* Raise ValueError describing a dtype mismatch found while checking a
 * buffer format string.  At the root (or when the whole format was
 * consumed, ctx->head == NULL) the message names the expected top-level
 * type; inside a struct it names the field and its parent. */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
  if (ctx->head == NULL || ctx->head->field == &ctx->root) {
    const char* expected;
    const char* quote;
    if (ctx->head == NULL) {
      expected = "end";
      quote = "";
    } else {
      expected = ctx->head->field->type->name;
      quote = "'";
    }
    PyErr_Format(PyExc_ValueError,
                 "Buffer dtype mismatch, expected %s%s%s but got %s",
                 quote, expected, quote,
                 __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
  } else {
    __Pyx_StructField* field = ctx->head->field;
    __Pyx_StructField* parent = (ctx->head - 1)->field;
    PyErr_Format(PyExc_ValueError,
                 "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
                 field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
                 parent->type->name, field->name);
  }
}
/* Commit the pending run of identical format characters (ctx->enc_type
 * repeated ctx->enc_count times, packed per ctx->enc_packmode) against the
 * expected dtype tree, advancing ctx->head through struct fields and
 * ctx->fmt_offset through the layout.  Returns 0 on success, -1 with a
 * Python error set on any mismatch. */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
  char group;
  size_t size, offset, arraysize = 1;
  if (ctx->enc_type == 0) return 0;  /* nothing buffered yet */
  /* multi-dimensional field: fold all dimensions into one element count */
  if (ctx->head->field->type->arraysize[0]) {
    int i, ndim = 0;
    if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
        /* char arrays: the 's'/'p' repeat count is the (1-D) array size */
        ctx->is_valid_array = ctx->head->field->type->ndim == 1;
        ndim = 1;
        if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
            PyErr_Format(PyExc_ValueError,
                         "Expected a dimension of size %zu, got %zu",
                         ctx->head->field->type->arraysize[0], ctx->enc_count);
            return -1;
        }
    }
    if (!ctx->is_valid_array) {
      PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
                   ctx->head->field->type->ndim, ndim);
      return -1;
    }
    for (i = 0; i < ctx->head->field->type->ndim; i++) {
      arraysize *= ctx->head->field->type->arraysize[i];
    }
    ctx->is_valid_array = 0;  /* consume the array marker */
    ctx->enc_count = 1;
  }
  group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
  do {
    __Pyx_StructField* field = ctx->head->field;
    __Pyx_TypeInfo* type = field->type;
    /* element size depends on the active packing mode */
    if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
      size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
    } else {
      size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
    }
    if (ctx->enc_packmode == '@') {
      /* native mode: round fmt_offset up to the type's alignment and
         remember the struct's overall trailing-padding requirement */
      size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
      size_t align_mod_offset;
      if (align_at == 0) return -1;
      align_mod_offset = ctx->fmt_offset % align_at;
      if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
      if (ctx->struct_alignment == 0)
          ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
                                                                 ctx->is_complex);
    }
    if (type->size != size || type->typegroup != group) {
      if (type->typegroup == 'C' && type->fields != NULL) {
        /* expected type is a complex struct: descend into its fields */
        size_t parent_offset = ctx->head->parent_offset + field->offset;
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = parent_offset;
        continue;
      }
      if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
          /* char-like vs same-size type: tolerated */
      } else {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return -1;
      }
    }
    offset = ctx->head->parent_offset + field->offset;
    if (ctx->fmt_offset != offset) {
      PyErr_Format(PyExc_ValueError,
                   "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
                   (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
      return -1;
    }
    ctx->fmt_offset += size;
    if (arraysize)
      ctx->fmt_offset += (arraysize - 1) * size;
    --ctx->enc_count;
    /* advance to the next expected field, popping finished structs */
    while (1) {
      if (field == &ctx->root) {
        ctx->head = NULL;  /* whole dtype consumed */
        if (ctx->enc_count != 0) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return -1;
        }
        break;
      }
      ctx->head->field = ++field;
      if (field->type == NULL) {
        /* end of this struct's field list: pop a level */
        --ctx->head;
        field = ctx->head->field;
        continue;
      } else if (field->type->typegroup == 'S') {
        /* substructure: push and start at its first field */
        size_t parent_offset = ctx->head->parent_offset + field->offset;
        if (field->type->fields->type == NULL) continue;
        field = field->type->fields;
        ++ctx->head;
        ctx->head->field = field;
        ctx->head->parent_offset = parent_offset;
        break;
      } else {
        break;
      }
    }
  } while (ctx->enc_count);
  ctx->enc_type = 0;
  ctx->is_complex = 0;
  return 0;
}
/* Parse a "(d1,d2,...)" array-dimension group in a buffer format string.
 * On entry *tsp points at the '('; on success *tsp is advanced past the
 * ')' and Py_None (borrowed) is returned.  Each dimension is checked
 * against the expected dtype's arraysize.  Returns NULL with a Python
 * error set on any mismatch or malformed input.
 *
 * Bug fix: the whitespace cases previously executed a bare `continue`
 * without advancing `ts`, so any blank/tab/newline inside the parentheses
 * made the loop spin forever; the character must be consumed first. */
static CYTHON_INLINE PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    const char *ts = *tsp;
    int i = 0, number;
    int ndim = ctx->head->field->type->ndim;
    ++ts;  /* skip '(' */
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    /* flush any pending scalar run before matching the array field */
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    while (*ts && *ts != ')') {
        switch (*ts) {
            /* consume the whitespace character before re-testing (the
               original `continue` without ++ts looped forever here) */
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':  ++ts; continue;
            default:  break;
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                        "Expected a dimension of size %zu, got %d",
                        ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;  /* lets ProcessTypeChunk accept the array field */
    ctx->new_count = 1;
    *tsp = ++ts;  /* skip ')' */
    return Py_None;
}
/* Validate one (sub)format string `ts` against the dtype state in `ctx`.
 * Consecutive identical codes are accumulated into (enc_type, enc_count)
 * runs and committed by __Pyx_BufFmt_ProcessTypeChunk.  Returns the
 * position after the consumed portion (after the matching '}' for a
 * substruct), or NULL with a Python error set.
 * NOTE: several switch cases below fall through deliberately -- do not add
 * break statements. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
  int got_Z = 0;  /* saw a 'Z' (complex) prefix for the next float code */
  while (1) {
    switch(*ts) {
      case 0:
        /* end of format: everything pending must flush and the dtype must
           be fully consumed */
        if (ctx->enc_type != 0 && ctx->head == NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        if (ctx->head != NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        return ts;
      case ' ':
      case '\r':
      case '\n':
        ++ts;
        break;
      case '<':
        if (!__Pyx_IsLittleEndian()) {
          PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '>':
      case '!':
        if (__Pyx_IsLittleEndian()) {
          PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '=':
      case '@':
      case '^':
        ctx->new_packmode = *ts++;
        break;
      case 'T':
        {
          /* substruct: "T{...}", possibly with a repeat count before it */
          const char* ts_after_sub;
          size_t i, struct_count = ctx->new_count;
          size_t struct_alignment = ctx->struct_alignment;
          ctx->new_count = 1;
          ++ts;
          if (*ts != '{') {
            PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
            return NULL;
          }
          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
          ctx->enc_type = 0;
          ctx->enc_count = 0;
          ctx->struct_alignment = 0;
          ++ts;
          ts_after_sub = ts;
          /* re-parse the same substring once per repetition */
          for (i = 0; i != struct_count; ++i) {
            ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
            if (!ts_after_sub) return NULL;
          }
          ts = ts_after_sub;
          if (struct_alignment) ctx->struct_alignment = struct_alignment;
        }
        break;
      case '}':
        {
          /* end of substruct: flush, then pad to the struct's alignment */
          size_t alignment = ctx->struct_alignment;
          ++ts;
          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
          ctx->enc_type = 0;
          if (alignment && ctx->fmt_offset % alignment) {
            ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
          }
        }
        return ts;
      case 'x':
        /* pad bytes: advance the offset without matching a field */
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->fmt_offset += ctx->new_count;
        ctx->new_count = 1;
        ctx->enc_count = 0;
        ctx->enc_type = 0;
        ctx->enc_packmode = ctx->new_packmode;
        ++ts;
        break;
      case 'Z':
        got_Z = 1;
        ++ts;
        if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
          __Pyx_BufFmt_RaiseUnexpectedChar('Z');
          return NULL;
        }
        /* deliberate fall through: handle the float code that follows */
      case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
      case 'l': case 'L': case 'q': case 'Q':
      case 'f': case 'd': case 'g':
      case 'O': case 'p':
        if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
            ctx->enc_packmode == ctx->new_packmode) {
          /* same code as the pending run: just extend it */
          ctx->enc_count += ctx->new_count;
          ctx->new_count = 1;
          got_Z = 0;
          ++ts;
          break;
        }
        /* deliberate fall through: start a new run (shared with 's') */
      case 's':
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->enc_count = ctx->new_count;
        ctx->enc_packmode = ctx->new_packmode;
        ctx->enc_type = *ts;
        ctx->is_complex = got_Z;
        ++ts;
        ctx->new_count = 1;
        got_Z = 0;
        break;
      case ':':
        /* skip a ":name:" field annotation */
        ++ts;
        while(*ts != ':') ++ts;
        ++ts;
        break;
      case '(':
        if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
        break;
      default:
        {
          /* anything else must be a repeat count for the next code */
          int number = __Pyx_BufFmt_ExpectNumber(&ts);
          if (number == -1) return NULL;
          ctx->new_count = (size_t)number;
        }
    }
  }
}
/* Reset a Py_buffer to a safe empty state: NULL data/owner, zero-length
 * shape/strides and all-(-1) suboffsets from the shared static arrays. */
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
  buf->buf = NULL;
  buf->obj = NULL;
  buf->strides = __Pyx_zeros;
  buf->shape = __Pyx_zeros;
  buf->suboffsets = __Pyx_minusones;
}
/* Acquire a buffer from `obj` into `buf` and validate it against the
 * expected dtype and dimensionality.
 *   dtype - expected element layout; checked against buf->format unless
 *           `cast` is nonzero
 *   nd    - required number of dimensions
 * None/NULL objects yield a zeroed buffer and succeed (the generated code
 * treats that as "no buffer").  Returns 0 on success, -1 with an error set
 * (the buffer is zeroed again on failure). */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
        Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
        int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
  if (obj == Py_None || obj == NULL) {
    __Pyx_ZeroBuffer(buf);
    return 0;
  }
  buf->buf = NULL;
  if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
  if (buf->ndim != nd) {
    PyErr_Format(PyExc_ValueError,
                 "Buffer has wrong number of dimensions (expected %d, got %d)",
                 nd, buf->ndim);
    goto fail;
  }
  if (!cast) {
    /* full PEP-3118 format-string check against the expected dtype */
    __Pyx_BufFmt_Context ctx;
    __Pyx_BufFmt_Init(&ctx, stack, dtype);
    if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
  }
  if ((unsigned)buf->itemsize != dtype->size) {
    PyErr_Format(PyExc_ValueError,
      "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
      buf->itemsize, (buf->itemsize > 1) ? "s" : "",
      dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
    goto fail;
  }
  /* normalize missing suboffsets so indexing code can always read them */
  if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
  return 0;
fail:;
  __Pyx_ZeroBuffer(buf);
  return -1;
}
/* Release a buffer previously filled by __Pyx_GetBufferAndValidate.
 * A zeroed (never-acquired) buffer is a no-op; the sentinel suboffsets
 * array substituted at acquisition time is stripped before release. */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
  if (info->buf == NULL) return;
  if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
  __Pyx_ReleaseBuffer(info);
}
/* GetItemInt */
/* Generic o[j] lookup used as the slow path of the GetItemInt helpers.
 * Takes ownership of `j` (a freshly created index object, may be NULL if
 * its creation failed) and returns a new reference or NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
    PyObject *r;
    if (!j) return NULL;
    r = PyObject_GetItem(o, j);
    Py_DECREF(j);
    return r;
}
/* Fast o[i] for a known list.  On CPython uses PyList_GET_ITEM directly,
 * applying wraparound/boundscheck per the compile-time directives; out of
 * range falls back to the generic protocol (which raises IndexError).
 * Returns a new reference or NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                              CYTHON_NCP_UNUSED int wraparound,
                                                              CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o);
    if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
        PyObject *r = PyList_GET_ITEM(o, i);  /* borrowed */
        Py_INCREF(r);
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* Fast o[i] for a known tuple; mirror of __Pyx_GetItemInt_List_Fast using
 * PyTuple_GET_ITEM.  Returns a new reference or NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                              CYTHON_NCP_UNUSED int wraparound,
                                                              CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o);
    if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
        PyObject *r = PyTuple_GET_ITEM(o, i);  /* borrowed */
        Py_INCREF(r);
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* o[i] for an object of unknown concrete type: dispatches to list/tuple
 * fast paths, then the sq_item slot (with manual negative-index wraparound
 * via sq_length), and finally the generic mapping protocol.  Returns a new
 * reference or NULL with an error set. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
                                                     CYTHON_NCP_UNUSED int wraparound,
                                                     CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (is_list || PyList_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
            PyObject *r = PyList_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    }
    else if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
        if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
            PyObject *r = PyTuple_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_item)) {
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    /* only an overflowing length is tolerated here; the
                       sq_item call below will then raise IndexError */
                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
                        return NULL;
                    PyErr_Clear();
                }
            }
            return m->sq_item(o, i);
        }
    }
#else
    if (is_list || PySequence_Check(o)) {
        return PySequence_GetItem(o, i);
    }
#endif
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* PyObjectCallMethO */
/* Call a METH_O (or METH_NOARGS, arg==NULL) C function object directly,
 * bypassing tuple packing.  Adds recursion accounting and normalizes a
 * NULL-without-error result into SystemError.  CPython only. */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
    PyObject *self, *result;
    PyCFunction cfunc;
    cfunc = PyCFunction_GET_FUNCTION(func);
    self = PyCFunction_GET_SELF(func);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = cfunc(self, arg);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* PyObjectCallOneArg */
/* Call `func` with exactly one positional argument.  On CPython, METH_O
 * builtins skip argument-tuple creation via __Pyx_PyObject_CallMethO;
 * everything else packs a 1-tuple.  Returns a new reference or NULL. */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_New(1);
    if (unlikely(!args)) return NULL;
    Py_INCREF(arg);  /* PyTuple_SET_ITEM steals this reference */
    PyTuple_SET_ITEM(args, 0, arg);
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#ifdef __Pyx_CyFunction_USED
    if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
    if (likely(PyCFunction_Check(func))) {
#endif
        if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
            return __Pyx_PyObject_CallMethO(func, arg);
        }
    }
    return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
/* non-CPython: no fast path available */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
    PyObject *result;
    PyObject *args = PyTuple_Pack(1, arg);
    if (unlikely(!args)) return NULL;
    result = __Pyx_PyObject_Call(func, args, NULL);
    Py_DECREF(args);
    return result;
}
#endif
/* PyErrFetchRestore */
/* Fast equivalents of PyErr_Restore / PyErr_Fetch that operate directly on
 * a given PyThreadState.  Restore steals the three references it is given
 * and releases whatever was previously set; Fetch transfers ownership of
 * the current exception triple to the caller and clears the state. */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
}
#endif
/* BufferFallbackError */
/* Raise the ValueError used when reassigning a buffer variable fails AND
 * reacquiring the previous buffer also fails (the variable is left unset). */
static void __Pyx_RaiseBufferFallbackError(void) {
  PyErr_SetString(PyExc_ValueError,
     "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
}
/* PyObjectCallNoArg */
/* Call `func` with no arguments.  METH_NOARGS builtins are invoked
 * directly via __Pyx_PyObject_CallMethO(func, NULL); everything else goes
 * through the generic call with the shared empty tuple.  CPython only. */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#ifdef __Pyx_CyFunction_USED
    if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
    if (likely(PyCFunction_Check(func))) {
#endif
        if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
            return __Pyx_PyObject_CallMethO(func, NULL);
        }
    }
    return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
}
#endif
/* RaiseException */
/* Implement the `raise type, value, tb` / `raise exc from cause` statement.
 * Validates and normalizes the arguments, then installs the exception into
 * the thread state (it propagates when the caller returns an error code).
 * Two separate implementations: Python 2 semantics below, Python 3 (with
 * exception chaining via `cause`) in the #else branch. */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                        CYTHON_UNUSED PyObject *cause) {
    __Pyx_PyThreadState_declare
    /* own the inputs; Py_None arguments mean "not supplied" */
    Py_XINCREF(type);
    if (!value || value == Py_None)
        value = NULL;
    else
        Py_INCREF(value);
    if (!tb || tb == Py_None)
        tb = NULL;
    else {
        Py_INCREF(tb);
        if (!PyTraceBack_Check(tb)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: arg 3 must be a traceback or None");
            goto raise_error;
        }
    }
    if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
        /* PyPy's NormalizeException cannot handle a NULL value */
        if (!value) {
            Py_INCREF(Py_None);
            value = Py_None;
        }
#endif
        PyErr_NormalizeException(&type, &value, &tb);
    } else {
        /* raising an instance: its class becomes the type */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto raise_error;
        }
        value = type;
        type = (PyObject*) Py_TYPE(type);
        Py_INCREF(type);
        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: exception class must be a subclass of BaseException");
            goto raise_error;
        }
    }
    __Pyx_PyThreadState_assign
    __Pyx_ErrRestore(type, value, tb);  /* steals all three references */
    return;
raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
    return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject* owned_instance = NULL;
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        /* raising an existing instance */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (PyExceptionClass_Check(type)) {
        /* raising a class: reuse `value` if it is already an instance of a
           subclass, otherwise instantiate the class with `value` as args */
        PyObject *instance_class = NULL;
        if (value && PyExceptionInstance_Check(value)) {
            instance_class = (PyObject*) Py_TYPE(value);
            if (instance_class != type) {
                int is_subclass = PyObject_IsSubclass(instance_class, type);
                if (!is_subclass) {
                    instance_class = NULL;
                } else if (unlikely(is_subclass == -1)) {
                    goto bad;
                } else {
                    type = instance_class;
                }
            }
        }
        if (!instance_class) {
            PyObject *args;
            if (!value)
                args = PyTuple_New(0);
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            } else
                args = PyTuple_Pack(1, value);
            if (!args)
                goto bad;
            owned_instance = PyObject_Call(type, args, NULL);
            Py_DECREF(args);
            if (!owned_instance)
                goto bad;
            value = owned_instance;
            if (!PyExceptionInstance_Check(value)) {
                PyErr_Format(PyExc_TypeError,
                             "calling %R should have returned an instance of "
                             "BaseException, not %R",
                             type, Py_TYPE(value));
                goto bad;
            }
        }
    } else {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
#if PY_VERSION_HEX >= 0x03030000
    if (cause) {
#else
    if (cause && cause != Py_None) {
#endif
        /* `raise ... from cause`: attach (possibly instantiated) cause */
        PyObject *fixed_cause;
        if (cause == Py_None) {
            fixed_cause = NULL;  /* explicit `from None` suppresses chaining */
        } else if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        } else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        } else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto bad;
        }
        PyException_SetCause(value, fixed_cause);  /* steals fixed_cause */
    }
    PyErr_SetObject(type, value);
    if (tb) {
        /* install the explicit traceback over the one SetObject created */
#if CYTHON_COMPILING_IN_PYPY
        PyObject *tmp_type, *tmp_value, *tmp_tb;
        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
        Py_INCREF(tb);
        PyErr_Restore(tmp_type, tmp_value, tb);
        Py_XDECREF(tmp_tb);
#else
        PyThreadState *tstate = PyThreadState_GET();
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
#endif
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}
#endif
/* RaiseTooManyValuesToUnpack */
/* Set a ValueError for iterable unpacking that produced more than
 * `expected` values (e.g. `a, b = three_items`). */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
/* Set a ValueError for iterable unpacking that ran out after `index`
 * items; "%.1s" selects "" or "s" to pluralize like CPython's message. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
/* Set the TypeError raised when None is unpacked as an iterable. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* Import */
/* Import module `name` like the `import` statement.  `from_list` is the
 * tuple of names for `from m import ...` (may be NULL), `level` is the
 * relative-import level; level == -1 requests the Py2-style "relative
 * then absolute" lookup.  Returns a new reference, or NULL with an
 * exception set. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
    PyObject *empty_list = 0;
    PyObject *module = 0;
    PyObject *global_dict = 0;
    PyObject *empty_dict = 0;
    PyObject *list;
    #if PY_VERSION_HEX < 0x03030000
    /* Older CPython: call builtins.__import__ so import hooks apply. */
    PyObject *py_import;
    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
    if (!py_import)
        goto bad;
    #endif
    if (from_list)
        list = from_list;
    else {
        empty_list = PyList_New(0);
        if (!empty_list)
            goto bad;
        list = empty_list;
    }
    global_dict = PyModule_GetDict(__pyx_m);
    if (!global_dict)
        goto bad;
    empty_dict = PyDict_New();
    if (!empty_dict)
        goto bad;
    {
        #if PY_MAJOR_VERSION >= 3
        if (level == -1) {
            /* Emulate Py2 default semantics on Py3: when this module is
             * inside a package, first try a level-1 (relative) import and
             * fall back to an absolute one on ImportError. */
            if (strchr(__Pyx_MODULE_NAME, '.')) {
                #if PY_VERSION_HEX < 0x03030000
                PyObject *py_level = PyInt_FromLong(1);
                if (!py_level)
                    goto bad;
                module = PyObject_CallFunctionObjArgs(py_import,
                    name, global_dict, empty_dict, list, py_level, NULL);
                Py_DECREF(py_level);
                #else
                module = PyImport_ImportModuleLevelObject(
                    name, global_dict, empty_dict, list, 1);
                #endif
                if (!module) {
                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
                        goto bad;
                    PyErr_Clear();
                }
            }
            level = 0;
        }
        #endif
        if (!module) {
            #if PY_VERSION_HEX < 0x03030000
            PyObject *py_level = PyInt_FromLong(level);
            if (!py_level)
                goto bad;
            module = PyObject_CallFunctionObjArgs(py_import,
                name, global_dict, empty_dict, list, py_level, NULL);
            Py_DECREF(py_level);
            #else
            module = PyImport_ImportModuleLevelObject(
                name, global_dict, empty_dict, list, level);
            #endif
        }
    }
bad:
    #if PY_VERSION_HEX < 0x03030000
    Py_XDECREF(py_import);
    #endif
    Py_XDECREF(empty_list);
    Py_XDECREF(empty_dict);
    return module;
}
/* ImportFrom */
/* Implement `from module import name`: fetch attribute `name` from
 * `module`, converting an AttributeError into the ImportError that the
 * statement raises.  Returns a new reference or NULL. */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Format(PyExc_ImportError,
        #if PY_MAJOR_VERSION < 3
            "cannot import name %.230s", PyString_AS_STRING(name));
        #else
            "cannot import name %S", name);
        #endif
    }
    return value;
}
/* CodeObjectCache */
/*
 * Binary-search the sorted code-object cache for `code_line`.
 * Returns the index of a matching entry, or the insertion position
 * that keeps `entries` sorted when no entry matches.
 */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int lo = 0, probe = 0, hi = count - 1;
    /* Fast path: the key belongs after the last (largest) entry. */
    if (hi >= 0 && code_line > entries[hi].code_line) {
        return count;
    }
    while (lo < hi) {
        probe = lo + (hi - lo) / 2;
        if (code_line == entries[probe].code_line)
            return probe;
        if (code_line > entries[probe].code_line)
            lo = probe + 1;
        else
            hi = probe;
    }
    /* lo == hi (or the loop never ran, probe == 0): settle against the
     * last probed entry, exactly as the search loop would. */
    return (code_line <= entries[probe].code_line) ? probe : probe + 1;
}
/* Look up the cached PyCodeObject for `code_line`.  Returns a NEW
 * reference, or NULL when the line is 0 ("no line"), the cache is
 * empty, or no exact match exists. */
static PyCodeObject *__pyx_find_code_object(int code_line) {
    PyCodeObject* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    /* bisect returns an insertion point; verify it is an exact hit. */
    if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = __pyx_code_cache.entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Insert (or replace) the code object cached for `code_line`, keeping
 * the entry array sorted.  The array grows 64 entries at a time.
 * Allocation failures are silently ignored: the cache is best-effort
 * and callers keep working without it.  Takes its own reference to
 * `code_object`. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        /* First use: allocate the initial 64-slot array. */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        /* Same line already cached: swap in the new code object. */
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_DECREF(tmp);
        return;
    }
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    /* Shift the tail up by one and place the new entry at `pos`. */
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
/* Build a minimal, empty PyCodeObject whose filename/name/line make
 * Python's traceback printer show the right location for generated C
 * code.  When `c_line` is nonzero the C source file and line are
 * appended to the displayed function name.  Returns a new reference or
 * NULL on error. */
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyObject *py_srcfile = 0;
    PyObject *py_funcname = 0;
    #if PY_MAJOR_VERSION < 3
    py_srcfile = PyString_FromString(filename);
    #else
    py_srcfile = PyUnicode_FromString(filename);
    #endif
    if (!py_srcfile) goto bad;
    if (c_line) {
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        #else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        #endif
    }
    else {
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
        #else
        py_funcname = PyUnicode_FromString(funcname);
        #endif
    }
    if (!py_funcname) goto bad;
    /* All code/consts/names slots are empty; only the metadata matters. */
    py_code = __Pyx_PyCode_New(
        0,
        0,
        0,
        0,
        0,
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile, /*PyObject *filename,*/
        py_funcname, /*PyObject *name,*/
        py_line,
        __pyx_empty_bytes /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    Py_DECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    return NULL;
}
/* Append a traceback frame for funcname/filename:py_line to the
 * pending exception.  Code objects are cached per line (keyed on the C
 * line when nonzero) to avoid rebuilding them on every raise.  Failures
 * are swallowed: a missing frame only degrades the traceback. */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    py_code = __pyx_find_code_object(c_line ? c_line : py_line);
    if (!py_code) {
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) goto bad;
        __pyx_insert_code_object(c_line ? c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        PyThreadState_GET(), /*PyThreadState *tstate,*/
        py_code,             /*PyCodeObject *code,*/
        __pyx_d,      /*PyObject *globals,*/
        0                    /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    /* Point the synthetic frame at the Python source line. */
    py_frame->f_lineno = py_line;
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
/* Python 2 buffer-protocol shims: on Py2 not every object supports the
 * new buffer API, so numpy.ndarray is special-cased through the
 * module's generated __getbuffer__/__releasebuffer__ wrappers. */
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
  if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
  if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
  PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
  return -1;
}
/* Release a buffer acquired via __Pyx_GetBuffer, dropping the owning
 * reference and clearing view->obj. */
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
  PyObject *obj = view->obj;
  if (!obj) return;
  if (PyObject_CheckBuffer(obj)) {
    PyBuffer_Release(view);
    return;
  }
  if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; }
  Py_DECREF(obj);
  view->obj = NULL;
}
#endif
/* Print */
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION < 3
/* Fetch sys.stdout (borrowed reference); sets RuntimeError when absent. */
static PyObject *__Pyx_GetStdout(void) {
    PyObject *f = PySys_GetObject((char *)"stdout");
    if (!f) {
        PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout");
    }
    return f;
}
/* Emulate the Py2 `print` statement: write each item of `arg_tuple` to
 * file `f` (default sys.stdout) separated by spaces, honoring the file's
 * softspace flag, then optionally a newline.  Returns 0 or -1. */
static int __Pyx_Print(PyObject* f, PyObject *arg_tuple, int newline) {
    int i;
    if (!f) {
        if (!(f = __Pyx_GetStdout()))
            return -1;
    }
    Py_INCREF(f);
    for (i=0; i < PyTuple_GET_SIZE(arg_tuple); i++) {
        PyObject* v;
        if (PyFile_SoftSpace(f, 1)) {
            if (PyFile_WriteString(" ", f) < 0)
                goto error;
        }
        v = PyTuple_GET_ITEM(arg_tuple, i);
        if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0)
            goto error;
        if (PyString_Check(v)) {
            /* Like CPython: suppress the next softspace when the item
             * already ends in whitespace. */
            char *s = PyString_AsString(v);
            Py_ssize_t len = PyString_Size(v);
            if (len > 0) {
                switch (s[len-1]) {
                    case ' ': break;
                    case '\f': case '\r': case '\n': case '\t': case '\v':
                        PyFile_SoftSpace(f, 0);
                        break;
                    default:  break;
                }
            }
        }
    }
    if (newline) {
        if (PyFile_WriteString("\n", f) < 0)
            goto error;
        PyFile_SoftSpace(f, 0);
    }
    Py_DECREF(f);
    return 0;
error:
    Py_DECREF(f);
    return -1;
}
#else
/* Py3 / PyPy: delegate to the builtin print() function, passing
 * file=stream and end=" " (for no-newline prints) as keywords.  The
 * builtin and the cached no-newline kwargs dict are looked up lazily
 * into module-level globals. */
static int __Pyx_Print(PyObject* stream, PyObject *arg_tuple, int newline) {
    PyObject* kwargs = 0;
    PyObject* result = 0;
    PyObject* end_string;
    if (unlikely(!__pyx_print)) {
        __pyx_print = PyObject_GetAttr(__pyx_b, __pyx_n_s_print);
        if (!__pyx_print)
            return -1;
    }
    if (stream) {
        kwargs = PyDict_New();
        if (unlikely(!kwargs))
            return -1;
        if (unlikely(PyDict_SetItem(kwargs, __pyx_n_s_file, stream) < 0))
            goto bad;
        if (!newline) {
            end_string = PyUnicode_FromStringAndSize(" ", 1);
            if (unlikely(!end_string))
                goto bad;
            if (PyDict_SetItem(kwargs, __pyx_n_s_end, end_string) < 0) {
                Py_DECREF(end_string);
                goto bad;
            }
            Py_DECREF(end_string);
        }
    } else if (!newline) {
        if (unlikely(!__pyx_print_kwargs)) {
            __pyx_print_kwargs = PyDict_New();
            if (unlikely(!__pyx_print_kwargs))
                return -1;
            end_string = PyUnicode_FromStringAndSize(" ", 1);
            if (unlikely(!end_string))
                return -1;
            if (PyDict_SetItem(__pyx_print_kwargs, __pyx_n_s_end, end_string) < 0) {
                Py_DECREF(end_string);
                return -1;
            }
            Py_DECREF(end_string);
        }
        kwargs = __pyx_print_kwargs;
    }
    result = PyObject_Call(__pyx_print, arg_tuple, kwargs);
    /* Only drop kwargs we created here, never the shared cached dict. */
    if (unlikely(kwargs) && (kwargs != __pyx_print_kwargs))
        Py_DECREF(kwargs);
    if (!result)
        return -1;
    Py_DECREF(result);
    return 0;
bad:
    if (kwargs != __pyx_print_kwargs)
        Py_XDECREF(kwargs);
    return -1;
}
#endif
/* None */
/* Construct a float complex from real/imag parts.  Backend depends on
 * build flags: std::complex (C++), C99 _Complex, or the struct fallback. */
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      return ::std::complex< float >(x, y);
    }
  #else
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      return x + y*(__pyx_t_float_complex)_Complex_I;
    }
  #endif
#else
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      __pyx_t_float_complex z;
      z.real = x;
      z.imag = y;
      return z;
    }
#endif
/* None */
/* Struct-based float complex arithmetic, used only when no native
 * complex type is available (!CYTHON_CCOMPLEX).  Plain textbook
 * formulas; __Pyx_c_quotf uses the naive division (no Smith scaling). */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
       return (a.real == b.real) && (a.imag == b.imag);
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        z.real = a.real + b.real;
        z.imag = a.imag + b.imag;
        return z;
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        z.real = a.real - b.real;
        z.imag = a.imag - b.imag;
        return z;
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        z.real = a.real * b.real - a.imag * b.imag;
        z.imag = a.real * b.imag + a.imag * b.real;
        return z;
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        float denom = b.real * b.real + b.imag * b.imag;
        z.real = (a.real * b.real + a.imag * b.imag) / denom;
        z.imag = (a.imag * b.real - a.real * b.imag) / denom;
        return z;
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) {
        __pyx_t_float_complex z;
        z.real = -a.real;
        z.imag = -a.imag;
        return z;
    }
    static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) {
       return (a.real == 0) && (a.imag == 0);
    }
    static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) {
        __pyx_t_float_complex z;
        z.real =  a.real;
        z.imag = -a.imag;
        return z;
    }
#if 1
/* |z| for float complex; hypotf() is preferred where available because
 * it avoids intermediate overflow/underflow in real*real + imag*imag. */
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
    return sqrtf(z.real*z.real + z.imag*z.imag);
#else
    return hypotf(z.real, z.imag);
#endif
  }
/* a ** b for float complex.  Small integer exponents (|b.real| <= 4,
 * b.imag == 0) use repeated multiplication; the general case goes
 * through polar form, exp(b * log(a)).
 * NOTE(review): for a real negative `a` with non-integer `b` this takes
 * r = a.real < 0 and logf(r) yields NaN; later Cython versions
 * special-case that path — confirm before relying on it. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
        __pyx_t_float_complex z;
        float r, lnr, theta, z_r, z_theta;
        if (b.imag == 0 && b.real == (int)b.real) {
            if (b.real < 0) {
                /* Negative integer power: invert a, negate the exponent. */
                float denom = a.real * a.real + a.imag * a.imag;
                a.real = a.real / denom;
                a.imag = -a.imag / denom;
                b.real = -b.real;
            }
            switch ((int)b.real) {
                case 0:
                    z.real = 1;
                    z.imag = 0;
                    return z;
                case 1:
                    return a;
                case 2:
                    /* Fixed: the generated code computed a*a twice and
                     * left a dead store in z; one multiply suffices. */
                    return __Pyx_c_prodf(a, a);
                case 3:
                    z = __Pyx_c_prodf(a, a);
                    return __Pyx_c_prodf(z, a);
                case 4:
                    z = __Pyx_c_prodf(a, a);
                    return __Pyx_c_prodf(z, z);
            }
        }
        if (a.imag == 0) {
            if (a.real == 0) {
                return a;
            }
            r = a.real;
            theta = 0;
        } else {
            r = __Pyx_c_absf(a);
            theta = atan2f(a.imag, a.real);
        }
        lnr = logf(r);
        z_r = expf(lnr * b.real - theta * b.imag);
        z_theta = theta * b.real + lnr * b.imag;
        z.real = z_r * cosf(z_theta);
        z.imag = z_r * sinf(z_theta);
        return z;
    }
#endif
#endif
/* None */
/* Construct a double complex from real/imag parts; same backend choice
 * as the float variant above. */
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      return ::std::complex< double >(x, y);
    }
  #else
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      return x + y*(__pyx_t_double_complex)_Complex_I;
    }
  #endif
#else
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      __pyx_t_double_complex z;
      z.real = x;
      z.imag = y;
      return z;
    }
#endif
/* None */
/* Struct-based double complex arithmetic (fallback when no native
 * complex type exists); mirrors the float versions above. */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) {
       return (a.real == b.real) && (a.imag == b.imag);
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        z.real = a.real + b.real;
        z.imag = a.imag + b.imag;
        return z;
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        z.real = a.real - b.real;
        z.imag = a.imag - b.imag;
        return z;
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        z.real = a.real * b.real - a.imag * b.imag;
        z.imag = a.real * b.imag + a.imag * b.real;
        return z;
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) {
        __pyx_t_double_complex z;
        double denom = b.real * b.real + b.imag * b.imag;
        z.real = (a.real * b.real + a.imag * b.imag) / denom;
        z.imag = (a.imag * b.real - a.real * b.imag) / denom;
        return z;
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) {
        __pyx_t_double_complex z;
        z.real = -a.real;
        z.imag = -a.imag;
        return z;
    }
    static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) {
       return (a.real == 0) && (a.imag == 0);
    }
    static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) {
        __pyx_t_double_complex z;
        z.real =  a.real;
        z.imag = -a.imag;
        return z;
    }
#if 1
/* |z| for double complex; hypot() is preferred where available because
 * it avoids intermediate overflow/underflow in real*real + imag*imag. */
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
    return sqrt(z.real*z.real + z.imag*z.imag);
#else
    return hypot(z.real, z.imag);
#endif
  }
/* a ** b for double complex.  Small integer exponents (|b.real| <= 4,
 * b.imag == 0) use repeated multiplication; the general case goes
 * through polar form, exp(b * log(a)).
 * NOTE(review): for a real negative `a` with non-integer `b` this takes
 * r = a.real < 0 and log(r) yields NaN; later Cython versions
 * special-case that path — confirm before relying on it. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex z;
    double r, lnr, theta, z_r, z_theta;
    if (b.imag == 0 && b.real == (int)b.real) {
        if (b.real < 0) {
            /* Negative integer power: invert a, negate the exponent. */
            double denom = a.real * a.real + a.imag * a.imag;
            a.real = a.real / denom;
            a.imag = -a.imag / denom;
            b.real = -b.real;
        }
        switch ((int)b.real) {
            case 0:
                z.real = 1;
                z.imag = 0;
                return z;
            case 1:
                return a;
            case 2:
                /* Fixed: the generated code computed a*a twice and left
                 * a dead store in z; one multiply suffices. */
                return __Pyx_c_prod(a, a);
            case 3:
                z = __Pyx_c_prod(a, a);
                return __Pyx_c_prod(z, a);
            case 4:
                z = __Pyx_c_prod(a, a);
                return __Pyx_c_prod(z, z);
        }
    }
    if (a.imag == 0) {
        if (a.real == 0) {
            return a;
        }
        r = a.real;
        theta = 0;
    } else {
        r = __Pyx_c_abs(a);
        theta = atan2(a.imag, a.real);
    }
    lnr = log(r);
    z_r = exp(lnr * b.real - theta * b.imag);
    z_theta = theta * b.real + lnr * b.imag;
    z.real = z_r * cos(z_theta);
    z.imag = z_r * sin(z_theta);
    return z;
}
#endif
#endif
/* CIntToPy */
/* Convert a C int to a Python integer, picking the narrowest CPython
 * constructor that can represent it; the byte-array fallback covers
 * widths larger than both long and long long.  Signedness of `int` is
 * detected at run time via the (-1 > 0) comparison; the compiler folds
 * the dead branches away. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
    const int neg_one = (int) -1, const_zero = (int) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(int) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
        } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
        }
    } else {
        if (sizeof(int) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Exotic width: hand the raw bytes to _PyLong_FromByteArray in
         * native endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(int),
                                     little, !is_unsigned);
    }
}
/* CIntFromPyVerify */
/* Range-check helpers used by the CIntFromPy converters below: evaluate
 * func_value once, verify it round-trips through target_type, and
 * return it; on loss of information jump to the caller-provided
 * raise_overflow / raise_neg_overflow labels.  The _EXC variant also
 * propagates a pending Python error when func_value equals the C-API's
 * -1 error sentinel.  `is_unsigned` is expected in the caller's scope. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
    {\
        func_type value = func_value;\
        if (sizeof(target_type) < sizeof(func_type)) {\
            if (unlikely(value != (func_type) (target_type) value)) {\
                func_type zero = 0;\
                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
                    return (target_type) -1;\
                if (is_unsigned && unlikely(value < zero))\
                    goto raise_neg_overflow;\
                else\
                    goto raise_overflow;\
            }\
        }\
        return (target_type) value;\
    }
/* CIntToPy */
/* Convert an enum NPY_TYPES value to a Python integer.  Same structure
 * as __Pyx_PyInt_From_int, but sized/signed for the enum's
 * implementation-defined underlying type. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) {
    const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(enum NPY_TYPES) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
        } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
        }
    } else {
        if (sizeof(enum NPY_TYPES) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Exotic width: raw-bytes fallback in native endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES),
                                     little, !is_unsigned);
    }
}
/* PrintOne */
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION < 3
/* Print a single object followed by a newline (`print o`).  The Py2
 * branch writes through the file protocol with softspace handling. */
static int __Pyx_PrintOne(PyObject* f, PyObject *o) {
    if (!f) {
        if (!(f = __Pyx_GetStdout()))
            return -1;
    }
    Py_INCREF(f);
    if (PyFile_SoftSpace(f, 0)) {
        if (PyFile_WriteString(" ", f) < 0)
            goto error;
    }
    if (PyFile_WriteObject(o, f, Py_PRINT_RAW) < 0)
        goto error;
    if (PyFile_WriteString("\n", f) < 0)
        goto error;
    Py_DECREF(f);
    return 0;
error:
    Py_DECREF(f);
    return -1;
    /* the line below is just to avoid C compiler
     * warnings about unused functions */
    return __Pyx_Print(f, NULL, 0);
}
#else
/* Py3 / PyPy: wrap the object in a 1-tuple and delegate to __Pyx_Print
 * with newline enabled. */
static int __Pyx_PrintOne(PyObject* stream, PyObject *o) {
    int res;
    PyObject* arg_tuple = PyTuple_Pack(1, o);
    if (unlikely(!arg_tuple))
        return -1;
    res = __Pyx_Print(stream, arg_tuple, 1);
    Py_DECREF(arg_tuple);
    return res;
}
#endif
/* CIntFromPy */
/* Convert a Python object to a C int with overflow checking.
 * Fast paths read small PyLong digit arrays directly when
 * CYTHON_USE_PYLONG_INTERNALS is enabled (cases keyed on Py_SIZE, the
 * signed digit count); otherwise the public PyLong_As* APIs are used,
 * and finally _PyLong_AsByteArray for exotic widths.  Non-integer
 * objects are coerced via __Pyx_PyNumber_IntOrLong and retried.
 * Returns (int)-1 with an exception set on error (ambiguous with a
 * legitimate -1: callers must check PyErr_Occurred()). */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
    const int neg_one = (int) -1, const_zero = (int) 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                goto raise_neg_overflow;
            }
            return (int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Unsigned target: any negative PyLong is an error. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case  1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
            }
#endif
#if CYTHON_COMPILING_IN_CPYTHON
            if (unlikely(Py_SIZE(x) < 0)) {
                goto raise_neg_overflow;
            }
#else
            {
                /* No Py_SIZE sign info: compare against False (== 0). */
                int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
                if (unlikely(result < 0))
                    return (int) -1;
                if (unlikely(result == 1))
                    goto raise_neg_overflow;
            }
#endif
            if (sizeof(int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
            } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
            }
        } else {
            /* Signed target: negative Py_SIZE cases rebuild the value
             * in unsigned arithmetic, then negate. */
#if CYTHON_USE_PYLONG_INTERNALS
            const digit* digits = ((PyLongObject*)x)->ob_digit;
            switch (Py_SIZE(x)) {
                case  0: return (int) 0;
                case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
                case  1: __PYX_VERIFY_RETURN_INT(int,  digit, +digits[0])
                case -2:
                    if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
                        if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
            }
#endif
            if (sizeof(int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
            } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
                __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
            }
        }
        {
            /* Exotic width: decode the PyLong into raw bytes. */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            int val;
            PyObject *v = __Pyx_PyNumber_IntOrLong(x);
 #if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
 #endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (int) -1;
        }
    } else {
        /* Not an int/long: coerce with __index__/__int__ and retry. */
        int val;
        PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyInt_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to int");
    return (int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to int");
    return (int) -1;
}
/* CIntToPy */
/* Convert a C long to a Python integer; same template as
 * __Pyx_PyInt_From_int.  The `sizeof(long) < sizeof(long)` branch is
 * always-false generated code, kept for template uniformity. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
    const long neg_one = (long) -1, const_zero = (long) 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Exotic width: raw-bytes fallback in native endianness. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
    }
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) -1, const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CheckBinaryVersion */
/* Warn (via PyErr_WarnEx) when the Python X.Y version this module was
 * compiled against differs from the X.Y version importing it at runtime.
 * Returns 0 when the versions match, otherwise the PyErr_WarnEx result
 * (-1 if the warning was raised as an exception). */
static int __Pyx_check_binary_version(void) {
    char ctversion[4], rtversion[4];
    /* NOTE(review): the 4-byte buffers hold "X.Y\0", so a two-digit minor
     * version such as "3.10" is truncated to "3.1" on both sides and only
     * the first minor digit is compared -- confirm against newer Cython,
     * which replaced this check. */
    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
    /* compare major digit and first minor digit only ("x.y") */
    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
        char message[200];
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %s",
                      ctversion, __Pyx_MODULE_NAME, rtversion);
        return PyErr_WarnEx(NULL, message, 1);
    }
    return 0;
}
/* ModuleImport */
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
/* Import and return the module named `name` (new reference), or NULL with
 * an exception set on failure. */
static PyObject *__Pyx_ImportModule(const char *name) {
    PyObject *py_name = 0;
    PyObject *py_module = 0;
    py_name = __Pyx_PyIdentifier_FromString(name);
    if (!py_name)
        goto bad;
    py_module = PyImport_Import(py_name);
    Py_DECREF(py_name);   /* PyImport_Import holds its own reference */
    return py_module;
bad:
    Py_XDECREF(py_name);
    return 0;
}
#endif
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
/* Import `module_name.class_name`, verify it is a type object, and check
 * its tp_basicsize against the compile-time `size`:
 *   - strict: sizes must match exactly, else ValueError;
 *   - non-strict: a larger runtime type only emits a warning (possible
 *     binary-compatibility skew), a smaller one is a ValueError.
 * Returns a new reference to the type, or NULL with an exception set. */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
    size_t size, int strict)
{
    PyObject *py_module = 0;
    PyObject *result = 0;
    PyObject *py_name = 0;
    char warning[200];
    Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
    PyObject *py_basicsize;
#endif
    py_module = __Pyx_ImportModule(module_name);
    if (!py_module)
        goto bad;
    py_name = __Pyx_PyIdentifier_FromString(class_name);
    if (!py_name)
        goto bad;
    result = PyObject_GetAttr(py_module, py_name);
    Py_DECREF(py_name);
    py_name = 0;
    Py_DECREF(py_module);
    py_module = 0;
    if (!result)
        goto bad;
    if (!PyType_Check(result)) {
        PyErr_Format(PyExc_TypeError,
            "%.200s.%.200s is not a type object",
            module_name, class_name);
        goto bad;
    }
#ifndef Py_LIMITED_API
    basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
    /* limited API: tp_basicsize is not directly accessible,
     * read the __basicsize__ attribute instead */
    py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
    if (!py_basicsize)
        goto bad;
    basicsize = PyLong_AsSsize_t(py_basicsize);
    Py_DECREF(py_basicsize);
    py_basicsize = 0;
    if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
        goto bad;
#endif
    if (!strict && (size_t)basicsize > size) {
        /* runtime type grew since compile time: warn but continue */
        PyOS_snprintf(warning, sizeof(warning),
            "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd",
            module_name, class_name, basicsize, size);
        if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
    }
    else if ((size_t)basicsize != size) {
        PyErr_Format(PyExc_ValueError,
            "%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd",
            module_name, class_name, basicsize, size);
        goto bad;
    }
    return (PyTypeObject *)result;
bad:
    Py_XDECREF(py_module);
    Py_XDECREF(result);
    return NULL;
}
#endif
/* InitStrings */
/* Create/intern every compile-time string constant in the table `t`
 * (terminated by a NULL `p` slot), storing each object through t->p.
 * Returns 0 on success, -1 if any allocation fails. */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
    while (t->p) {
#if PY_MAJOR_VERSION < 3
        if (t->is_unicode) {
            /* t->n includes the trailing NUL, hence n - 1 */
            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
        } else if (t->intern) {
            *t->p = PyString_InternFromString(t->s);
        } else {
            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
        }
#else
        if (t->is_unicode | t->is_str) {
            if (t->intern) {
                *t->p = PyUnicode_InternFromString(t->s);
            } else if (t->encoding) {
                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
            } else {
                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
            }
        } else {
            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
        }
#endif
        if (!*t->p)
            return -1;
        ++t;
    }
    return 0;
}
/* Build a Python unicode object from a NUL-terminated C string. */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
    return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
/* Like __Pyx_PyObject_AsStringAndSize, but discards the length. */
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
    Py_ssize_t ignore;
    return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
/* Return a borrowed char* view of `o` (unicode under the default-encoding
 * fast paths, else bytearray or bytes), storing its length in *length.
 * Returns NULL with an exception set on failure.  The pointer borrows
 * from `o` (or its cached default-encoded form) -- it is only valid while
 * `o` is alive. */
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
    if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
            __Pyx_sys_getdefaultencoding_not_ascii &&
#endif
            PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
        char* defenc_c;
        /* use the unicode object's cached default-encoded bytes */
        PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
        if (!defenc) return NULL;
        defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        {
            /* reject non-ASCII bytes: raise via PyUnicode_AsASCIIString */
            char* end = defenc_c + PyBytes_GET_SIZE(defenc);
            char* c;
            for (c = defenc_c; c < end; c++) {
                if ((unsigned char) (*c) >= 128) {
                    PyUnicode_AsASCIIString(o);
                    return NULL;
                }
            }
        }
#endif
        *length = PyBytes_GET_SIZE(defenc);
        return defenc_c;
#else
        if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        if (PyUnicode_IS_ASCII(o)) {
            /* ASCII unicode: UTF-8 view is byte-identical */
            *length = PyUnicode_GET_LENGTH(o);
            return PyUnicode_AsUTF8(o);
        } else {
            /* raise UnicodeEncodeError via the ASCII codec */
            PyUnicode_AsASCIIString(o);
            return NULL;
        }
#else
        return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
    } else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
    if (PyByteArray_Check(o)) {
        *length = PyByteArray_GET_SIZE(o);
        return PyByteArray_AS_STRING(o);
    } else
#endif
    {
        char* result;
        int r = PyBytes_AsStringAndSize(o, &result, length);
        if (unlikely(r < 0)) {
            return NULL;
        } else {
            return result;
        }
    }
}
/* Truth-test `x`, short-circuiting the three singletons (True -> 1,
 * False/None -> 0) before falling back to the generic protocol. */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    if (x == Py_True) return 1;
    if ((x == Py_False) | (x == Py_None)) return 0;
    return PyObject_IsTrue(x);
}
/* Coerce `x` to a Python int (or long on Python 2) through its number
 * protocol.  Returns a new reference, or NULL with an exception set
 * (TypeError if the object is not integer-convertible or if __int__ /
 * __long__ returned a non-integer). */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
    PyNumberMethods *m;
    const char *name = NULL;
    PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
    if (PyInt_Check(x) || PyLong_Check(x))
#else
    if (PyLong_Check(x))
#endif
        /* already an integer: just add a reference */
        return __Pyx_NewRef(x);
    m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Int(x);
    }
    else if (m && m->nb_long) {
        name = "long";
        res = PyNumber_Long(x);
    }
#else
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Long(x);
    }
#endif
    if (res) {
#if PY_MAJOR_VERSION < 3
        if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
        if (!PyLong_Check(res)) {
#endif
            /* __int__/__long__ returned something else: reject it */
            PyErr_Format(PyExc_TypeError,
                         "__%.4s__ returned non-%.4s (type %.200s)",
                         name, name, Py_TYPE(res)->tp_name);
            Py_DECREF(res);
            return NULL;
        }
    }
    else if (!PyErr_Occurred()) {
        PyErr_SetString(PyExc_TypeError,
                        "an integer is required");
    }
    return res;
}
/* Convert `b` to a Py_ssize_t, with fast paths for exact int/long objects
 * (reading the digit limbs directly when CYTHON_USE_PYLONG_INTERNALS is
 * enabled) and a PyNumber_Index fallback for objects implementing
 * __index__.  Returns -1 with an exception set on error; callers must use
 * PyErr_Occurred() to distinguish an error from a true result of -1. */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
    Py_ssize_t ival;
    PyObject *x;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_CheckExact(b))) {
        if (sizeof(Py_ssize_t) >= sizeof(long))
            return PyInt_AS_LONG(b);
        else
            /* BUG FIX: previously called PyInt_AsSsize_t(x) while `x` was
             * still uninitialized; the object being converted is `b`
             * (upstream Cython uses `b` here as well). */
            return PyInt_AsSsize_t(b);
    }
#endif
    if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
        const digit* digits = ((PyLongObject*)b)->ob_digit;
        const Py_ssize_t size = Py_SIZE(b);
        if (likely(__Pyx_sst_abs(size) <= 1)) {
            /* zero or one digit: the value fits trivially */
            ival = likely(size) ? digits[0] : 0;
            if (size == -1) ival = -ival;
            return ival;
        } else {
            /* 2..4 digits: assemble inline when the result cannot overflow
             * a Py_ssize_t; otherwise fall through to PyLong_AsSsize_t */
            switch (size) {
                case 2:
                    if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
                        return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case -2:
                    if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
                        return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case 3:
                    if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
                        return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case -3:
                    if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
                        return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case 4:
                    if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
                        return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
                case -4:
                    if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
                        return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
                    }
                    break;
            }
        }
#endif
        return PyLong_AsSsize_t(b);
    }
    /* generic path: ask the object for its __index__ */
    x = PyNumber_Index(b);
    if (!x) return -1;
    ival = PyInt_AsSsize_t(x);
    Py_DECREF(x);
    return ival;
}
/* Convert a C size_t to a Python integer.  NOTE(review): PyInt_FromSize_t
 * is presumably remapped for Python 3 elsewhere in this generated file --
 * confirm against the macro definitions above. */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
    return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
rose_v1_globalArrays.c | #define MSIZE 256
#include <omp.h>
double u[256][256];
double f[256][256];
int n;
int m;
/* Initialize the global 256x256 grids: zero the solution array u and fill
 * the source array f from a coordinate-based profile.  Also sets the
 * global problem sizes n and m.  (Auto-generated OpenMP benchmark code.) */
void initialize()
{
    int i;
    int j;
    int xx;
    n = 256;
    m = 256;
    double dx = 2.0 / (n - 1);  /* grid spacing on [-1, 1] */
    /* NOTE(review): the nested `parallel for` spawns a parallel region per
     * outer iteration, and xx truncates the coordinate to int before
     * squaring -- both appear intentional in this generated benchmark;
     * confirm against the upstream source. */
#pragma omp parallel for private (xx,i,j) firstprivate (n,m)
    for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (xx,j) firstprivate (dx)
        for (j = 0; j <= m - 1; j += 1) {
            xx = ((int )(- 1.0 + dx * (i - 1)));
            u[i][j] = 0.0;
            f[i][j] = - 1.0 * (1.0 - (xx * xx));
        }
    }
}
|
DRB014-outofbounds-orig-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISOIEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The outmost loop is parallelized.
But the inner level loop has out of bound access for b[i][j] when j equals to 0.
This will cause memory access of a previous row's last element.
For example, an array of 4x4:
j=0 1 2 3
i=0 x x x x
1 x x x x
2 x x x x
3 x x x x
outer loop: i=2,
inner loop: j=0
array element accessed b[i][j-1] becomes b[2][-1], which in turn is b[1][3]
due to linearized row-major storage of the 2-D array.
This causes loop-carried data dependence between i=2 and i=1.
Data race pair: b[i][j]@75 vs. b[i][j-1]@75.
*/
#include <stdio.h>
#include <stdlib.h>
/*
 * DataRaceBench DRB014: the parallelized inner loop deliberately reads
 * b[i][j-1]; at j==0 this indexes b[i][-1], which aliases the previous
 * row's last element in the linear row-major layout.  That creates the
 * loop-carried dependence between outer iterations documented in the
 * header -- the intended data race of this "-yes" benchmark.
 */
int main(int argc, char * argv[])
{
    int i, j;
    int n = 100, m = 100;
    double b[n][m];
    int _ret_val_0;
    #pragma cetus private(i, j)
    #pragma loop name main#0
    for (i=1; i<n; i ++ )
    {
        #pragma cetus private(j)
        #pragma loop name main#0#0
        #pragma cetus parallel
        #pragma omp parallel for private(j)
        for (j=0; j<m; j ++ )
        {
            /* BUG FIX: was b[i][j] = b[i-1][j], which has no race and no
             * out-of-bounds access; the file header documents the race
             * pair as b[i][j] vs b[i][j-1] (upstream DRB014). */
            b[i][j]=b[i][j-1];
        }
    }
    printf("b[50][50]=%f\n", b[50][50]);
    _ret_val_0=0;
    return _ret_val_0;
}
|
GB_binop__le_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__le_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__le_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__le_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_uint64)
// A*D function (colscale): GB (_AxD__le_uint64)
// D*A function (rowscale): GB (_DxB__le_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__le_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__le_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint64)
// C=scalar+B GB (_bind1st__le_uint64)
// C=scalar+B' GB (_bind1st_tran__le_uint64)
// C=A+scalar GB (_bind2nd__le_uint64)
// C=A'+scalar GB (_bind2nd_tran__le_uint64)
// C type: bool
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_UINT64 || GxB_NO_LE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Compiled out: LE is not in that list, so no dense ewise3-accum kernel
// is generated for this operator.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the included template applies
// GB_BINOP (cij = aij <= bij) elementwise with `nthreads` threads.
void GB (_Cdense_ewise3_noaccum__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (dense accum): stub for this operator -- the subassign template is
// compiled out (#if 0), so when the operator is enabled this is a no-op that
// reports GrB_SUCCESS; when disabled it reports GrB_NO_VALUE.
GrB_Info GB (_Cdense_accumB__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar accum into a dense matrix): stub for this operator -- the
// scalar-broadcast template is compiled out (#if 0), so this is a no-op
// returning GrB_SUCCESS (or GrB_NO_VALUE when the operator is disabled).
GrB_Info GB (_Cdense_accumb__le_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by the diagonal matrix D; the template writes the
// boolean LE results into Cx.
GrB_Info GB (_AxD__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;   // output values, used by the template
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by the diagonal matrix D; the template writes the
// boolean LE results into Cx.
GrB_Info GB (_DxB__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;   // output values, used by the template
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M).  The alpha/beta scalars are
// only read for the eWiseUnion variant, where they substitute for entries
// missing from A or B.
GrB_Info GB (_AaddB__le_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) when C is sparse or hypersparse;
// all work is done by the included meta-template.
GrB_Info GB (_AemultB_08__le_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for LE (a flipped variant exists, handled upstream),
// so only the unflipped template is instantiated here.
GrB_Info GB (_AemultB_02__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap or full; delegated entirely to the included template.
GrB_Info GB (_AemultB_04__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is bitmap; delegated to
// the included bitmap template.
GrB_Info GB (_AemultB_bitmap__le_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx[p] = (x <= Bx[p]) for every entry present in B.  Bb is B's bitmap
// (GBB treats a NULL bitmap as "all present").  Per the header comment,
// Cx and Bx may alias; each slot is read before it is written.
GrB_Info GB (_bind1st__le_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries not present in B
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx[p] = (Ax[p] <= y) for every entry present in A.  Ab is A's bitmap
// (GBB treats a NULL bitmap as "all present").  Per the header comment,
// Cx and Ax may alias; each slot is read before it is written.
GrB_Info GB (_bind2nd__le_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // skip entries not present in A
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x <= aij) via GB_CAST_OP
// inside the included transpose template.
GrB_Info GB (_bind1st_tran__le_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij <= y) via GB_CAST_OP
// inside the included transpose template.
GrB_Info GB (_bind2nd_tran__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ccsd_t.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <complex.h>
#include "config.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
#include <math.h>
#include <string.h>
/* One unit of work for the CCSD(T) permutational loops: six cached data
 * blocks plus the orbital triple (a, b, c) they belong to.  NOTE(review):
 * the exact contents of cache[] are set by code outside this view --
 * confirm against the job-construction routine. */
typedef struct {
    void *cache[6];
    short a;
    short b;
    short c;
    short _padding;   /* keeps the struct size a multiple of 8 bytes */
} CacheJob;
/*
* 4 * w + w.transpose(1,2,0) + w.transpose(2,0,1)
* - 2 * w.transpose(2,1,0) - 2 * w.transpose(0,2,1)
* - 2 * w.transpose(1,0,2)
*/
/*
 * First accumulate w into v (in place), then form the spin-summed
 * permutation combination over the nocc^3 cube:
 *     out = 4*v + v.transpose(1,2,0) + v.transpose(2,0,1)
 *         - 2*v.transpose(2,1,0) - 2*v.transpose(0,2,1) - 2*v.transpose(1,0,2)
 * Note: v is modified (v += w); out must not alias v or w.
 */
static void add_and_permute(double *out, double *w, double *v, int n)
{
    const int nn = n * n;
    const int nnn = nn * n;
    int p, i, j, k;

    /* all six permutations below read from the combined v + w */
    for (p = 0; p < nnn; p++) {
        v[p] += w[p];
    }

    for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
    for (k = 0; k < n; k++) {
        out[i*nn + j*n + k] = 4 * v[i*nn + j*n + k]
                            +     v[j*nn + k*n + i]
                            +     v[k*nn + i*n + j]
                            - 2 * v[k*nn + j*n + i]
                            - 2 * v[i*nn + k*n + j]
                            - 2 * v[j*nn + i*n + k];
    } } }
}
/*
* t2T = t2.transpose(2,3,1,0)
* ov = vv_op[:,nocc:]
* oo = vv_op[:,:nocc]
* w = numpy.einsum('if,fjk->ijk', ov, t2T[c])
* w-= numpy.einsum('ijm,mk->ijk', vooo[a], t2T[c,b])
* v = numpy.einsum('ij,k->ijk', oo, t1T[c]*.5)
* v+= numpy.einsum('ij,k->ijk', t2T[b,a], fov[:,c]*.5)
* v+= w
*/
/*
 * Accumulate the W and V intermediates for one permutation (a,b,c) of a
 * triple, no point-group symmetry.  With t2T = t2.transpose(2,3,1,0):
 *     w[idx] += einsum('if,fjk->ijk', vv_op[:,nocc:], t2T[c])
 *             - einsum('ijm,mk->ijk', vooo[a], t2T[c,b])
 *     v[idx] += einsum('ij,k->ijk', vv_op[:,:nocc], t1T[c]*.5)
 *             + einsum('ij,k->ijk', t2T[b,a], fov[:,c]*.5)
 * idx scatters the (i,j,k) cube into the permuted position (see
 * _make_permute_indices); t1Thalf/fvohalf are pre-scaled by .5 by the caller.
 */
static void get_wv(double *w, double *v, double *cache,
                   double *fvohalf, double *vooo,
                   double *vv_op, double *t1Thalf, double *t2T,
                   int nocc, int nvir, int a, int b, int c, int *idx)
{
    const double D0 = 0;
    const double D1 = 1;
    const double DN1 =-1;
    const char TRANS_N = 'N';
    const int nmo = nocc + nvir;
    const int noo = nocc * nocc;
    const size_t nooo = nocc * noo;
    const size_t nvoo = nvir * noo;
    int i, j, k, n;
    double *pt2T;
    /* cache(i,jk) = sum_f vv_op(i, nocc+f) * t2T(c, f, jk) */
    dgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
           &D1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
           &D0, cache, &noo);
    /* cache(ij,k) -= sum_m vooo(a, ij, m) * t2T(c, b, m, k) */
    dgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
           &DN1, t2T+c*nvoo+b*noo, &nocc, vooo+a*nooo, &nocc,
           &D1, cache, &nocc);
    pt2T = t2T + b * nvoo + a * noo;   /* t2T[b,a] block, nocc x nocc */
    for (n = 0, i = 0; i < nocc; i++) {
    for (j = 0; j < nocc; j++) {
    for (k = 0; k < nocc; k++, n++) {
        /* scatter into the permuted position of the (i,j,k) cube */
        w[idx[n]] += cache[n];
        v[idx[n]] +=(vv_op[i*nmo+j] * t1Thalf[c*nocc+k]
                   + pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
    } } }
}
/*
 * Point-group-symmetry-adapted variant of get_wv.  Same W/V contractions,
 * but the dgemm calls are restricted to symmetry-allowed blocks: orbitals
 * are grouped by irrep (o_ir_loc / v_ir_loc give occupied/virtual offsets
 * per irrep, oo_ir_loc the offsets of occupied-pair blocks), and only
 * products whose combined irrep matches are computed.
 */
static void sym_wv(double *w, double *v, double *cache,
                   double *fvohalf, double *vooo,
                   double *vv_op, double *t1Thalf, double *t2T,
                   int nocc, int nvir, int a, int b, int c, int nirrep,
                   int *o_ir_loc, int *v_ir_loc, int *oo_ir_loc, int *orbsym,
                   int *idx)
{
    const double D0 = 0;
    const double D1 = 1;
    const char TRANS_N = 'N';
    const int nmo = nocc + nvir;
    const int noo = nocc * nocc;
    const size_t nooo = nocc * noo;
    const size_t nvoo = nvir * noo;
    int a_irrep = orbsym[nocc+a];
    int b_irrep = orbsym[nocc+b];
    int c_irrep = orbsym[nocc+c];
    int ab_irrep = a_irrep ^ b_irrep;   /* direct products via XOR (abelian groups) */
    int bc_irrep = c_irrep ^ b_irrep;
    int i, j, k, n;
    int fr, f0, f1, df, mr, m0, m1, dm, mk0;
    int ir, i0, i1, di, kr, k0, k1, dk, jr;
    int ijr, ij0, ij1, dij, jkr, jk0, jk1, djk;
    double *pt2T;
    /* symmetry adapted
     * w = numpy.einsum('if,fjk->ijk', ov, t2T[c]) */
    pt2T = t2T + c * nvoo;
    for (ir = 0; ir < nirrep; ir++) {
        i0 = o_ir_loc[ir];
        i1 = o_ir_loc[ir+1];
        di = i1 - i0;
        if (di > 0) {
            /* f must carry irrep ir ^ ab for a nonzero integral block */
            fr = ir ^ ab_irrep;
            f0 = v_ir_loc[fr];
            f1 = v_ir_loc[fr+1];
            df = f1 - f0;
            if (df > 0) {
                /* (j,k) pair irrep fixed by f and c */
                jkr = fr ^ c_irrep;
                jk0 = oo_ir_loc[jkr];
                jk1 = oo_ir_loc[jkr+1];
                djk = jk1 - jk0;
                if (djk > 0) {
                    dgemm_(&TRANS_N, &TRANS_N, &djk, &di, &df,
                           &D1, pt2T+f0*noo+jk0, &noo, vv_op+i0*nmo+nocc+f0, &nmo,
                           &D0, cache, &djk);
                    /* scatter the packed symmetry block back to the full cube */
                    for (n = 0, i = o_ir_loc[ir]; i < o_ir_loc[ir+1]; i++) {
                    for (jr = 0; jr < nirrep; jr++) {
                        kr = jkr ^ jr;
                        for (j = o_ir_loc[jr]; j < o_ir_loc[jr+1]; j++) {
                        for (k = o_ir_loc[kr]; k < o_ir_loc[kr+1]; k++, n++) {
                            w[idx[i*noo+j*nocc+k]] += cache[n];
                        } }
                    } }
                }
            }
        }
    }
    /* symmetry adapted
     * w-= numpy.einsum('ijm,mk->ijk', eris_vooo[a], t2T[c,b]) */
    pt2T = t2T + c * nvoo + b * noo;
    vooo += a * nooo;
    mk0 = oo_ir_loc[bc_irrep];   /* running offset into the packed (m,k) blocks */
    for (mr = 0; mr < nirrep; mr++) {
        m0 = o_ir_loc[mr];
        m1 = o_ir_loc[mr+1];
        dm = m1 - m0;
        if (dm > 0) {
            kr = mr ^ bc_irrep;
            k0 = o_ir_loc[kr];
            k1 = o_ir_loc[kr+1];
            dk = k1 - k0;
            if (dk > 0) {
                ijr = mr ^ a_irrep;
                ij0 = oo_ir_loc[ijr];
                ij1 = oo_ir_loc[ijr+1];
                dij = ij1 - ij0;
                if (dij > 0) {
                    dgemm_(&TRANS_N, &TRANS_N, &dk, &dij, &dm,
                           &D1, pt2T+mk0, &dk, vooo+ij0*nocc+m0, &nocc,
                           &D0, cache, &dk);
                    /* subtract (note -=) while scattering back to the cube */
                    for (n = 0, ir = 0; ir < nirrep; ir++) {
                        jr = ijr ^ ir;
                        for (i = o_ir_loc[ir]; i < o_ir_loc[ir+1]; i++) {
                        for (j = o_ir_loc[jr]; j < o_ir_loc[jr+1]; j++) {
                        for (k = o_ir_loc[kr]; k < o_ir_loc[kr+1]; k++, n++) {
                            w[idx[i*noo+j*nocc+k]] -= cache[n];
                        } }
                    } }
                }
                mk0 += dm * dk;
            }
        }
    }
    /* v-term has no block structure worth exploiting; dense accumulation
     * identical to get_wv */
    pt2T = t2T + b * nvoo + a * noo;
    for (n = 0, i = 0; i < nocc; i++) {
    for (j = 0; j < nocc; j++) {
    for (k = 0; k < nocc; k++, n++) {
        v[idx[n]] +=(vv_op[i*nmo+j] * t1Thalf[c*nocc+k]
                   + pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
    } } }
}
/*
 * Energy contribution of one virtual triple (a,b,c):
 *     et = fac * sum_ijk w[ijk] * v[ijk] / (e_i + e_j + e_k - e_a - e_b - e_c)
 * mo_energy holds the nocc occupied energies followed by the virtuals;
 * fac carries the degeneracy weight chosen by the caller.
 */
double _ccsd_t_get_energy(double *w, double *v, double *mo_energy, int nocc,
                          int a, int b, int c, double fac)
{
    double e_vir = mo_energy[nocc+a] + mo_energy[nocc+b] + mo_energy[nocc+c];
    double et = 0;
    int n = 0;
    int i, j, k;
    for (i = 0; i < nocc; i++) {
    for (j = 0; j < nocc; j++) {
    for (k = 0; k < nocc; k++) {
        et += fac * w[n] * v[n] / (mo_energy[i] + mo_energy[j] + mo_energy[k] - e_vir);
        n++;
    } } }
    return et;
}
/*
 * Build the W and V intermediates for triple (a,b,c) from all six
 * permutations, combine them, and return the (T) energy contribution.
 * cache1 provides scratch for v0/w0/z0 (3*nocc^3 doubles); cache[0..5]
 * are the six vv_op blocks prepared by _ccsd_t_gen_jobs; permute_idx
 * holds the six scatter-index tables from _make_permute_indices.
 */
static double contract6(int nocc, int nvir, int a, int b, int c,
                        double *mo_energy, double *t1T, double *t2T,
                        int nirrep, int *o_ir_loc, int *v_ir_loc,
                        int *oo_ir_loc, int *orbsym, double *fvo,
                        double *vooo, double *cache1, void **cache,
                        int *permute_idx)
{
    const int nooo = nocc * nocc * nocc;
    double *v0 = cache1;
    double *w0 = v0 + nooo;
    double *z0 = w0 + nooo;
    double *wtmp = z0;           /* z0 doubles as gemm scratch before use */
    int *idx[6];
    int m;

    for (m = 0; m < 6; m++) {
        idx[m] = permute_idx + m * nooo;
    }
    memset(w0, 0, sizeof(double) * nooo);
    memset(v0, 0, sizeof(double) * nooo);

    if (nirrep == 1) {
        /* no point-group symmetry: dense accumulation */
        get_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c, idx[0]);
        get_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b, idx[1]);
        get_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c, idx[2]);
        get_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a, idx[3]);
        get_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b, idx[4]);
        get_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a, idx[5]);
    } else {
        /* symmetry-blocked accumulation */
        sym_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c,
               nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx[0]);
        sym_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b,
               nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx[1]);
        sym_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c,
               nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx[2]);
        sym_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a,
               nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx[3]);
        sym_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b,
               nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx[4]);
        sym_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a,
               nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx[5]);
    }
    add_and_permute(z0, w0, v0, nocc);

    /* degeneracy weight: 1/6 for a==b==c (a==c implies all equal since
     * a >= b >= c), 1/2 for a doubly degenerate triple, 1 otherwise */
    double fac = 1.;
    if (a == c) {
        fac = 1./6;
    } else if (a == b || b == c) {
        fac = .5;
    }
    return _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, fac);
}
/*
 * Enumerate the (a,b,c) triples covered by the virtual-orbital tiles
 * [a0,a1) x [b0,b1) and fill each job's six vv_op cache pointers, one per
 * permutation of (a,b,c).  Returns the number of jobs written.
 *
 * NOTE(review): pointer arithmetic on void* below relies on the GCC
 * extension sizeof(void) == 1; 'stride' (bytes per element) makes the
 * offsets element-correct.
 */
size_t _ccsd_t_gen_jobs(CacheJob *jobs, int nocc, int nvir,
                        int a0, int a1, int b0, int b1,
                        void *cache_row_a, void *cache_col_a,
                        void *cache_row_b, void *cache_col_b, size_t stride)
{
    size_t nov = nocc * (nocc+nvir) * stride;   /* bytes per vv_op block */
    int da = a1 - a0;
    int db = b1 - b0;
    size_t m, a, b, c;
    if (b1 <= a0) {
        /* b-tile strictly below the a-tile: b < a always holds */
        m = 0;
        for (a = a0; a < a1; a++) {
        for (b = b0; b < b1; b++) {
            /* c below the b-tile: all three indices in distinct tiles */
            for (c = 0; c < b0; c++, m++) {
                jobs[m].a = a;
                jobs[m].b = b;
                jobs[m].c = c;
                jobs[m].cache[0] = cache_row_a + nov*(a1*(a-a0)+b );
                jobs[m].cache[1] = cache_row_a + nov*(a1*(a-a0)+c );
                jobs[m].cache[2] = cache_col_a + nov*(da*(b) +a-a0);
                jobs[m].cache[3] = cache_row_b + nov*(b1*(b-b0)+c );
                jobs[m].cache[4] = cache_col_a + nov*(da*(c) +a-a0);
                jobs[m].cache[5] = cache_col_b + nov*(db*(c) +b-b0);
            }
            /* c inside the b-tile (c <= b): cache[5] comes from row_b */
            for (c = b0; c <= b; c++, m++) {
                jobs[m].a = a;
                jobs[m].b = b;
                jobs[m].c = c;
                jobs[m].cache[0] = cache_row_a + nov*(a1*(a-a0)+b );
                jobs[m].cache[1] = cache_row_a + nov*(a1*(a-a0)+c );
                jobs[m].cache[2] = cache_col_a + nov*(da*(b) +a-a0);
                jobs[m].cache[3] = cache_row_b + nov*(b1*(b-b0)+c );
                jobs[m].cache[4] = cache_col_a + nov*(da*(c) +a-a0);
                jobs[m].cache[5] = cache_row_b + nov*(b1*(c-b0)+b );
            }
        } }
    } else {
        /* diagonal tile: a and b (a >= b) both run over [a0,a1) */
        m = 0;
        for (a = a0; a < a1; a++) {
        for (b = a0; b <= a; b++) {
            /* c below the tile */
            for (c = 0; c < a0; c++, m++) {
                jobs[m].a = a;
                jobs[m].b = b;
                jobs[m].c = c;
                jobs[m].cache[0] = cache_row_a + nov*(a1*(a-a0)+b);
                jobs[m].cache[1] = cache_row_a + nov*(a1*(a-a0)+c);
                jobs[m].cache[2] = cache_row_a + nov*(a1*(b-a0)+a);
                jobs[m].cache[3] = cache_row_a + nov*(a1*(b-a0)+c);
                jobs[m].cache[4] = cache_col_a + nov*(da*(c)+a-a0);
                jobs[m].cache[5] = cache_col_a + nov*(da*(c)+b-a0);
            }
            /* c inside the tile (c <= b): all six blocks from row_a */
            for (c = a0; c <= b; c++, m++) {
                jobs[m].a = a;
                jobs[m].b = b;
                jobs[m].c = c;
                jobs[m].cache[0] = cache_row_a + nov*(a1*(a-a0)+b);
                jobs[m].cache[1] = cache_row_a + nov*(a1*(a-a0)+c);
                jobs[m].cache[2] = cache_row_a + nov*(a1*(b-a0)+a);
                jobs[m].cache[3] = cache_row_a + nov*(a1*(b-a0)+c);
                jobs[m].cache[4] = cache_row_a + nov*(a1*(c-a0)+a);
                jobs[m].cache[5] = cache_row_a + nov*(a1*(c-a0)+b);
            }
        } }
    }
    return m;
}
void _make_permute_indices(int *idx, int n)
{
const int nn = n * n;
const int nnn = nn * n;
int *idx0 = idx;
int *idx1 = idx0 + nnn;
int *idx2 = idx1 + nnn;
int *idx3 = idx2 + nnn;
int *idx4 = idx3 + nnn;
int *idx5 = idx4 + nnn;
int i, j, k, m;
for (m = 0, i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
for (k = 0; k < n; k++, m++) {
idx0[m] = i * nn + j * n + k;
idx1[m] = i * nn + k * n + j;
idx2[m] = j * nn + i * n + k;
idx3[m] = k * nn + i * n + j;
idx4[m] = j * nn + k * n + i;
idx5[m] = k * nn + j * n + i;
} } }
}
/*
 * OpenMP driver for the real (T) correction over the virtual-orbital
 * tiles [a0,a1) x [b0,b1).  Each thread pre-scales t1T and fvo by 0.5,
 * processes jobs dynamically, and adds its partial energy into *e_tot
 * under a critical section.
 *
 * Fix: 'jobs' was allocated but never freed (leak on every call; compare
 * CCsd_zcontract_t3T, which does free it).
 */
void CCsd_t_contract(double *e_tot,
                     double *mo_energy, double *t1T, double *t2T,
                     double *vooo, double *fvo,
                     int nocc, int nvir, int a0, int a1, int b0, int b1,
                     int nirrep, int *o_ir_loc, int *v_ir_loc,
                     int *oo_ir_loc, int *orbsym,
                     void *cache_row_a, void *cache_col_a,
                     void *cache_row_b, void *cache_col_b)
{
    int da = a1 - a0;
    int db = b1 - b0;
    /* da*db*b1 is an upper bound on the number of (a,b,c) jobs */
    CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
    size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1,
                                    cache_row_a, cache_col_a,
                                    cache_row_b, cache_col_b, sizeof(double));
    int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
    _make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
        shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
               v_ir_loc, oo_ir_loc, orbsym, vooo, fvo, jobs, e_tot, permute_idx)
{
    int a, b, c;
    size_t k;
    /* per-thread scratch: v0/w0/z0 for contract6 */
    double *cache1 = malloc(sizeof(double) * (nocc*nocc*nocc*3+2));
    double *t1Thalf = malloc(sizeof(double) * nvir*nocc * 2);
    double *fvohalf = t1Thalf + nvir*nocc;
    for (k = 0; k < nvir*nocc; k++) {
        t1Thalf[k] = t1T[k] * .5;
        fvohalf[k] = fvo[k] * .5;
    }
    double e = 0;
#pragma omp for schedule (dynamic, 4)
    for (k = 0; k < njobs; k++) {
        a = jobs[k].a;
        b = jobs[k].b;
        c = jobs[k].c;
        e += contract6(nocc, nvir, a, b, c, mo_energy, t1Thalf, t2T,
                       nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
                       fvohalf, vooo, cache1, jobs[k].cache, permute_idx);
    }
    free(t1Thalf);
    free(cache1);
#pragma omp critical
    *e_tot += e;
}
    free(jobs);        /* was leaked before */
    free(permute_idx);
}
/*
* Complex version of all functions
*/
static void zadd_and_permute(double complex *out, double complex *w,
double complex *v, int n)
{
int nn = n * n;
int nnn = nn * n;
int i, j, k;
for (i = 0; i < nnn; i++) {
v[i] += w[i];
}
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
for (k = 0; k < n; k++) {
out[i*nn+j*n+k] = v[i*nn+j*n+k] * 4
+ v[j*nn+k*n+i]
+ v[k*nn+i*n+j]
- v[k*nn+j*n+i] * 2
- v[i*nn+k*n+j] * 2
- v[j*nn+i*n+k] * 2;
} } }
}
/*
 * Complex counterpart of get_wv: accumulate the W and V intermediates for
 * one permutation (a,b,c), with t2T = t2.transpose(2,3,1,0):
 *     w[idx] += einsum('if,fjk->ijk', vv_op[:,nocc:], t2T[c])
 *             - einsum('ijm,mk->ijk', vooo[a], t2T[c,b])
 *     v[idx] += einsum('ij,k->ijk', vv_op[:,:nocc], t1T[c]*.5)
 *             + einsum('ij,k->ijk', t2T[b,a], fov[:,c]*.5)
 * t1Thalf/fvohalf are pre-scaled by .5 by the caller.
 */
static void zget_wv(double complex *w, double complex *v,
                    double complex *cache, double complex *fvohalf,
                    double complex *vooo, double complex *vv_op,
                    double complex *t1Thalf, double complex *t2T,
                    int nocc, int nvir, int a, int b, int c, int *idx)
{
    const double complex D0 = 0;
    const double complex D1 = 1;
    const double complex DN1 =-1;
    const char TRANS_N = 'N';
    const int nmo = nocc + nvir;
    const int noo = nocc * nocc;
    const size_t nooo = nocc * noo;
    const size_t nvoo = nvir * noo;
    int i, j, k, n;
    double complex *pt2T;
    /* cache(i,jk) = sum_f vv_op(i, nocc+f) * t2T(c, f, jk) */
    zgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
           &D1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
           &D0, cache, &noo);
    /* cache(ij,k) -= sum_m vooo(a, ij, m) * t2T(c, b, m, k) */
    zgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
           &DN1, t2T+c*nvoo+b*noo, &nocc, vooo+a*nooo, &nocc,
           &D1, cache, &nocc);
    pt2T = t2T + b * nvoo + a * noo;   /* t2T[b,a] block */
    for (n = 0, i = 0; i < nocc; i++) {
    for (j = 0; j < nocc; j++) {
    for (k = 0; k < nocc; k++, n++) {
        /* scatter into the permuted (i,j,k) position */
        w[idx[n]] += cache[n];
        v[idx[n]] +=(vv_op[i*nmo+j] * t1Thalf[c*nocc+k]
                   + pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
    } } }
}
/*
 * Complex-amplitude energy contribution of one virtual triple (a,b,c):
 *     et = fac * sum_ijk Re[ w[ijk] * conj(v[ijk]) ] / (e_i+e_j+e_k - e_a-e_b-e_c)
 * The accumulator is a double, so the C implicit complex-to-real
 * conversion keeps only the real part of each product.
 */
double _ccsd_t_zget_energy(double complex *w, double complex *v,
                           double *mo_energy, int nocc,
                           int a, int b, int c, double fac)
{
    double e_vir = mo_energy[nocc+a] + mo_energy[nocc+b] + mo_energy[nocc+c];
    double et = 0;
    int n = 0;
    int i, j, k;
    for (i = 0; i < nocc; i++) {
    for (j = 0; j < nocc; j++) {
    for (k = 0; k < nocc; k++) {
        et += fac / (mo_energy[i] + mo_energy[j] + mo_energy[k] - e_vir)
              * w[n] * conj(v[n]);
        n++;
    } } }
    return et;
}
/*
 * Complex counterpart of contract6: build W and V from the six
 * permutations of triple (a,b,c), combine, and return its energy
 * contribution.  Unlike contract6 there is no symmetry-adapted path;
 * zget_wv is used regardless of nirrep (the symmetry arguments are
 * accepted for signature parity but unused here).
 */
static double complex
zcontract6(int nocc, int nvir, int a, int b, int c,
           double *mo_energy, double complex *t1T, double complex *t2T,
           int nirrep, int *o_ir_loc, int *v_ir_loc,
           int *oo_ir_loc, int *orbsym, double complex *fvo,
           double complex *vooo, double complex *cache1, void **cache,
           int *permute_idx)
{
    const int nooo = nocc * nocc * nocc;
    double complex *v0 = cache1;
    double complex *w0 = v0 + nooo;
    double complex *z0 = w0 + nooo;
    double complex *wtmp = z0;   /* z0 doubles as gemm scratch before use */
    int *idx[6];
    int m;

    for (m = 0; m < 6; m++) {
        idx[m] = permute_idx + m * nooo;
    }
    memset(w0, 0, sizeof(double complex) * nooo);
    memset(v0, 0, sizeof(double complex) * nooo);

    zget_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c, idx[0]);
    zget_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b, idx[1]);
    zget_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c, idx[2]);
    zget_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a, idx[3]);
    zget_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b, idx[4]);
    zget_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a, idx[5]);
    zadd_and_permute(z0, w0, v0, nocc);

    /* degeneracy weight as in contract6 */
    double fac = 1.;
    if (a == c) {
        fac = 1./6;
    } else if (a == b || b == c) {
        fac = .5;
    }
    return _ccsd_t_zget_energy(w0, z0, mo_energy, nocc, a, b, c, fac);
}
/*
 * OpenMP driver for the complex (T) correction, mirroring CCsd_t_contract.
 * Partial energies are accumulated per thread and merged into *e_tot
 * under a critical section.
 *
 * Fix: 'jobs' was allocated but never freed (leak on every call; compare
 * CCsd_zcontract_t3T, which does free it).
 */
void CCsd_t_zcontract(double complex *e_tot,
                      double *mo_energy, double complex *t1T, double complex *t2T,
                      double complex *vooo, double complex *fvo,
                      int nocc, int nvir, int a0, int a1, int b0, int b1,
                      int nirrep, int *o_ir_loc, int *v_ir_loc,
                      int *oo_ir_loc, int *orbsym,
                      void *cache_row_a, void *cache_col_a,
                      void *cache_row_b, void *cache_col_b)
{
    int da = a1 - a0;
    int db = b1 - b0;
    /* da*db*b1 is an upper bound on the number of (a,b,c) jobs */
    CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
    size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1,
                                    cache_row_a, cache_col_a,
                                    cache_row_b, cache_col_b,
                                    sizeof(double complex));
    int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
    _make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
        shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
               v_ir_loc, oo_ir_loc, orbsym, vooo, fvo, jobs, e_tot, permute_idx)
{
    int a, b, c;
    size_t k;
    /* per-thread scratch: v0/w0/z0 for zcontract6 */
    double complex *cache1 = malloc(sizeof(double complex) * (nocc*nocc*nocc*3+2));
    double complex *t1Thalf = malloc(sizeof(double complex) * nvir*nocc * 2);
    double complex *fvohalf = t1Thalf + nvir*nocc;
    for (k = 0; k < nvir*nocc; k++) {
        t1Thalf[k] = t1T[k] * .5;
        fvohalf[k] = fvo[k] * .5;
    }
    double complex e = 0;
#pragma omp for schedule (dynamic, 4)
    for (k = 0; k < njobs; k++) {
        a = jobs[k].a;
        b = jobs[k].b;
        c = jobs[k].c;
        e += zcontract6(nocc, nvir, a, b, c, mo_energy, t1Thalf, t2T,
                        nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
                        fvohalf, vooo, cache1, jobs[k].cache, permute_idx);
    }
    free(t1Thalf);
    free(cache1);
#pragma omp critical
    *e_tot += e;
}
    free(jobs);        /* was leaked before */
    free(permute_idx);
}
/*****************************************************************************
*
* mpi4pyscf
*
*****************************************************************************/
/*
 * mpi4pyscf variant of get_wv: the t2 amplitudes and vooo integrals are
 * distributed, so the first virtual index of each array is shifted by its
 * slice origin (a0/c0) before indexing.  Same W/V contractions as get_wv.
 */
static void MPICCget_wv(double *w, double *v, double *cache,
                        double *fvohalf, double *vooo,
                        double *vv_op, double *t1Thalf,
                        double *t2T_a, double *t2T_c,
                        int nocc, int nvir, int a, int b, int c,
                        int a0, int b0, int c0, int *idx)
{
    const double D0 = 0;
    const double D1 = 1;
    const double DN1 = -1;
    const char TRANS_N = 'N';
    const int nmo = nocc + nvir;
    const int noo = nocc * nocc;
    const size_t nooo = nocc * noo;
    const size_t nvoo = nvir * noo;
    int i, j, k, n;
    double *pt2T;
    /* cache(i,jk) = sum_f vv_op(i, nocc+f) * t2T_c(c-c0, f, jk) */
    dgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
           &D1, t2T_c+(c-c0)*nvoo, &noo, vv_op+nocc, &nmo,
           &D0, cache, &noo);
    /* cache(ij,k) -= sum_m vooo(a-a0, ij, m) * t2T_c(c-c0, b, m, k) */
    dgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
           &DN1, t2T_c+(c-c0)*nvoo+b*noo, &nocc, vooo+(a-a0)*nooo, &nocc,
           &D1, cache, &nocc);
    pt2T = t2T_a + (a-a0) * nvoo + b * noo;   /* t2T_a[(a-a0), b] block */
    for (n = 0, i = 0; i < nocc; i++) {
    for (j = 0; j < nocc; j++) {
    for (k = 0; k < nocc; k++, n++) {
        /* scatter into the permuted (i,j,k) position */
        w[idx[n]] += cache[n];
        v[idx[n]] +=(vv_op[i*nmo+j] * t1Thalf[c*nocc+k]
                   + pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
    } } }
}
/*
 * mpi4pyscf variant of contract6: all per-slice arrays arrive through
 * data_ptrs (see the indices below); slices = [a0,a1,b0,b1,c0,c1] gives
 * the virtual-orbital tiles held locally.  Returns the energy of the
 * triple (a,b,c).
 */
static double MPICCcontract6(int nocc, int nvir, int a, int b, int c,
                             double *mo_energy, double *t1T, double *fvo,
                             int *slices, double **data_ptrs, double *cache1,
                             int *permute_idx)
{
    const int a0 = slices[0];
    const int a1 = slices[1];
    const int b0 = slices[2];
    const int b1 = slices[3];
    const int c0 = slices[4];
    const int c1 = slices[5];
    const int da = a1 - a0;
    const int db = b1 - b0;
    const int dc = c1 - c0;
    const int nooo = nocc * nocc * nocc;
    const int nmo = nocc + nvir;
    const size_t nop = nocc * nmo;
    int *idx0 = permute_idx;          /* six scatter tables, nooo each */
    int *idx1 = idx0 + nooo;
    int *idx2 = idx1 + nooo;
    int *idx3 = idx2 + nooo;
    int *idx4 = idx3 + nooo;
    int *idx5 = idx4 + nooo;
    /* vv_op blocks for the six permutations of (a,b,c) */
    double *vvop_ab = data_ptrs[0] + ((a-a0)*db+b-b0) * nop;
    double *vvop_ac = data_ptrs[1] + ((a-a0)*dc+c-c0) * nop;
    double *vvop_ba = data_ptrs[2] + ((b-b0)*da+a-a0) * nop;
    double *vvop_bc = data_ptrs[3] + ((b-b0)*dc+c-c0) * nop;
    double *vvop_ca = data_ptrs[4] + ((c-c0)*da+a-a0) * nop;
    double *vvop_cb = data_ptrs[5] + ((c-c0)*db+b-b0) * nop;
    /* vooo and t2T slices, one per leading virtual index */
    double *vooo_a = data_ptrs[6];
    double *vooo_b = data_ptrs[7];
    double *vooo_c = data_ptrs[8];
    double *t2T_a = data_ptrs[9 ];
    double *t2T_b = data_ptrs[10];
    double *t2T_c = data_ptrs[11];
    double *v0 = cache1;
    double *w0 = v0 + nooo;
    double *z0 = w0 + nooo;
    double *wtmp = z0;   /* z0 doubles as gemm scratch before it is written */
    int i;
    for (i = 0; i < nooo; i++) {
        w0[i] = 0;
        v0[i] = 0;
    }
    /* six permutations of (a,b,c), each with the matching slices/origins */
    MPICCget_wv(w0, v0, wtmp, fvo, vooo_a, vvop_ab, t1T, t2T_a, t2T_c, nocc, nvir, a, b, c, a0, b0, c0, idx0);
    MPICCget_wv(w0, v0, wtmp, fvo, vooo_a, vvop_ac, t1T, t2T_a, t2T_b, nocc, nvir, a, c, b, a0, c0, b0, idx1);
    MPICCget_wv(w0, v0, wtmp, fvo, vooo_b, vvop_ba, t1T, t2T_b, t2T_c, nocc, nvir, b, a, c, b0, a0, c0, idx2);
    MPICCget_wv(w0, v0, wtmp, fvo, vooo_b, vvop_bc, t1T, t2T_b, t2T_a, nocc, nvir, b, c, a, b0, c0, a0, idx3);
    MPICCget_wv(w0, v0, wtmp, fvo, vooo_c, vvop_ca, t1T, t2T_c, t2T_b, nocc, nvir, c, a, b, c0, a0, b0, idx4);
    MPICCget_wv(w0, v0, wtmp, fvo, vooo_c, vvop_cb, t1T, t2T_c, t2T_a, nocc, nvir, c, b, a, c0, b0, a0, idx5);
    add_and_permute(z0, w0, v0, nocc);
    /* degeneracy weight: 1/6 triple-degenerate, 1/2 double, 1 otherwise */
    double et;
    if (a == c) {
        et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1./6);
    } else if (a == b || b == c) {
        et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, .5);
    } else {
        et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1.);
    }
    return et;
}
/*
 * Enumerate (a,b,c) triples with a >= b >= c restricted to the local
 * slice bounds in slices = [a0,a1,b0,b1,c0,c1]; only the index fields of
 * each job are filled (the MPI driver resolves data through data_ptrs).
 * Returns the number of jobs written.  nocc/nvir/data_ptrs are unused
 * but kept for signature parity with the other job generators.
 */
size_t _MPICCsd_t_gen_jobs(CacheJob *jobs, int nocc, int nvir,
                           int *slices, double **data_ptrs)
{
    const int a0 = slices[0];
    const int a1 = slices[1];
    const int b0 = slices[2];
    const int b1 = slices[3];
    const int c0 = slices[4];
    const int c1 = slices[5];
    size_t m = 0;
    size_t a, b, c;
    for (a = a0; a < a1; a++) {
    for (b = b0; b < b1 && b <= a; b++) {       /* b < MIN(b1, a+1) */
    for (c = c0; c < c1 && c <= b; c++, m++) {  /* c < MIN(c1, b+1) */
        jobs[m].a = a;
        jobs[m].b = b;
        jobs[m].c = c;
    } } }
    return m;
}
/*
 * mpi4pyscf OpenMP driver: processes the locally-held (a,b,c) triples
 * and adds the partial (T) energy into *e_tot under a critical section.
 *
 * Fix: 'jobs' was allocated but never freed (leak on every call; compare
 * CCsd_zcontract_t3T, which does free it).
 */
void MPICCsd_t_contract(double *e_tot, double *mo_energy, double *t1T,
                        double *fvo, int nocc, int nvir,
                        int *slices, double **data_ptrs)
{
    const int a0 = slices[0];
    const int a1 = slices[1];
    const int b0 = slices[2];
    const int b1 = slices[3];
    const int c0 = slices[4];
    const int c1 = slices[5];
    int da = a1 - a0;
    int db = b1 - b0;
    int dc = c1 - c0;
    /* da*db*dc is an upper bound on the number of jobs */
    CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*dc);
    size_t njobs = _MPICCsd_t_gen_jobs(jobs, nocc, nvir, slices, data_ptrs);
    int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
    _make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
        shared(njobs, nocc, nvir, mo_energy, t1T, fvo, jobs, e_tot, slices, \
               data_ptrs, permute_idx)
{
    int a, b, c;
    size_t k;
    /* per-thread scratch: v0/w0/z0 for MPICCcontract6 */
    double *cache1 = malloc(sizeof(double) * (nocc*nocc*nocc*3+2));
    double *t1Thalf = malloc(sizeof(double) * nvir*nocc * 2);
    double *fvohalf = t1Thalf + nvir*nocc;
    for (k = 0; k < nvir*nocc; k++) {
        t1Thalf[k] = t1T[k] * .5;
        fvohalf[k] = fvo[k] * .5;
    }
    double e = 0;
#pragma omp for schedule (dynamic, 4)
    for (k = 0; k < njobs; k++) {
        a = jobs[k].a;
        b = jobs[k].b;
        c = jobs[k].c;
        e += MPICCcontract6(nocc, nvir, a, b, c, mo_energy, t1Thalf,
                            fvohalf, slices, data_ptrs, cache1,
                            permute_idx);
    }
    free(t1Thalf);
    free(cache1);
#pragma omp critical
    *e_tot += e;
}
    free(jobs);        /* was leaked before */
    free(permute_idx);
}
/*****************************************************************************
*
* pyscf periodic ccsd(t) with k-points
*
*****************************************************************************/
/*
 * Enumerate every (a,b,c) triple in the full rectangular slice
 * [a0,a1) x [b0,b1) x [c0,c1) (no triangular restriction; used by the
 * k-point code where the three virtual indices belong to different
 * k-points).  Only the index fields are filled.  Returns the job count.
 * nocc/nvir are unused but kept for signature parity.
 */
size_t _CCsd_t_gen_jobs_full(CacheJob *jobs, int nocc, int nvir,
                             int *slices)
{
    const int a0 = slices[0];
    const int a1 = slices[1];
    const int b0 = slices[2];
    const int b1 = slices[3];
    const int c0 = slices[4];
    const int c1 = slices[5];
    size_t m = 0;
    size_t a, b, c;
    for (a = a0; a < a1; a++) {
    for (b = b0; b < b1; b++) {
    for (c = c0; c < c1; c++, m++) {
        jobs[m].a = a;
        jobs[m].b = b;
        jobs[m].c = c;
    } } }
    return m;
}
/*
 * k-point variant of zget_wv.  The three t2T slices (t2T_c1/c2/c3) carry
 * different k-point combinations; vv_op2 is the transposed-integral block
 * used for the V term.  bool_add_v gates the V accumulation: the caller
 * passes the k-point conservation condition (e.g. kk==kc), so V is only
 * added when the momentum labels match.
 */
static void CCzget_wv(double complex *w, double complex *v, double complex *cache,
                      double complex *fvohalf, double complex *vooo,
                      double complex *vv_op, double complex *vv_op2,
                      double complex *t1Thalf, double complex *t2T_c1,
                      double complex *t2T_c2, double complex *t2T_c3,
                      int nocc, int nvir, int a, int b, int c,
                      int a0, int b0, int c0, int *idx, int bool_add_v)
{
    const double complex D0 = 0;
    const double complex D1 = 1;
    const double complex DN1 = -1;
    const char TRANS_N = 'N';
    const int nmo = nocc + nvir;
    const int noo = nocc * nocc;
    const size_t nooo = nocc * noo;
    const size_t nvoo = nvir * noo;
    int i, j, k, n;
    double complex *pt2T;
    /* cache(i,jk) = sum_f vv_op(i, nocc+f) * t2T_c1(c-c0, f, jk) */
    zgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
           &D1, t2T_c1+(c-c0)*nvoo, &noo, vv_op+nocc, &nmo,
           &D0, cache, &noo);
    /* cache(ij,k) -= sum_m vooo(a-a0, ij, m) * t2T_c2(c-c0, b, m, k) */
    zgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
           &DN1, t2T_c2+(c-c0)*nvoo+b*noo, &nocc, vooo+(a-a0)*nooo, &nocc,
           &D1, cache, &nocc);
    pt2T = t2T_c3 + (b-b0)*nvoo + a*noo;
    for (n = 0, i = 0; i < nocc; i++) {
    for (j = 0; j < nocc; j++) {
    for (k = 0; k < nocc; k++, n++) {
        w[idx[n]] += cache[n];
        if(bool_add_v == 1){
            /* note the transposed oo block vv_op2[j,i] here */
            v[idx[n]] += (vv_op2[j*nmo+i] * t1Thalf[c*nocc+k]
                        + pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
        }
    } } }
}
/*
 * k-point CCSD(T): assemble the W and V intermediates for triple (a,b,c)
 * and store them into the global t3Tw/t3Tv tensors (no energy is formed
 * here; the commented-out denominator shows where it would be applied).
 * mo_offset gives the k-point of each orbital label (ki..kc); slices the
 * local virtual tiles; data_ptrs the 24 per-k-combination integral and
 * amplitude blocks unpacked below.
 */
static void zcontract6_t3T(int nocc, int nvir, int a, int b, int c,
                           int *mo_offset, double complex *t3Tw,
                           double complex *t3Tv, double *mo_energy,
                           double complex *t1T, double complex *fvo, int *slices,
                           double complex **data_ptrs, double complex *cache1,
                           int *permute_idx)
{
    const int a0 = slices[0];
    const int a1 = slices[1];
    const int b0 = slices[2];
    const int b1 = slices[3];
    const int c0 = slices[4];
    const int c1 = slices[5];
    const int da = a1 - a0;
    const int db = b1 - b0;
    const int dc = c1 - c0;
    const int nooo = nocc * nocc * nocc;
    const int nmo = nocc + nvir;
    const int nop = nocc * nmo;
    const int nov = nocc * nvir;
    int *idx0 = permute_idx;          /* six scatter tables, nooo each */
    int *idx1 = idx0 + nooo;
    int *idx2 = idx1 + nooo;
    int *idx3 = idx2 + nooo;
    int *idx4 = idx3 + nooo;
    int *idx5 = idx4 + nooo;
    int ki = mo_offset[0];            /* k-points of the occupied labels */
    int kj = mo_offset[1];
    int kk = mo_offset[2];
    int ka = mo_offset[3];            /* k-points of the virtual labels */
    int kb = mo_offset[4];
    int kc = mo_offset[5];
    double complex *t1T_a = t1T + ka * nov;
    double complex *t1T_b = t1T + kb * nov;
    double complex *t1T_c = t1T + kc * nov;
    double complex *fvo_a = fvo + ka * nov;
    double complex *fvo_b = fvo + kb * nov;
    double complex *fvo_c = fvo + kc * nov;
    /* vv_op blocks for the six permutations of (a,b,c) */
    double complex *vvop_ab = data_ptrs[0] + ((a-a0)*db+b-b0) * nop;
    double complex *vvop_ac = data_ptrs[1] + ((a-a0)*dc+c-c0) * nop;
    double complex *vvop_ba = data_ptrs[2] + ((b-b0)*da+a-a0) * nop;
    double complex *vvop_bc = data_ptrs[3] + ((b-b0)*dc+c-c0) * nop;
    double complex *vvop_ca = data_ptrs[4] + ((c-c0)*da+a-a0) * nop;
    double complex *vvop_cb = data_ptrs[5] + ((c-c0)*db+b-b0) * nop;
    /* vooo and t2T blocks, one per (virtual label, k-combination) */
    double complex *vooo_aj = data_ptrs[6];
    double complex *vooo_ak = data_ptrs[7];
    double complex *vooo_bi = data_ptrs[8];
    double complex *vooo_bk = data_ptrs[9];
    double complex *vooo_ci = data_ptrs[10];
    double complex *vooo_cj = data_ptrs[11];
    double complex *t2T_cj = data_ptrs[12];
    double complex *t2T_cb = data_ptrs[13];
    double complex *t2T_bk = data_ptrs[14];
    double complex *t2T_bc = data_ptrs[15];
    double complex *t2T_ci = data_ptrs[16];
    double complex *t2T_ca = data_ptrs[17];
    double complex *t2T_ak = data_ptrs[18];
    double complex *t2T_ac = data_ptrs[19];
    double complex *t2T_bi = data_ptrs[20];
    double complex *t2T_ba = data_ptrs[21];
    double complex *t2T_aj = data_ptrs[22];
    double complex *t2T_ab = data_ptrs[23];
    /* NOTE(review): abc and div are only used by the commented-out
     * denominator below; currently dead but kept with that comment */
    double abc = mo_energy[nocc+a+ka*nmo] + mo_energy[nocc+b+kb*nmo] + mo_energy[nocc+c+kc*nmo];
    double div;
    double complex *v0 = cache1;
    double complex *w0 = v0 + nooo;
    double complex *z0 = w0 + nooo;
    double complex *wtmp = z0;   /* z0 doubles as gemm scratch */
    int i, j, k, n;
    int offset;
    for (i = 0; i < nooo; i++) {
        w0[i] = 0;
        v0[i] = 0;
    }
    /*
     * t2T = t2.transpose(2,3,1,0)
     * ov = vv_op[:,nocc:]
     * oo = vv_op[:,:nocc]
     * w = numpy.einsum('if,fjk->ijk', ov, t2T[c])
     * w-= numpy.einsum('ijm,mk->ijk', vooo[a], t2T[c,b])
     * v = numpy.einsum('ij,k->ijk', oo, t1T[c]*.5)
     * v+= numpy.einsum('ij,k->ijk', t2T[b,a], fov[:,c]*.5)
     * v+= w
     */
    /* six permutations; the last flag gates V on k-point conservation */
    CCzget_wv(w0, v0, wtmp, fvo_c, vooo_aj, vvop_ab, vvop_ba, t1T_c, t2T_cj, t2T_cb, t2T_ba,
              nocc, nvir, a, b, c, a0, b0, c0, idx0, (kk==kc));
    CCzget_wv(w0, v0, wtmp, fvo_b, vooo_ak, vvop_ac, vvop_ca, t1T_b, t2T_bk, t2T_bc, t2T_ca,
              nocc, nvir, a, c, b, a0, c0, b0, idx1, (kj==kb));
    CCzget_wv(w0, v0, wtmp, fvo_c, vooo_bi, vvop_ba, vvop_ab, t1T_c, t2T_ci, t2T_ca, t2T_ab,
              nocc, nvir, b, a, c, b0, a0, c0, idx2, (kk==kc));
    CCzget_wv(w0, v0, wtmp, fvo_a, vooo_bk, vvop_bc, vvop_cb, t1T_a, t2T_ak, t2T_ac, t2T_cb,
              nocc, nvir, b, c, a, b0, c0, a0, idx3, (ka==ki));
    CCzget_wv(w0, v0, wtmp, fvo_b, vooo_ci, vvop_ca, vvop_ac, t1T_b, t2T_bi, t2T_ba, t2T_ac,
              nocc, nvir, c, a, b, c0, a0, b0, idx4, (kb==kj));
    CCzget_wv(w0, v0, wtmp, fvo_a, vooo_cj, vvop_cb, vvop_bc, t1T_a, t2T_aj, t2T_ab, t2T_bc,
              nocc, nvir, c, b, a, c0, b0, a0, idx5, (ka==ki));
    offset = (((a-a0)*db + b-b0)*dc + c-c0)*nooo;
    /* store raw W and V (denominator intentionally not applied here) */
    for (n = 0, i = 0; i < nocc; i++) {
    for (j = 0; j < nocc; j++) {
    for (k = 0; k < nocc; k++, n++) {
        //div = 1. / (mo_energy[i+ki*nmo] + mo_energy[j+kj*nmo] + mo_energy[k+kk*nmo] - abc);
        t3Tw[offset + n] = w0[n];
        t3Tv[offset + n] = v0[n];
    } } }
}
/*
 * OpenMP driver for the k-point t3 assembly: loops over all (a,b,c)
 * triples in the local rectangular slice and fills t3Tw/t3Tv via
 * zcontract6_t3T.  t1T and fvo contain nkpts blocks of nvir*nocc each
 * and are pre-scaled by 0.5 per thread.
 */
void CCsd_zcontract_t3T(double complex *t3Tw, double complex *t3Tv, double *mo_energy,
                        double complex *t1T, double complex *fvo, int nocc, int nvir, int nkpts,
                        int *mo_offset, int *slices, double complex **data_ptrs)
{
    const int a0 = slices[0];
    const int a1 = slices[1];
    const int b0 = slices[2];
    const int b1 = slices[3];
    const int c0 = slices[4];
    const int c1 = slices[5];
    int da = a1 - a0;
    int db = b1 - b0;
    int dc = c1 - c0;
    CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*dc);
    size_t njobs = _CCsd_t_gen_jobs_full(jobs, nocc, nvir, slices);
    int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
    _make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
        shared(njobs, nocc, nvir, nkpts, t3Tw, t3Tv, mo_offset, mo_energy, t1T, fvo, jobs, slices, \
               data_ptrs, permute_idx)
{
    int a, b, c;
    size_t k;
    /* per-thread scratch: v0/w0/z0 for zcontract6_t3T */
    complex double *cache1 = malloc(sizeof(double complex) * (nocc*nocc*nocc*3+2));
    /* all nkpts blocks of t1T/fvo scaled by .5, as CCzget_wv expects */
    complex double *t1Thalf = malloc(sizeof(double complex) * nkpts*nvir*nocc*2);
    complex double *fvohalf = t1Thalf + nkpts*nvir*nocc;
    for (k = 0; k < nkpts*nvir*nocc; k++) {
        t1Thalf[k] = t1T[k] * .5;
        fvohalf[k] = fvo[k] * .5;
    }
#pragma omp for schedule (dynamic, 4)
    for (k = 0; k < njobs; k++) {
        a = jobs[k].a;
        b = jobs[k].b;
        c = jobs[k].c;
        zcontract6_t3T(nocc, nvir, a, b, c, mo_offset, t3Tw, t3Tv, mo_energy, t1Thalf,
                       fvohalf, slices, data_ptrs, cache1,
                       permute_idx);
    }
    free(t1Thalf);
    free(cache1);
}
    free(jobs);
    free(permute_idx);
}
/* Compressed single index: counts (i,a) pairs from the end of each
 * a-column of length nocc. */
int Sc(int i, int a, int nocc)
{
    return nocc * (a + 1) - i - 1;
}
/* Compressed double index: triangular pair codes for (a<b) and (i<j),
 * counted from the end; nocc2 is the number of occupied pairs. */
int Dc(int i, int j, int a, int b, int nocc2)
{
    int ab = b * (b - 1) / 2 + a;
    int ij = j * (j - 1) / 2 + i;
    return (int) (nocc2 * (ab + 1) - (ij + 1));
}
/* Compressed triple index: tetrahedral codes for (a<b<c) and (i<j<k),
 * counted from the end; nocc3 is the number of occupied triples. */
int Tc(int i, int j, int k, int a, int b, int c, int nocc3)
{
    int abc = c * (c - 1) * (c - 2) / 6 + b * (b - 1) / 2 + a;
    int ijk = k * (k - 1) * (k - 2) / 6 + j * (j - 1) / 2 + i;
    return (int) (nocc3 * (abc + 1) - (ijk + 1));
}
/* Combined double+single compressed index: a Dc pair block of size
 * nocc*nvir plus the Sc offset inside it. */
int DSc(int i, int j, int k, int a, int b, int c, int nocc, int nvir, int nocc2)
{
    int pair = Dc(i, j, a, b, nocc2);
    int single = Sc(k, c, nocc);
    return pair * nocc * nvir + single;
}
/* Row-major single-excitation index: (i,a) -> i*nvir + a. */
int S(int i, int a, int nvir)
{
    return a + nvir * i;
}
/* Row-major double index in (i,j,a,b) order. */
int D(int i, int j, int a, int b, int nocc, int nvir)
{
    int ij = i * nocc + j;
    return (ij * nvir + a) * nvir + b;
}
/* Row-major double index in interleaved (i,a,j,b) order. */
int De(int i, int a, int j, int b, int nocc, int nvir)
{
    int ia = i * nvir + a;
    return (ia * nocc + j) * nvir + b;
}
/* Row-major (i,j,k,a) index (the reversed (a,k,j,i) layout was the
 * alternative kept in the original as a commented-out variant). */
int Dtmp1(int i, int j, int k, int a, int nocc, int nvir)
{
    int ijk = (i * nocc + j) * nocc + k;
    return ijk * nvir + a;
}
/* Row-major (i,a,b,c) index with three virtual axes of extent nvir. */
int Dtmp2(int i, int a, int b, int c, int nvir)
{
    int iab = (i * nvir + a) * nvir + b;
    return iab * nvir + c;
}
/* Row-major triple index (i,j,k,a,b,c); promoted to size_t at the first
 * addition (as in the original) so large tensors do not overflow int. */
size_t T(int i, int j, int k, int a, int b, int c, int nocc, int nvir)
{
    size_t ijk = (i * nocc + (size_t)j) * nocc + k;
    return ((ijk * nvir + a) * nvir + b) * nvir + c;
}
/* Row-major quadruple index (i,j,k,l,a,b,c,d). */
int Q(int i, int j, int k, int l, int a, int b, int c, int d, int nocc, int nvir)
{
    int ijkl = ((i * nocc + j) * nocc + k) * nocc + l;
    return (((ijkl * nvir + a) * nvir + b) * nvir + c) * nvir + d;
}
/* Antisymmetrized same-spin t1*t1 product:
 * t1[i,a]*t1[j,b] - t1[i,b]*t1[j,a].  nocc is unused (kept for
 * signature parity with the other t1x* helpers). */
double t1xt1aa(int i, int j, int a, int b, int nocc, int nvir, double *t1)
{
    double direct   = t1[S(i, a, nvir)] * t1[S(j, b, nvir)];
    double exchange = t1[S(i, b, nvir)] * t1[S(j, a, nvir)];
    return direct - exchange;
}
/* Opposite-spin t1*t1 product (no exchange term): t1[i,a]*t1[j,b]. */
double t1xt1ab(int i, int j, int a, int b, int nocc, int nvir, double *t1)
{
    return t1[S(i, a, nvir)] * t1[S(j, b, nvir)];
}
/* Unrestricted opposite-spin t1*t1 product: separate alpha/beta
 * amplitude arrays and virtual dimensions. */
double t1xt1ab_u(int i, int j, int a, int b, int nvira, int nvirb, double *t1a, double *t1b)
{
    return t1a[S(i, a, nvira)] * t1b[S(j, b, nvirb)];
}
/*
 * Fully antisymmetrized same-spin t1*t2 contribution to a t3 amplitude:
 * sum over the 9 distinct (occupied x virtual) single-excitation
 * placements with alternating signs.
 */
double t1xt2aaa(int i, int j, int k, int a, int b, int c, int nocc, int nvir, double *t1, double *t2aa)
{
    double t1xt2 = 0.0;
    t1xt2 += t1[S(i, a, nvir)] * t2aa[D(j, k, b, c, nocc, nvir)];
    t1xt2 -= t1[S(i, b, nvir)] * t2aa[D(j, k, a, c, nocc, nvir)];
    t1xt2 += t1[S(i, c, nvir)] * t2aa[D(j, k, a, b, nocc, nvir)];
    t1xt2 -= t1[S(j, a, nvir)] * t2aa[D(i, k, b, c, nocc, nvir)];
    t1xt2 += t1[S(j, b, nvir)] * t2aa[D(i, k, a, c, nocc, nvir)];
    t1xt2 -= t1[S(j, c, nvir)] * t2aa[D(i, k, a, b, nocc, nvir)];
    t1xt2 += t1[S(k, a, nvir)] * t2aa[D(i, j, b, c, nocc, nvir)];
    t1xt2 -= t1[S(k, b, nvir)] * t2aa[D(i, j, a, c, nocc, nvir)];
    t1xt2 += t1[S(k, c, nvir)] * t2aa[D(i, j, a, b, nocc, nvir)];
    return t1xt2;
}
/*
 * Fully antisymmetrized same-spin t1^3 contribution: the 6 permutations
 * of (a,b,c) against fixed (i,j,k), signed by permutation parity.
 */
double t1xt1xt1aaa(int i, int j, int k, int a, int b, int c, int nocc, int nvir, double *t1)
{
    double t1xt1xt1 = 0.0;
    t1xt1xt1 += t1[S(i, a, nvir)] * t1[S(j, b, nvir)] * t1[S(k, c, nvir)];
    t1xt1xt1 -= t1[S(i, a, nvir)] * t1[S(j, c, nvir)] * t1[S(k, b, nvir)];
    t1xt1xt1 -= t1[S(i, b, nvir)] * t1[S(j, a, nvir)] * t1[S(k, c, nvir)];
    t1xt1xt1 += t1[S(i, b, nvir)] * t1[S(j, c, nvir)] * t1[S(k, a, nvir)];
    t1xt1xt1 += t1[S(i, c, nvir)] * t1[S(j, a, nvir)] * t1[S(k, b, nvir)];
    t1xt1xt1 -= t1[S(i, c, nvir)] * t1[S(j, b, nvir)] * t1[S(k, a, nvir)];
    return t1xt1xt1;
}
/*
 * Mixed-spin (aab) t1*t2 contribution: antisymmetrization runs only over
 * the two alpha labels (i,j)/(a,b); the beta label (k,c) pairs with the
 * alpha-alpha t2 in the last term.
 */
double t1xt2aab(int i, int j, int k, int a, int b, int c, int nocc, int nvir, double *t1, double *t2aa, double *t2ab)
{
    double t1xt2 = 0.0;
    t1xt2 += t1[S(i, a, nvir)] * t2ab[D(j, k, b, c, nocc, nvir)];
    t1xt2 -= t1[S(i, b, nvir)] * t2ab[D(j, k, a, c, nocc, nvir)];
    t1xt2 -= t1[S(j, a, nvir)] * t2ab[D(i, k, b, c, nocc, nvir)];
    t1xt2 += t1[S(j, b, nvir)] * t2ab[D(i, k, a, c, nocc, nvir)];
    t1xt2 += t1[S(k, c, nvir)] * t2aa[D(i, j, a, b, nocc, nvir)];
    return t1xt2;
}
/*
 * Mixed-spin (aab) t1^3 contribution: only the alpha pair (i,j)/(a,b) is
 * antisymmetrized; the beta factor t1[k,c] is common to both terms.
 */
double t1xt1xt1aab(int i, int j, int k, int a, int b, int c, int nocc, int nvir, double *t1)
{
    double t1xt1xt1 = 0.0;
    t1xt1xt1 += t1[S(i, a, nvir)] * t1[S(j, b, nvir)] * t1[S(k, c, nvir)];
    t1xt1xt1 -= t1[S(i, b, nvir)] * t1[S(j, a, nvir)] * t1[S(k, c, nvir)];
    return t1xt1xt1;
}
double t1xt3aaab(int i, int j, int k, int l, int a, int b, int c, int d, int nocc, int nvir, double *t1, double *t3aaa, double *t3aab)
{
    /* Disconnected T1*T3 contribution to an aaab quadruple: each alpha t1
     * factor pairs with the mixed-spin t3aab remainder; the beta factor
     * t1(l,d) pairs with the all-alpha t3aaa remainder.
     * Term order preserved for reproducibility. */
    double acc = 0.0;
    acc += t1[S(i, a, nvir)] * t3aab[T(j, k, l, b, c, d, nocc, nvir)];
    acc -= t1[S(i, b, nvir)] * t3aab[T(j, k, l, a, c, d, nocc, nvir)];
    acc += t1[S(i, c, nvir)] * t3aab[T(j, k, l, a, b, d, nocc, nvir)];
    acc -= t1[S(j, a, nvir)] * t3aab[T(i, k, l, b, c, d, nocc, nvir)];
    acc += t1[S(j, b, nvir)] * t3aab[T(i, k, l, a, c, d, nocc, nvir)];
    acc -= t1[S(j, c, nvir)] * t3aab[T(i, k, l, a, b, d, nocc, nvir)];
    acc += t1[S(k, a, nvir)] * t3aab[T(i, j, l, b, c, d, nocc, nvir)];
    acc -= t1[S(k, b, nvir)] * t3aab[T(i, j, l, a, c, d, nocc, nvir)];
    acc += t1[S(k, c, nvir)] * t3aab[T(i, j, l, a, b, d, nocc, nvir)];
    acc += t1[S(l, d, nvir)] * t3aaa[T(i, j, k, a, b, c, nocc, nvir)];
    return acc;
}
/* Convert a single aab CI triple coefficient into the connected t3aab
 * amplitude: intermediate-normalize by c0, then subtract the disconnected
 * T1*T2 and T1^3 pieces.
 *
 * NOTE(review): `parity` is initialized to 1 and never flipped; the printf
 * calls only warn when the index order (i>j or a>b) would require a sign
 * change.  If a sign flip was intended for out-of-order indices it is not
 * implemented — confirm all callers pass i<j and a<b. */
double c3tot3aab(int i, int j, int k, int a, int b, int c, int nocc, int nocc2, int nvir, double *t1, double *t2aa, double *t2ab, double *c3aab, double c0)
{
double t3 = 0.0;
double parity = 1;
if ( i > j ) {
printf("parity change!");
}
if ( a > b ) {
printf("parity change!");
}
// interm norm of c3
t3 = c3aab[DSc(i, j, k, a, b, c, nocc, nvir, nocc2)] / c0;
t3-= t1xt2aab(i, j, k, a, b, c, nocc, nvir, t1, t2aa, t2ab);
t3-= t1xt1xt1aab(i, j, k, a, b, c, nocc, nvir, t1);
return t3 * parity;
}
/* Convert a single aaa CI triple coefficient into the connected t3aaa
 * amplitude: intermediate-normalize by c0, then subtract the disconnected
 * T1*T2 and T1^3 pieces.
 *
 * The printf calls only warn when the index order would require a sign
 * change (i,j,k or a,b,c not strictly increasing); no sign flip is applied
 * here — callers are expected to pass ordered indices.
 *
 * Fix: removed the dead local `parity`, which was declared and never used
 * in the returned value (unlike c3tot3aab, which returns t3 * parity). */
double c3tot3aaa(int i, int j, int k, int a, int b, int c, int nocc, int nocc3, int nvir, double *t1, double *t2aa, double *c3aaa, double c0)
{
double t3;
if ( i > j || j > k || i > k ) {
printf("parity change!: c3tott3aaa");
}
if ( a > b || b > c || a > c ) {
printf("parity change!: c3tott3aaa");
}
// interm norm of c3
t3 = c3aaa[Tc(i, j, k, a, b, c, nocc3)] / c0;
t3-= t1xt2aaa (i, j, k, a, b, c, nocc, nvir, t1, t2aa);
t3-= t1xt1xt1aaa (i, j, k, a, b, c, nocc, nvir, t1);
return t3;
}
double t1xc3aaab(int i, int j, int k, int l, int a, int b, int c, int d, int nocc, int nocc2, int nocc3, int nvir, double *t1, double *t2aa, double *t2ab, double *c3aaa, double *c3aab, double c0)
{
    /* Same contraction pattern as t1xt3aaab, but the t3 remainders are
     * generated on the fly from CI coefficients via c3tot3aab/c3tot3aaa
     * instead of being read from precomputed t3 arrays.
     * Term order preserved for reproducibility. */
    double acc = 0.0;
    acc += t1[S(i, a, nvir)] * c3tot3aab(j, k, l, b, c, d, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc -= t1[S(i, b, nvir)] * c3tot3aab(j, k, l, a, c, d, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc += t1[S(i, c, nvir)] * c3tot3aab(j, k, l, a, b, d, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc -= t1[S(j, a, nvir)] * c3tot3aab(i, k, l, b, c, d, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc += t1[S(j, b, nvir)] * c3tot3aab(i, k, l, a, c, d, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc -= t1[S(j, c, nvir)] * c3tot3aab(i, k, l, a, b, d, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc += t1[S(k, a, nvir)] * c3tot3aab(i, j, l, b, c, d, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc -= t1[S(k, b, nvir)] * c3tot3aab(i, j, l, a, c, d, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc += t1[S(k, c, nvir)] * c3tot3aab(i, j, l, a, b, d, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc += t1[S(l, d, nvir)] * c3tot3aaa(i, j, k, a, b, c, nocc, nocc3, nvir, t1, t2aa, c3aaa, c0);
    return acc;
}
double t1xt3aabb(int i, int j, int k, int l, int a, int b, int c, int d, int nocc, int nvir, double *t1, double *t3aab)
{
    /* Disconnected T1*T3 contribution to an aabb quadruple.  Both the
     * alpha pair (i,j)/(a,b) and the beta pair (k,l)/(c,d) are
     * antisymmetrized against a t3aab remainder; for the alpha t1 factors
     * the remaining bba triple is addressed by listing the two beta
     * indices first in T(...).  Term order preserved. */
    double acc = 0.0;
    acc += t1[S(i, a, nvir)] * t3aab[T(k, l, j, c, d, b, nocc, nvir)];
    acc -= t1[S(i, b, nvir)] * t3aab[T(k, l, j, c, d, a, nocc, nvir)];
    acc -= t1[S(j, a, nvir)] * t3aab[T(k, l, i, c, d, b, nocc, nvir)];
    acc += t1[S(j, b, nvir)] * t3aab[T(k, l, i, c, d, a, nocc, nvir)];
    acc += t1[S(k, c, nvir)] * t3aab[T(i, j, l, a, b, d, nocc, nvir)];
    acc -= t1[S(k, d, nvir)] * t3aab[T(i, j, l, a, b, c, nocc, nvir)];
    acc -= t1[S(l, c, nvir)] * t3aab[T(i, j, k, a, b, d, nocc, nvir)];
    acc += t1[S(l, d, nvir)] * t3aab[T(i, j, k, a, b, c, nocc, nvir)];
    return acc;
}
double t1xc3aabb(int i, int j, int k, int l, int a, int b, int c, int d, int nocc, int nocc2, int nvir, double *t1, double *t2aa, double *t2ab, double *c3aab, double c0)
{
    /* Same contraction pattern as t1xt3aabb, but each t3aab element is
     * produced on the fly from CI coefficients via c3tot3aab.
     * Term order preserved for reproducibility. */
    double acc = 0.0;
    acc += t1[S(i, a, nvir)] * c3tot3aab(k, l, j, c, d, b, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc -= t1[S(i, b, nvir)] * c3tot3aab(k, l, j, c, d, a, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc -= t1[S(j, a, nvir)] * c3tot3aab(k, l, i, c, d, b, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc += t1[S(j, b, nvir)] * c3tot3aab(k, l, i, c, d, a, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc += t1[S(k, c, nvir)] * c3tot3aab(i, j, l, a, b, d, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc -= t1[S(k, d, nvir)] * c3tot3aab(i, j, l, a, b, c, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc -= t1[S(l, c, nvir)] * c3tot3aab(i, j, k, a, b, d, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    acc += t1[S(l, d, nvir)] * c3tot3aab(i, j, k, a, b, c, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
    return acc;
}
double t2xt2aaab(int i, int j, int k, int l, int a, int b, int c, int d, int nocc, int nvir, double *t2aa, double *t2ab)
{
    /* Disconnected T2*T2 contribution to an aaab quadruple: one aa double
     * paired with one ab double, antisymmetrized over the choices of
     * which alpha pair goes with the beta electron (l,d).
     * Term order preserved for reproducibility. */
    double acc = 0.0;
    acc += t2aa[D(i, j, a, b, nocc, nvir)] * t2ab[D(k, l, c, d, nocc, nvir)];
    acc -= t2aa[D(i, j, a, c, nocc, nvir)] * t2ab[D(k, l, b, d, nocc, nvir)];
    acc += t2aa[D(i, j, b, c, nocc, nvir)] * t2ab[D(k, l, a, d, nocc, nvir)];
    acc -= t2aa[D(i, k, a, b, nocc, nvir)] * t2ab[D(j, l, c, d, nocc, nvir)];
    acc += t2aa[D(i, k, a, c, nocc, nvir)] * t2ab[D(j, l, b, d, nocc, nvir)];
    acc -= t2aa[D(i, k, b, c, nocc, nvir)] * t2ab[D(j, l, a, d, nocc, nvir)];
    acc += t2ab[D(i, l, a, d, nocc, nvir)] * t2aa[D(j, k, b, c, nocc, nvir)];
    acc -= t2ab[D(i, l, b, d, nocc, nvir)] * t2aa[D(j, k, a, c, nocc, nvir)];
    acc += t2ab[D(i, l, c, d, nocc, nvir)] * t2aa[D(j, k, a, b, nocc, nvir)];
    return acc;
}
double t2xt2aabb(int i, int j, int k, int l, int a, int b, int c, int d, int nocc, int nvir, double *t2aa, double *t2ab)
{
    /* Disconnected T2*T2 contribution to an aabb quadruple: the aa*aa
     * product plus the antisymmetrized ab*ab cross terms.
     * NOTE(review): the first term reuses t2aa for the beta pair (k,l,c,d),
     * i.e. t2bb == t2aa is assumed (closed shell) — confirm.
     * Term order preserved for reproducibility. */
    double acc = 0.0;
    acc += t2aa[D(i, j, a, b, nocc, nvir)] * t2aa[D(k, l, c, d, nocc, nvir)];
    acc += t2ab[D(i, k, a, c, nocc, nvir)] * t2ab[D(j, l, b, d, nocc, nvir)];
    acc -= t2ab[D(i, k, a, d, nocc, nvir)] * t2ab[D(j, l, b, c, nocc, nvir)];
    acc -= t2ab[D(i, k, b, c, nocc, nvir)] * t2ab[D(j, l, a, d, nocc, nvir)];
    acc += t2ab[D(i, k, b, d, nocc, nvir)] * t2ab[D(j, l, a, c, nocc, nvir)];
    acc -= t2ab[D(i, l, a, c, nocc, nvir)] * t2ab[D(j, k, b, d, nocc, nvir)];
    acc += t2ab[D(i, l, a, d, nocc, nvir)] * t2ab[D(j, k, b, c, nocc, nvir)];
    acc += t2ab[D(i, l, b, c, nocc, nvir)] * t2ab[D(j, k, a, d, nocc, nvir)];
    acc -= t2ab[D(i, l, b, d, nocc, nvir)] * t2ab[D(j, k, a, c, nocc, nvir)];
    return acc;
}
double t1xt1xt2aaab(int i, int j, int k, int l, int a, int b, int c, int d, int nocc, int nvir, double *t1, double *t2aa, double *t2ab)
{
    /* Disconnected T1^2*T2 contribution to an aaab quadruple: two t1
     * factors times the complementary double, antisymmetrized over all
     * distinct pairings.  The sign pattern is copied verbatim from the
     * reference expansion; statement order is preserved so results are
     * bit-for-bit reproducible. */
    double acc = 0.0;
    acc += t1[S(i,a,nvir)] * t1[S(j,b,nvir)] * t2ab[D(k,l,c,d,nocc,nvir)];
    acc -= t1[S(i,a,nvir)] * t1[S(j,c,nvir)] * t2ab[D(k,l,b,d,nocc,nvir)];
    acc += t1[S(i,b,nvir)] * t1[S(j,c,nvir)] * t2ab[D(k,l,a,d,nocc,nvir)];
    acc -= t1[S(i,a,nvir)] * t1[S(k,b,nvir)] * t2ab[D(j,l,c,d,nocc,nvir)];
    acc += t1[S(i,a,nvir)] * t1[S(k,c,nvir)] * t2ab[D(j,l,b,d,nocc,nvir)];
    acc -= t1[S(i,b,nvir)] * t1[S(k,c,nvir)] * t2ab[D(j,l,a,d,nocc,nvir)];
    acc += t1[S(i,a,nvir)] * t1[S(l,d,nvir)] * t2aa[D(j,k,b,c,nocc,nvir)];
    acc -= t1[S(i,b,nvir)] * t1[S(l,d,nvir)] * t2aa[D(j,k,a,c,nocc,nvir)];
    acc += t1[S(i,c,nvir)] * t1[S(l,d,nvir)] * t2aa[D(j,k,a,b,nocc,nvir)];
    acc += t1[S(j,a,nvir)] * t1[S(k,b,nvir)] * t2ab[D(i,l,c,d,nocc,nvir)];
    acc -= t1[S(j,a,nvir)] * t1[S(k,c,nvir)] * t2ab[D(i,l,b,d,nocc,nvir)];
    acc += t1[S(j,b,nvir)] * t1[S(k,c,nvir)] * t2ab[D(i,l,a,d,nocc,nvir)];
    acc -= t1[S(j,a,nvir)] * t1[S(l,d,nvir)] * t2aa[D(i,k,b,c,nocc,nvir)];
    acc += t1[S(j,b,nvir)] * t1[S(l,d,nvir)] * t2aa[D(i,k,a,c,nocc,nvir)];
    acc -= t1[S(j,c,nvir)] * t1[S(l,d,nvir)] * t2aa[D(i,k,a,b,nocc,nvir)];
    acc += t1[S(k,a,nvir)] * t1[S(l,d,nvir)] * t2aa[D(i,j,b,c,nocc,nvir)];
    acc -= t1[S(k,b,nvir)] * t1[S(l,d,nvir)] * t2aa[D(i,j,a,c,nocc,nvir)];
    acc += t1[S(k,c,nvir)] * t1[S(l,d,nvir)] * t2aa[D(i,j,a,b,nocc,nvir)];
    acc -= t1[S(j,a,nvir)] * t1[S(i,b,nvir)] * t2ab[D(k,l,c,d,nocc,nvir)];
    acc += t1[S(j,a,nvir)] * t1[S(i,c,nvir)] * t2ab[D(k,l,b,d,nocc,nvir)];
    acc -= t1[S(j,b,nvir)] * t1[S(i,c,nvir)] * t2ab[D(k,l,a,d,nocc,nvir)];
    acc += t1[S(k,a,nvir)] * t1[S(i,b,nvir)] * t2ab[D(j,l,c,d,nocc,nvir)];
    acc -= t1[S(k,a,nvir)] * t1[S(i,c,nvir)] * t2ab[D(j,l,b,d,nocc,nvir)];
    acc += t1[S(k,b,nvir)] * t1[S(i,c,nvir)] * t2ab[D(j,l,a,d,nocc,nvir)];
    acc -= t1[S(k,a,nvir)] * t1[S(j,b,nvir)] * t2ab[D(i,l,c,d,nocc,nvir)];
    acc += t1[S(k,a,nvir)] * t1[S(j,c,nvir)] * t2ab[D(i,l,b,d,nocc,nvir)];
    acc -= t1[S(k,b,nvir)] * t1[S(j,c,nvir)] * t2ab[D(i,l,a,d,nocc,nvir)];
    return acc;
}
double t1xt1xt2aabb(int i, int j, int k, int l, int a, int b, int c, int d, int nocc, int nvir, double *t1, double *t2aa, double *t2ab)
{
    /* Disconnected T1^2*T2 contribution to an aabb quadruple: two t1
     * factors times the complementary double (t2aa for same-spin
     * remainders, t2ab for mixed).  The sign pattern is copied verbatim
     * from the reference expansion; statement order is preserved so
     * results are bit-for-bit reproducible. */
    double acc = 0.0;
    acc += t1[S(i,a,nvir)] * t1[S(j,b,nvir)] * t2aa[D(k,l,c,d,nocc,nvir)];
    acc += t1[S(k,c,nvir)] * t1[S(l,d,nvir)] * t2aa[D(i,j,a,b,nocc,nvir)];
    acc -= t1[S(j,a,nvir)] * t1[S(i,b,nvir)] * t2aa[D(k,l,c,d,nocc,nvir)];
    acc -= t1[S(l,c,nvir)] * t1[S(k,d,nvir)] * t2aa[D(i,j,a,b,nocc,nvir)];
    acc += t1[S(i,a,nvir)] * t1[S(k,c,nvir)] * t2ab[D(j,l,b,d,nocc,nvir)];
    acc -= t1[S(i,a,nvir)] * t1[S(k,d,nvir)] * t2ab[D(j,l,b,c,nocc,nvir)];
    acc -= t1[S(i,b,nvir)] * t1[S(k,c,nvir)] * t2ab[D(j,l,a,d,nocc,nvir)];
    acc += t1[S(i,b,nvir)] * t1[S(k,d,nvir)] * t2ab[D(j,l,a,c,nocc,nvir)];
    acc -= t1[S(i,a,nvir)] * t1[S(l,c,nvir)] * t2ab[D(j,k,b,d,nocc,nvir)];
    acc += t1[S(i,a,nvir)] * t1[S(l,d,nvir)] * t2ab[D(j,k,b,c,nocc,nvir)];
    acc += t1[S(i,b,nvir)] * t1[S(l,c,nvir)] * t2ab[D(j,k,a,d,nocc,nvir)];
    acc -= t1[S(i,b,nvir)] * t1[S(l,d,nvir)] * t2ab[D(j,k,a,c,nocc,nvir)];
    acc -= t1[S(j,a,nvir)] * t1[S(k,c,nvir)] * t2ab[D(i,l,b,d,nocc,nvir)];
    acc += t1[S(j,a,nvir)] * t1[S(k,d,nvir)] * t2ab[D(i,l,b,c,nocc,nvir)];
    acc += t1[S(j,b,nvir)] * t1[S(k,c,nvir)] * t2ab[D(i,l,a,d,nocc,nvir)];
    acc -= t1[S(j,b,nvir)] * t1[S(k,d,nvir)] * t2ab[D(i,l,a,c,nocc,nvir)];
    acc += t1[S(j,a,nvir)] * t1[S(l,c,nvir)] * t2ab[D(i,k,b,d,nocc,nvir)];
    acc -= t1[S(j,a,nvir)] * t1[S(l,d,nvir)] * t2ab[D(i,k,b,c,nocc,nvir)];
    acc -= t1[S(j,b,nvir)] * t1[S(l,c,nvir)] * t2ab[D(i,k,a,d,nocc,nvir)];
    acc += t1[S(j,b,nvir)] * t1[S(l,d,nvir)] * t2ab[D(i,k,a,c,nocc,nvir)];
    return acc;
}
double t1xt1xt1xt1aaab(int i, int j, int k, int l, int a, int b, int c, int d, int nocc, int nvir, double *t1)
{
    /* Disconnected T1^4 contribution to an aaab quadruple: the common
     * beta factor t1(l,d) times the six signed alpha permutations of
     * (i,j,k) against (a,b,c).  Term order preserved. */
    double acc = 0.0;
    acc += t1[S(i, a, nvir)] * t1[S(j, b, nvir)] * t1[S(k, c, nvir)] * t1[S(l, d, nvir)];
    acc -= t1[S(i, a, nvir)] * t1[S(j, c, nvir)] * t1[S(k, b, nvir)] * t1[S(l, d, nvir)];
    acc -= t1[S(i, b, nvir)] * t1[S(j, a, nvir)] * t1[S(k, c, nvir)] * t1[S(l, d, nvir)];
    acc += t1[S(i, b, nvir)] * t1[S(j, c, nvir)] * t1[S(k, a, nvir)] * t1[S(l, d, nvir)];
    acc += t1[S(i, c, nvir)] * t1[S(j, a, nvir)] * t1[S(k, b, nvir)] * t1[S(l, d, nvir)];
    acc -= t1[S(i, c, nvir)] * t1[S(j, b, nvir)] * t1[S(k, a, nvir)] * t1[S(l, d, nvir)];
    return acc;
}
double t1xt1xt1xt1aabb(int i, int j, int k, int l, int a, int b, int c, int d, int nocc, int nvir, double *t1)
{
    /* Disconnected T1^4 contribution to an aabb quadruple: only the beta
     * pair (k,l)/(c,d) is antisymmetrized here, giving two signed terms. */
    double acc = 0.0;
    acc += t1[S(i, a, nvir)] * t1[S(j, b, nvir)] * t1[S(k, c, nvir)] * t1[S(l, d, nvir)];
    acc -= t1[S(i, a, nvir)] * t1[S(j, b, nvir)] * t1[S(k, d, nvir)] * t1[S(l, c, nvir)];
    return acc;
}
void c1_to_t1(double *t1, double *c1, int nocc, int nvir)
{
    /* Scatter CI singles coefficients into the t1 amplitude array.
     * c1 is stored virtual-major with the occupied index running
     * backwards (i = nocc-1 .. 0) within each virtual block, while t1 is
     * row-major t1[i*nvir + a].  Here the c1 position is computed in
     * closed form instead of with a running counter. */
    int i, a, src;
    for (a = 0; a < nvir; a++) {
        for (i = 0; i < nocc; i++) {
            src = a * nocc + (nocc - 1 - i);
            t1[i * nvir + a] = c1[src];
        }
    }
}
/* Convert CI double coefficients into CC t2 amplitudes.
 *
 * t2aa: c2aa is packed over a<b, i<j (virtual-major, occupied descending);
 * each above-threshold element, after subtracting the disconnected T1*T1
 * piece, is scattered into the four antisymmetry-related slots of the full
 * t2aa array.  t2ab: c2ab is a dense (ia,jb) matrix in the same
 * backwards-occupied ordering.
 *
 * NOTE(review): only entries with |c2| > numzero are written, so t2aa/t2ab
 * are assumed to be zero-initialized by the caller — confirm. */
void c2_to_t2(double *t2aa, double *t2ab, double *c2aa, double *c2ab, double *t1, int nocc, int nvir)
{
// threshold below which a CI coefficient is treated as zero
double numzero = 1e-7;
int i, j, a, b, ijab_c, ijab_t1, ijab_t2, ijab_t3, ijab_t4;
int ia, jb, iajb_c, ijab_t;
double tmp;
// t2aa
ijab_c = -1;
for (b = 1; b < nvir; b++) {
for (a = 0; a < b; a++) {
for (j = nocc-1; j > 0; j--) {
for (i = j-1; i > -1; i--) {
ijab_c += 1;
// the four (i,j)x(a,b) permutations of the dense t2aa index
ijab_t1 = ((i*nocc+j)*nvir+a)*nvir+b;
ijab_t2 = ((i*nocc+j)*nvir+b)*nvir+a;
ijab_t3 = ((j*nocc+i)*nvir+a)*nvir+b;
ijab_t4 = ((j*nocc+i)*nvir+b)*nvir+a;
tmp = c2aa[ijab_c];
if(fabs(tmp) > numzero)
{
// subtract disconnected T1*T1 to obtain the connected amplitude
tmp -= t1xt1aa (i, j, a, b, nocc, nvir, t1);
// scatter with antisymmetric signs
t2aa[ijab_t1] = tmp;
t2aa[ijab_t2] = -tmp;
t2aa[ijab_t3] = -tmp;
t2aa[ijab_t4] = tmp;
}
}
}
}
}
// t2ab: dense (ia,jb) matrix, occupied index descending in both factors
ia = -1;
for (a = 0; a < nvir; a++) {
for (i = nocc-1; i > -1; i--) {
ia += 1;
jb =-1;
for (b = 0; b < nvir; b++) {
for (j = nocc-1; j > -1; j--) {
jb += 1;
iajb_c = ia * nocc*nvir + jb;
ijab_t = ((i*nocc+j)*nvir+a)*nvir+b;
tmp = c2ab[iajb_c];
if(fabs(tmp) > numzero)
{
tmp -= t1xt1ab (i, j, a, b, nocc, nvir, t1);
t2ab[ijab_t] = tmp;
}
}
}
}
}
}
/* Convert CI triple coefficients into CC t3 amplitudes.
 *
 * c3aaa is packed over a<b<c, i<j<k; c3aab over the aa pair (a<b, i<j)
 * times a dense (k,c) beta index.  For each above-threshold coefficient
 * the disconnected T1*T2 and T1^3 pieces are subtracted and the result is
 * scattered into all antisymmetry-related slots of the dense t3 arrays
 * (36 slots for aaa, 4 for aab) with the appropriate permutation signs.
 *
 * NOTE(review): entries below numzero are skipped, so t3aaa/t3aab are
 * assumed zero-initialized by the caller — confirm. */
void c3_to_t3(double *t3aaa, double *t3aab, double *c3aaa, double *c3aab, double *t1, double *t2aa, double *t2ab, int nocc, int nvir, double numzero)
{
int i, j, k, a, b, c;
size_t ijkabc_t11, ijkabc_t21, ijkabc_t31, ijkabc_t41, ijkabc_t51, ijkabc_t61;
size_t ijkabc_t12, ijkabc_t22, ijkabc_t32, ijkabc_t42, ijkabc_t52, ijkabc_t62;
size_t ijkabc_t13, ijkabc_t23, ijkabc_t33, ijkabc_t43, ijkabc_t53, ijkabc_t63;
size_t ijkabc_t14, ijkabc_t24, ijkabc_t34, ijkabc_t44, ijkabc_t54, ijkabc_t64;
size_t ijkabc_t15, ijkabc_t25, ijkabc_t35, ijkabc_t45, ijkabc_t55, ijkabc_t65;
size_t ijkabc_t16, ijkabc_t26, ijkabc_t36, ijkabc_t46, ijkabc_t56, ijkabc_t66;
size_t ijab, kc, ijabkc_c, ijkabc_c;
double tmp, tmp2;
// t3aaa
// size_t counter starts at -1 (wraps to SIZE_MAX); the first += 1 brings it to 0
ijkabc_c = -1;
for (c = 2; c < nvir; c++) {
for (b = 1; b < c; b++) {
for (a = 0; a < b; a++) {
for (k = nocc-1; k > 1; k--) {
for (j = k-1; j > 0; j--) {
for (i = j-1; i > -1; i--) {
ijkabc_c += 1;
// //lsh dbg
// printf("c3aaa, %d \n", ijkabc_c);
tmp = c3aaa[ijkabc_c];
// if(fabs(tmp)-fabs(tmp2) > numzero)
if(fabs(tmp) > numzero)
{
// subtract disconnected pieces to get the connected t3
tmp2 = t1xt2aaa (i, j, k, a, b, c, nocc, nvir, t1, t2aa);
tmp2+= t1xt1xt1aaa (i, j, k, a, b, c, nocc, nvir, t1);
tmp -= tmp2;
// scatter over all 6 occupied x 6 virtual permutations;
// even virtual permutations keep the occupied-permutation sign,
// odd ones flip it
ijkabc_t11 = T(i, j, k, a, b, c, nocc, nvir);
ijkabc_t12 = T(i, j, k, b, c, a, nocc, nvir);
ijkabc_t13 = T(i, j, k, c, a, b, nocc, nvir);
ijkabc_t14 = T(i, j, k, a, c, b, nocc, nvir);
ijkabc_t15 = T(i, j, k, b, a, c, nocc, nvir);
ijkabc_t16 = T(i, j, k, c, b, a, nocc, nvir);
t3aaa[ijkabc_t11] = tmp;
t3aaa[ijkabc_t12] = tmp;
t3aaa[ijkabc_t13] = tmp;
t3aaa[ijkabc_t14] = -tmp;
t3aaa[ijkabc_t15] = -tmp;
t3aaa[ijkabc_t16] = -tmp;
ijkabc_t21 = T(j, k, i, a, b, c, nocc, nvir);
ijkabc_t22 = T(j, k, i, b, c, a, nocc, nvir);
ijkabc_t23 = T(j, k, i, c, a, b, nocc, nvir);
ijkabc_t24 = T(j, k, i, a, c, b, nocc, nvir);
ijkabc_t25 = T(j, k, i, b, a, c, nocc, nvir);
ijkabc_t26 = T(j, k, i, c, b, a, nocc, nvir);
t3aaa[ijkabc_t21] = tmp;
t3aaa[ijkabc_t22] = tmp;
t3aaa[ijkabc_t23] = tmp;
t3aaa[ijkabc_t24] = -tmp;
t3aaa[ijkabc_t25] = -tmp;
t3aaa[ijkabc_t26] = -tmp;
ijkabc_t31 = T(k, i, j, a, b, c, nocc, nvir);
ijkabc_t32 = T(k, i, j, b, c, a, nocc, nvir);
ijkabc_t33 = T(k, i, j, c, a, b, nocc, nvir);
ijkabc_t34 = T(k, i, j, a, c, b, nocc, nvir);
ijkabc_t35 = T(k, i, j, b, a, c, nocc, nvir);
ijkabc_t36 = T(k, i, j, c, b, a, nocc, nvir);
t3aaa[ijkabc_t31] = tmp;
t3aaa[ijkabc_t32] = tmp;
t3aaa[ijkabc_t33] = tmp;
t3aaa[ijkabc_t34] = -tmp;
t3aaa[ijkabc_t35] = -tmp;
t3aaa[ijkabc_t36] = -tmp;
ijkabc_t41 = T(i, k, j, a, b, c, nocc, nvir);
ijkabc_t42 = T(i, k, j, b, c, a, nocc, nvir);
ijkabc_t43 = T(i, k, j, c, a, b, nocc, nvir);
ijkabc_t44 = T(i, k, j, a, c, b, nocc, nvir);
ijkabc_t45 = T(i, k, j, b, a, c, nocc, nvir);
ijkabc_t46 = T(i, k, j, c, b, a, nocc, nvir);
t3aaa[ijkabc_t41] = -tmp;
t3aaa[ijkabc_t42] = -tmp;
t3aaa[ijkabc_t43] = -tmp;
t3aaa[ijkabc_t44] = tmp;
t3aaa[ijkabc_t45] = tmp;
t3aaa[ijkabc_t46] = tmp;
ijkabc_t51 = T(j, i, k, a, b, c, nocc, nvir);
ijkabc_t52 = T(j, i, k, b, c, a, nocc, nvir);
ijkabc_t53 = T(j, i, k, c, a, b, nocc, nvir);
ijkabc_t54 = T(j, i, k, a, c, b, nocc, nvir);
ijkabc_t55 = T(j, i, k, b, a, c, nocc, nvir);
ijkabc_t56 = T(j, i, k, c, b, a, nocc, nvir);
t3aaa[ijkabc_t51] = -tmp;
t3aaa[ijkabc_t52] = -tmp;
t3aaa[ijkabc_t53] = -tmp;
t3aaa[ijkabc_t54] = tmp;
t3aaa[ijkabc_t55] = tmp;
t3aaa[ijkabc_t56] = tmp;
ijkabc_t61 = T(k, j, i, a, b, c, nocc, nvir);
ijkabc_t62 = T(k, j, i, b, c, a, nocc, nvir);
ijkabc_t63 = T(k, j, i, c, a, b, nocc, nvir);
ijkabc_t64 = T(k, j, i, a, c, b, nocc, nvir);
ijkabc_t65 = T(k, j, i, b, a, c, nocc, nvir);
ijkabc_t66 = T(k, j, i, c, b, a, nocc, nvir);
t3aaa[ijkabc_t61] = -tmp;
t3aaa[ijkabc_t62] = -tmp;
t3aaa[ijkabc_t63] = -tmp;
t3aaa[ijkabc_t64] = tmp;
t3aaa[ijkabc_t65] = tmp;
t3aaa[ijkabc_t66] = tmp;
}
}
}
}
}
}
}
// t3aab: packed aa pair (a<b, i<j) times dense (k,c) beta index;
// only the alpha pair is antisymmetrized (4 slots)
ijab = -1;
for (b = 1; b < nvir; b++) {
for (a = 0; a < b; a++) {
for (j = nocc-1; j > 0; j--) {
for (i = j-1; i > -1; i--) {
ijab += 1;
kc =-1;
for (c = 0; c < nvir; c++) {
for (k = nocc-1; k > -1; k--) {
kc += 1;
ijabkc_c = ijab * nocc*nvir + kc;
// //lsh dbg
// printf("c3aab, %d \n", ijabkc_c);
tmp = c3aab[ijabkc_c];
// if(fabs(tmp)-fabs(tmp2) > numzero)
if(fabs(tmp) > numzero)
{
tmp2 = t1xt2aab(i, j, k, a, b, c, nocc, nvir, t1, t2aa, t2ab);
tmp2+= t1xt1xt1aab(i, j, k, a, b, c, nocc, nvir, t1);
tmp -= tmp2;
ijkabc_t11 = T(i, j, k, a, b, c, nocc, nvir);
ijkabc_t12 = T(i, j, k, b, a, c, nocc, nvir);
t3aab[ijkabc_t11] = tmp;
t3aab[ijkabc_t12] = -tmp;
ijkabc_t21 = T(j, i, k, a, b, c, nocc, nvir);
ijkabc_t22 = T(j, i, k, b, a, c, nocc, nvir);
t3aab[ijkabc_t21] = -tmp;
t3aab[ijkabc_t22] = tmp;
}
}
}
}
}
}
}
}
/* Externally-corrected variant of c3_to_t3: identical coefficient
 * unpacking and permutation scatter, but triples whose indices all lie in
 * the "inactive" region (occupied <= nc_ref and virtual > nvir_ref) are
 * skipped.  Note the packed counters (ijkabc_c, ijabkc_c) are advanced
 * BEFORE the exclusion test, so the c3 layout stays in sync.
 *
 * NOTE(review): entries below numzero are skipped, so t3aaa/t3aab are
 * assumed zero-initialized by the caller — confirm. */
void c3_to_t3_ecT(double *t3aaa, double *t3aab, double *c3aaa, double *c3aab, double *t1, double *t2aa, double *t2ab, int nc_ref, int nvir_ref, int nocc, int nvir, double numzero)
{
int i, j, k, a, b, c;
size_t ijkabc_t11, ijkabc_t21, ijkabc_t31, ijkabc_t41, ijkabc_t51, ijkabc_t61;
size_t ijkabc_t12, ijkabc_t22, ijkabc_t32, ijkabc_t42, ijkabc_t52, ijkabc_t62;
size_t ijkabc_t13, ijkabc_t23, ijkabc_t33, ijkabc_t43, ijkabc_t53, ijkabc_t63;
size_t ijkabc_t14, ijkabc_t24, ijkabc_t34, ijkabc_t44, ijkabc_t54, ijkabc_t64;
size_t ijkabc_t15, ijkabc_t25, ijkabc_t35, ijkabc_t45, ijkabc_t55, ijkabc_t65;
size_t ijkabc_t16, ijkabc_t26, ijkabc_t36, ijkabc_t46, ijkabc_t56, ijkabc_t66;
size_t ijab, kc, ijabkc_c, ijkabc_c;
double tmp, tmp2;
// t3aaa
ijkabc_c = -1;
for (c = 2; c < nvir; c++) {
for (b = 1; b < c; b++) {
for (a = 0; a < b; a++) {
for (k = nocc-1; k > 1; k--) {
for (j = k-1; j > 0; j--) {
for (i = j-1; i > -1; i--) {
ijkabc_c += 1;
// exclude inactive space
if (i <= nc_ref && j <= nc_ref && k <= nc_ref && \
a > nvir_ref && b > nvir_ref && c > nvir_ref) continue;
// //lsh dbg
// printf("c3aaa, %d \n", ijkabc_c);
tmp = c3aaa[ijkabc_c];
// if(fabs(tmp)-fabs(tmp2) > numzero)
if(fabs(tmp) > numzero)
{
// subtract disconnected pieces to get the connected t3
tmp2 = t1xt2aaa (i, j, k, a, b, c, nocc, nvir, t1, t2aa);
tmp2+= t1xt1xt1aaa (i, j, k, a, b, c, nocc, nvir, t1);
tmp -= tmp2;
// scatter over all 36 permutation slots with permutation signs
ijkabc_t11 = T(i, j, k, a, b, c, nocc, nvir);
ijkabc_t12 = T(i, j, k, b, c, a, nocc, nvir);
ijkabc_t13 = T(i, j, k, c, a, b, nocc, nvir);
ijkabc_t14 = T(i, j, k, a, c, b, nocc, nvir);
ijkabc_t15 = T(i, j, k, b, a, c, nocc, nvir);
ijkabc_t16 = T(i, j, k, c, b, a, nocc, nvir);
t3aaa[ijkabc_t11] = tmp;
t3aaa[ijkabc_t12] = tmp;
t3aaa[ijkabc_t13] = tmp;
t3aaa[ijkabc_t14] = -tmp;
t3aaa[ijkabc_t15] = -tmp;
t3aaa[ijkabc_t16] = -tmp;
ijkabc_t21 = T(j, k, i, a, b, c, nocc, nvir);
ijkabc_t22 = T(j, k, i, b, c, a, nocc, nvir);
ijkabc_t23 = T(j, k, i, c, a, b, nocc, nvir);
ijkabc_t24 = T(j, k, i, a, c, b, nocc, nvir);
ijkabc_t25 = T(j, k, i, b, a, c, nocc, nvir);
ijkabc_t26 = T(j, k, i, c, b, a, nocc, nvir);
t3aaa[ijkabc_t21] = tmp;
t3aaa[ijkabc_t22] = tmp;
t3aaa[ijkabc_t23] = tmp;
t3aaa[ijkabc_t24] = -tmp;
t3aaa[ijkabc_t25] = -tmp;
t3aaa[ijkabc_t26] = -tmp;
ijkabc_t31 = T(k, i, j, a, b, c, nocc, nvir);
ijkabc_t32 = T(k, i, j, b, c, a, nocc, nvir);
ijkabc_t33 = T(k, i, j, c, a, b, nocc, nvir);
ijkabc_t34 = T(k, i, j, a, c, b, nocc, nvir);
ijkabc_t35 = T(k, i, j, b, a, c, nocc, nvir);
ijkabc_t36 = T(k, i, j, c, b, a, nocc, nvir);
t3aaa[ijkabc_t31] = tmp;
t3aaa[ijkabc_t32] = tmp;
t3aaa[ijkabc_t33] = tmp;
t3aaa[ijkabc_t34] = -tmp;
t3aaa[ijkabc_t35] = -tmp;
t3aaa[ijkabc_t36] = -tmp;
ijkabc_t41 = T(i, k, j, a, b, c, nocc, nvir);
ijkabc_t42 = T(i, k, j, b, c, a, nocc, nvir);
ijkabc_t43 = T(i, k, j, c, a, b, nocc, nvir);
ijkabc_t44 = T(i, k, j, a, c, b, nocc, nvir);
ijkabc_t45 = T(i, k, j, b, a, c, nocc, nvir);
ijkabc_t46 = T(i, k, j, c, b, a, nocc, nvir);
t3aaa[ijkabc_t41] = -tmp;
t3aaa[ijkabc_t42] = -tmp;
t3aaa[ijkabc_t43] = -tmp;
t3aaa[ijkabc_t44] = tmp;
t3aaa[ijkabc_t45] = tmp;
t3aaa[ijkabc_t46] = tmp;
ijkabc_t51 = T(j, i, k, a, b, c, nocc, nvir);
ijkabc_t52 = T(j, i, k, b, c, a, nocc, nvir);
ijkabc_t53 = T(j, i, k, c, a, b, nocc, nvir);
ijkabc_t54 = T(j, i, k, a, c, b, nocc, nvir);
ijkabc_t55 = T(j, i, k, b, a, c, nocc, nvir);
ijkabc_t56 = T(j, i, k, c, b, a, nocc, nvir);
t3aaa[ijkabc_t51] = -tmp;
t3aaa[ijkabc_t52] = -tmp;
t3aaa[ijkabc_t53] = -tmp;
t3aaa[ijkabc_t54] = tmp;
t3aaa[ijkabc_t55] = tmp;
t3aaa[ijkabc_t56] = tmp;
ijkabc_t61 = T(k, j, i, a, b, c, nocc, nvir);
ijkabc_t62 = T(k, j, i, b, c, a, nocc, nvir);
ijkabc_t63 = T(k, j, i, c, a, b, nocc, nvir);
ijkabc_t64 = T(k, j, i, a, c, b, nocc, nvir);
ijkabc_t65 = T(k, j, i, b, a, c, nocc, nvir);
ijkabc_t66 = T(k, j, i, c, b, a, nocc, nvir);
t3aaa[ijkabc_t61] = -tmp;
t3aaa[ijkabc_t62] = -tmp;
t3aaa[ijkabc_t63] = -tmp;
t3aaa[ijkabc_t64] = tmp;
t3aaa[ijkabc_t65] = tmp;
t3aaa[ijkabc_t66] = tmp;
}
}
}
}
}
}
}
// t3aab: packed aa pair times dense (k,c) beta index
ijab = -1;
for (b = 1; b < nvir; b++) {
for (a = 0; a < b; a++) {
for (j = nocc-1; j > 0; j--) {
for (i = j-1; i > -1; i--) {
ijab += 1;
kc =-1;
for (c = 0; c < nvir; c++) {
for (k = nocc-1; k > -1; k--) {
kc += 1;
ijabkc_c = ijab * nocc*nvir + kc;
// exclude inactive space
if (i <= nc_ref && j <= nc_ref && k <= nc_ref && \
a > nvir_ref && b > nvir_ref && c > nvir_ref) continue;
// //lsh dbg
// printf("c3aab, %d \n", ijabkc_c);
tmp = c3aab[ijabkc_c];
// if(fabs(tmp)-fabs(tmp2) > numzero)
if(fabs(tmp) > numzero)
{
tmp2 = t1xt2aab(i, j, k, a, b, c, nocc, nvir, t1, t2aa, t2ab);
tmp2+= t1xt1xt1aab(i, j, k, a, b, c, nocc, nvir, t1);
tmp -= tmp2;
ijkabc_t11 = T(i, j, k, a, b, c, nocc, nvir);
ijkabc_t12 = T(i, j, k, b, a, c, nocc, nvir);
t3aab[ijkabc_t11] = tmp;
t3aab[ijkabc_t12] = -tmp;
ijkabc_t21 = T(j, i, k, a, b, c, nocc, nvir);
ijkabc_t22 = T(j, i, k, b, a, c, nocc, nvir);
t3aab[ijkabc_t21] = -tmp;
t3aab[ijkabc_t22] = tmp;
}
}
}
}
}
}
}
}
/* Convert CI quadruple coefficients into CC t4 amplitudes.
 *
 * c4aaab is packed as (triple aaa block) x dense (l,d) beta index; c4aabb
 * as (packed aa pair) x (packed bb pair).  For each above-threshold
 * coefficient the disconnected T1*T3, T2*T2, T1^2*T2 and T1^4 pieces are
 * subtracted and the connected amplitude is scattered into the
 * permutation-related slots of the dense t4 arrays with permutation signs.
 *
 * NOTE(review): the Q(...) index variables here are `int` (unlike the
 * size_t used in c3_to_t3); for large nocc/nvir the products inside Q may
 * overflow int — confirm intended system sizes.
 * NOTE(review): entries below numzero are skipped, so t4aaab/t4aabb are
 * assumed zero-initialized by the caller — confirm. */
void c4_to_t4(double *t4aaab, double *t4aabb, double *c4aaab, double *c4aabb, double *t1, double *t2aa, double *t2ab, double *t3aaa, double *t3aab, int nocc, int nvir, double numzero)
{
int i, j, k, l, a, b, c, d, m_ijab;
int ijkabc, ld, ijkabcld_c;
int ijklabcd_t11, ijklabcd_t21, ijklabcd_t31, ijklabcd_t41, ijklabcd_t51, ijklabcd_t61;
int ijklabcd_t12, ijklabcd_t22, ijklabcd_t32, ijklabcd_t42, ijklabcd_t52, ijklabcd_t62;
int ijklabcd_t13, ijklabcd_t23, ijklabcd_t33, ijklabcd_t43, ijklabcd_t53, ijklabcd_t63;
int ijklabcd_t14, ijklabcd_t24, ijklabcd_t34, ijklabcd_t44, ijklabcd_t54, ijklabcd_t64;
int ijklabcd_t15, ijklabcd_t25, ijklabcd_t35, ijklabcd_t45, ijklabcd_t55, ijklabcd_t65;
int ijklabcd_t16, ijklabcd_t26, ijklabcd_t36, ijklabcd_t46, ijklabcd_t56, ijklabcd_t66;
int ijab, klcd, ijabklcd_c;
double tmp, tmp2;
// t4aaab
ijkabc = -1;
for (c = 2; c < nvir; c++) {
for (b = 1; b < c; b++) {
for (a = 0; a < b; a++) {
for (k = nocc-1; k > 1; k--) {
for (j = k-1; j > 0; j--) {
for (i = j-1; i > -1; i--) {
ijkabc += 1;
ld = -1;
for (d = 0; d < nvir; d++) {
for (l = nocc-1; l > -1; l--) {
ld += 1;
ijkabcld_c = ijkabc * nocc*nvir + ld;
tmp = c4aaab[ijkabcld_c];
// if(fabs(tmp)-fabs(tmp2) > numzero)
if(fabs(tmp) > numzero)
{
// subtract all disconnected contributions
tmp2 = t1xt3aaab (i, j, k, l, a, b, c, d, nocc, nvir, t1, t3aaa, t3aab); // may have 1e-5 bug
tmp2+= t2xt2aaab (i, j, k, l, a, b, c, d, nocc, nvir, t2aa, t2ab); // may have 1e-3 bug
tmp2+= t1xt1xt2aaab (i, j, k, l, a, b, c, d, nocc, nvir, t1, t2aa, t2ab); // may have 1e-5 bug
tmp2+= t1xt1xt1xt1aaab (i, j, k, l, a, b, c, d, nocc, nvir, t1); // may have 1e-6 bug
tmp -= tmp2;
// scatter over the 36 alpha permutations; (l,d) is fixed
ijklabcd_t11 = Q(i, j, k, l, a, b, c, d, nocc, nvir);
ijklabcd_t12 = Q(i, j, k, l, b, c, a, d, nocc, nvir);
ijklabcd_t13 = Q(i, j, k, l, c, a, b, d, nocc, nvir);
ijklabcd_t14 = Q(i, j, k, l, a, c, b, d, nocc, nvir);
ijklabcd_t15 = Q(i, j, k, l, b, a, c, d, nocc, nvir);
ijklabcd_t16 = Q(i, j, k, l, c, b, a, d, nocc, nvir);
t4aaab[ijklabcd_t11] = tmp;
t4aaab[ijklabcd_t12] = tmp;
t4aaab[ijklabcd_t13] = tmp;
t4aaab[ijklabcd_t14] = -tmp;
t4aaab[ijklabcd_t15] = -tmp;
t4aaab[ijklabcd_t16] = -tmp;
ijklabcd_t21 = Q(j, k, i, l, a, b, c, d, nocc, nvir);
ijklabcd_t22 = Q(j, k, i, l, b, c, a, d, nocc, nvir);
ijklabcd_t23 = Q(j, k, i, l, c, a, b, d, nocc, nvir);
ijklabcd_t24 = Q(j, k, i, l, a, c, b, d, nocc, nvir);
ijklabcd_t25 = Q(j, k, i, l, b, a, c, d, nocc, nvir);
ijklabcd_t26 = Q(j, k, i, l, c, b, a, d, nocc, nvir);
t4aaab[ijklabcd_t21] = tmp;
t4aaab[ijklabcd_t22] = tmp;
t4aaab[ijklabcd_t23] = tmp;
t4aaab[ijklabcd_t24] = -tmp;
t4aaab[ijklabcd_t25] = -tmp;
t4aaab[ijklabcd_t26] = -tmp;
ijklabcd_t31 = Q(k, i, j, l, a, b, c, d, nocc, nvir);
ijklabcd_t32 = Q(k, i, j, l, b, c, a, d, nocc, nvir);
ijklabcd_t33 = Q(k, i, j, l, c, a, b, d, nocc, nvir);
ijklabcd_t34 = Q(k, i, j, l, a, c, b, d, nocc, nvir);
ijklabcd_t35 = Q(k, i, j, l, b, a, c, d, nocc, nvir);
ijklabcd_t36 = Q(k, i, j, l, c, b, a, d, nocc, nvir);
t4aaab[ijklabcd_t31] = tmp;
t4aaab[ijklabcd_t32] = tmp;
t4aaab[ijklabcd_t33] = tmp;
t4aaab[ijklabcd_t34] = -tmp;
t4aaab[ijklabcd_t35] = -tmp;
t4aaab[ijklabcd_t36] = -tmp;
ijklabcd_t41 = Q(i, k, j, l, a, b, c, d, nocc, nvir);
ijklabcd_t42 = Q(i, k, j, l, b, c, a, d, nocc, nvir);
ijklabcd_t43 = Q(i, k, j, l, c, a, b, d, nocc, nvir);
ijklabcd_t44 = Q(i, k, j, l, a, c, b, d, nocc, nvir);
ijklabcd_t45 = Q(i, k, j, l, b, a, c, d, nocc, nvir);
ijklabcd_t46 = Q(i, k, j, l, c, b, a, d, nocc, nvir);
t4aaab[ijklabcd_t41] = -tmp;
t4aaab[ijklabcd_t42] = -tmp;
t4aaab[ijklabcd_t43] = -tmp;
t4aaab[ijklabcd_t44] = tmp;
t4aaab[ijklabcd_t45] = tmp;
t4aaab[ijklabcd_t46] = tmp;
ijklabcd_t51 = Q(j, i, k, l, a, b, c, d, nocc, nvir);
ijklabcd_t52 = Q(j, i, k, l, b, c, a, d, nocc, nvir);
ijklabcd_t53 = Q(j, i, k, l, c, a, b, d, nocc, nvir);
ijklabcd_t54 = Q(j, i, k, l, a, c, b, d, nocc, nvir);
ijklabcd_t55 = Q(j, i, k, l, b, a, c, d, nocc, nvir);
ijklabcd_t56 = Q(j, i, k, l, c, b, a, d, nocc, nvir);
t4aaab[ijklabcd_t51] = -tmp;
t4aaab[ijklabcd_t52] = -tmp;
t4aaab[ijklabcd_t53] = -tmp;
t4aaab[ijklabcd_t54] = tmp;
t4aaab[ijklabcd_t55] = tmp;
t4aaab[ijklabcd_t56] = tmp;
ijklabcd_t61 = Q(k, j, i, l, a, b, c, d, nocc, nvir);
ijklabcd_t62 = Q(k, j, i, l, b, c, a, d, nocc, nvir);
ijklabcd_t63 = Q(k, j, i, l, c, a, b, d, nocc, nvir);
ijklabcd_t64 = Q(k, j, i, l, a, c, b, d, nocc, nvir);
ijklabcd_t65 = Q(k, j, i, l, b, a, c, d, nocc, nvir);
ijklabcd_t66 = Q(k, j, i, l, c, b, a, d, nocc, nvir);
t4aaab[ijklabcd_t61] = -tmp;
t4aaab[ijklabcd_t62] = -tmp;
t4aaab[ijklabcd_t63] = -tmp;
t4aaab[ijklabcd_t64] = tmp;
t4aaab[ijklabcd_t65] = tmp;
t4aaab[ijklabcd_t66] = tmp;
}
}
}
}
}
}
}
}
}
// TODO lsh: reduce symmetry of t4, t3
//numzero = 1e-3;
// t4aabb
// number of packed (i<j, a<b) aa-pair combinations
m_ijab = nocc*(nocc-1)/2 * nvir*(nvir-1)/2;
ijab = -1;
for (b = 1; b < nvir; b++) {
for (a = 0; a < b; a++) {
for (j = nocc-1; j > 0; j--) {
for (i = j-1; i > -1; i--) {
ijab += 1;
klcd =-1;
for (d = 1; d < nvir; d++) {
for (c = 0; c < d; c++) {
for (l = nocc-1; l > 0; l--) {
for (k = l-1; k > -1; k--) {
klcd += 1;
ijabklcd_c = ijab * m_ijab + klcd;
tmp = c4aabb[ijabklcd_c];
// if(fabs(tmp)-fabs(tmp2) > numzero)
if(fabs(tmp) > numzero)
{
tmp2 = 0.0;
tmp2 = t1xt3aabb(i, j, k, l, a, b, c, d, nocc, nvir, t1, t3aab);
tmp2+= t2xt2aabb(i, j, k, l, a, b, c, d, nocc, nvir, t2aa, t2ab);
tmp2+= t1xt1xt2aabb(i, j, k, l, a, b, c, d, nocc, nvir, t1, t2aa, t2ab);
tmp2+= t1xt1xt1xt1aabb(i, j, k, l, a, b, c, d, nocc, nvir, t1); // may have bug
tmp -= tmp2;
//printf("t4 slow %d %d %d %d %d %d %d %d %20.10f \n",i,j,k,l,a,b,c,d,tmp);
// scatter over alpha-pair and beta-pair swaps (16 slots)
ijklabcd_t11 = Q(i, j, k, l, a, b, c, d, nocc, nvir);
ijklabcd_t12 = Q(j, i, k, l, b, a, c, d, nocc, nvir);
ijklabcd_t13 = Q(i, j, k, l, b, a, c, d, nocc, nvir);
ijklabcd_t14 = Q(j, i, k, l, a, b, c, d, nocc, nvir);
t4aabb[ijklabcd_t11] = tmp;
t4aabb[ijklabcd_t12] = tmp;
t4aabb[ijklabcd_t13] = -tmp;
t4aabb[ijklabcd_t14] = -tmp;
ijklabcd_t21 = Q(i, j, l, k, a, b, d, c, nocc, nvir);
ijklabcd_t22 = Q(j, i, l, k, b, a, d, c, nocc, nvir);
ijklabcd_t23 = Q(i, j, l, k, b, a, d, c, nocc, nvir);
ijklabcd_t24 = Q(j, i, l, k, a, b, d, c, nocc, nvir);
t4aabb[ijklabcd_t21] = tmp;
t4aabb[ijklabcd_t22] = tmp;
t4aabb[ijklabcd_t23] = -tmp;
t4aabb[ijklabcd_t24] = -tmp;
ijklabcd_t31 = Q(i, j, k, l, a, b, d, c, nocc, nvir);
ijklabcd_t32 = Q(j, i, k, l, b, a, d, c, nocc, nvir);
ijklabcd_t33 = Q(i, j, k, l, b, a, d, c, nocc, nvir);
ijklabcd_t34 = Q(j, i, k, l, a, b, d, c, nocc, nvir);
t4aabb[ijklabcd_t31] = -tmp;
t4aabb[ijklabcd_t32] = -tmp;
t4aabb[ijklabcd_t33] = tmp;
t4aabb[ijklabcd_t34] = tmp;
ijklabcd_t41 = Q(i, j, l, k, a, b, c, d, nocc, nvir);
ijklabcd_t42 = Q(j, i, l, k, b, a, c, d, nocc, nvir);
ijklabcd_t43 = Q(i, j, l, k, b, a, c, d, nocc, nvir);
ijklabcd_t44 = Q(j, i, l, k, a, b, c, d, nocc, nvir);
t4aabb[ijklabcd_t41] = -tmp;
t4aabb[ijklabcd_t42] = -tmp;
t4aabb[ijklabcd_t43] = tmp;
t4aabb[ijklabcd_t44] = tmp;
}
}
}
}
}
}
}
}
}
}
/* Threshold variant of c3_to_t3: unlike c3_to_t3 (which screens on
 * |c3| alone), this version always computes the disconnected pieces tmp2
 * and keeps an element when |c3| - |disconnected| > -numzero, i.e. when
 * the connected part is not dominated by the disconnected part.
 * The permutation scatter is otherwise identical to c3_to_t3.
 *
 * NOTE(review): skipped entries are never written, so t3aaa/t3aab are
 * assumed zero-initialized by the caller — confirm. */
void c3_to_t3_thresh(double *t3aaa, double *t3aab, double *c3aaa, double *c3aab, double *t1, double *t2aa, double *t2ab, int nocc, int nvir, double numzero)
{
int i, j, k, a, b, c;
size_t ijkabc_t11, ijkabc_t21, ijkabc_t31, ijkabc_t41, ijkabc_t51, ijkabc_t61;
size_t ijkabc_t12, ijkabc_t22, ijkabc_t32, ijkabc_t42, ijkabc_t52, ijkabc_t62;
size_t ijkabc_t13, ijkabc_t23, ijkabc_t33, ijkabc_t43, ijkabc_t53, ijkabc_t63;
size_t ijkabc_t14, ijkabc_t24, ijkabc_t34, ijkabc_t44, ijkabc_t54, ijkabc_t64;
size_t ijkabc_t15, ijkabc_t25, ijkabc_t35, ijkabc_t45, ijkabc_t55, ijkabc_t65;
size_t ijkabc_t16, ijkabc_t26, ijkabc_t36, ijkabc_t46, ijkabc_t56, ijkabc_t66;
size_t ijab, kc, ijabkc_c, ijkabc_c;
double tmp, tmp2;
// t3aaa
ijkabc_c = -1;
for (c = 2; c < nvir; c++) {
for (b = 1; b < c; b++) {
for (a = 0; a < b; a++) {
for (k = nocc-1; k > 1; k--) {
for (j = k-1; j > 0; j--) {
for (i = j-1; i > -1; i--) {
ijkabc_c += 1;
// //lsh dbg
// printf("c3aaa, %d \n", ijkabc_c);
tmp = c3aaa[ijkabc_c];
// disconnected pieces are always evaluated for the screening test
tmp2 = t1xt2aaa (i, j, k, a, b, c, nocc, nvir, t1, t2aa);
tmp2+= t1xt1xt1aaa (i, j, k, a, b, c, nocc, nvir, t1);
//if(fabs((tmp-tmp2)/tmp) < numzero)
if(fabs(tmp)-fabs(tmp2) > -numzero)
{
tmp -= tmp2;
ijkabc_t11 = T(i, j, k, a, b, c, nocc, nvir);
ijkabc_t12 = T(i, j, k, b, c, a, nocc, nvir);
ijkabc_t13 = T(i, j, k, c, a, b, nocc, nvir);
ijkabc_t14 = T(i, j, k, a, c, b, nocc, nvir);
ijkabc_t15 = T(i, j, k, b, a, c, nocc, nvir);
ijkabc_t16 = T(i, j, k, c, b, a, nocc, nvir);
t3aaa[ijkabc_t11] = tmp;
t3aaa[ijkabc_t12] = tmp;
t3aaa[ijkabc_t13] = tmp;
t3aaa[ijkabc_t14] = -tmp;
t3aaa[ijkabc_t15] = -tmp;
t3aaa[ijkabc_t16] = -tmp;
ijkabc_t21 = T(j, k, i, a, b, c, nocc, nvir);
ijkabc_t22 = T(j, k, i, b, c, a, nocc, nvir);
ijkabc_t23 = T(j, k, i, c, a, b, nocc, nvir);
ijkabc_t24 = T(j, k, i, a, c, b, nocc, nvir);
ijkabc_t25 = T(j, k, i, b, a, c, nocc, nvir);
ijkabc_t26 = T(j, k, i, c, b, a, nocc, nvir);
t3aaa[ijkabc_t21] = tmp;
t3aaa[ijkabc_t22] = tmp;
t3aaa[ijkabc_t23] = tmp;
t3aaa[ijkabc_t24] = -tmp;
t3aaa[ijkabc_t25] = -tmp;
t3aaa[ijkabc_t26] = -tmp;
ijkabc_t31 = T(k, i, j, a, b, c, nocc, nvir);
ijkabc_t32 = T(k, i, j, b, c, a, nocc, nvir);
ijkabc_t33 = T(k, i, j, c, a, b, nocc, nvir);
ijkabc_t34 = T(k, i, j, a, c, b, nocc, nvir);
ijkabc_t35 = T(k, i, j, b, a, c, nocc, nvir);
ijkabc_t36 = T(k, i, j, c, b, a, nocc, nvir);
t3aaa[ijkabc_t31] = tmp;
t3aaa[ijkabc_t32] = tmp;
t3aaa[ijkabc_t33] = tmp;
t3aaa[ijkabc_t34] = -tmp;
t3aaa[ijkabc_t35] = -tmp;
t3aaa[ijkabc_t36] = -tmp;
ijkabc_t41 = T(i, k, j, a, b, c, nocc, nvir);
ijkabc_t42 = T(i, k, j, b, c, a, nocc, nvir);
ijkabc_t43 = T(i, k, j, c, a, b, nocc, nvir);
ijkabc_t44 = T(i, k, j, a, c, b, nocc, nvir);
ijkabc_t45 = T(i, k, j, b, a, c, nocc, nvir);
ijkabc_t46 = T(i, k, j, c, b, a, nocc, nvir);
t3aaa[ijkabc_t41] = -tmp;
t3aaa[ijkabc_t42] = -tmp;
t3aaa[ijkabc_t43] = -tmp;
t3aaa[ijkabc_t44] = tmp;
t3aaa[ijkabc_t45] = tmp;
t3aaa[ijkabc_t46] = tmp;
ijkabc_t51 = T(j, i, k, a, b, c, nocc, nvir);
ijkabc_t52 = T(j, i, k, b, c, a, nocc, nvir);
ijkabc_t53 = T(j, i, k, c, a, b, nocc, nvir);
ijkabc_t54 = T(j, i, k, a, c, b, nocc, nvir);
ijkabc_t55 = T(j, i, k, b, a, c, nocc, nvir);
ijkabc_t56 = T(j, i, k, c, b, a, nocc, nvir);
t3aaa[ijkabc_t51] = -tmp;
t3aaa[ijkabc_t52] = -tmp;
t3aaa[ijkabc_t53] = -tmp;
t3aaa[ijkabc_t54] = tmp;
t3aaa[ijkabc_t55] = tmp;
t3aaa[ijkabc_t56] = tmp;
ijkabc_t61 = T(k, j, i, a, b, c, nocc, nvir);
ijkabc_t62 = T(k, j, i, b, c, a, nocc, nvir);
ijkabc_t63 = T(k, j, i, c, a, b, nocc, nvir);
ijkabc_t64 = T(k, j, i, a, c, b, nocc, nvir);
ijkabc_t65 = T(k, j, i, b, a, c, nocc, nvir);
ijkabc_t66 = T(k, j, i, c, b, a, nocc, nvir);
t3aaa[ijkabc_t61] = -tmp;
t3aaa[ijkabc_t62] = -tmp;
t3aaa[ijkabc_t63] = -tmp;
t3aaa[ijkabc_t64] = tmp;
t3aaa[ijkabc_t65] = tmp;
t3aaa[ijkabc_t66] = tmp;
}
}
}
}
}
}
}
// t3aab: packed aa pair times dense (k,c) beta index
ijab = -1;
for (b = 1; b < nvir; b++) {
for (a = 0; a < b; a++) {
for (j = nocc-1; j > 0; j--) {
for (i = j-1; i > -1; i--) {
ijab += 1;
kc =-1;
for (c = 0; c < nvir; c++) {
for (k = nocc-1; k > -1; k--) {
kc += 1;
ijabkc_c = ijab * nocc*nvir + kc;
// //lsh dbg
// printf("c3aab, %d \n", ijabkc_c);
tmp = c3aab[ijabkc_c];
tmp2 = t1xt2aab(i, j, k, a, b, c, nocc, nvir, t1, t2aa, t2ab);
tmp2+= t1xt1xt1aab(i, j, k, a, b, c, nocc, nvir, t1);
//if(fabs((tmp-tmp2)/tmp) < numzero)
//if(fabs(fabs(tmp)-fabs(tmp2)) < numzero)
if(fabs(tmp)-fabs(tmp2) > -numzero)
{
tmp -= tmp2;
ijkabc_t11 = T(i, j, k, a, b, c, nocc, nvir);
ijkabc_t12 = T(i, j, k, b, a, c, nocc, nvir);
t3aab[ijkabc_t11] = tmp;
t3aab[ijkabc_t12] = -tmp;
ijkabc_t21 = T(j, i, k, a, b, c, nocc, nvir);
ijkabc_t22 = T(j, i, k, b, a, c, nocc, nvir);
t3aab[ijkabc_t21] = -tmp;
t3aab[ijkabc_t22] = tmp;
}
}
}
}
}
}
}
}
void c4_to_t4_thresh(double *t4aaab, double *t4aabb, double *c4aaab, double *c4aabb, double *t1, double *t2aa, double *t2ab, double *t3aaa, double *t3aab, int nocc, int nvir, double numzero)
{
int i, j, k, l, a, b, c, d, m_ijab;
int ijkabc, ld, ijkabcld_c;
int ijklabcd_t11, ijklabcd_t21, ijklabcd_t31, ijklabcd_t41, ijklabcd_t51, ijklabcd_t61;
int ijklabcd_t12, ijklabcd_t22, ijklabcd_t32, ijklabcd_t42, ijklabcd_t52, ijklabcd_t62;
int ijklabcd_t13, ijklabcd_t23, ijklabcd_t33, ijklabcd_t43, ijklabcd_t53, ijklabcd_t63;
int ijklabcd_t14, ijklabcd_t24, ijklabcd_t34, ijklabcd_t44, ijklabcd_t54, ijklabcd_t64;
int ijklabcd_t15, ijklabcd_t25, ijklabcd_t35, ijklabcd_t45, ijklabcd_t55, ijklabcd_t65;
int ijklabcd_t16, ijklabcd_t26, ijklabcd_t36, ijklabcd_t46, ijklabcd_t56, ijklabcd_t66;
int ijab, klcd, ijabklcd_c;
double tmp, tmp2;
// t4aaab
ijkabc = -1;
for (c = 2; c < nvir; c++) {
for (b = 1; b < c; b++) {
for (a = 0; a < b; a++) {
for (k = nocc-1; k > 1; k--) {
for (j = k-1; j > 0; j--) {
for (i = j-1; i > -1; i--) {
ijkabc += 1;
ld = -1;
for (d = 0; d < nvir; d++) {
for (l = nocc-1; l > -1; l--) {
ld += 1;
ijkabcld_c = ijkabc * nocc*nvir + ld;
tmp = c4aaab[ijkabcld_c];
tmp2 = t1xt3aaab (i, j, k, l, a, b, c, d, nocc, nvir, t1, t3aaa, t3aab); // may have 1e-5 bug
tmp2+= t2xt2aaab (i, j, k, l, a, b, c, d, nocc, nvir, t2aa, t2ab); // may have 1e-3 bug
tmp2+= t1xt1xt2aaab (i, j, k, l, a, b, c, d, nocc, nvir, t1, t2aa, t2ab); // may have 1e-5 bug
tmp2+= t1xt1xt1xt1aaab (i, j, k, l, a, b, c, d, nocc, nvir, t1); // may have 1e-6 bug
//if(fabs((tmp-tmp2)/tmp) < numzero)
//if(fabs(fabs(tmp)-fabs(tmp2)) < numzero)
if(fabs(tmp)-fabs(tmp2) > -numzero)
{
tmp -= tmp2;
ijklabcd_t11 = Q(i, j, k, l, a, b, c, d, nocc, nvir);
ijklabcd_t12 = Q(i, j, k, l, b, c, a, d, nocc, nvir);
ijklabcd_t13 = Q(i, j, k, l, c, a, b, d, nocc, nvir);
ijklabcd_t14 = Q(i, j, k, l, a, c, b, d, nocc, nvir);
ijklabcd_t15 = Q(i, j, k, l, b, a, c, d, nocc, nvir);
ijklabcd_t16 = Q(i, j, k, l, c, b, a, d, nocc, nvir);
t4aaab[ijklabcd_t11] = tmp;
t4aaab[ijklabcd_t12] = tmp;
t4aaab[ijklabcd_t13] = tmp;
t4aaab[ijklabcd_t14] = -tmp;
t4aaab[ijklabcd_t15] = -tmp;
t4aaab[ijklabcd_t16] = -tmp;
ijklabcd_t21 = Q(j, k, i, l, a, b, c, d, nocc, nvir);
ijklabcd_t22 = Q(j, k, i, l, b, c, a, d, nocc, nvir);
ijklabcd_t23 = Q(j, k, i, l, c, a, b, d, nocc, nvir);
ijklabcd_t24 = Q(j, k, i, l, a, c, b, d, nocc, nvir);
ijklabcd_t25 = Q(j, k, i, l, b, a, c, d, nocc, nvir);
ijklabcd_t26 = Q(j, k, i, l, c, b, a, d, nocc, nvir);
t4aaab[ijklabcd_t21] = tmp;
t4aaab[ijklabcd_t22] = tmp;
t4aaab[ijklabcd_t23] = tmp;
t4aaab[ijklabcd_t24] = -tmp;
t4aaab[ijklabcd_t25] = -tmp;
t4aaab[ijklabcd_t26] = -tmp;
ijklabcd_t31 = Q(k, i, j, l, a, b, c, d, nocc, nvir);
ijklabcd_t32 = Q(k, i, j, l, b, c, a, d, nocc, nvir);
ijklabcd_t33 = Q(k, i, j, l, c, a, b, d, nocc, nvir);
ijklabcd_t34 = Q(k, i, j, l, a, c, b, d, nocc, nvir);
ijklabcd_t35 = Q(k, i, j, l, b, a, c, d, nocc, nvir);
ijklabcd_t36 = Q(k, i, j, l, c, b, a, d, nocc, nvir);
t4aaab[ijklabcd_t31] = tmp;
t4aaab[ijklabcd_t32] = tmp;
t4aaab[ijklabcd_t33] = tmp;
t4aaab[ijklabcd_t34] = -tmp;
t4aaab[ijklabcd_t35] = -tmp;
t4aaab[ijklabcd_t36] = -tmp;
ijklabcd_t41 = Q(i, k, j, l, a, b, c, d, nocc, nvir);
ijklabcd_t42 = Q(i, k, j, l, b, c, a, d, nocc, nvir);
ijklabcd_t43 = Q(i, k, j, l, c, a, b, d, nocc, nvir);
ijklabcd_t44 = Q(i, k, j, l, a, c, b, d, nocc, nvir);
ijklabcd_t45 = Q(i, k, j, l, b, a, c, d, nocc, nvir);
ijklabcd_t46 = Q(i, k, j, l, c, b, a, d, nocc, nvir);
t4aaab[ijklabcd_t41] = -tmp;
t4aaab[ijklabcd_t42] = -tmp;
t4aaab[ijklabcd_t43] = -tmp;
t4aaab[ijklabcd_t44] = tmp;
t4aaab[ijklabcd_t45] = tmp;
t4aaab[ijklabcd_t46] = tmp;
ijklabcd_t51 = Q(j, i, k, l, a, b, c, d, nocc, nvir);
ijklabcd_t52 = Q(j, i, k, l, b, c, a, d, nocc, nvir);
ijklabcd_t53 = Q(j, i, k, l, c, a, b, d, nocc, nvir);
ijklabcd_t54 = Q(j, i, k, l, a, c, b, d, nocc, nvir);
ijklabcd_t55 = Q(j, i, k, l, b, a, c, d, nocc, nvir);
ijklabcd_t56 = Q(j, i, k, l, c, b, a, d, nocc, nvir);
t4aaab[ijklabcd_t51] = -tmp;
t4aaab[ijklabcd_t52] = -tmp;
t4aaab[ijklabcd_t53] = -tmp;
t4aaab[ijklabcd_t54] = tmp;
t4aaab[ijklabcd_t55] = tmp;
t4aaab[ijklabcd_t56] = tmp;
ijklabcd_t61 = Q(k, j, i, l, a, b, c, d, nocc, nvir);
ijklabcd_t62 = Q(k, j, i, l, b, c, a, d, nocc, nvir);
ijklabcd_t63 = Q(k, j, i, l, c, a, b, d, nocc, nvir);
ijklabcd_t64 = Q(k, j, i, l, a, c, b, d, nocc, nvir);
ijklabcd_t65 = Q(k, j, i, l, b, a, c, d, nocc, nvir);
ijklabcd_t66 = Q(k, j, i, l, c, b, a, d, nocc, nvir);
t4aaab[ijklabcd_t61] = -tmp;
t4aaab[ijklabcd_t62] = -tmp;
t4aaab[ijklabcd_t63] = -tmp;
t4aaab[ijklabcd_t64] = tmp;
t4aaab[ijklabcd_t65] = tmp;
t4aaab[ijklabcd_t66] = tmp;
}
}
}
}
}
}
}
}
}
// TODO lsh: reduce symmetry of t4, t3
// t4aabb
m_ijab = nocc*(nocc-1)/2 * nvir*(nvir-1)/2;
ijab = -1;
for (b = 1; b < nvir; b++) {
for (a = 0; a < b; a++) {
for (j = nocc-1; j > 0; j--) {
for (i = j-1; i > -1; i--) {
ijab += 1;
klcd =-1;
for (d = 1; d < nvir; d++) {
for (c = 0; c < d; c++) {
for (l = nocc-1; l > 0; l--) {
for (k = l-1; k > -1; k--) {
klcd += 1;
ijabklcd_c = ijab * m_ijab + klcd;
tmp = c4aabb[ijabklcd_c];
tmp2 = t1xt3aabb(i, j, k, l, a, b, c, d, nocc, nvir, t1, t3aab);
tmp2+= t2xt2aabb(i, j, k, l, a, b, c, d, nocc, nvir, t2aa, t2ab);
tmp2+= t1xt1xt2aabb(i, j, k, l, a, b, c, d, nocc, nvir, t1, t2aa, t2ab);
tmp2+= t1xt1xt1xt1aabb(i, j, k, l, a, b, c, d, nocc, nvir, t1); // may have bug
//if(fabs((tmp-tmp2)/tmp) < numzero)
//if(fabs(fabs(tmp)-fabs(tmp2)) < numzero)
if(fabs(tmp)-fabs(tmp2) > -numzero)
{
tmp -= tmp2;
ijklabcd_t11 = Q(i, j, k, l, a, b, c, d, nocc, nvir);
ijklabcd_t12 = Q(j, i, k, l, b, a, c, d, nocc, nvir);
ijklabcd_t13 = Q(i, j, k, l, b, a, c, d, nocc, nvir);
ijklabcd_t14 = Q(j, i, k, l, a, b, c, d, nocc, nvir);
t4aabb[ijklabcd_t11] = tmp;
t4aabb[ijklabcd_t12] = tmp;
t4aabb[ijklabcd_t13] = -tmp;
t4aabb[ijklabcd_t14] = -tmp;
ijklabcd_t21 = Q(i, j, l, k, a, b, d, c, nocc, nvir);
ijklabcd_t22 = Q(j, i, l, k, b, a, d, c, nocc, nvir);
ijklabcd_t23 = Q(i, j, l, k, b, a, d, c, nocc, nvir);
ijklabcd_t24 = Q(j, i, l, k, a, b, d, c, nocc, nvir);
t4aabb[ijklabcd_t21] = tmp;
t4aabb[ijklabcd_t22] = tmp;
t4aabb[ijklabcd_t23] = -tmp;
t4aabb[ijklabcd_t24] = -tmp;
ijklabcd_t31 = Q(i, j, k, l, a, b, d, c, nocc, nvir);
ijklabcd_t32 = Q(j, i, k, l, b, a, d, c, nocc, nvir);
ijklabcd_t33 = Q(i, j, k, l, b, a, d, c, nocc, nvir);
ijklabcd_t34 = Q(j, i, k, l, a, b, d, c, nocc, nvir);
t4aabb[ijklabcd_t31] = -tmp;
t4aabb[ijklabcd_t32] = -tmp;
t4aabb[ijklabcd_t33] = tmp;
t4aabb[ijklabcd_t34] = tmp;
ijklabcd_t41 = Q(i, j, l, k, a, b, c, d, nocc, nvir);
ijklabcd_t42 = Q(j, i, l, k, b, a, c, d, nocc, nvir);
ijklabcd_t43 = Q(i, j, l, k, b, a, c, d, nocc, nvir);
ijklabcd_t44 = Q(j, i, l, k, a, b, c, d, nocc, nvir);
t4aabb[ijklabcd_t41] = -tmp;
t4aabb[ijklabcd_t42] = -tmp;
t4aabb[ijklabcd_t43] = tmp;
t4aabb[ijklabcd_t44] = tmp;
}
}
}
}
}
}
}
}
}
}
/* Count the alpha electrons occupying orbitals strictly below index qi.
 * Occupation codes in det[]: 3 = doubly occupied, 1 = alpha only
 * (both contribute an alpha electron); 2 = beta only, 0 = empty. */
int alpha_count(uint8_t det[], const uint8_t qi)
{
    int n = 0;
    int p;
    for (p = 0; p < qi; p++)
    {
        if (det[p] == 1 || det[p] == 3)
            n++;
    }
    return n;
}
/*
 * Sign relating the interleaved spin-orbital ordering
 *   | 1_alpha 1_beta ... nmo_alpha nmo_beta >
 * to the spin-separated ordering
 *   | 1_beta ... noc_beta > | 1_alpha ... noc_alpha >
 * (see the derivation in the original comments).
 *
 * det[i] encodes the occupation of spatial orbital i:
 *   0 = empty, 1 = alpha, 2 = beta, 3 = doubly occupied.
 *
 * The sign is (-1)^n, where n counts, for every beta electron, the alpha
 * electrons sitting in lower orbitals.  The original implementation called
 * alpha_count() from scratch for each beta electron (O(nmo^2)) and then
 * evaluated pow(-1, n); here a running alpha count makes the scan O(nmo)
 * and the sign is taken with a cheap integer parity test.
 */
double parity_ab_str(uint8_t det[], int nmo)
{
    int n = 0;       /* alpha-beta crossing count */
    int nalpha = 0;  /* alpha electrons seen in orbitals < i */
    int i;
    for (i = 0; i < nmo; i++)
    {
        if (det[i] == 3 || det[i] == 2)   /* orbital i holds a beta electron */
            n += nalpha;
        if (det[i] == 3 || det[i] == 1)   /* orbital i holds an alpha electron */
            nalpha += 1;
    }
    return (n & 1) ? -1.0 : 1.0;
}
/*
 * Sign relating the CI ordering of an n_excite-fold excited determinant to
 * the CC amplitude ordering.  For a single excitation, for example:
 *   | a noc noc-1 ... i+1 i-1 ... 2 1 > = parity_ci_to_cc * | 1 2 ... i-1 a i+1 ... noc-1 noc >
 *
 * Parameters:
 *   sum_ijkl - sum of the occupied-orbital indices removed by the excitation
 *   n_excite - excitation rank (1 = single, 2 = double, ...)
 *   nocc     - number of occupied orbitals
 *
 * Equivalent to pow(-1, n_excite*nocc - sum_ijkl - n_excite*(n_excite+1)/2),
 * but evaluated with an integer parity test instead of a floating-point
 * pow() call.  The exponent may be negative; (-1)^(-k) == (-1)^k, so only
 * the low bit matters (two's-complement e & 1 gives the parity either way).
 */
double parity_ci_to_cc(int sum_ijkl, int n_excite, int nocc)
{
    const int e = n_excite * nocc - sum_ijkl - n_excite * (n_excite + 1) / 2;
    return (e & 1) ? -1.0 : 1.0;
}
/* Growable array of doubles with amortized O(1) append (capacity doubling). */
typedef struct _darray
{
    size_t size;        /* number of elements currently stored */
    size_t actual_size; /* allocated capacity, always >= size */
    double *content;    /* heap buffer; NULL until the first append */
} darray;
/* Initialize d as an empty array: zero length, zero capacity, no buffer. */
void darray_create(darray *d)
{
    d->size = 0;
    d->actual_size = 0;
    d->content = NULL;
}
/* Append v to d, doubling the capacity when full.
 * On allocation failure the program prints a diagnostic and exits. */
void darray_append(darray *d, double v)
{
    if (d->actual_size < d->size + 1)
    {
        size_t grown = d->actual_size ? d->actual_size * 2 : 1;
        double *buf = realloc(d->content, sizeof(double) * grown);
        if (buf == NULL)
        {
            fprintf(stderr, "Failed to extend array (new_size=%zu)\n", grown);
            exit(EXIT_FAILURE);
        }
        d->content = buf;
        d->actual_size = grown;
    }
    d->content[d->size++] = v;
}
/* Read-only view of the stored elements (NULL if nothing was appended). */
const double* darray_data(darray *d)
{
    return d->content;
}
/* Release the buffer and return d to its freshly-created (empty) state. */
void darray_destroy(darray *d)
{
    free(d->content);
    d->content = NULL;
    d->size = 0;
    d->actual_size = 0;
}
/* Number of elements currently stored in d. */
size_t darray_size(darray *d)
{
    return d->size;
}
/* Growable array of ints with amortized O(1) append (capacity doubling). */
typedef struct _iarray
{
    size_t size;        /* number of elements currently stored */
    size_t actual_size; /* allocated capacity, always >= size */
    int *content;       /* heap buffer; NULL until the first append */
} iarray;
/* Initialize d as an empty array: zero length, zero capacity, no buffer. */
void iarray_create(iarray *d)
{
    d->size = 0;
    d->actual_size = 0;
    d->content = NULL;
}
/* Append v to d, doubling the capacity when full.
 * On allocation failure the program prints a diagnostic and exits. */
void iarray_append(iarray *d, int v)
{
    if (d->actual_size < d->size + 1)
    {
        size_t grown = d->actual_size ? d->actual_size * 2 : 1;
        int *buf = realloc(d->content, sizeof(int) * grown);
        if (buf == NULL)
        {
            fprintf(stderr, "Failed to extend array (new_size=%zu)\n", grown);
            exit(EXIT_FAILURE);
        }
        d->content = buf;
        d->actual_size = grown;
    }
    d->content[d->size++] = v;
}
/* Read-only view of the stored elements (NULL if nothing was appended). */
const int* iarray_data(iarray *d)
{
    return d->content;
}
/* Release the buffer and return d to its freshly-created (empty) state. */
void iarray_destroy(iarray *d)
{
    free(d->content);
    d->content = NULL;
    d->size = 0;
    d->actual_size = 0;
}
/* Number of elements currently stored in d. */
size_t iarray_size(iarray *d)
{
    return d->size;
}
//void t2t4c(double *t2t4c, int *p_aabb, int *q_aabb, int *r_aabb, int *s_aabb, int *t_aabb, int *u_aabb, int *v_aabb, int *w_aabb, double *c4_aabb, int *p_aaab, int *q_aaab, int *r_aaab, int *s_aaab, int *t_aaab, int *u_aaab, int *v_aaab, int *w_aaab, double *c4_aaab, double *t1, double *t2aa, double *t2ab, double *t3aaa, double *t3aab, double *e2ovov, double *tmp, int n_aabb, int n_aaab, int nocc, int nvir, double numzero, double c0)
void t2t4c_shci(double *t2t4c, double *t1, double *t2aa, double *t2ab, double *t3aaa, double *t3aab, double *e2ovov, const int nc, const int nocc, const int nvir, const double numzero, const double c0, double norm)
{
//double numzero = 1e-7;
int p, q, r, s, t, u, v, w, itmp, it, jt, at, bt;
double t4, parity, scale;
uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
for (itmp = 0; itmp < nocc+nvir; itmp++){
if (itmp<nocc) Refdet[itmp] = 3;
else Refdet[itmp] = 0;
}
double norm0SDT = norm;
double ****tmp;
tmp = (double ****)malloc(sizeof(double ***) * nocc);
for (it=0; it< nocc; it++){
tmp[it] = (double ***)malloc(sizeof(double **) * nocc);
for (jt=0; jt< nocc; jt++){
tmp[it][jt] = (double **)malloc(sizeof(double *) * nvir);
for (at=0; at< nvir; at++){
tmp[it][jt][at] = (double *)malloc(sizeof(double) * nvir);
}
}
}
// for (p=0; p<nocc; p++) {
// for (q=0; q<nocc; q++) {
// for (r=0; r<nvir; r++) {
// for (s=0; s<nvir; s++) {
// printf("%d %d %d %d %20.10lf\n",p,q,r,s,e2ovov[De(p,r,q,s,nocc,nvir)]);
// }
// }
// }
// }
FILE *fp;
char typ[4], line[255];
fp = fopen("CIcoeff_shci.out", "r");
fscanf(fp, "%s\n", line);
if (fp) {
while ( !feof(fp) ){
fscanf(fp, "%c%c%c%c,%s\n", &(typ[0]), &(typ[1]), &(typ[2]), &(typ[3]), line);
fscanf(fp, "%lf\n", &t4);
//printf ("typ=%c%c%c%c line=%s\n",typ[0],typ[1],typ[2],typ[3], line);
if (strncmp(typ, "aabb", 4) == 0 && fabs(t4) > numzero){
norm += t4*t4;
sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&s,&v,&w);
p += nc;
q += nc;
r += nc;
s += nc;
t += - nocc + nc;
u += - nocc + nc;
v += - nocc + nc;
w += - nocc + nc;
// if(!(p == 2 && q == 3 && t == 0 && u == 1 && \
// r == 2 && s == 3 && v == 0 && w == 1)) continue;
for (itmp = 0; itmp < nocc+nvir; itmp++)
det_str[itmp] = Refdet[itmp];
det_str[p] = 2;
det_str[q] = 2;
det_str[t+nocc] = 1;
det_str[u+nocc] = 1;
if (p != r && q != r) det_str[r] = 1;
else det_str[r] = 0;
if (p != s && q != s) det_str[s] = 1;
else det_str[s] = 0;
if (t != v && u != v) det_str[v+nocc] = 2;
else det_str[v+nocc] = 3;
if (t != w && u != w) det_str[w+nocc] = 2;
else det_str[w+nocc] = 3;
//parity = parity_ab_str(det_str, nocc+nvir);
parity = parity_ci_to_cc(p+q, 2, nocc);
parity *= parity_ci_to_cc(r+s, 2, nocc);
// interm norm of c4
t4 = parity * t4 / c0;
// lsh test
// printf("c4 mem %20.10f \n",t4);
// extract t4
t4-= t1xt3aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aab);
t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab);
t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab);
t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have bug
// lsh test
// printf("t4 mem %20.10f \n",t4);
for (it=0; it< nocc; it++){
for (jt=0; jt< nocc; jt++){
for (at=0; at< nvir; at++){
for (bt=0; bt< nvir; bt++){
tmp[it][jt][at][bt] = 0.0;
}
}
}
}
// printf("eris_ovov mem %20.10f \n",e2ovov[De(p,t,r,v,nocc,nvir)]);
if (p<r && t<v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
if (q<r && t<v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
if (p<s && t<v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
if (q<s && t<v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
if (p<r && u<v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
if (q<r && u<v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
if (p<s && u<v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
if (q<s && u<v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
if (p<r && t<w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
if (q<r && t<w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
if (p<s && t<w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
if (q<s && t<w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
if (p<r && u<w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
if (q<r && u<w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
if (p<s && u<w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
if (q<s && u<w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
if (p<r && v<t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
if (q<r && v<t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
if (p<s && v<t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
if (q<s && v<t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
if (p<r && v<u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
if (q<r && v<u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
if (p<s && v<u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
if (q<s && v<u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
if (p<r && w<t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
if (q<r && w<t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
if (p<s && w<t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
if (q<s && w<t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
if (p<r && w<u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
if (q<r && w<u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
if (p<s && w<u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
if (q<s && w<u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
scale = 0.5;
if (p==r && t<v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q==r && t<v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p==s && t<v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q==s && t<v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p==r && u<v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q==r && u<v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p==s && u<v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q==s && u<v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p==r && t<w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q==r && t<w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p==s && t<w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q==s && t<w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p==r && u<w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q==r && u<w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p==s && u<w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q==s && u<w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
if (p==r && v<t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q==r && v<t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p==s && v<t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q==s && v<t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p==r && v<u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q==r && v<u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p==s && v<u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q==s && v<u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p==r && w<t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q==r && w<t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p==s && w<t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q==s && w<t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p==r && w<u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q==r && w<u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p==s && w<u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q==s && w<u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
scale = 0.5;
if (p<r && t==v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q<r && t==v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p<s && t==v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q<s && t==v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p<r && u==v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q<r && u==v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p<s && u==v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q<s && u==v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p<r && t==w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q<r && t==w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p<s && t==w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q<s && t==w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p<r && u==w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q<r && u==w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p<s && u==w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q<s && u==w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
if (p<r && v==t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q<r && v==t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p<s && v==t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q<s && v==t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p<r && v==u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q<r && v==u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p<s && v==u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q<s && v==u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p<r && w==t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q<r && w==t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p<s && w==t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q<s && w==t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p<r && w==u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q<r && w==u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p<s && w==u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q<s && w==u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
scale = 0.25;
if (p==r && t==v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q==r && t==v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p==s && t==v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q==s && t==v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p==r && u==v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q==r && u==v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p==s && u==v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q==s && u==v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p==r && t==w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q==r && t==w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p==s && t==w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q==s && t==w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p==r && u==w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q==r && u==w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p==s && u==w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q==s && u==w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
if (p==r && v==t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q==r && v==t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p==s && v==t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q==s && v==t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p==r && v==u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q==r && v==u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p==s && v==u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q==s && v==u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p==r && w==t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q==r && w==t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p==s && w==t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q==s && w==t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p==r && w==u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q==r && w==u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p==s && w==u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q==s && w==u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
for (it = 0; it < nocc; it++)
for (jt = 0; jt < nocc; jt++)
for (at = 0; at < nvir; at++)
for (bt = 0; bt < nvir; bt++)
t2t4c[D(it,jt,at,bt,nocc,nvir)] += tmp[it][jt][at][bt] + tmp[jt][it][bt][at];
}
else if (strncmp(typ, "aaab", 4) == 0 && fabs(t4) > numzero){
norm += 2.0*t4*t4;
//lsh test
//printf ("typ=%c%c%c%c line=%s c4=%lf\n",typ[0],typ[1],typ[2],typ[3], line, t4);
sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v,&s,&w);
p += nc;
q += nc;
r += nc;
s += nc;
t += - nocc + nc;
u += - nocc + nc;
v += - nocc + nc;
w += - nocc + nc;
//printf ("p=%d, q=%d, r=%d, t=%d, u=%d, v=%d, s=%d, w=%d",p,q,r,t,u,v,s,w);
for (itmp = 0; itmp < nocc+nvir; itmp++)
det_str[itmp] = Refdet[itmp];
det_str[p] = 2;
det_str[q] = 2;
det_str[r] = 2;
det_str[t+nocc] = 1;
det_str[u+nocc] = 1;
det_str[v+nocc] = 1;
if (p != s && q != s && r != s) det_str[s] = 1;
else det_str[s] = 0;
if (t != w && u != w && v != w) det_str[w+nocc] = 2;
else det_str[w+nocc] = 3;
//parity = parity_ab_str(det_str, nocc+nvir);
parity = parity_ci_to_cc(p+q+r, 3, nocc);
parity *= parity_ci_to_cc(s, 1, nocc);
// interm norm of c4
t4 = parity * t4 / c0;
// extract t4
t4-= t1xt3aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aaa, t3aab); // may have 1e-5 bug
t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab); // may have 1e-3 bug
t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab); // may have 1e-5 bug
t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have 1e-6 bug
for (it=0; it< nocc; it++){
for (jt=0; jt< nocc; jt++){
for (at=0; at< nvir; at++){
for (bt=0; bt< nvir; bt++){
tmp[it][jt][at][bt] = 0.0;
}
}
}
}
tmp[r][s][v][w] += (e2ovov[De(p,t,q,u,nocc,nvir)]-e2ovov[De(p,u,q,t,nocc,nvir)]) * t4;
tmp[q][s][v][w] -= (e2ovov[De(p,t,r,u,nocc,nvir)]-e2ovov[De(p,u,r,t,nocc,nvir)]) * t4;
tmp[p][s][v][w] += (e2ovov[De(q,t,s,u,nocc,nvir)]-e2ovov[De(q,u,s,t,nocc,nvir)]) * t4;
tmp[r][s][u][w] -= (e2ovov[De(p,t,q,v,nocc,nvir)]-e2ovov[De(p,v,q,t,nocc,nvir)]) * t4;
tmp[q][s][u][w] += (e2ovov[De(p,t,r,v,nocc,nvir)]-e2ovov[De(p,v,r,t,nocc,nvir)]) * t4;
tmp[p][s][u][w] -= (e2ovov[De(q,t,s,v,nocc,nvir)]-e2ovov[De(q,v,s,t,nocc,nvir)]) * t4;
tmp[r][s][t][w] += (e2ovov[De(p,u,q,v,nocc,nvir)]-e2ovov[De(p,v,q,u,nocc,nvir)]) * t4;
tmp[q][s][t][w] -= (e2ovov[De(p,u,r,v,nocc,nvir)]-e2ovov[De(p,v,r,u,nocc,nvir)]) * t4;
tmp[p][s][t][w] += (e2ovov[De(q,u,s,v,nocc,nvir)]-e2ovov[De(q,v,s,u,nocc,nvir)]) * t4;
for (it = 0; it < nocc; it++)
for (jt = 0; jt < nocc; jt++)
for (at = 0; at < nvir; at++)
for (bt = 0; bt < nvir; bt++)
t2t4c[D(it,jt,at,bt,nocc,nvir)] += tmp[it][jt][at][bt] + tmp[jt][it][bt][at];
}
}
fclose(fp);
}
else
{
// error message
}
for (it=0; it< nocc; it++){
for (jt=0; jt< nocc; jt++){
for (at=0; at< nvir; at++){
free(tmp[it][jt][at]);
}
free(tmp[it][jt]);
}
free(tmp[it]);
}
free(tmp);
printf ("0SDTQ (Q) = %f ( %f )\n", norm, norm-norm0SDT);
// for (itmp = 0; itmp < nocc+nvir; itmp++){
// if (itmp<nocc) Refdet[itmp] = 3;
// else Refdet[itmp] = 0;
// }
//
// // t2t4c += e2ovov * t4aabb
// for (idet = 0; idet < n_aabb; idet++) {
// t4 = c4_aabb[idet];
// if(fabs(t4) > numzero)
// {
// p = p_aabb[idet];
// q = q_aabb[idet];
// r = r_aabb[idet];
// s = s_aabb[idet];
// t = t_aabb[idet];
// u = u_aabb[idet];
// v = v_aabb[idet];
// w = w_aabb[idet];
//
//
// if(!(p == 2 && q == 3 && t == 0 && u == 1 && \
// r == 2 && s == 3 && v == 0 && w == 1)) continue;
//
// for (itmp = 0; itmp < nocc+nvir; itmp++)
// det_str[itmp] = Refdet[itmp];
//
// det_str[p] = 2;
// det_str[q] = 2;
// det_str[t+nocc] = 1;
// det_str[u+nocc] = 1;
//
// if (p != r && q != r) det_str[r] = 1;
// else det_str[r] = 0;
// if (p != s && q != s) det_str[s] = 1;
// else det_str[s] = 0;
// if (t != v && u != v) det_str[v+nocc] = 2;
// else det_str[v+nocc] = 3;
// if (t != w && u != w) det_str[w+nocc] = 2;
// else det_str[w+nocc] = 3;
//
// parity = parity_ab_str(det_str, nocc+nvir);
// parity *= parity_ci_to_cc(p+q, 2, nocc);
// parity *= parity_ci_to_cc(r+s, 2, nocc);
//
// // interm norm of c4
// t4 = parity * t4 / c0;
// // lsh test
// printf("c4 mem %20.10f \n",t4);
//
//
// // extract t4
// t4-= t1xt3aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aab);
// t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab);
// t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab);
// t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have bug
//
// // lsh test
// printf("t4 mem %20.10f \n",t4);
//
// for (itmp = 0; itmp < dlen; itmp++)
// tmp[itmp] = 0.0;
//
// if (p<r && t<v) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
// if (q<r && t<v) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
// if (p<s && t<v) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
// if (q<s && t<v) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
// if (p<r && u<v) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
// if (q<r && u<v) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
// if (p<s && u<v) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
// if (q<s && u<v) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
// if (p<r && t<w) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
// if (q<r && t<w) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
// if (p<s && t<w) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
// if (q<s && t<w) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
// if (p<r && u<w) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
// if (q<r && u<w) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
// if (p<s && u<w) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
// if (q<s && u<w) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
// if (p<r && v<t) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
// if (q<r && v<t) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
// if (p<s && v<t) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
// if (q<s && v<t) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
// if (p<r && v<u) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
// if (q<r && v<u) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
// if (p<s && v<u) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
// if (q<s && v<u) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
// if (p<r && w<t) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
// if (q<r && w<t) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
// if (p<s && w<t) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
// if (q<s && w<t) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
// if (p<r && w<u) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
// if (q<r && w<u) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
// if (p<s && w<u) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
// if (q<s && w<u) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
//
// scale = 0.5;
// if (p==r && t<v) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && t<v) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && t<v) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && t<v) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && u<v) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && u<v) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && u<v) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && u<v) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && t<w) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && t<w) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && t<w) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && t<w) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && u<w) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && u<w) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && u<w) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && u<w) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && v<t) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v<t) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v<t) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v<t) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && v<u) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v<u) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v<u) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v<u) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && w<t) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w<t) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w<t) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w<t) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && w<u) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w<u) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w<u) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w<u) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
//
// scale = 0.5;
// if (p<r && t==v) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && t==v) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && t==v) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && t==v) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && u==v) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && u==v) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && u==v) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && u==v) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && t==w) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && t==w) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && t==w) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && t==w) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p<r && u==w) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && u==w) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && u==w) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && u==w) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
// if (p<r && v==t) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && v==t) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && v==t) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && v==t) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && v==u) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && v==u) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && v==u) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && v==u) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && w==t) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && w==t) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && w==t) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && w==t) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p<r && w==u) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && w==u) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && w==u) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && w==u) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
//
// scale = 0.25;
// if (p==r && t==v) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && t==v) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && t==v) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && t==v) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && u==v) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && u==v) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && u==v) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && u==v) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && t==w) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && t==w) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && t==w) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && t==w) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && u==w) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && u==w) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && u==w) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && u==w) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && v==t) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v==t) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v==t) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v==t) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && v==u) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v==u) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v==u) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v==u) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && w==t) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w==t) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w==t) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w==t) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && w==u) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w==u) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w==u) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w==u) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
//
// for (it = 0; it < nocc; it++)
// for (jt = 0; jt < nocc; jt++)
// for (at = 0; at < nvir; at++)
// for (bt = 0; bt < nvir; bt++)
// t2t4c[D(it,jt,at,bt,nocc,nvir)] += tmp[D(it,jt,at,bt,nocc,nvir)] + tmp[D(jt,it,bt,at,nocc,nvir)];
// }
// }
// // t2t4c += e2ovov * t4aaab
// for (idet = 0; idet < n_aaab; idet++) {
// t4 = c4_aaab[idet];
// if(fabs(t4) > numzero)
// {
// p = p_aaab[idet];
// q = q_aaab[idet];
// r = r_aaab[idet];
// s = s_aaab[idet];
// t = t_aaab[idet];
// u = u_aaab[idet];
// v = v_aaab[idet];
// w = w_aaab[idet];
//
// for (itmp = 0; itmp < nocc+nvir; itmp++)
// det_str[itmp] = Refdet[itmp];
// det_str[p] = 2;
// det_str[q] = 2;
// det_str[r] = 2;
// det_str[t+nocc] = 1;
// det_str[u+nocc] = 1;
// det_str[v+nocc] = 1;
//
// if (p != s && q != s && r != s) det_str[s] = 1;
// else det_str[s] = 0;
// if (t != w && u != w && v != w) det_str[w+nocc] = 2;
// else det_str[w+nocc] = 3;
// parity = parity_ab_str(det_str, nocc+nvir);
// parity *= parity_ci_to_cc(p+q+r, 3, nocc);
// parity *= parity_ci_to_cc(s, 1, nocc);
//
// // interm norm of c4
// t4 = parity * t4 / c0;
//
// // extract t4
// t4-= t1xt3aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aaa, t3aab); // may have 1e-5 bug
// t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab); // may have 1e-3 bug
// t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab); // may have 1e-5 bug
// t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have 1e-6 bug
//
// for (itmp = 0; itmp < dlen; itmp++)
// tmp[itmp] = 0.0;
//
// tmp[D(r,s,v,w,nocc,nvir)] += (e2ovov[De(p,t,q,u,nocc,nvir)]-e2ovov[De(p,u,q,t,nocc,nvir)]) * t4;
// tmp[D(q,s,v,w,nocc,nvir)] -= (e2ovov[De(p,t,r,u,nocc,nvir)]-e2ovov[De(p,u,r,t,nocc,nvir)]) * t4;
// tmp[D(p,s,v,w,nocc,nvir)] += (e2ovov[De(q,t,s,u,nocc,nvir)]-e2ovov[De(q,u,s,t,nocc,nvir)]) * t4;
// tmp[D(r,s,u,w,nocc,nvir)] -= (e2ovov[De(p,t,q,v,nocc,nvir)]-e2ovov[De(p,v,q,t,nocc,nvir)]) * t4;
// tmp[D(q,s,u,w,nocc,nvir)] += (e2ovov[De(p,t,r,v,nocc,nvir)]-e2ovov[De(p,v,r,t,nocc,nvir)]) * t4;
// tmp[D(p,s,u,w,nocc,nvir)] -= (e2ovov[De(q,t,s,v,nocc,nvir)]-e2ovov[De(q,v,s,t,nocc,nvir)]) * t4;
// tmp[D(r,s,t,w,nocc,nvir)] += (e2ovov[De(p,u,q,v,nocc,nvir)]-e2ovov[De(p,v,q,u,nocc,nvir)]) * t4;
// tmp[D(q,s,t,w,nocc,nvir)] -= (e2ovov[De(p,u,r,v,nocc,nvir)]-e2ovov[De(p,v,r,u,nocc,nvir)]) * t4;
// tmp[D(p,s,t,w,nocc,nvir)] += (e2ovov[De(q,u,s,v,nocc,nvir)]-e2ovov[De(q,v,s,u,nocc,nvir)]) * t4;
//
// for (it = 0; it < nocc; it++)
// for (jt = 0; jt < nocc; jt++)
// for (at = 0; at < nvir; at++)
// for (bt = 0; bt < nvir; bt++)
// t2t4c[D(it,jt,at,bt,nocc,nvir)] += tmp[D(it,jt,at,bt,nocc,nvir)] + tmp[D(jt,it,bt,at,nocc,nvir)];
// }
// }
}
void t2t4c_shci_ecT(double *t2t4c, double *t1, double *t2aa, double *t2ab, double *t3aaa, double *t3aab, double *e2ovov, const int nc, const int nc_ref, const int nvir_ref, const int nocc, const int nvir, const double numzero, const double c0, double norm)
{
//double numzero = 1e-7;
int p, q, r, s, t, u, v, w, itmp, it, jt, at, bt;
double t4, parity, scale;
uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
for (itmp = 0; itmp < nocc+nvir; itmp++){
if (itmp<nocc) Refdet[itmp] = 3;
else Refdet[itmp] = 0;
}
double norm0SDT = norm;
double ****tmp;
tmp = (double ****)malloc(sizeof(double ***) * nocc);
for (it=0; it< nocc; it++){
tmp[it] = (double ***)malloc(sizeof(double **) * nocc);
for (jt=0; jt< nocc; jt++){
tmp[it][jt] = (double **)malloc(sizeof(double *) * nvir);
for (at=0; at< nvir; at++){
tmp[it][jt][at] = (double *)malloc(sizeof(double) * nvir);
}
}
}
// for (p=0; p<nocc; p++) {
// for (q=0; q<nocc; q++) {
// for (r=0; r<nvir; r++) {
// for (s=0; s<nvir; s++) {
// printf("%d %d %d %d %20.10lf\n",p,q,r,s,e2ovov[De(p,r,q,s,nocc,nvir)]);
// }
// }
// }
// }
FILE *fp;
char typ[4], line[255];
fp = fopen("CIcoeff_shci.out", "r");
fscanf(fp, "%s\n", line);
if (fp) {
while ( !feof(fp) ){
fscanf(fp, "%c%c%c%c,%s\n", &(typ[0]), &(typ[1]), &(typ[2]), &(typ[3]), line);
fscanf(fp, "%lf\n", &t4);
//printf ("typ=%c%c%c%c line=%s\n",typ[0],typ[1],typ[2],typ[3], line);
if (strncmp(typ, "aabb", 4) == 0 && fabs(t4) > numzero){
norm += t4*t4;
sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&s,&v,&w);
p += nc;
q += nc;
r += nc;
s += nc;
t += - nocc + nc;
u += - nocc + nc;
v += - nocc + nc;
w += - nocc + nc;
// exclude active space (reference space)
//if (p >= nc_ref && q >= nc_ref && r >= nc_ref && s >= nc_ref && \
// r < nvir_ref && s < nvir_ref && t < nvir_ref && u < nvir_ref) continue;
// exclude inactive space
if (p <= nc_ref && q <= nc_ref && r <= nc_ref && s <= nc_ref && \
t > nvir_ref && u > nvir_ref && v > nvir_ref && w > nvir_ref) continue;
// if(!(p == 2 && q == 3 && t == 0 && u == 1 && \
// r == 2 && s == 3 && v == 0 && w == 1)) continue;
for (itmp = 0; itmp < nocc+nvir; itmp++)
det_str[itmp] = Refdet[itmp];
det_str[p] = 2;
det_str[q] = 2;
det_str[t+nocc] = 1;
det_str[u+nocc] = 1;
if (p != r && q != r) det_str[r] = 1;
else det_str[r] = 0;
if (p != s && q != s) det_str[s] = 1;
else det_str[s] = 0;
if (t != v && u != v) det_str[v+nocc] = 2;
else det_str[v+nocc] = 3;
if (t != w && u != w) det_str[w+nocc] = 2;
else det_str[w+nocc] = 3;
//parity = parity_ab_str(det_str, nocc+nvir);
parity = parity_ci_to_cc(p+q, 2, nocc);
parity *= parity_ci_to_cc(r+s, 2, nocc);
// interm norm of c4
t4 = parity * t4 / c0;
// lsh test
// printf("c4 mem %20.10f \n",t4);
// extract t4
t4-= t1xt3aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aab);
t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab);
t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab);
t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have bug
// lsh test
// printf("t4 mem %20.10f \n",t4);
for (it=0; it< nocc; it++){
for (jt=0; jt< nocc; jt++){
for (at=0; at< nvir; at++){
for (bt=0; bt< nvir; bt++){
tmp[it][jt][at][bt] = 0.0;
}
}
}
}
// printf("eris_ovov mem %20.10f \n",e2ovov[De(p,t,r,v,nocc,nvir)]);
if (p<r && t<v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
if (q<r && t<v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
if (p<s && t<v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
if (q<s && t<v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
if (p<r && u<v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
if (q<r && u<v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
if (p<s && u<v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
if (q<s && u<v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
if (p<r && t<w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
if (q<r && t<w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
if (p<s && t<w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
if (q<s && t<w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
if (p<r && u<w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
if (q<r && u<w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
if (p<s && u<w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
if (q<s && u<w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
if (p<r && v<t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
if (q<r && v<t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
if (p<s && v<t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
if (q<s && v<t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
if (p<r && v<u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
if (q<r && v<u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
if (p<s && v<u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
if (q<s && v<u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
if (p<r && w<t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
if (q<r && w<t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
if (p<s && w<t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
if (q<s && w<t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
if (p<r && w<u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
if (q<r && w<u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
if (p<s && w<u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
if (q<s && w<u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
scale = 0.5;
if (p==r && t<v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q==r && t<v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p==s && t<v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q==s && t<v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p==r && u<v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q==r && u<v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p==s && u<v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q==s && u<v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p==r && t<w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q==r && t<w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p==s && t<w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q==s && t<w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p==r && u<w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q==r && u<w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p==s && u<w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q==s && u<w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
if (p==r && v<t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q==r && v<t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p==s && v<t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q==s && v<t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p==r && v<u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q==r && v<u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p==s && v<u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q==s && v<u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p==r && w<t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q==r && w<t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p==s && w<t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q==s && w<t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p==r && w<u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q==r && w<u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p==s && w<u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q==s && w<u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
scale = 0.5;
if (p<r && t==v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q<r && t==v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p<s && t==v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q<s && t==v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p<r && u==v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q<r && u==v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p<s && u==v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q<s && u==v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p<r && t==w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q<r && t==w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p<s && t==w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q<s && t==w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p<r && u==w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q<r && u==w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p<s && u==w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q<s && u==w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
if (p<r && v==t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q<r && v==t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p<s && v==t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q<s && v==t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p<r && v==u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q<r && v==u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p<s && v==u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q<s && v==u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p<r && w==t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q<r && w==t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p<s && w==t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q<s && w==t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p<r && w==u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q<r && w==u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p<s && w==u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q<s && w==u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
scale = 0.25;
if (p==r && t==v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q==r && t==v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p==s && t==v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q==s && t==v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p==r && u==v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q==r && u==v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p==s && u==v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q==s && u==v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p==r && t==w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q==r && t==w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p==s && t==w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q==s && t==w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p==r && u==w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q==r && u==w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p==s && u==w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q==s && u==w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
if (p==r && v==t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q==r && v==t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p==s && v==t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q==s && v==t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p==r && v==u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q==r && v==u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p==s && v==u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q==s && v==u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p==r && w==t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q==r && w==t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p==s && w==t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q==s && w==t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p==r && w==u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q==r && w==u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p==s && w==u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q==s && w==u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
for (it = 0; it < nocc; it++)
for (jt = 0; jt < nocc; jt++)
for (at = 0; at < nvir; at++)
for (bt = 0; bt < nvir; bt++)
t2t4c[D(it,jt,at,bt,nocc,nvir)] += tmp[it][jt][at][bt] + tmp[jt][it][bt][at];
}
else if (strncmp(typ, "aaab", 4) == 0 && fabs(t4) > numzero){
norm += 2.0*t4*t4;
//lsh test
//printf ("typ=%c%c%c%c line=%s c4=%lf\n",typ[0],typ[1],typ[2],typ[3], line, t4);
sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v,&s,&w);
p += nc;
q += nc;
r += nc;
s += nc;
t += - nocc + nc;
u += - nocc + nc;
v += - nocc + nc;
w += - nocc + nc;
// exclude active space (reference space)
//if (p >= nc_ref && q >= nc_ref && r >= nc_ref && s >= nc_ref && \
// r < nvir_ref && s < nvir_ref && t < nvir_ref && u < nvir_ref) continue;
// exclude inactive space
if (p <= nc_ref && q <= nc_ref && r <= nc_ref && s <= nc_ref && \
t > nvir_ref && u > nvir_ref && v > nvir_ref && w > nvir_ref) continue;
//printf ("p=%d, q=%d, r=%d, t=%d, u=%d, v=%d, s=%d, w=%d",p,q,r,t,u,v,s,w);
for (itmp = 0; itmp < nocc+nvir; itmp++)
det_str[itmp] = Refdet[itmp];
det_str[p] = 2;
det_str[q] = 2;
det_str[r] = 2;
det_str[t+nocc] = 1;
det_str[u+nocc] = 1;
det_str[v+nocc] = 1;
if (p != s && q != s && r != s) det_str[s] = 1;
else det_str[s] = 0;
if (t != w && u != w && v != w) det_str[w+nocc] = 2;
else det_str[w+nocc] = 3;
//parity = parity_ab_str(det_str, nocc+nvir);
parity = parity_ci_to_cc(p+q+r, 3, nocc);
parity *= parity_ci_to_cc(s, 1, nocc);
// interm norm of c4
t4 = parity * t4 / c0;
// extract t4
t4-= t1xt3aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aaa, t3aab); // may have 1e-5 bug
t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab); // may have 1e-3 bug
t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab); // may have 1e-5 bug
t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have 1e-6 bug
for (it=0; it< nocc; it++){
for (jt=0; jt< nocc; jt++){
for (at=0; at< nvir; at++){
for (bt=0; bt< nvir; bt++){
tmp[it][jt][at][bt] = 0.0;
}
}
}
}
tmp[r][s][v][w] += (e2ovov[De(p,t,q,u,nocc,nvir)]-e2ovov[De(p,u,q,t,nocc,nvir)]) * t4;
tmp[q][s][v][w] -= (e2ovov[De(p,t,r,u,nocc,nvir)]-e2ovov[De(p,u,r,t,nocc,nvir)]) * t4;
tmp[p][s][v][w] += (e2ovov[De(q,t,s,u,nocc,nvir)]-e2ovov[De(q,u,s,t,nocc,nvir)]) * t4;
tmp[r][s][u][w] -= (e2ovov[De(p,t,q,v,nocc,nvir)]-e2ovov[De(p,v,q,t,nocc,nvir)]) * t4;
tmp[q][s][u][w] += (e2ovov[De(p,t,r,v,nocc,nvir)]-e2ovov[De(p,v,r,t,nocc,nvir)]) * t4;
tmp[p][s][u][w] -= (e2ovov[De(q,t,s,v,nocc,nvir)]-e2ovov[De(q,v,s,t,nocc,nvir)]) * t4;
tmp[r][s][t][w] += (e2ovov[De(p,u,q,v,nocc,nvir)]-e2ovov[De(p,v,q,u,nocc,nvir)]) * t4;
tmp[q][s][t][w] -= (e2ovov[De(p,u,r,v,nocc,nvir)]-e2ovov[De(p,v,r,u,nocc,nvir)]) * t4;
tmp[p][s][t][w] += (e2ovov[De(q,u,s,v,nocc,nvir)]-e2ovov[De(q,v,s,u,nocc,nvir)]) * t4;
for (it = 0; it < nocc; it++)
for (jt = 0; jt < nocc; jt++)
for (at = 0; at < nvir; at++)
for (bt = 0; bt < nvir; bt++)
t2t4c[D(it,jt,at,bt,nocc,nvir)] += tmp[it][jt][at][bt] + tmp[jt][it][bt][at];
}
}
fclose(fp);
}
else
{
// error message
}
for (it=0; it< nocc; it++){
for (jt=0; jt< nocc; jt++){
for (at=0; at< nvir; at++){
free(tmp[it][jt][at]);
}
free(tmp[it][jt]);
}
free(tmp[it]);
}
free(tmp);
printf ("0SDTQ (Q) = %f ( %f )\n", norm, norm-norm0SDT);
// for (itmp = 0; itmp < nocc+nvir; itmp++){
// if (itmp<nocc) Refdet[itmp] = 3;
// else Refdet[itmp] = 0;
// }
//
// // t2t4c += e2ovov * t4aabb
// for (idet = 0; idet < n_aabb; idet++) {
// t4 = c4_aabb[idet];
// if(fabs(t4) > numzero)
// {
// p = p_aabb[idet];
// q = q_aabb[idet];
// r = r_aabb[idet];
// s = s_aabb[idet];
// t = t_aabb[idet];
// u = u_aabb[idet];
// v = v_aabb[idet];
// w = w_aabb[idet];
//
//
// if(!(p == 2 && q == 3 && t == 0 && u == 1 && \
// r == 2 && s == 3 && v == 0 && w == 1)) continue;
//
// for (itmp = 0; itmp < nocc+nvir; itmp++)
// det_str[itmp] = Refdet[itmp];
//
// det_str[p] = 2;
// det_str[q] = 2;
// det_str[t+nocc] = 1;
// det_str[u+nocc] = 1;
//
// if (p != r && q != r) det_str[r] = 1;
// else det_str[r] = 0;
// if (p != s && q != s) det_str[s] = 1;
// else det_str[s] = 0;
// if (t != v && u != v) det_str[v+nocc] = 2;
// else det_str[v+nocc] = 3;
// if (t != w && u != w) det_str[w+nocc] = 2;
// else det_str[w+nocc] = 3;
//
// parity = parity_ab_str(det_str, nocc+nvir);
// parity *= parity_ci_to_cc(p+q, 2, nocc);
// parity *= parity_ci_to_cc(r+s, 2, nocc);
//
// // interm norm of c4
// t4 = parity * t4 / c0;
// // lsh test
// printf("c4 mem %20.10f \n",t4);
//
//
// // extract t4
// t4-= t1xt3aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aab);
// t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab);
// t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab);
// t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have bug
//
// // lsh test
// printf("t4 mem %20.10f \n",t4);
//
// for (itmp = 0; itmp < dlen; itmp++)
// tmp[itmp] = 0.0;
//
// if (p<r && t<v) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
// if (q<r && t<v) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
// if (p<s && t<v) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
// if (q<s && t<v) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
// if (p<r && u<v) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
// if (q<r && u<v) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
// if (p<s && u<v) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
// if (q<s && u<v) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
// if (p<r && t<w) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
// if (q<r && t<w) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
// if (p<s && t<w) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
// if (q<s && t<w) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
// if (p<r && u<w) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
// if (q<r && u<w) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
// if (p<s && u<w) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
// if (q<s && u<w) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
// if (p<r && v<t) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
// if (q<r && v<t) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
// if (p<s && v<t) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
// if (q<s && v<t) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
// if (p<r && v<u) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
// if (q<r && v<u) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
// if (p<s && v<u) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
// if (q<s && v<u) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
// if (p<r && w<t) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
// if (q<r && w<t) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
// if (p<s && w<t) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
// if (q<s && w<t) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
// if (p<r && w<u) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
// if (q<r && w<u) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
// if (p<s && w<u) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
// if (q<s && w<u) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
//
// scale = 0.5;
// if (p==r && t<v) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && t<v) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && t<v) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && t<v) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && u<v) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && u<v) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && u<v) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && u<v) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && t<w) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && t<w) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && t<w) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && t<w) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && u<w) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && u<w) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && u<w) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && u<w) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && v<t) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v<t) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v<t) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v<t) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && v<u) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v<u) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v<u) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v<u) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && w<t) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w<t) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w<t) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w<t) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && w<u) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w<u) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w<u) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w<u) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
//
// scale = 0.5;
// if (p<r && t==v) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && t==v) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && t==v) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && t==v) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && u==v) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && u==v) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && u==v) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && u==v) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && t==w) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && t==w) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && t==w) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && t==w) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p<r && u==w) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && u==w) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && u==w) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && u==w) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
// if (p<r && v==t) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && v==t) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && v==t) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && v==t) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && v==u) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && v==u) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && v==u) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && v==u) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && w==t) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && w==t) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && w==t) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && w==t) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p<r && w==u) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && w==u) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && w==u) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && w==u) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
//
// scale = 0.25;
// if (p==r && t==v) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && t==v) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && t==v) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && t==v) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && u==v) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && u==v) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && u==v) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && u==v) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && t==w) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && t==w) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && t==w) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && t==w) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && u==w) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && u==w) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && u==w) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && u==w) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && v==t) tmp[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v==t) tmp[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v==t) tmp[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v==t) tmp[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && v==u) tmp[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v==u) tmp[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v==u) tmp[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v==u) tmp[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && w==t) tmp[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w==t) tmp[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w==t) tmp[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w==t) tmp[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && w==u) tmp[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w==u) tmp[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w==u) tmp[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w==u) tmp[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
//
// for (it = 0; it < nocc; it++)
// for (jt = 0; jt < nocc; jt++)
// for (at = 0; at < nvir; at++)
// for (bt = 0; bt < nvir; bt++)
// t2t4c[D(it,jt,at,bt,nocc,nvir)] += tmp[D(it,jt,at,bt,nocc,nvir)] + tmp[D(jt,it,bt,at,nocc,nvir)];
// }
// }
// // t2t4c += e2ovov * t4aaab
// for (idet = 0; idet < n_aaab; idet++) {
// t4 = c4_aaab[idet];
// if(fabs(t4) > numzero)
// {
// p = p_aaab[idet];
// q = q_aaab[idet];
// r = r_aaab[idet];
// s = s_aaab[idet];
// t = t_aaab[idet];
// u = u_aaab[idet];
// v = v_aaab[idet];
// w = w_aaab[idet];
//
// for (itmp = 0; itmp < nocc+nvir; itmp++)
// det_str[itmp] = Refdet[itmp];
// det_str[p] = 2;
// det_str[q] = 2;
// det_str[r] = 2;
// det_str[t+nocc] = 1;
// det_str[u+nocc] = 1;
// det_str[v+nocc] = 1;
//
// if (p != s && q != s && r != s) det_str[s] = 1;
// else det_str[s] = 0;
// if (t != w && u != w && v != w) det_str[w+nocc] = 2;
// else det_str[w+nocc] = 3;
// parity = parity_ab_str(det_str, nocc+nvir);
// parity *= parity_ci_to_cc(p+q+r, 3, nocc);
// parity *= parity_ci_to_cc(s, 1, nocc);
//
// // interm norm of c4
// t4 = parity * t4 / c0;
//
// // extract t4
// t4-= t1xt3aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aaa, t3aab); // may have 1e-5 bug
// t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab); // may have 1e-3 bug
// t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab); // may have 1e-5 bug
// t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have 1e-6 bug
//
// for (itmp = 0; itmp < dlen; itmp++)
// tmp[itmp] = 0.0;
//
// tmp[D(r,s,v,w,nocc,nvir)] += (e2ovov[De(p,t,q,u,nocc,nvir)]-e2ovov[De(p,u,q,t,nocc,nvir)]) * t4;
// tmp[D(q,s,v,w,nocc,nvir)] -= (e2ovov[De(p,t,r,u,nocc,nvir)]-e2ovov[De(p,u,r,t,nocc,nvir)]) * t4;
// tmp[D(p,s,v,w,nocc,nvir)] += (e2ovov[De(q,t,s,u,nocc,nvir)]-e2ovov[De(q,u,s,t,nocc,nvir)]) * t4;
// tmp[D(r,s,u,w,nocc,nvir)] -= (e2ovov[De(p,t,q,v,nocc,nvir)]-e2ovov[De(p,v,q,t,nocc,nvir)]) * t4;
// tmp[D(q,s,u,w,nocc,nvir)] += (e2ovov[De(p,t,r,v,nocc,nvir)]-e2ovov[De(p,v,r,t,nocc,nvir)]) * t4;
// tmp[D(p,s,u,w,nocc,nvir)] -= (e2ovov[De(q,t,s,v,nocc,nvir)]-e2ovov[De(q,v,s,t,nocc,nvir)]) * t4;
// tmp[D(r,s,t,w,nocc,nvir)] += (e2ovov[De(p,u,q,v,nocc,nvir)]-e2ovov[De(p,v,q,u,nocc,nvir)]) * t4;
// tmp[D(q,s,t,w,nocc,nvir)] -= (e2ovov[De(p,u,r,v,nocc,nvir)]-e2ovov[De(p,v,r,u,nocc,nvir)]) * t4;
// tmp[D(p,s,t,w,nocc,nvir)] += (e2ovov[De(q,u,s,v,nocc,nvir)]-e2ovov[De(q,v,s,u,nocc,nvir)]) * t4;
//
// for (it = 0; it < nocc; it++)
// for (jt = 0; jt < nocc; jt++)
// for (at = 0; at < nvir; at++)
// for (bt = 0; bt < nvir; bt++)
// t2t4c[D(it,jt,at,bt,nocc,nvir)] += tmp[D(it,jt,at,bt,nocc,nvir)] + tmp[D(jt,it,bt,at,nocc,nvir)];
// }
// }
}
//void t2t4c_shci_omp(double *t2t4c, double *t1, double *t2aa, double *t2ab, double *t3aaa, double *t3aab, double *e2ovov, const int nc, const int num_det, const int nocc, const int nvir, const double numzero, const double c0, double norm)
//{
// //double numzero = 1e-7;
//
// double norm0SDT = norm;
//
//// for (p=0; p<nocc; p++) {
//// for (q=0; q<nocc; q++) {
//// for (r=0; r<nvir; r++) {
//// for (s=0; s<nvir; s++) {
//// printf("%d %d %d %d %20.10lf\n",p,q,r,s,e2ovov[De(p,r,q,s,nocc,nvir)]);
//// }
//// }
//// }
//// }
//
// const int t2size = nocc*nocc*nvir*nvir;
// FILE *fp;
// char line_init[255];
// fp = fopen("CIcoeff_shci.out", "r");
// fscanf(fp, "%s\n", line_init);
// if (fp) {
//
//// shared(t1, t2aa, t2ab, t3aaa, t3aab, e2ovov, nc, num_det, nocc, nvir, numzero, c0, fp, t2t4c)
//#pragma omp parallel default(none) \
// shared(t1, t2aa, t2ab, t3aaa, t3aab, e2ovov, fp, t2t4c, norm)
//{
// double t4, parity, scale;
// int p, q, r, s, t, u, v, w, itmp, it, jt, at, bt, ifile;
// char typ[4], line[255];
// uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
// for (itmp = 0; itmp < nocc+nvir; itmp++){
// if (itmp<nocc) Refdet[itmp] = 3;
// else Refdet[itmp] = 0;
// }
// double ****tmp;
// tmp = (double ****)malloc(sizeof(double ***) * nocc);
// for (it=0; it< nocc; it++){
// tmp[it] = (double ***)malloc(sizeof(double **) * nocc);
// for (jt=0; jt< nocc; jt++){
// tmp[it][jt] = (double **)malloc(sizeof(double *) * nvir);
// for (at=0; at< nvir; at++){
// tmp[it][jt][at] = (double *)malloc(sizeof(double) * nvir);
// }
// }
// }
//
// double *t2t4c_priv;
// t2t4c_priv = (double *)malloc(sizeof(double) * t2size);
// for (it=0; it< t2size; it++){
// t2t4c_priv[it] = 0.0;
// }
//
//// while ( !feof(fp) ){
////#pragma omp for schedule(dynamic, 100) reduction(+ : norm)
//#pragma omp for reduction(+ : norm)
// for ( ifile=0; ifile<num_det; ifile++ ){
// fscanf(fp, "%c%c%c%c,%s\n", &(typ[0]), &(typ[1]), &(typ[2]), &(typ[3]), line);
// fscanf(fp, "%lf\n", &t4);
// //printf ("typ=%c%c%c%c line=%s\n",typ[0],typ[1],typ[2],typ[3], line);
// if (strncmp(typ, "aabb", 4) == 0 && fabs(t4) > numzero){
// norm += t4*t4;
// sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&s,&v,&w);
// p += nc;
// q += nc;
// r += nc;
// s += nc;
// t += - nocc + nc;
// u += - nocc + nc;
// v += - nocc + nc;
// w += - nocc + nc;
//
//// if(!(p == 2 && q == 3 && t == 0 && u == 1 && \
//// r == 2 && s == 3 && v == 0 && w == 1)) continue;
//
// for (itmp = 0; itmp < nocc+nvir; itmp++)
// det_str[itmp] = Refdet[itmp];
//
// det_str[p] = 2;
// det_str[q] = 2;
// det_str[t+nocc] = 1;
// det_str[u+nocc] = 1;
//
// if (p != r && q != r) det_str[r] = 1;
// else det_str[r] = 0;
// if (p != s && q != s) det_str[s] = 1;
// else det_str[s] = 0;
// if (t != v && u != v) det_str[v+nocc] = 2;
// else det_str[v+nocc] = 3;
// if (t != w && u != w) det_str[w+nocc] = 2;
// else det_str[w+nocc] = 3;
//
// parity = parity_ab_str(det_str, nocc+nvir);
// parity *= parity_ci_to_cc(p+q, 2, nocc);
// parity *= parity_ci_to_cc(r+s, 2, nocc);
//
// // interm norm of c4
// t4 = parity * t4 / c0;
// // lsh test
//// printf("c4 mem %20.10f \n",t4);
//
// // extract t4
// t4-= t1xt3aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aab);
// t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab);
// t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab);
// t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have bug
//
// // lsh test
//// printf("t4 mem %20.10f \n",t4);
//
// for (it=0; it< nocc; it++){
// for (jt=0; jt< nocc; jt++){
// for (at=0; at< nvir; at++){
// for (bt=0; bt< nvir; bt++){
// tmp[it][jt][at][bt] = 0.0;
// }
// }
// }
// }
//// printf("eris_ovov mem %20.10f \n",e2ovov[De(p,t,r,v,nocc,nvir)]);
//
// if (p<r && t<v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
// if (q<r && t<v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
// if (p<s && t<v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
// if (q<s && t<v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
// if (p<r && u<v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
// if (q<r && u<v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
// if (p<s && u<v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
// if (q<s && u<v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
// if (p<r && t<w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
// if (q<r && t<w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
// if (p<s && t<w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
// if (q<s && t<w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
// if (p<r && u<w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
// if (q<r && u<w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
// if (p<s && u<w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
// if (q<s && u<w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
// if (p<r && v<t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
// if (q<r && v<t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
// if (p<s && v<t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
// if (q<s && v<t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
// if (p<r && v<u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
// if (q<r && v<u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
// if (p<s && v<u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
// if (q<s && v<u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
// if (p<r && w<t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
// if (q<r && w<t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
// if (p<s && w<t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
// if (q<s && w<t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
// if (p<r && w<u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
// if (q<r && w<u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
// if (p<s && w<u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
// if (q<s && w<u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
//
// scale = 0.5;
// if (p==r && t<v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && t<v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && t<v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && t<v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && u<v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && u<v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && u<v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && u<v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && t<w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && t<w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && t<w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && t<w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && u<w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && u<w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && u<w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && u<w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && v<t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v<t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v<t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v<t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && v<u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v<u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v<u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v<u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && w<t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w<t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w<t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w<t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && w<u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w<u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w<u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w<u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
//
// scale = 0.5;
// if (p<r && t==v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && t==v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && t==v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && t==v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && u==v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && u==v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && u==v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && u==v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && t==w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && t==w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && t==w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && t==w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p<r && u==w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && u==w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && u==w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && u==w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
// if (p<r && v==t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && v==t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && v==t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && v==t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && v==u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && v==u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && v==u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && v==u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && w==t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && w==t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && w==t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && w==t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p<r && w==u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && w==u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && w==u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && w==u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
//
// scale = 0.25;
// if (p==r && t==v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && t==v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && t==v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && t==v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && u==v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && u==v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && u==v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && u==v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && t==w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && t==w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && t==w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && t==w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && u==w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && u==w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && u==w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && u==w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && v==t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v==t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v==t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v==t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && v==u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v==u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v==u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v==u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && w==t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w==t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w==t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w==t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && w==u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w==u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w==u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w==u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
//
// for (it = 0; it < nocc; it++)
// for (jt = 0; jt < nocc; jt++)
// for (at = 0; at < nvir; at++)
// for (bt = 0; bt < nvir; bt++)
// t2t4c_priv[D(it,jt,at,bt,nocc,nvir)] += tmp[it][jt][at][bt] + tmp[jt][it][bt][at];
//
// }
// else if (strncmp(typ, "aaab", 4) == 0 && fabs(t4) > numzero){
// norm += 2.0*t4*t4;
// //lsh test
// //printf ("typ=%c%c%c%c line=%s c4=%lf\n",typ[0],typ[1],typ[2],typ[3], line, t4);
//
// sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v,&s,&w);
// p += nc;
// q += nc;
// r += nc;
// s += nc;
// t += - nocc + nc;
// u += - nocc + nc;
// v += - nocc + nc;
// w += - nocc + nc;
//
// //printf ("p=%d, q=%d, r=%d, t=%d, u=%d, v=%d, s=%d, w=%d",p,q,r,t,u,v,s,w);
//
// for (itmp = 0; itmp < nocc+nvir; itmp++)
// det_str[itmp] = Refdet[itmp];
// det_str[p] = 2;
// det_str[q] = 2;
// det_str[r] = 2;
// det_str[t+nocc] = 1;
// det_str[u+nocc] = 1;
// det_str[v+nocc] = 1;
//
// if (p != s && q != s && r != s) det_str[s] = 1;
// else det_str[s] = 0;
// if (t != w && u != w && v != w) det_str[w+nocc] = 2;
// else det_str[w+nocc] = 3;
// parity = parity_ab_str(det_str, nocc+nvir);
// parity *= parity_ci_to_cc(p+q+r, 3, nocc);
// parity *= parity_ci_to_cc(s, 1, nocc);
//
// // interm norm of c4
// t4 = parity * t4 / c0;
//
// // extract t4
// t4-= t1xt3aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aaa, t3aab); // may have 1e-5 bug
// t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab); // may have 1e-3 bug
// t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab); // may have 1e-5 bug
// t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have 1e-6 bug
//
// for (it=0; it< nocc; it++){
// for (jt=0; jt< nocc; jt++){
// for (at=0; at< nvir; at++){
// for (bt=0; bt< nvir; bt++){
// tmp[it][jt][at][bt] = 0.0;
// }
// }
// }
// }
//
// tmp[r][s][v][w] += (e2ovov[De(p,t,q,u,nocc,nvir)]-e2ovov[De(p,u,q,t,nocc,nvir)]) * t4;
// tmp[q][s][v][w] -= (e2ovov[De(p,t,r,u,nocc,nvir)]-e2ovov[De(p,u,r,t,nocc,nvir)]) * t4;
// tmp[p][s][v][w] += (e2ovov[De(q,t,s,u,nocc,nvir)]-e2ovov[De(q,u,s,t,nocc,nvir)]) * t4;
// tmp[r][s][u][w] -= (e2ovov[De(p,t,q,v,nocc,nvir)]-e2ovov[De(p,v,q,t,nocc,nvir)]) * t4;
// tmp[q][s][u][w] += (e2ovov[De(p,t,r,v,nocc,nvir)]-e2ovov[De(p,v,r,t,nocc,nvir)]) * t4;
// tmp[p][s][u][w] -= (e2ovov[De(q,t,s,v,nocc,nvir)]-e2ovov[De(q,v,s,t,nocc,nvir)]) * t4;
// tmp[r][s][t][w] += (e2ovov[De(p,u,q,v,nocc,nvir)]-e2ovov[De(p,v,q,u,nocc,nvir)]) * t4;
// tmp[q][s][t][w] -= (e2ovov[De(p,u,r,v,nocc,nvir)]-e2ovov[De(p,v,r,u,nocc,nvir)]) * t4;
// tmp[p][s][t][w] += (e2ovov[De(q,u,s,v,nocc,nvir)]-e2ovov[De(q,v,s,u,nocc,nvir)]) * t4;
//
// for (it = 0; it < nocc; it++)
// for (jt = 0; jt < nocc; jt++)
// for (at = 0; at < nvir; at++)
// for (bt = 0; bt < nvir; bt++)
// t2t4c_priv[D(it,jt,at,bt,nocc,nvir)] += tmp[it][jt][at][bt] + tmp[jt][it][bt][at];
//
// }
// }
//
// for (it=0; it< nocc; it++){
// for (jt=0; jt< nocc; jt++){
// for (at=0; at< nvir; at++){
// free(tmp[it][jt][at]);
// }
// free(tmp[it][jt]);
// }
// free(tmp[it]);
// }
// free(tmp);
//#pragma omp critical
// {
// for (it=0; it< nocc; it++){
// for (jt=0; jt< nocc; jt++){
// for (at=0; at< nvir; at++){
// for (bt=0; bt< nvir; bt++){
// t2t4c[D(it,jt,at,bt,nocc,nvir)] += t2t4c_priv[D(it,jt,at,bt,nocc,nvir)];
// }
// }
// }
// }
// free(t2t4c_priv);
// }
//}
//
// fclose(fp);
// }
// else
// {
// // error message
// }
//
// printf ("0SDTQ (Q) = %f ( %f )\n", norm, norm-norm0SDT);
//
//}
//
void t2t4c_shci_omp(double *t2t4c, double *t1, double *t2aa, double *t2ab, double *t3aaa, double *t3aab, double *e2ovov, const int nc, const int num_det, const int nocc, const int nvir, const double numzero, const double c0, double norm)
{
//double numzero = 1e-7;
double norm0SDT = norm;
const int t2size = nocc*nocc*nvir*nvir;
// shared(t1, t2aa, t2ab, t3aaa, t3aab, e2ovov, nc, num_det, nocc, nvir, numzero, c0, t2t4c)
#pragma omp parallel default(none) \
shared(t1, t2aa, t2ab, t3aaa, t3aab, e2ovov, t2t4c, norm)
{
double t4, parity, scale;
int p, q, r, s, t, u, v, w, itmp, it, jt, at, bt, ifile;
char typ[4], line[255];
uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
for (itmp = 0; itmp < nocc+nvir; itmp++){
if (itmp<nocc) Refdet[itmp] = 3;
else Refdet[itmp] = 0;
}
double ****tmp;
tmp = (double ****)malloc(sizeof(double ***) * nocc);
for (it=0; it< nocc; it++){
tmp[it] = (double ***)malloc(sizeof(double **) * nocc);
for (jt=0; jt< nocc; jt++){
tmp[it][jt] = (double **)malloc(sizeof(double *) * nvir);
for (at=0; at< nvir; at++){
tmp[it][jt][at] = (double *)malloc(sizeof(double) * nvir);
}
}
}
double *t2t4c_priv;
t2t4c_priv = (double *)malloc(sizeof(double) * t2size);
for (it=0; it< t2size; it++){
t2t4c_priv[it] = 0.0;
}
//lsh test
//printf ("num_threads = %d\n",omp_get_num_threads());
int i;
#pragma omp for reduction(+ : norm)
for (i=0; i<omp_get_num_threads(); i++){
char s0[20]="t4.";
char s1[4];
sprintf(s1, "%d", i);
char* filename = strcat(s0,s1);
FILE *fp = fopen(filename, "r");
//printf ("filename = %s\n",filename);
if (fp) {
while ( !feof(fp) ){
fscanf(fp, "%c%c%c%c,%s\n", &(typ[0]), &(typ[1]), &(typ[2]), &(typ[3]), line);
fscanf(fp, "%lf\n", &t4);
//lsh test
//printf ("typ=%c%c%c%c line=%s\n",typ[0],typ[1],typ[2],typ[3], line);
if (strncmp(typ, "aabb", 4) == 0 && fabs(t4) > numzero){
norm += t4*t4;
sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&s,&v,&w);
p += nc;
q += nc;
r += nc;
s += nc;
t += - nocc + nc;
u += - nocc + nc;
v += - nocc + nc;
w += - nocc + nc;
// if(!(p == 2 && q == 3 && t == 0 && u == 1 && \
// r == 2 && s == 3 && v == 0 && w == 1)) continue;
for (itmp = 0; itmp < nocc+nvir; itmp++)
det_str[itmp] = Refdet[itmp];
det_str[p] = 2;
det_str[q] = 2;
det_str[t+nocc] = 1;
det_str[u+nocc] = 1;
if (p != r && q != r) det_str[r] = 1;
else det_str[r] = 0;
if (p != s && q != s) det_str[s] = 1;
else det_str[s] = 0;
if (t != v && u != v) det_str[v+nocc] = 2;
else det_str[v+nocc] = 3;
if (t != w && u != w) det_str[w+nocc] = 2;
else det_str[w+nocc] = 3;
//parity = parity_ab_str(det_str, nocc+nvir);
parity = parity_ci_to_cc(p+q, 2, nocc);
parity *= parity_ci_to_cc(r+s, 2, nocc);
// interm norm of c4
t4 = parity * t4 / c0;
// lsh test
// printf("c4 mem %20.10f \n",t4);
// extract t4
t4-= t1xt3aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aab);
t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab);
t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab);
t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have bug
// lsh test
// printf("t4 mem %20.10f \n",t4);
for (it=0; it< nocc; it++){
for (jt=0; jt< nocc; jt++){
for (at=0; at< nvir; at++){
for (bt=0; bt< nvir; bt++){
tmp[it][jt][at][bt] = 0.0;
}
}
}
}
// printf("eris_ovov mem %20.10f \n",e2ovov[De(p,t,r,v,nocc,nvir)]);
if (p<r && t<v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
if (q<r && t<v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
if (p<s && t<v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
if (q<s && t<v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
if (p<r && u<v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
if (q<r && u<v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
if (p<s && u<v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
if (q<s && u<v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
if (p<r && t<w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
if (q<r && t<w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
if (p<s && t<w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
if (q<s && t<w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
if (p<r && u<w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
if (q<r && u<w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
if (p<s && u<w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
if (q<s && u<w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
if (p<r && v<t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
if (q<r && v<t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
if (p<s && v<t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
if (q<s && v<t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
if (p<r && v<u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
if (q<r && v<u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
if (p<s && v<u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
if (q<s && v<u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
if (p<r && w<t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
if (q<r && w<t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
if (p<s && w<t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
if (q<s && w<t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
if (p<r && w<u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
if (q<r && w<u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
if (p<s && w<u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
if (q<s && w<u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
scale = 0.5;
if (p==r && t<v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q==r && t<v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p==s && t<v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q==s && t<v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p==r && u<v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q==r && u<v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p==s && u<v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q==s && u<v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p==r && t<w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q==r && t<w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p==s && t<w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q==s && t<w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p==r && u<w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q==r && u<w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p==s && u<w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q==s && u<w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
if (p==r && v<t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q==r && v<t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p==s && v<t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q==s && v<t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p==r && v<u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q==r && v<u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p==s && v<u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q==s && v<u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p==r && w<t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q==r && w<t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p==s && w<t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q==s && w<t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p==r && w<u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q==r && w<u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p==s && w<u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q==s && w<u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
scale = 0.5;
if (p<r && t==v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q<r && t==v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p<s && t==v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q<s && t==v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p<r && u==v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q<r && u==v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p<s && u==v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q<s && u==v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p<r && t==w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q<r && t==w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p<s && t==w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q<s && t==w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p<r && u==w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q<r && u==w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p<s && u==w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q<s && u==w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
if (p<r && v==t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q<r && v==t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p<s && v==t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q<s && v==t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p<r && v==u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q<r && v==u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p<s && v==u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q<s && v==u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p<r && w==t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q<r && w==t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p<s && w==t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q<s && w==t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p<r && w==u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q<r && w==u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p<s && w==u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q<s && w==u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
scale = 0.25;
if (p==r && t==v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q==r && t==v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p==s && t==v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q==s && t==v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p==r && u==v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q==r && u==v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p==s && u==v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q==s && u==v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p==r && t==w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q==r && t==w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p==s && t==w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q==s && t==w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p==r && u==w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q==r && u==w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p==s && u==w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q==s && u==w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
if (p==r && v==t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
if (q==r && v==t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
if (p==s && v==t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
if (q==s && v==t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
if (p==r && v==u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
if (q==r && v==u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
if (p==s && v==u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
if (q==s && v==u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
if (p==r && w==t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
if (q==r && w==t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
if (p==s && w==t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
if (q==s && w==t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
if (p==r && w==u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
if (q==r && w==u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
if (p==s && w==u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
if (q==s && w==u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
for (it = 0; it < nocc; it++)
for (jt = 0; jt < nocc; jt++)
for (at = 0; at < nvir; at++)
for (bt = 0; bt < nvir; bt++)
t2t4c_priv[D(it,jt,at,bt,nocc,nvir)] += tmp[it][jt][at][bt] + tmp[jt][it][bt][at];
}
else if (strncmp(typ, "aaab", 4) == 0 && fabs(t4) > numzero){
norm += 2.0*t4*t4;
//lsh test
//printf ("typ=%c%c%c%c line=%s c4=%lf\n",typ[0],typ[1],typ[2],typ[3], line, t4);
sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v,&s,&w);
p += nc;
q += nc;
r += nc;
s += nc;
t += - nocc + nc;
u += - nocc + nc;
v += - nocc + nc;
w += - nocc + nc;
//printf ("p=%d, q=%d, r=%d, t=%d, u=%d, v=%d, s=%d, w=%d",p,q,r,t,u,v,s,w);
for (itmp = 0; itmp < nocc+nvir; itmp++)
det_str[itmp] = Refdet[itmp];
det_str[p] = 2;
det_str[q] = 2;
det_str[r] = 2;
det_str[t+nocc] = 1;
det_str[u+nocc] = 1;
det_str[v+nocc] = 1;
if (p != s && q != s && r != s) det_str[s] = 1;
else det_str[s] = 0;
if (t != w && u != w && v != w) det_str[w+nocc] = 2;
else det_str[w+nocc] = 3;
//parity = parity_ab_str(det_str, nocc+nvir);
parity = parity_ci_to_cc(p+q+r, 3, nocc);
parity *= parity_ci_to_cc(s, 1, nocc);
// interm norm of c4
t4 = parity * t4 / c0;
// extract t4
t4-= t1xt3aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aaa, t3aab); // may have 1e-5 bug
t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab); // may have 1e-3 bug
t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab); // may have 1e-5 bug
t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have 1e-6 bug
for (it=0; it< nocc; it++){
for (jt=0; jt< nocc; jt++){
for (at=0; at< nvir; at++){
for (bt=0; bt< nvir; bt++){
tmp[it][jt][at][bt] = 0.0;
}
}
}
}
tmp[r][s][v][w] += (e2ovov[De(p,t,q,u,nocc,nvir)]-e2ovov[De(p,u,q,t,nocc,nvir)]) * t4;
tmp[q][s][v][w] -= (e2ovov[De(p,t,r,u,nocc,nvir)]-e2ovov[De(p,u,r,t,nocc,nvir)]) * t4;
tmp[p][s][v][w] += (e2ovov[De(q,t,s,u,nocc,nvir)]-e2ovov[De(q,u,s,t,nocc,nvir)]) * t4;
tmp[r][s][u][w] -= (e2ovov[De(p,t,q,v,nocc,nvir)]-e2ovov[De(p,v,q,t,nocc,nvir)]) * t4;
tmp[q][s][u][w] += (e2ovov[De(p,t,r,v,nocc,nvir)]-e2ovov[De(p,v,r,t,nocc,nvir)]) * t4;
tmp[p][s][u][w] -= (e2ovov[De(q,t,s,v,nocc,nvir)]-e2ovov[De(q,v,s,t,nocc,nvir)]) * t4;
tmp[r][s][t][w] += (e2ovov[De(p,u,q,v,nocc,nvir)]-e2ovov[De(p,v,q,u,nocc,nvir)]) * t4;
tmp[q][s][t][w] -= (e2ovov[De(p,u,r,v,nocc,nvir)]-e2ovov[De(p,v,r,u,nocc,nvir)]) * t4;
tmp[p][s][t][w] += (e2ovov[De(q,u,s,v,nocc,nvir)]-e2ovov[De(q,v,s,u,nocc,nvir)]) * t4;
for (it = 0; it < nocc; it++)
for (jt = 0; jt < nocc; jt++)
for (at = 0; at < nvir; at++)
for (bt = 0; bt < nvir; bt++)
t2t4c_priv[D(it,jt,at,bt,nocc,nvir)] += tmp[it][jt][at][bt] + tmp[jt][it][bt][at];
}
}
fclose(fp);
}
}
for (it=0; it< nocc; it++){
for (jt=0; jt< nocc; jt++){
for (at=0; at< nvir; at++){
free(tmp[it][jt][at]);
}
free(tmp[it][jt]);
}
free(tmp[it]);
}
free(tmp);
#pragma omp critical
{
for (it=0; it< nocc; it++){
for (jt=0; jt< nocc; jt++){
for (at=0; at< nvir; at++){
for (bt=0; bt< nvir; bt++){
t2t4c[D(it,jt,at,bt,nocc,nvir)] += t2t4c_priv[D(it,jt,at,bt,nocc,nvir)];
}
}
}
}
free(t2t4c_priv);
}
}
printf ("0SDTQ (Q) = %f ( %f )\n", norm, norm-norm0SDT);
}
//void t2t4c_shci_omp_otf_old(double *t2t4c, double *t1, double *t2aa, double *t2ab, double *c3aaa, double *c3aab, double *e2ovov, const int nc, const int num_det, const int nocc, const int nvir, const double numzero, const double c0, double norm)
//{
// //double numzero = 1e-7;
//
// const int nocc2 = (int) nocc*(nocc-1)/2;
// const int nocc3 = (int) nocc*(nocc-1)*(nocc-2)/6;
// double norm0SDT = norm;
//
// const int t2size = nocc*nocc*nvir*nvir;
//
//// shared(t1, t2aa, t2ab, c3aaa, c3aab, e2ovov, nc, num_det, nocc, nvir, numzero, c0, t2t4c)
//#pragma omp parallel default(none) \
// shared(t1, t2aa, t2ab, c3aaa, c3aab, e2ovov, t2t4c, norm)
//{
// double t4, parity, scale;
// int p, q, r, s, t, u, v, w, itmp, it, jt, at, bt, ifile;
// char typ[4], line[255];
// uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
// for (itmp = 0; itmp < nocc+nvir; itmp++){
// if (itmp<nocc) Refdet[itmp] = 3;
// else Refdet[itmp] = 0;
// }
// double ****tmp;
// tmp = (double ****)malloc(sizeof(double ***) * nocc);
// for (it=0; it< nocc; it++){
// tmp[it] = (double ***)malloc(sizeof(double **) * nocc);
// for (jt=0; jt< nocc; jt++){
// tmp[it][jt] = (double **)malloc(sizeof(double *) * nvir);
// for (at=0; at< nvir; at++){
// tmp[it][jt][at] = (double *)malloc(sizeof(double) * nvir);
// }
// }
// }
//
// double *t2t4c_priv;
// t2t4c_priv = (double *)malloc(sizeof(double) * t2size);
// for (it=0; it< t2size; it++){
// t2t4c_priv[it] = 0.0;
// }
// //lsh test
// //printf ("num_threads = %d\n",omp_get_num_threads());
//
// int i;
//#pragma omp for reduction(+ : norm)
// for (i=0; i<omp_get_num_threads(); i++){
// char s0[20]="t4.";
// char s1[4];
// sprintf(s1, "%d", i);
// char* filename = strcat(s0,s1);
// FILE *fp = fopen(filename, "r");
// //printf ("filename = %s\n",filename);
//
// if (fp) {
// while ( !feof(fp) ){
//
// fscanf(fp, "%c%c%c%c,%s\n", &(typ[0]), &(typ[1]), &(typ[2]), &(typ[3]), line);
// fscanf(fp, "%lf\n", &t4);
// //lsh test
// //printf ("typ=%c%c%c%c line=%s\n",typ[0],typ[1],typ[2],typ[3], line);
// if (strncmp(typ, "aabb", 4) == 0 && fabs(t4) > numzero){
// norm += t4*t4;
// sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&s,&v,&w);
// p += nc;
// q += nc;
// r += nc;
// s += nc;
// t += - nocc + nc;
// u += - nocc + nc;
// v += - nocc + nc;
// w += - nocc + nc;
//
//// if(!(p == 2 && q == 3 && t == 0 && u == 1 && \
//// r == 2 && s == 3 && v == 0 && w == 1)) continue;
//
// for (itmp = 0; itmp < nocc+nvir; itmp++)
// det_str[itmp] = Refdet[itmp];
//
// det_str[p] = 2;
// det_str[q] = 2;
// det_str[t+nocc] = 1;
// det_str[u+nocc] = 1;
//
// if (p != r && q != r) det_str[r] = 1;
// else det_str[r] = 0;
// if (p != s && q != s) det_str[s] = 1;
// else det_str[s] = 0;
// if (t != v && u != v) det_str[v+nocc] = 2;
// else det_str[v+nocc] = 3;
// if (t != w && u != w) det_str[w+nocc] = 2;
// else det_str[w+nocc] = 3;
//
// parity = parity_ab_str(det_str, nocc+nvir);
// parity *= parity_ci_to_cc(p+q, 2, nocc);
// parity *= parity_ci_to_cc(r+s, 2, nocc);
//
// // interm norm of c4
// t4 = parity * t4 / c0;
// // lsh test
//// printf("c4 mem %20.10f \n",t4);
//
// // extract t4
// t4-= t1xc3aabb(p, q, r, s, t, u, v, w, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
// t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab);
// t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab);
// t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have bug
//
// // lsh test
//// printf("t4 mem %20.10f \n",t4);
//
// for (it=0; it< nocc; it++)
// for (jt=0; jt< nocc; jt++)
// for (at=0; at< nvir; at++)
// for (bt=0; bt< nvir; bt++)
// tmp[it][jt][at][bt] = 0.0;
//// printf("eris_ovov mem %20.10f \n",e2ovov[De(p,t,r,v,nocc,nvir)]);
//
// if (p<r && t<v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
// if (q<r && t<v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
// if (p<s && t<v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
// if (q<s && t<v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
// if (p<r && u<v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
// if (q<r && u<v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
// if (p<s && u<v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
// if (q<s && u<v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
// if (p<r && t<w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
// if (q<r && t<w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
// if (p<s && t<w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
// if (q<s && t<w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
// if (p<r && u<w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
// if (q<r && u<w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
// if (p<s && u<w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
// if (q<s && u<w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
// if (p<r && v<t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
// if (q<r && v<t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
// if (p<s && v<t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
// if (q<s && v<t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
// if (p<r && v<u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
// if (q<r && v<u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
// if (p<s && v<u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
// if (q<s && v<u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
// if (p<r && w<t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
// if (q<r && w<t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
// if (p<s && w<t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
// if (q<s && w<t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
// if (p<r && w<u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
// if (q<r && w<u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
// if (p<s && w<u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
// if (q<s && w<u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
//
// scale = 0.5;
// if (p==r && t<v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && t<v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && t<v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && t<v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && u<v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && u<v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && u<v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && u<v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && t<w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && t<w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && t<w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && t<w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && u<w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && u<w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && u<w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && u<w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && v<t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v<t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v<t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v<t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && v<u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v<u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v<u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v<u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && w<t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w<t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w<t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w<t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && w<u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w<u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w<u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w<u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
//
// scale = 0.5;
// if (p<r && t==v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && t==v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && t==v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && t==v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && u==v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && u==v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && u==v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && u==v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && t==w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && t==w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && t==w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && t==w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p<r && u==w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && u==w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && u==w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && u==w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
// if (p<r && v==t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && v==t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && v==t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && v==t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && v==u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q<r && v==u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p<s && v==u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q<s && v==u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p<r && w==t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && w==t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && w==t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && w==t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p<r && w==u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q<r && w==u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p<s && w==u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q<s && w==u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
//
// scale = 0.25;
// if (p==r && t==v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && t==v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && t==v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && t==v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && u==v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && u==v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && u==v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && u==v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && t==w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && t==w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && t==w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && t==w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && u==w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && u==w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && u==w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && u==w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && v==t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v==t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v==t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v==t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && v==u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
// if (q==r && v==u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
// if (p==s && v==u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
// if (q==s && v==u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
// if (p==r && w==t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w==t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w==t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w==t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
// if (p==r && w==u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
// if (q==r && w==u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
// if (p==s && w==u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
// if (q==s && w==u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
//
// for (it = 0; it < nocc; it++)
// for (jt = 0; jt < nocc; jt++)
// for (at = 0; at < nvir; at++)
// for (bt = 0; bt < nvir; bt++)
// t2t4c_priv[D(it,jt,at,bt,nocc,nvir)] += tmp[it][jt][at][bt] + tmp[jt][it][bt][at];
//
// }
// else if (strncmp(typ, "aaab", 4) == 0 && fabs(t4) > numzero){
// norm += 2.0*t4*t4;
// //lsh test
// //printf ("typ=%c%c%c%c line=%s c4=%lf\n",typ[0],typ[1],typ[2],typ[3], line, t4);
//
// sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v,&s,&w);
// p += nc;
// q += nc;
// r += nc;
// s += nc;
// t += - nocc + nc;
// u += - nocc + nc;
// v += - nocc + nc;
// w += - nocc + nc;
//
// //printf ("p=%d, q=%d, r=%d, t=%d, u=%d, v=%d, s=%d, w=%d",p,q,r,t,u,v,s,w);
//
// for (itmp = 0; itmp < nocc+nvir; itmp++)
// det_str[itmp] = Refdet[itmp];
// det_str[p] = 2;
// det_str[q] = 2;
// det_str[r] = 2;
// det_str[t+nocc] = 1;
// det_str[u+nocc] = 1;
// det_str[v+nocc] = 1;
//
// if (p != s && q != s && r != s) det_str[s] = 1;
// else det_str[s] = 0;
// if (t != w && u != w && v != w) det_str[w+nocc] = 2;
// else det_str[w+nocc] = 3;
// parity = parity_ab_str(det_str, nocc+nvir);
// parity *= parity_ci_to_cc(p+q+r, 3, nocc);
// parity *= parity_ci_to_cc(s, 1, nocc);
//
// // interm norm of c4
// t4 = parity * t4 / c0;
//
// // extract t4
// t4-= t1xc3aaab (p, q, r, s, t, u, v, w, nocc, nocc2, nocc3, nvir, t1, t2aa, t2ab, c3aaa, c3aab, c0);
// t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab); // may have 1e-3 bug
// t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab); // may have 1e-5 bug
// t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have 1e-6 bug
//
// for (it=0; it< nocc; it++){
// for (jt=0; jt< nocc; jt++){
// for (at=0; at< nvir; at++){
// for (bt=0; bt< nvir; bt++){
// tmp[it][jt][at][bt] = 0.0;
// }
// }
// }
// }
//
// tmp[r][s][v][w] += (e2ovov[De(p,t,q,u,nocc,nvir)]-e2ovov[De(p,u,q,t,nocc,nvir)]) * t4;
// tmp[q][s][v][w] -= (e2ovov[De(p,t,r,u,nocc,nvir)]-e2ovov[De(p,u,r,t,nocc,nvir)]) * t4;
// tmp[p][s][v][w] += (e2ovov[De(q,t,s,u,nocc,nvir)]-e2ovov[De(q,u,s,t,nocc,nvir)]) * t4;
// tmp[r][s][u][w] -= (e2ovov[De(p,t,q,v,nocc,nvir)]-e2ovov[De(p,v,q,t,nocc,nvir)]) * t4;
// tmp[q][s][u][w] += (e2ovov[De(p,t,r,v,nocc,nvir)]-e2ovov[De(p,v,r,t,nocc,nvir)]) * t4;
// tmp[p][s][u][w] -= (e2ovov[De(q,t,s,v,nocc,nvir)]-e2ovov[De(q,v,s,t,nocc,nvir)]) * t4;
// tmp[r][s][t][w] += (e2ovov[De(p,u,q,v,nocc,nvir)]-e2ovov[De(p,v,q,u,nocc,nvir)]) * t4;
// tmp[q][s][t][w] -= (e2ovov[De(p,u,r,v,nocc,nvir)]-e2ovov[De(p,v,r,u,nocc,nvir)]) * t4;
// tmp[p][s][t][w] += (e2ovov[De(q,u,s,v,nocc,nvir)]-e2ovov[De(q,v,s,u,nocc,nvir)]) * t4;
//
// for (it = 0; it < nocc; it++)
// for (jt = 0; jt < nocc; jt++)
// for (at = 0; at < nvir; at++)
// for (bt = 0; bt < nvir; bt++)
// t2t4c_priv[D(it,jt,at,bt,nocc,nvir)] += tmp[it][jt][at][bt] + tmp[jt][it][bt][at];
//
// }
//
// }
// fclose(fp);
// }
// }
//
// for (it=0; it< nocc; it++){
// for (jt=0; jt< nocc; jt++){
// for (at=0; at< nvir; at++){
// free(tmp[it][jt][at]);
// }
// free(tmp[it][jt]);
// }
// free(tmp[it]);
// }
// free(tmp);
//#pragma omp critical
// {
// for (it=0; it< nocc; it++){
// for (jt=0; jt< nocc; jt++){
// for (at=0; at< nvir; at++){
// for (bt=0; bt< nvir; bt++){
// t2t4c[D(it,jt,at,bt,nocc,nvir)] += t2t4c_priv[D(it,jt,at,bt,nocc,nvir)];
// }
// }
// }
// }
// free(t2t4c_priv);
// }
//}
//
//
// printf ("0SDTQ (Q) = %f ( %f )\n", norm, norm-norm0SDT);
//
//}
/*
 * t2t4c_shci_omp_otf
 * ------------------
 * Accumulate a quadruples (c4/t4) contribution into the doubles-sized array
 * t2t4c, reading SHCI CI coefficients "on the fly" (otf) from per-thread
 * files named "t4.<thread-id>".  Each OpenMP thread opens its own file,
 * converts each stored c4 coefficient to a connected t4 amplitude
 * (intermediate normalization by c0, then subtraction of the disconnected
 * t1*c3, t2*t2, t1*t1*t2, t1*t1*t1*t1 pieces), and contracts it with the
 * antisymmetrized e2ovov integrals into a thread-private buffer, which is
 * symmetrized (ij,ab <-> ji,ba) into the shared t2t4c under a critical
 * section.  `norm` accumulates the summed squared CI coefficients via an
 * OpenMP reduction and the 0SDTQ norm is printed at the end.
 *
 * Parameters (all index conventions as elsewhere in this file):
 *   t2t4c   [out] nocc*nocc*nvir*nvir array, incremented (not zeroed here).
 *   t1, t2aa, t2ab, c3aaa, c3aab [in] CC amplitudes / CI coefficients used
 *           to remove disconnected contributions from c4.
 *   e2ovov  [in] two-electron integrals addressed through the De() macro.
 *   nc      [in] number of frozen-core orbitals (index offset in the files).
 *   num_det [in] unused in this routine (kept for interface uniformity).
 *   nocc, nvir [in] occupied / virtual orbital counts.
 *   numzero [in] screening threshold on |c4|.
 *   c0      [in] reference CI coefficient (intermediate normalization).
 *   norm    [in] running CI norm; updated locally and printed, but passed
 *           by value so the caller's copy is NOT updated.
 */
void t2t4c_shci_omp_otf(double *t2t4c, double *t1, double *t2aa, double *t2ab, double *c3aaa, double *c3aab, double *e2ovov, const int nc, const int num_det, const int nocc, const int nvir, const double numzero, const double c0, double norm)
{
//double numzero = 1e-7;
// pair/triple counts used by the aaab disconnected-term helpers
const int nocc2 = (int) nocc*(nocc-1)/2;
const int nocc3 = (int) nocc*(nocc-1)*(nocc-2)/6;
double norm0SDT = norm;
const int t2size = nocc*nocc*nvir*nvir;
// shared(t1, t2aa, t2ab, c3aaa, c3aab, e2ovov, nc, num_det, nocc, nvir, numzero, c0, t2t4c)
// NOTE(review): default(none) relies on the const-qualified scalars
// (nc, nocc, nvir, numzero, c0, t2size, ...) being predetermined shared;
// that is OpenMP <=3.1 behavior -- confirm against the compiler's OpenMP level.
#pragma omp parallel default(none) \
shared(t1, t2aa, t2ab, c3aaa, c3aab, e2ovov, t2t4c, norm)
{
double t4, parity, scale;
int p, q, r, s, t, u, v, w, itmp, it, jt, at, bt, ifile;
char typ[4], line[255];
// det_str encodes per-orbital occupation; Refdet is the reference with
// occupied orbitals set to 3 and virtuals to 0 (presumably a 2-bit
// alpha/beta occupation code: 3 = both spins, 2/1 = single spin, 0 = empty
// -- TODO confirm against parity_ab_str).
uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
for (itmp = 0; itmp < nocc+nvir; itmp++){
if (itmp<nocc) Refdet[itmp] = 3;
else Refdet[itmp] = 0;
}
// thread-private accumulation buffer, merged under the critical section below
double *t2t4c_priv;
t2t4c_priv = (double *)malloc(sizeof(double) * t2size);
for (it=0; it< t2size; it++){
t2t4c_priv[it] = 0.0;
}
//lsh test
//printf ("num_threads = %d\n",omp_get_num_threads());
int i;
// one "t4.<i>" file per thread index; norm is combined by reduction
#pragma omp for reduction(+ : norm)
for (i=0; i<omp_get_num_threads(); i++){
char s0[20]="t4.";
char s1[4];
// NOTE(review): s1 holds at most 3 digits + NUL, so thread counts >= 1000
// would overflow this buffer.
sprintf(s1, "%d", i);
char* filename = strcat(s0,s1);
FILE *fp = fopen(filename, "r");
//printf ("filename = %s\n",filename);
if (fp) {
// NOTE(review): the while(!feof(fp)) idiom re-enters the loop once after
// the last record (fscanf results are unchecked), which can process the
// final record twice -- verify the file format guarantees this is benign.
while ( !feof(fp) ){
// record format: 4-char spin type, comma, index list; then the coefficient
fscanf(fp, "%c%c%c%c,%s\n", &(typ[0]), &(typ[1]), &(typ[2]), &(typ[3]), line);
fscanf(fp, "%lf\n", &t4);
//lsh test
//printf ("typ=%c%c%c%c line=%s\n",typ[0],typ[1],typ[2],typ[3], line);
if (strncmp(typ, "aabb", 4) == 0 && fabs(t4) > numzero){
norm += t4*t4;
// occupied p,q (alpha) r,s (beta); virtual t,u (alpha) v,w (beta)
sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&s,&v,&w);
// shift file indices: occupied by frozen core, virtual to 0-based
p += nc;
q += nc;
r += nc;
s += nc;
t += - nocc + nc;
u += - nocc + nc;
v += - nocc + nc;
w += - nocc + nc;
// if(!(p == 2 && q == 3 && t == 0 && u == 1 && \
// r == 2 && s == 3 && v == 0 && w == 1)) continue;
// build the excited determinant's occupation string (used only by the
// commented-out parity_ab_str path below)
for (itmp = 0; itmp < nocc+nvir; itmp++)
det_str[itmp] = Refdet[itmp];
det_str[p] = 2;
det_str[q] = 2;
det_str[t+nocc] = 1;
det_str[u+nocc] = 1;
if (p != r && q != r) det_str[r] = 1;
else det_str[r] = 0;
if (p != s && q != s) det_str[s] = 1;
else det_str[s] = 0;
if (t != v && u != v) det_str[v+nocc] = 2;
else det_str[v+nocc] = 3;
if (t != w && u != w) det_str[w+nocc] = 2;
else det_str[w+nocc] = 3;
//parity = parity_ab_str(det_str, nocc+nvir);
// sign change from CI (determinant) to CC (amplitude) ordering
parity = parity_ci_to_cc(p+q, 2, nocc);
parity *= parity_ci_to_cc(r+s, 2, nocc);
// interm norm of c4
t4 = parity * t4 / c0;
// lsh test
// printf("c4 mem %20.10f \n",t4);
// extract t4: subtract disconnected products to leave the connected part
t4-= t1xc3aabb(p, q, r, s, t, u, v, w, nocc, nocc2, nvir, t1, t2aa, t2ab, c3aab, c0);
t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab);
t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab);
t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have bug
// contract t4 with e2ovov over all 16 index permutations with
// alternating signs (antisymmetrization over p<->q, r<->s, t<->u, v<->w)
t2t4c_priv[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
t2t4c_priv[D(q,r,u,w,nocc,nvir)] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
t2t4c_priv[D(p,r,u,w,nocc,nvir)] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
t2t4c_priv[D(q,r,t,w,nocc,nvir)] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
t2t4c_priv[D(p,r,t,w,nocc,nvir)] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,u,v,nocc,nvir)] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,u,v,nocc,nvir)] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,t,v,nocc,nvir)] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,t,v,nocc,nvir)] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
t2t4c_priv[D(q,r,u,v,nocc,nvir)] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
t2t4c_priv[D(p,r,u,v,nocc,nvir)] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
t2t4c_priv[D(q,r,t,v,nocc,nvir)] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
t2t4c_priv[D(p,r,t,v,nocc,nvir)] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
}
else if (strncmp(typ, "aaab", 4) == 0 && fabs(t4) > numzero){
// aaab and its spin mirror contribute equally, hence the factor 2
norm += 2.0*t4*t4;
//lsh test
//printf ("typ=%c%c%c%c line=%s c4=%lf\n",typ[0],typ[1],typ[2],typ[3], line, t4);
// occupied p,q,r (alpha) s (beta); virtual t,u,v (alpha) w (beta)
sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v,&s,&w);
p += nc;
q += nc;
r += nc;
s += nc;
t += - nocc + nc;
u += - nocc + nc;
v += - nocc + nc;
w += - nocc + nc;
//printf ("p=%d, q=%d, r=%d, t=%d, u=%d, v=%d, s=%d, w=%d",p,q,r,t,u,v,s,w);
for (itmp = 0; itmp < nocc+nvir; itmp++)
det_str[itmp] = Refdet[itmp];
det_str[p] = 2;
det_str[q] = 2;
det_str[r] = 2;
det_str[t+nocc] = 1;
det_str[u+nocc] = 1;
det_str[v+nocc] = 1;
if (p != s && q != s && r != s) det_str[s] = 1;
else det_str[s] = 0;
if (t != w && u != w && v != w) det_str[w+nocc] = 2;
else det_str[w+nocc] = 3;
//parity = parity_ab_str(det_str, nocc+nvir);
parity = parity_ci_to_cc(p+q+r, 3, nocc);
parity *= parity_ci_to_cc(s, 1, nocc);
// interm norm of c4
t4 = parity * t4 / c0;
// extract t4 (connected part only)
t4-= t1xc3aaab (p, q, r, s, t, u, v, w, nocc, nocc2, nocc3, nvir, t1, t2aa, t2ab, c3aaa, c3aab, c0);
t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab); // may have 1e-3 bug
t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab); // may have 1e-5 bug
t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have 1e-6 bug
// contract over the 36 signed permutations of the three alpha
// occupied/virtual pairs (s,w fixed as the beta pair)
t2t4c_priv[D(r,s,v,w,nocc,nvir)] += e2ovov[De(p,t,q,u,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,v,w,nocc,nvir)] += e2ovov[De(r,t,p,u,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,v,w,nocc,nvir)] += e2ovov[De(q,t,r,u,nocc,nvir)] * t4;
t2t4c_priv[D(r,s,v,w,nocc,nvir)] -= e2ovov[De(q,t,p,u,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,v,w,nocc,nvir)] -= e2ovov[De(p,t,r,u,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,v,w,nocc,nvir)] -= e2ovov[De(r,t,q,u,nocc,nvir)] * t4;
t2t4c_priv[D(r,s,u,w,nocc,nvir)] += e2ovov[De(p,v,q,t,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,u,w,nocc,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,u,w,nocc,nvir)] += e2ovov[De(q,v,r,t,nocc,nvir)] * t4;
t2t4c_priv[D(r,s,u,w,nocc,nvir)] -= e2ovov[De(q,v,p,t,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,u,w,nocc,nvir)] -= e2ovov[De(p,v,r,t,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t4;
t2t4c_priv[D(r,s,t,w,nocc,nvir)] += e2ovov[De(p,u,q,v,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,t,w,nocc,nvir)] += e2ovov[De(r,u,p,v,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,t,w,nocc,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
t2t4c_priv[D(r,s,t,w,nocc,nvir)] -= e2ovov[De(q,u,p,v,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,t,w,nocc,nvir)] -= e2ovov[De(r,u,q,v,nocc,nvir)] * t4;
t2t4c_priv[D(r,s,u,w,nocc,nvir)] -= e2ovov[De(p,t,q,v,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,u,w,nocc,nvir)] -= e2ovov[De(r,t,p,v,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,u,w,nocc,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
t2t4c_priv[D(r,s,u,w,nocc,nvir)] += e2ovov[De(q,t,p,v,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,u,w,nocc,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,u,w,nocc,nvir)] += e2ovov[De(r,t,q,v,nocc,nvir)] * t4;
t2t4c_priv[D(r,s,t,w,nocc,nvir)] -= e2ovov[De(p,v,q,u,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,t,w,nocc,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,t,w,nocc,nvir)] -= e2ovov[De(q,v,r,u,nocc,nvir)] * t4;
t2t4c_priv[D(r,s,t,w,nocc,nvir)] += e2ovov[De(q,v,p,u,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,t,w,nocc,nvir)] += e2ovov[De(p,v,r,u,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,t,w,nocc,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t4;
t2t4c_priv[D(r,s,v,w,nocc,nvir)] -= e2ovov[De(p,u,q,t,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,v,w,nocc,nvir)] -= e2ovov[De(r,u,p,t,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,v,w,nocc,nvir)] -= e2ovov[De(q,u,r,t,nocc,nvir)] * t4;
t2t4c_priv[D(r,s,v,w,nocc,nvir)] += e2ovov[De(q,u,p,t,nocc,nvir)] * t4;
t2t4c_priv[D(q,s,v,w,nocc,nvir)] += e2ovov[De(p,u,r,t,nocc,nvir)] * t4;
t2t4c_priv[D(p,s,v,w,nocc,nvir)] += e2ovov[De(r,u,q,t,nocc,nvir)] * t4;
}
}
fclose(fp);
}
}
// merge the thread-private buffer into t2t4c, symmetrizing (ij,ab)<->(ji,ba);
// serialized because multiple threads update the same shared array
#pragma omp critical
{
for (it=0; it< nocc; it++){
for (jt=0; jt< nocc; jt++){
for (at=0; at< nvir; at++){
for (bt=0; bt< nvir; bt++){
t2t4c[D(it,jt,at,bt,nocc,nvir)] += 0.5*(t2t4c_priv[D(it,jt,at,bt,nocc,nvir)]+t2t4c_priv[D(jt,it,bt,at,nocc,nvir)]);
}
}
}
}
free(t2t4c_priv);
}
}
printf ("0SDTQ (Q) = %f ( %f )\n", norm, norm-norm0SDT);
}
/*
 * t2t4c_dmrg
 * ----------
 * Accumulate a quadruples (c4/t4) contribution into the doubles-sized array
 * t2t4c, reading DMRG CI coefficients serially from "CIcoeff_dmrg.out".
 * Each c4 record is intermediate-normalized by c0, the disconnected pieces
 * (t1*t3, t2*t2, t1*t1*t2, t1^4) are subtracted, and the connected t4 is
 * contracted with the e2ovov integrals into the scratch tensor tmp, which is
 * then symmetrized (ij,ab <-> ji,ba) into t2t4c.
 *
 * Parameters:
 *   t2t4c   [out] nocc*nocc*nvir*nvir array, incremented (not zeroed here).
 *   t1, t2aa, t2ab, t3aaa, t3aab [in] amplitudes used for the disconnected terms.
 *   e2ovov  [in] two-electron integrals addressed through the De() macro.
 *   nc      [in] frozen-core index offset applied to every file index.
 *   nocc, nvir [in] occupied / virtual orbital counts.
 *   numzero [in] screening threshold on |c4|.
 *   c0      [in] reference CI coefficient (intermediate normalization).
 *   norm    [in] running CI norm; updated locally and printed, but passed by
 *           value so the caller's copy is NOT updated.
 *
 * Fixes relative to the previous revision (interface unchanged):
 *   - the header line was read with fscanf BEFORE the NULL check on fp,
 *     dereferencing a NULL FILE* when the input file is missing; the read now
 *     happens only after fopen succeeded.
 *   - the read loop checked only feof(), so the last record could be
 *     processed twice; the loop now stops as soon as fscanf fails to parse a
 *     full record.
 *   - the empty error branch now reports the failed open on stderr.
 */
void t2t4c_dmrg(double *t2t4c, double *t1, double *t2aa, double *t2ab, double *t3aaa, double *t3aab, double *e2ovov, const int nc, const int nocc, const int nvir, const double numzero, const double c0, double norm)
{
    int p, q, r, s, t, u, v, w, it, jt, at, bt;
    double t4, scale;
    double norm0SDT = norm;

    /* per-record scratch tensor tmp[nocc][nocc][nvir][nvir] */
    double ****tmp;
    tmp = (double ****)malloc(sizeof(double ***) * nocc);
    for (it=0; it< nocc; it++){
        tmp[it] = (double ***)malloc(sizeof(double **) * nocc);
        for (jt=0; jt< nocc; jt++){
            tmp[it][jt] = (double **)malloc(sizeof(double *) * nvir);
            for (at=0; at< nvir; at++){
                tmp[it][jt][at] = (double *)malloc(sizeof(double) * nvir);
            }
        }
    }

    FILE *fp;
    char typ[4], line[255];
    fp = fopen("CIcoeff_dmrg.out", "r");
    if (fp) {
        /* skip the one-line header; previously this fscanf ran before the
         * NULL check above and crashed when the file was absent */
        fscanf(fp, "%s\n", line);
        /* record format: 4-char spin type, comma, index list; then the
         * coefficient on the next line.  Stop on the first short read so the
         * final record is not processed twice (the old !feof(fp) loop bug). */
        while ( fscanf(fp, "%c%c%c%c,%s\n", &(typ[0]), &(typ[1]), &(typ[2]), &(typ[3]), line) == 5
                && fscanf(fp, "%lf\n", &t4) == 1 ){
            if (strncmp(typ, "aabb", 4) == 0 && fabs(t4) > numzero){
                norm += t4*t4;
                /* occupied p,q (alpha) r,s (beta); virtual t,u (alpha) v,w (beta) */
                sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&s,&v,&w);
                p += nc;
                q += nc;
                r += nc;
                s += nc;
                t += nc;
                u += nc;
                v += nc;
                w += nc;
                /* interm norm of c4 */
                t4 = t4 / c0;
                /* extract the connected t4: subtract disconnected products */
                t4-= t1xt3aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aab);
                t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab);
                t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab);
                t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have bug
                for (it=0; it< nocc; it++){
                    for (jt=0; jt< nocc; jt++){
                        for (at=0; at< nvir; at++){
                            for (bt=0; bt< nvir; bt++){
                                tmp[it][jt][at][bt] = 0.0;
                            }
                        }
                    }
                }
                /* signed permutations, distinct indices (full weight) */
                if (p<r && t<v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
                if (q<r && t<v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
                if (p<s && t<v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
                if (q<s && t<v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
                if (p<r && u<v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
                if (q<r && u<v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
                if (p<s && u<v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
                if (q<s && u<v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
                if (p<r && t<w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
                if (q<r && t<w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
                if (p<s && t<w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
                if (q<s && t<w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
                if (p<r && u<w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
                if (q<r && u<w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
                if (p<s && u<w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
                if (q<s && u<w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
                if (p<r && v<t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4;
                if (q<r && v<t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4;
                if (p<s && v<t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4;
                if (q<s && v<t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4;
                if (p<r && v<u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4;
                if (q<r && v<u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4;
                if (p<s && v<u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4;
                if (q<s && v<u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4;
                if (p<r && w<t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4;
                if (q<r && w<t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4;
                if (p<s && w<t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4;
                if (q<s && w<t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4;
                if (p<r && w<u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4;
                if (q<r && w<u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4;
                if (p<s && w<u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4;
                if (q<s && w<u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4;
                /* coinciding occupied indices: half weight */
                scale = 0.5;
                if (p==r && t<v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
                if (q==r && t<v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
                if (p==s && t<v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
                if (q==s && t<v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
                if (p==r && u<v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
                if (q==r && u<v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
                if (p==s && u<v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
                if (q==s && u<v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
                if (p==r && t<w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
                if (q==r && t<w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
                if (p==s && t<w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
                if (q==s && t<w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
                if (p==r && u<w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
                if (q==r && u<w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
                if (p==s && u<w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
                if (q==s && u<w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
                if (p==r && v<t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
                if (q==r && v<t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
                if (p==s && v<t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
                if (q==s && v<t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
                if (p==r && v<u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
                if (q==r && v<u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
                if (p==s && v<u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
                if (q==s && v<u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
                if (p==r && w<t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
                if (q==r && w<t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
                if (p==s && w<t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
                if (q==s && w<t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
                if (p==r && w<u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
                if (q==r && w<u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
                if (p==s && w<u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
                if (q==s && w<u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
                /* coinciding virtual indices: half weight */
                scale = 0.5;
                if (p<r && t==v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
                if (q<r && t==v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
                if (p<s && t==v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
                if (q<s && t==v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
                if (p<r && u==v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
                if (q<r && u==v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
                if (p<s && u==v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
                if (q<s && u==v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
                if (p<r && t==w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
                if (q<r && t==w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
                if (p<s && t==w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
                if (q<s && t==w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
                if (p<r && u==w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
                if (q<r && u==w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
                if (p<s && u==w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
                if (q<s && u==w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
                if (p<r && v==t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
                if (q<r && v==t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
                if (p<s && v==t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
                if (q<s && v==t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
                if (p<r && v==u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
                if (q<r && v==u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
                if (p<s && v==u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
                if (q<s && v==u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
                if (p<r && w==t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
                if (q<r && w==t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
                if (p<s && w==t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
                if (q<s && w==t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
                if (p<r && w==u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
                if (q<r && w==u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
                if (p<s && w==u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
                if (q<s && w==u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
                /* both occupied and virtual indices coincide: quarter weight */
                scale = 0.25;
                if (p==r && t==v) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
                if (q==r && t==v) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
                if (p==s && t==v) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
                if (q==s && t==v) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
                if (p==r && u==v) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
                if (q==r && u==v) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
                if (p==s && u==v) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
                if (q==s && u==v) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
                if (p==r && t==w) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
                if (q==r && t==w) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
                if (p==s && t==w) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
                if (q==s && t==w) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
                if (p==r && u==w) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
                if (q==r && u==w) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
                if (p==s && u==w) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
                if (q==s && u==w) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
                if (p==r && v==t) tmp[q][s][u][w] += e2ovov[De(p,t,r,v,nocc,nvir)] * t4 * scale;
                if (q==r && v==t) tmp[p][s][u][w] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t4 * scale;
                if (p==s && v==t) tmp[q][r][u][w] -= e2ovov[De(p,t,s,v,nocc,nvir)] * t4 * scale;
                if (q==s && v==t) tmp[p][r][u][w] += e2ovov[De(q,t,s,v,nocc,nvir)] * t4 * scale;
                if (p==r && v==u) tmp[q][s][t][w] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t4 * scale;
                if (q==r && v==u) tmp[p][s][t][w] += e2ovov[De(q,u,r,v,nocc,nvir)] * t4 * scale;
                if (p==s && v==u) tmp[q][r][t][w] += e2ovov[De(p,u,s,v,nocc,nvir)] * t4 * scale;
                if (q==s && v==u) tmp[p][r][t][w] -= e2ovov[De(q,u,s,v,nocc,nvir)] * t4 * scale;
                if (p==r && w==t) tmp[q][s][u][v] -= e2ovov[De(p,t,r,w,nocc,nvir)] * t4 * scale;
                if (q==r && w==t) tmp[p][s][u][v] += e2ovov[De(q,t,r,w,nocc,nvir)] * t4 * scale;
                if (p==s && w==t) tmp[q][r][u][v] += e2ovov[De(p,t,s,w,nocc,nvir)] * t4 * scale;
                if (q==s && w==t) tmp[p][r][u][v] -= e2ovov[De(q,t,s,w,nocc,nvir)] * t4 * scale;
                if (p==r && w==u) tmp[q][s][t][v] += e2ovov[De(p,u,r,w,nocc,nvir)] * t4 * scale;
                if (q==r && w==u) tmp[p][s][t][v] -= e2ovov[De(q,u,r,w,nocc,nvir)] * t4 * scale;
                if (p==s && w==u) tmp[q][r][t][v] -= e2ovov[De(p,u,s,w,nocc,nvir)] * t4 * scale;
                if (q==s && w==u) tmp[p][r][t][v] += e2ovov[De(q,u,s,w,nocc,nvir)] * t4 * scale;
                /* symmetrize (ij,ab)<->(ji,ba) into the output */
                for (it = 0; it < nocc; it++)
                    for (jt = 0; jt < nocc; jt++)
                        for (at = 0; at < nvir; at++)
                            for (bt = 0; bt < nvir; bt++)
                                t2t4c[D(it,jt,at,bt,nocc,nvir)] += tmp[it][jt][at][bt] + tmp[jt][it][bt][at];
            }
            else if (strncmp(typ, "aaab", 4) == 0 && fabs(t4) > numzero){
                /* aaab and its spin mirror contribute equally, hence factor 2 */
                norm += 2.0*t4*t4;
                /* occupied p,q,r (alpha) s (beta); virtual t,u,v (alpha) w (beta) */
                sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v,&s,&w);
                p += nc;
                q += nc;
                r += nc;
                s += nc;
                t += nc;
                u += nc;
                v += nc;
                w += nc;
                /* interm norm of c4 */
                t4 = t4 / c0;
                /* extract the connected t4 */
                t4-= t1xt3aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t3aaa, t3aab); // may have 1e-5 bug
                t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t2aa, t2ab); // may have 1e-3 bug
                t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1, t2aa, t2ab); // may have 1e-5 bug
                t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc, nvir, t1); // may have 1e-6 bug
                for (it=0; it< nocc; it++){
                    for (jt=0; jt< nocc; jt++){
                        for (at=0; at< nvir; at++){
                            for (bt=0; bt< nvir; bt++){
                                tmp[it][jt][at][bt] = 0.0;
                            }
                        }
                    }
                }
                tmp[r][s][v][w] += (e2ovov[De(p,t,q,u,nocc,nvir)]-e2ovov[De(p,u,q,t,nocc,nvir)]) * t4;
                tmp[q][s][v][w] -= (e2ovov[De(p,t,r,u,nocc,nvir)]-e2ovov[De(p,u,r,t,nocc,nvir)]) * t4;
                tmp[p][s][v][w] += (e2ovov[De(q,t,s,u,nocc,nvir)]-e2ovov[De(q,u,s,t,nocc,nvir)]) * t4;
                tmp[r][s][u][w] -= (e2ovov[De(p,t,q,v,nocc,nvir)]-e2ovov[De(p,v,q,t,nocc,nvir)]) * t4;
                tmp[q][s][u][w] += (e2ovov[De(p,t,r,v,nocc,nvir)]-e2ovov[De(p,v,r,t,nocc,nvir)]) * t4;
                tmp[p][s][u][w] -= (e2ovov[De(q,t,s,v,nocc,nvir)]-e2ovov[De(q,v,s,t,nocc,nvir)]) * t4;
                tmp[r][s][t][w] += (e2ovov[De(p,u,q,v,nocc,nvir)]-e2ovov[De(p,v,q,u,nocc,nvir)]) * t4;
                tmp[q][s][t][w] -= (e2ovov[De(p,u,r,v,nocc,nvir)]-e2ovov[De(p,v,r,u,nocc,nvir)]) * t4;
                tmp[p][s][t][w] += (e2ovov[De(q,u,s,v,nocc,nvir)]-e2ovov[De(q,v,s,u,nocc,nvir)]) * t4;
                for (it = 0; it < nocc; it++)
                    for (jt = 0; jt < nocc; jt++)
                        for (at = 0; at < nvir; at++)
                            for (bt = 0; bt < nvir; bt++)
                                t2t4c[D(it,jt,at,bt,nocc,nvir)] += tmp[it][jt][at][bt] + tmp[jt][it][bt][at];
            }
        }
        fclose(fp);
    }
    else
    {
        /* previously an empty branch; report the failure instead of
         * silently printing an unchanged norm below */
        fprintf(stderr, "t2t4c_dmrg: cannot open CIcoeff_dmrg.out\n");
    }
    for (it=0; it< nocc; it++){
        for (jt=0; jt< nocc; jt++){
            for (at=0; at< nvir; at++){
                free(tmp[it][jt][at]);
            }
            free(tmp[it][jt]);
        }
        free(tmp[it]);
    }
    free(tmp);
    printf ("0SDTQ (Q) = %f ( %f )\n", norm, norm-norm0SDT);
}
//
//void c4_to_t4_test(double *t4aaab, double *t4aabb, double *c4aaab, double *c4aabb, double *t1, double *t2aa, double *t2ab, double *t3aaa, double *t3aab, int nocc, int nvir, double numzero)
//{
// int i, j, k, l, a, b, c, d, m_ijab;
// int ijkabc, ld, ijkabcld_c;
// int ijklabcd_t11, ijklabcd_t21, ijklabcd_t31, ijklabcd_t41, ijklabcd_t51, ijklabcd_t61;
// int ijklabcd_t12, ijklabcd_t22, ijklabcd_t32, ijklabcd_t42, ijklabcd_t52, ijklabcd_t62;
// int ijklabcd_t13, ijklabcd_t23, ijklabcd_t33, ijklabcd_t43, ijklabcd_t53, ijklabcd_t63;
// int ijklabcd_t14, ijklabcd_t24, ijklabcd_t34, ijklabcd_t44, ijklabcd_t54, ijklabcd_t64;
// int ijklabcd_t15, ijklabcd_t25, ijklabcd_t35, ijklabcd_t45, ijklabcd_t55, ijklabcd_t65;
// int ijklabcd_t16, ijklabcd_t26, ijklabcd_t36, ijklabcd_t46, ijklabcd_t56, ijklabcd_t66;
// int ijab, klcd, ijabklcd_c;
//
// double tmp, tmp2;
//
// // t4aaab
// ijkabc = -1;
// for (c = 2; c < nvir; c++) {
// for (b = 1; b < c; b++) {
// for (a = 0; a < b; a++) {
// for (k = nocc-1; k > 1; k--) {
// for (j = k-1; j > 0; j--) {
// for (i = j-1; i > -1; i--) {
// ijkabc += 1;
// ld = -1;
// for (d = 0; d < nvir; d++) {
// for (l = nocc-1; l > -1; l--) {
// ld += 1;
// ijkabcld_c = ijkabc * nocc*nvir + ld;
// tmp = c4aaab[ijkabcld_c];
//
//// if(fabs(tmp)-fabs(tmp2) > numzero)
// if(fabs(tmp) > numzero)
// {
// tmp2 = t1xt3aaab (i, j, k, l, a, b, c, d, nocc, nvir, t1, t3aaa, t3aab); // may have 1e-5 bug
// tmp2+= t2xt2aaab (i, j, k, l, a, b, c, d, nocc, nvir, t2aa, t2ab); // may have 1e-3 bug
// tmp2+= t1xt1xt2aaab (i, j, k, l, a, b, c, d, nocc, nvir, t1, t2aa, t2ab); // may have 1e-5 bug
// tmp2+= t1xt1xt1xt1aaab (i, j, k, l, a, b, c, d, nocc, nvir, t1); // may have 1e-6 bug
//
// tmp = tmp2;
// ijklabcd_t11 = Q(i, j, k, l, a, b, c, d, nocc, nvir);
// ijklabcd_t12 = Q(i, j, k, l, b, c, a, d, nocc, nvir);
// ijklabcd_t13 = Q(i, j, k, l, c, a, b, d, nocc, nvir);
// ijklabcd_t14 = Q(i, j, k, l, a, c, b, d, nocc, nvir);
// ijklabcd_t15 = Q(i, j, k, l, b, a, c, d, nocc, nvir);
// ijklabcd_t16 = Q(i, j, k, l, c, b, a, d, nocc, nvir);
//
// t4aaab[ijklabcd_t11] = tmp;
// t4aaab[ijklabcd_t12] = tmp;
// t4aaab[ijklabcd_t13] = tmp;
// t4aaab[ijklabcd_t14] = -tmp;
// t4aaab[ijklabcd_t15] = -tmp;
// t4aaab[ijklabcd_t16] = -tmp;
//
// ijklabcd_t21 = Q(j, k, i, l, a, b, c, d, nocc, nvir);
// ijklabcd_t22 = Q(j, k, i, l, b, c, a, d, nocc, nvir);
// ijklabcd_t23 = Q(j, k, i, l, c, a, b, d, nocc, nvir);
// ijklabcd_t24 = Q(j, k, i, l, a, c, b, d, nocc, nvir);
// ijklabcd_t25 = Q(j, k, i, l, b, a, c, d, nocc, nvir);
// ijklabcd_t26 = Q(j, k, i, l, c, b, a, d, nocc, nvir);
//
// t4aaab[ijklabcd_t21] = tmp;
// t4aaab[ijklabcd_t22] = tmp;
// t4aaab[ijklabcd_t23] = tmp;
// t4aaab[ijklabcd_t24] = -tmp;
// t4aaab[ijklabcd_t25] = -tmp;
// t4aaab[ijklabcd_t26] = -tmp;
//
// ijklabcd_t31 = Q(k, i, j, l, a, b, c, d, nocc, nvir);
// ijklabcd_t32 = Q(k, i, j, l, b, c, a, d, nocc, nvir);
// ijklabcd_t33 = Q(k, i, j, l, c, a, b, d, nocc, nvir);
// ijklabcd_t34 = Q(k, i, j, l, a, c, b, d, nocc, nvir);
// ijklabcd_t35 = Q(k, i, j, l, b, a, c, d, nocc, nvir);
// ijklabcd_t36 = Q(k, i, j, l, c, b, a, d, nocc, nvir);
//
// t4aaab[ijklabcd_t31] = tmp;
// t4aaab[ijklabcd_t32] = tmp;
// t4aaab[ijklabcd_t33] = tmp;
// t4aaab[ijklabcd_t34] = -tmp;
// t4aaab[ijklabcd_t35] = -tmp;
// t4aaab[ijklabcd_t36] = -tmp;
//
// ijklabcd_t41 = Q(i, k, j, l, a, b, c, d, nocc, nvir);
// ijklabcd_t42 = Q(i, k, j, l, b, c, a, d, nocc, nvir);
// ijklabcd_t43 = Q(i, k, j, l, c, a, b, d, nocc, nvir);
// ijklabcd_t44 = Q(i, k, j, l, a, c, b, d, nocc, nvir);
// ijklabcd_t45 = Q(i, k, j, l, b, a, c, d, nocc, nvir);
// ijklabcd_t46 = Q(i, k, j, l, c, b, a, d, nocc, nvir);
//
// t4aaab[ijklabcd_t41] = -tmp;
// t4aaab[ijklabcd_t42] = -tmp;
// t4aaab[ijklabcd_t43] = -tmp;
// t4aaab[ijklabcd_t44] = tmp;
// t4aaab[ijklabcd_t45] = tmp;
// t4aaab[ijklabcd_t46] = tmp;
//
// ijklabcd_t51 = Q(j, i, k, l, a, b, c, d, nocc, nvir);
// ijklabcd_t52 = Q(j, i, k, l, b, c, a, d, nocc, nvir);
// ijklabcd_t53 = Q(j, i, k, l, c, a, b, d, nocc, nvir);
// ijklabcd_t54 = Q(j, i, k, l, a, c, b, d, nocc, nvir);
// ijklabcd_t55 = Q(j, i, k, l, b, a, c, d, nocc, nvir);
// ijklabcd_t56 = Q(j, i, k, l, c, b, a, d, nocc, nvir);
//
// t4aaab[ijklabcd_t51] = -tmp;
// t4aaab[ijklabcd_t52] = -tmp;
// t4aaab[ijklabcd_t53] = -tmp;
// t4aaab[ijklabcd_t54] = tmp;
// t4aaab[ijklabcd_t55] = tmp;
// t4aaab[ijklabcd_t56] = tmp;
//
// ijklabcd_t61 = Q(k, j, i, l, a, b, c, d, nocc, nvir);
// ijklabcd_t62 = Q(k, j, i, l, b, c, a, d, nocc, nvir);
// ijklabcd_t63 = Q(k, j, i, l, c, a, b, d, nocc, nvir);
// ijklabcd_t64 = Q(k, j, i, l, a, c, b, d, nocc, nvir);
// ijklabcd_t65 = Q(k, j, i, l, b, a, c, d, nocc, nvir);
// ijklabcd_t66 = Q(k, j, i, l, c, b, a, d, nocc, nvir);
//
// t4aaab[ijklabcd_t61] = -tmp;
// t4aaab[ijklabcd_t62] = -tmp;
// t4aaab[ijklabcd_t63] = -tmp;
// t4aaab[ijklabcd_t64] = tmp;
// t4aaab[ijklabcd_t65] = tmp;
// t4aaab[ijklabcd_t66] = tmp;
// }
// }
// }
// }
// }
// }
// }
// }
// }
//
// // TODO lsh: reduce symmetry of t4, t3
//
// // t4aabb
// m_ijab = nocc*(nocc-1)/2 * nvir*(nvir-1)/2;
// ijab = -1;
// for (b = 1; b < nvir; b++) {
// for (a = 0; a < b; a++) {
// for (j = nocc-1; j > 0; j--) {
// for (i = j-1; i > -1; i--) {
// ijab += 1;
// klcd =-1;
// for (d = 1; d < nvir; d++) {
// for (c = 0; c < d; c++) {
// for (l = nocc-1; l > 0; l--) {
// for (k = l-1; k > -1; k--) {
// klcd += 1;
// ijabklcd_c = ijab * m_ijab + klcd;
// tmp = c4aabb[ijabklcd_c];
//
//// if(fabs(tmp)-fabs(tmp2) > numzero)
// if(fabs(tmp) > numzero)
// {
// tmp2 = t1xt3aabb(i, j, k, l, a, b, c, d, nocc, nvir, t1, t3aab);
// tmp2+= t2xt2aabb(i, j, k, l, a, b, c, d, nocc, nvir, t2aa, t2ab);
// tmp2+= t1xt1xt2aabb(i, j, k, l, a, b, c, d, nocc, nvir, t1, t2aa, t2ab);
// tmp2+= t1xt1xt1xt1aabb(i, j, k, l, a, b, c, d, nocc, nvir, t1); // may have bug
//
// tmp = tmp2;
// ijklabcd_t11 = Q(i, j, k, l, a, b, c, d, nocc, nvir);
// ijklabcd_t12 = Q(j, i, k, l, b, a, c, d, nocc, nvir);
// ijklabcd_t13 = Q(i, j, k, l, b, a, c, d, nocc, nvir);
// ijklabcd_t14 = Q(j, i, k, l, a, b, c, d, nocc, nvir);
//
// t4aabb[ijklabcd_t11] = tmp;
// t4aabb[ijklabcd_t12] = tmp;
// t4aabb[ijklabcd_t13] = -tmp;
// t4aabb[ijklabcd_t14] = -tmp;
//
// ijklabcd_t21 = Q(i, j, l, k, a, b, d, c, nocc, nvir);
// ijklabcd_t22 = Q(j, i, l, k, b, a, d, c, nocc, nvir);
// ijklabcd_t23 = Q(i, j, l, k, b, a, d, c, nocc, nvir);
// ijklabcd_t24 = Q(j, i, l, k, a, b, d, c, nocc, nvir);
//
// t4aabb[ijklabcd_t21] = tmp;
// t4aabb[ijklabcd_t22] = tmp;
// t4aabb[ijklabcd_t23] = -tmp;
// t4aabb[ijklabcd_t24] = -tmp;
//
// ijklabcd_t31 = Q(i, j, k, l, a, b, d, c, nocc, nvir);
// ijklabcd_t32 = Q(j, i, k, l, b, a, d, c, nocc, nvir);
// ijklabcd_t33 = Q(i, j, k, l, b, a, d, c, nocc, nvir);
// ijklabcd_t34 = Q(j, i, k, l, a, b, d, c, nocc, nvir);
//
// t4aabb[ijklabcd_t31] = -tmp;
// t4aabb[ijklabcd_t32] = -tmp;
// t4aabb[ijklabcd_t33] = tmp;
// t4aabb[ijklabcd_t34] = tmp;
//
// ijklabcd_t41 = Q(i, j, l, k, a, b, c, d, nocc, nvir);
// ijklabcd_t42 = Q(j, i, l, k, b, a, c, d, nocc, nvir);
// ijklabcd_t43 = Q(i, j, l, k, b, a, c, d, nocc, nvir);
// ijklabcd_t44 = Q(j, i, l, k, a, b, c, d, nocc, nvir);
//
// t4aabb[ijklabcd_t41] = -tmp;
// t4aabb[ijklabcd_t42] = -tmp;
// t4aabb[ijklabcd_t43] = tmp;
// t4aabb[ijklabcd_t44] = tmp;
// }
// }
// }
// }
// }
// }
// }
// }
// }
//
//}
void t1t3c_shci(double *t1t3c, double *t1, double *t2aa, double *t2ab, double *e2ovov, const int nc, const int nocc, const int nvir, const double numzero, const double c0, double norm)
{
int p, q, r, t, u, v, itmp, it, at;
double t3, parity, scale;
uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
for (itmp = 0; itmp < nocc+nvir; itmp++){
if (itmp<nocc) Refdet[itmp] = 3;
else Refdet[itmp] = 0;
}
FILE *fp;
char line[255], typ[4], tmpc[255];
char *ptr;
fp = fopen("CIcoeff_shci.out", "r");
fgets(line, 255, fp);
double norm0SD = norm;
if (fp) {
while ( !feof(fp) ){
fgets(line, 255, fp);
ptr = strtok(line, ",");
it=0;
while(ptr != NULL){
if(it==0){
strcpy(typ, ptr);
}
if (it>0 && strlen(typ)==3 && strncmp(typ, "aaa", 3)==0){
strcpy(tmpc, ptr);
if(it==1) p = atoi(tmpc);
if(it==2) q = atoi(tmpc);
if(it==3) r = atoi(tmpc);
if(it==4) t = atoi(tmpc);
if(it==5) u = atoi(tmpc);
if(it==6) v = atoi(tmpc);
if(it==7) t3= atof(tmpc);
}
if (it>0 && strlen(typ)==3 && strncmp(typ, "aab", 3)==0){
strcpy(tmpc, ptr);
if(it==1) p = atoi(tmpc);
if(it==2) q = atoi(tmpc);
if(it==3) t = atoi(tmpc);
if(it==4) u = atoi(tmpc);
if(it==5) r = atoi(tmpc);
if(it==6) v = atoi(tmpc);
if(it==7) t3= atof(tmpc);
}
if (it>7) break;
ptr = strtok(NULL, ",");
it++;
}
if (strlen(typ)==3 && strncmp(typ, "aaa", 3) == 0 && fabs(t3) > numzero){
// printf("'%s, %d, %d, %d, %d, %d, %d, %15.8f'\n", typ, p,q,r,t,u,v,t3);
norm += 2.0*t3*t3;
p += nc;
q += nc;
r += nc;
t += - nocc + nc;
u += - nocc + nc;
v += - nocc + nc;
// //lsh test
// if(!(p == 1 && q == 2 && r == 3 && \
// t == 0 && u == 2 && v == 4)) continue;
// printf("c3 in OTF: %15.8f\n",t3);
for (itmp = 0; itmp < nocc+nvir; itmp++)
det_str[itmp] = Refdet[itmp];
det_str[p] = 2;
det_str[q] = 2;
det_str[r] = 2;
det_str[t+nocc] = 1;
det_str[u+nocc] = 1;
det_str[v+nocc] = 1;
//parity = parity_ab_str(det_str, nocc+nvir);
parity = parity_ci_to_cc(p+q+r, 3, nocc);
// interm norm of c3
t3 = parity * t3 / c0;
// extract t3
t3-= t1xt2aaa (p, q, r, t, u, v, nocc, nvir, t1, t2aa);
t3-= t1xt1xt1aaa (p, q, r, t, u, v, nocc, nvir, t1);
// printf("t3 in OTF: %15.8f\n",t3);
scale = 1.0;
t1t3c[S(p,t,nvir)] += (e2ovov[De(q,u,r,v,nocc,nvir)]-e2ovov[De(r,u,q,v,nocc,nvir)]) * t3 * scale;
t1t3c[S(q,t,nvir)] -= (e2ovov[De(p,u,r,v,nocc,nvir)]-e2ovov[De(r,u,p,v,nocc,nvir)]) * t3 * scale;
t1t3c[S(r,t,nvir)] += (e2ovov[De(p,u,q,v,nocc,nvir)]-e2ovov[De(q,u,p,v,nocc,nvir)]) * t3 * scale;
t1t3c[S(p,u,nvir)] -= (e2ovov[De(q,t,r,v,nocc,nvir)]-e2ovov[De(r,t,q,v,nocc,nvir)]) * t3 * scale;
t1t3c[S(q,u,nvir)] += (e2ovov[De(p,t,r,v,nocc,nvir)]-e2ovov[De(r,t,p,v,nocc,nvir)]) * t3 * scale;
t1t3c[S(r,u,nvir)] -= (e2ovov[De(p,t,q,v,nocc,nvir)]-e2ovov[De(q,t,p,v,nocc,nvir)]) * t3 * scale;
t1t3c[S(p,v,nvir)] += (e2ovov[De(q,t,r,u,nocc,nvir)]-e2ovov[De(r,t,q,u,nocc,nvir)]) * t3 * scale;
t1t3c[S(q,v,nvir)] -= (e2ovov[De(p,t,r,u,nocc,nvir)]-e2ovov[De(r,t,p,u,nocc,nvir)]) * t3 * scale;
t1t3c[S(r,v,nvir)] += (e2ovov[De(p,t,q,u,nocc,nvir)]-e2ovov[De(q,t,p,u,nocc,nvir)]) * t3 * scale;
}
else if (strlen(typ)==3 && strncmp(typ, "aab", 3) == 0 && fabs(t3) > numzero){
//printf("'%s, %d, %d, %d, %d, %d, %d, %15.8f'\n", typ, p,q,t,u,r,v,t3);
norm += 2.0*t3*t3;
p += nc;
q += nc;
r += nc;
t += - nocc + nc;
u += - nocc + nc;
v += - nocc + nc;
//lsh test
//if(!(p == 2 && q == 3 && r == 3 && \
// t == 0 && u == 1 && v == 1)) continue;
//printf("c3 in OTF: %15.8f\n",t3);
for (itmp = 0; itmp < nocc+nvir; itmp++)
det_str[itmp] = Refdet[itmp];
det_str[p] = 2;
det_str[q] = 2;
det_str[t+nocc] = 1;
det_str[u+nocc] = 1;
if (p != r && q != r) det_str[r] = 1;
else det_str[r] = 0;
if (t != v && u != v) det_str[v+nocc] = 2;
else det_str[v+nocc] = 3;
//parity = parity_ab_str(det_str, nocc+nvir);
parity = parity_ci_to_cc(p+q, 2, nocc);
parity *= parity_ci_to_cc(r, 1, nocc);
// interm norm of c3
t3 = parity * t3 / c0;
// extract t3
t3-= t1xt2aab(p, q, r, t, u, v, nocc, nvir, t1, t2aa, t2ab);
t3-= t1xt1xt1aab(p, q, r, t, u, v, nocc, nvir, t1);
//printf("t3 in OTF: %15.8f\n",t3);
t1t3c[S(r,v,nvir)] += (e2ovov[De(p,t,q,u,nocc,nvir)] - e2ovov[De(p,u,q,t,nocc,nvir)]) * t3;
scale = 1.0;
if (r<q && v<u) t1t3c[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
if (r<p && v<u) t1t3c[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
if (r<q && v<t) t1t3c[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
if (r<p && v<t) t1t3c[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
if (q<r && u<v) t1t3c[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
if (p<r && u<v) t1t3c[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
if (q<r && t<v) t1t3c[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
if (p<r && t<v) t1t3c[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
scale = 0.5;
if (r==q && v<u) t1t3c[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
if (r==p && v<u) t1t3c[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
if (r==q && v<t) t1t3c[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
if (r==p && v<t) t1t3c[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
if (q==r && u<v) t1t3c[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
if (p==r && u<v) t1t3c[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
if (q==r && t<v) t1t3c[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
if (p==r && t<v) t1t3c[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
if (r<q && v==u) t1t3c[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
if (r<p && v==u) t1t3c[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
if (r<q && v==t) t1t3c[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
if (r<p && v==t) t1t3c[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
if (q<r && u==v) t1t3c[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
if (p<r && u==v) t1t3c[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
if (q<r && t==v) t1t3c[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
if (p<r && t==v) t1t3c[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
scale = 0.25;
if (r==q && v==u) t1t3c[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
if (r==p && v==u) t1t3c[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
if (r==q && v==t) t1t3c[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
if (r==p && v==t) t1t3c[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
if (q==r && u==v) t1t3c[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
if (p==r && u==v) t1t3c[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
if (q==r && t==v) t1t3c[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
if (p==r && t==v) t1t3c[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
scale = 1.0;
if (r<q && u<v) t1t3c[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
if (r<p && u<v) t1t3c[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
if (r<q && t<v) t1t3c[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
if (r<p && t<v) t1t3c[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
if (q<r && v<u) t1t3c[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
if (p<r && v<u) t1t3c[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
if (q<r && v<t) t1t3c[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
if (p<r && v<t) t1t3c[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
scale = 0.5;
if (r==q && u<v) t1t3c[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
if (r==p && u<v) t1t3c[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
if (r==q && t<v) t1t3c[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
if (r==p && t<v) t1t3c[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
if (q==r && v<u) t1t3c[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
if (p==r && v<u) t1t3c[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
if (q==r && v<t) t1t3c[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
if (p==r && v<t) t1t3c[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
if (r<q && u==v) t1t3c[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
if (r<p && u==v) t1t3c[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
if (r<q && t==v) t1t3c[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
if (r<p && t==v) t1t3c[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
if (q<r && v==u) t1t3c[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
if (p<r && v==u) t1t3c[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
if (q<r && v==t) t1t3c[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
if (p<r && v==t) t1t3c[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
scale = 0.25;
if (r==q && u==v) t1t3c[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
if (r==p && u==v) t1t3c[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
if (r==q && t==v) t1t3c[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
if (r==p && t==v) t1t3c[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
if (q==r && v==u) t1t3c[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
if (p==r && v==u) t1t3c[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
if (q==r && v==t) t1t3c[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
if (p==r && v==t) t1t3c[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
// scale = 1.0;
// if (r==q && u<v) t1t3c[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
// if (r==p && u<v) t1t3c[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
// if (r==q && t<v) t1t3c[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
// if (r==p && t<v) t1t3c[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
//
// if (r<q && u==v) t1t3c[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
// if (r<p && u==v) t1t3c[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
// if (r<q && t==v) t1t3c[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
// if (r<p && t==v) t1t3c[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
//
// scale = 0.5;
// if (r==q && u==v) t1t3c[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
// if (r==p && u==v) t1t3c[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
// if (r==q && t==v) t1t3c[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
// if (r==p && t==v) t1t3c[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
}
}
fclose(fp);
}
else
{
// error message
}
//printf (" 0SDT (T) = %f ( %f )\n", norm, norm-norm0SD);
}
// Accumulate triples (t3) contributions into the singles residual t1t3c from
// per-thread amplitude files "t3.<i>" (one file per OpenMP thread, written by
// a DMRG solver).  Each record is "<typ>,<p>,<q>,<r>,<t>,<u>,<v>" on one line
// followed by the coefficient on the next; only "aaa" and "aab" records above
// the numzero threshold are used.
//
// Parameters:
//   t1t3c            [out] singles-sized accumulator, indexed via S(i,a,nvir)
//   t1, t2aa, t2ab   [in]  CC amplitudes used to disconnect t3 from c3
//   e2ovov           [in]  (ov|ov) two-electron integrals, indexed via De(...)
//   nc, nocc, nvir   [in]  core / occupied / virtual orbital counts
//   numzero          [in]  screening threshold on |c3|
//   c0               [in]  reference CI coefficient (intermediate normalization)
//   norm             [in]  running CI norm; NOTE: passed by value, so the
//                          reduction below never reaches the caller (the final
//                          report is commented out) — kept for interface
//                          compatibility.
void t1t3c_dmrg_omp(double *t1t3c, double *t1, double *t2aa, double *t2ab, double *e2ovov, const int nc, const int nocc, const int nvir, const double numzero, const double c0, double norm)
{
    double norm0SD = norm;   // baseline norm, referenced only by the commented report below
    const int t1size = nocc*nvir;
    #pragma omp parallel default(none) \
        shared(t1, t2aa, t2ab, e2ovov, t1t3c, norm)
    {
        int p, q, r, t, u, v, itmp, it, at;
        double t3, parity, scale;
        // Reference determinant occupation string: 3 = doubly occupied, 0 = empty.
        // det_str is filled per record but only the (commented) parity_ab_str
        // path would consume it; parity comes from parity_ci_to_cc instead.
        uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
        for (itmp = 0; itmp < nocc+nvir; itmp++){
            if (itmp<nocc) Refdet[itmp] = 3;
            else Refdet[itmp] = 0;
        }
        // Thread-private accumulator; merged under "critical" at the end.
        double *t1t3c_priv = (double *)malloc(sizeof(double) * t1size);
        for (it=0; it< t1size; it++){
            t1t3c_priv[it] = 0.0;
        }
        int i;
        #pragma omp for reduction(+ : norm)
        for (i=0; i<omp_get_num_threads(); i++){
            char line[255], typ[4];
            char filename[32];
            // snprintf replaces the old sprintf-into-char[4] + strcat, which
            // overflowed for thread ids >= 1000.
            snprintf(filename, sizeof filename, "t3.%d", i);
            FILE *fp = fopen(filename, "r");   // opened exactly once (old code
                                               // opened twice and leaked a handle)
            if (fp) {
                // Loop on the fscanf return values instead of !feof(fp): the
                // feof test only trips AFTER a failed read, so the old loop
                // re-processed (double-counted) the final record.  The %254s
                // width also keeps line[255] from overflowing.
                while ( fscanf(fp, " %c%c%c,%254s", &(typ[0]), &(typ[1]), &(typ[2]), line) == 4
                        && fscanf(fp, "%lf", &t3) == 1 ){
                    if (strncmp(typ, "aaa", 3) == 0 && fabs(t3) > numzero){
                        sscanf(line,"%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v);
                        norm += 2.0*t3*t3;
                        // Shift file indices to internal ranges: occupied by +nc,
                        // virtual by -nocc+nc.
                        p += nc;
                        q += nc;
                        r += nc;
                        t += - nocc + nc;
                        u += - nocc + nc;
                        v += - nocc + nc;
                        for (itmp = 0; itmp < nocc+nvir; itmp++)
                            det_str[itmp] = Refdet[itmp];
                        det_str[p] = 1;
                        det_str[q] = 1;
                        det_str[r] = 1;
                        det_str[t+nocc] = 2;
                        det_str[u+nocc] = 2;
                        det_str[v+nocc] = 2;
                        //parity = parity_ab_str(det_str, nocc+nvir);
                        parity = parity_ci_to_cc(p+q+r, 3, nocc);
                        // intermediate normalization of c3
                        t3 = parity * t3 / c0;
                        // disconnect: c3 -> t3
                        t3-= t1xt2aaa (p, q, r, t, u, v, nocc, nvir, t1, t2aa);
                        t3-= t1xt1xt1aaa (p, q, r, t, u, v, nocc, nvir, t1);
                        scale = 1.0;
                        t1t3c_priv[S(p,t,nvir)] += (e2ovov[De(q,u,r,v,nocc,nvir)]-e2ovov[De(r,u,q,v,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(q,t,nvir)] -= (e2ovov[De(p,u,r,v,nocc,nvir)]-e2ovov[De(r,u,p,v,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(r,t,nvir)] += (e2ovov[De(p,u,q,v,nocc,nvir)]-e2ovov[De(q,u,p,v,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(p,u,nvir)] -= (e2ovov[De(q,t,r,v,nocc,nvir)]-e2ovov[De(r,t,q,v,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(q,u,nvir)] += (e2ovov[De(p,t,r,v,nocc,nvir)]-e2ovov[De(r,t,p,v,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(r,u,nvir)] -= (e2ovov[De(p,t,q,v,nocc,nvir)]-e2ovov[De(q,t,p,v,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(p,v,nvir)] += (e2ovov[De(q,t,r,u,nocc,nvir)]-e2ovov[De(r,t,q,u,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(q,v,nvir)] -= (e2ovov[De(p,t,r,u,nocc,nvir)]-e2ovov[De(r,t,p,u,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(r,v,nvir)] += (e2ovov[De(p,t,q,u,nocc,nvir)]-e2ovov[De(q,t,p,u,nocc,nvir)]) * t3 * scale;
                    }
                    else if (strncmp(typ, "aab", 3) == 0 && fabs(t3) > numzero){
                        // aab record order in the file is p,q,t,u,r,v.
                        sscanf(line,"%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&v);
                        norm += 2.0*t3*t3;
                        p += nc;
                        q += nc;
                        r += nc;
                        t += - nocc + nc;
                        u += - nocc + nc;
                        v += - nocc + nc;
                        for (itmp = 0; itmp < nocc+nvir; itmp++)
                            det_str[itmp] = Refdet[itmp];
                        det_str[p] = 1;
                        det_str[q] = 1;
                        det_str[t+nocc] = 2;
                        det_str[u+nocc] = 2;
                        if (p != r && q != r) det_str[r] = 2;
                        else det_str[r] = 0;
                        if (t != v && u != v) det_str[v+nocc] = 1;
                        else det_str[v+nocc] = 3;
                        //parity = parity_ab_str(det_str, nocc+nvir);
                        parity = parity_ci_to_cc(p+q, 2, nocc);
                        parity *= parity_ci_to_cc(r, 1, nocc);
                        // intermediate normalization of c3
                        t3 = parity * t3 / c0;
                        // disconnect: c3 -> t3
                        t3-= t1xt2aab(p, q, r, t, u, v, nocc, nvir, t1, t2aa, t2ab);
                        t3-= t1xt1xt1aab(p, q, r, t, u, v, nocc, nvir, t1);
                        t1t3c_priv[S(r,v,nvir)] += (e2ovov[De(p,t,q,u,nocc,nvir)] - e2ovov[De(p,u,q,t,nocc,nvir)]) * t3;
                        // Degenerate index pairs carry reduced weight:
                        // 1.0 when both comparisons are strict, 0.5 when one is
                        // an equality, 0.25 when both are equalities.
                        scale = 1.0;
                        if (r<q && v<u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r<p && v<u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r<q && v<t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r<p && v<t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q<r && u<v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && u<v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q<r && t<v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && t<v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        scale = 0.5;
                        if (r==q && v<u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r==p && v<u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r==q && v<t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r==p && v<t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q==r && u<v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && u<v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q==r && t<v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && t<v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        if (r<q && v==u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r<p && v==u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r<q && v==t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r<p && v==t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q<r && u==v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && u==v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q<r && t==v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && t==v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        scale = 0.25;
                        if (r==q && v==u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r==p && v==u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r==q && v==t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r==p && v==t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q==r && u==v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && u==v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q==r && t==v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && t==v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        // Second permutation family (u/v roles swapped), same
                        // strict/equality weighting scheme.
                        scale = 1.0;
                        if (r<q && u<v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r<p && u<v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r<q && t<v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r<p && t<v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q<r && v<u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && v<u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q<r && v<t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && v<t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        scale = 0.5;
                        if (r==q && u<v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r==p && u<v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r==q && t<v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r==p && t<v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q==r && v<u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && v<u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q==r && v<t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && v<t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        if (r<q && u==v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r<p && u==v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r<q && t==v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r<p && t==v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q<r && v==u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && v==u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q<r && v==t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && v==t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        scale = 0.25;
                        if (r==q && u==v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r==p && u==v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r==q && t==v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r==p && t==v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q==r && v==u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && v==u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q==r && v==t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && v==t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                    }
                }
                fclose(fp);
            }
            else
            {
                // TODO: report fopen failure on "t3.<i>" — silently skipped for now
            }
        }
        // Merge thread-private contributions into the shared accumulator.
        #pragma omp critical
        {
            for (it=0; it< nocc; it++){
                for (at=0; at< nvir; at++){
                    t1t3c[S(it,at,nvir)] += t1t3c_priv[S(it,at,nvir)];
                }
            }
        }
        free(t1t3c_priv);   // outside the critical section: free needs no serialization
    }
    //printf (" 0SDT (T) = %f ( %f )\n", norm, norm-norm0SD);
}
// Accumulate triples (t3) contributions into the singles residual t1t3c from
// per-thread amplitude files "t3.<i>" (one file per OpenMP thread, written by
// an SHCI solver).  Identical structure to t1t3c_dmrg_omp; only the det_str
// spin codes differ (alpha/beta roles swapped).
//
// Parameters:
//   t1t3c            [out] singles-sized accumulator, indexed via S(i,a,nvir)
//   t1, t2aa, t2ab   [in]  CC amplitudes used to disconnect t3 from c3
//   e2ovov           [in]  (ov|ov) two-electron integrals, indexed via De(...)
//   nc, nocc, nvir   [in]  core / occupied / virtual orbital counts
//   numzero          [in]  screening threshold on |c3|
//   c0               [in]  reference CI coefficient (intermediate normalization)
//   norm             [in]  running CI norm; NOTE: passed by value, so the
//                          reduction never reaches the caller (report is
//                          commented out) — kept for interface compatibility.
void t1t3c_shci_omp(double *t1t3c, double *t1, double *t2aa, double *t2ab, double *e2ovov, const int nc, const int nocc, const int nvir, const double numzero, const double c0, double norm)
{
    double norm0SD = norm;   // baseline norm, referenced only by the commented report below
    const int t1size = nocc*nvir;
    #pragma omp parallel default(none) \
        shared(t1, t2aa, t2ab, e2ovov, t1t3c, norm)
    {
        int p, q, r, t, u, v, itmp, it, at;
        double t3, parity, scale;
        // Reference determinant occupation string: 3 = doubly occupied, 0 = empty.
        // det_str is filled per record but only the (commented) parity_ab_str
        // path would consume it; parity comes from parity_ci_to_cc instead.
        uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
        for (itmp = 0; itmp < nocc+nvir; itmp++){
            if (itmp<nocc) Refdet[itmp] = 3;
            else Refdet[itmp] = 0;
        }
        // Thread-private accumulator; merged under "critical" at the end.
        double *t1t3c_priv = (double *)malloc(sizeof(double) * t1size);
        for (it=0; it< t1size; it++){
            t1t3c_priv[it] = 0.0;
        }
        int i;
        #pragma omp for reduction(+ : norm)
        for (i=0; i<omp_get_num_threads(); i++){
            char line[255], typ[4];
            char filename[32];
            // snprintf replaces the old sprintf-into-char[4] + strcat, which
            // overflowed for thread ids >= 1000.
            snprintf(filename, sizeof filename, "t3.%d", i);
            FILE *fp = fopen(filename, "r");   // opened exactly once (old code
                                               // opened twice and leaked a handle)
            if (fp) {
                // Loop on the fscanf return values instead of !feof(fp): the
                // feof test only trips AFTER a failed read, so the old loop
                // re-processed (double-counted) the final record.  The %254s
                // width also keeps line[255] from overflowing.
                while ( fscanf(fp, " %c%c%c,%254s", &(typ[0]), &(typ[1]), &(typ[2]), line) == 4
                        && fscanf(fp, "%lf", &t3) == 1 ){
                    if (strncmp(typ, "aaa", 3) == 0 && fabs(t3) > numzero){
                        sscanf(line,"%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v);
                        norm += 2.0*t3*t3;
                        // Shift file indices to internal ranges: occupied by +nc,
                        // virtual by -nocc+nc.
                        p += nc;
                        q += nc;
                        r += nc;
                        t += - nocc + nc;
                        u += - nocc + nc;
                        v += - nocc + nc;
                        for (itmp = 0; itmp < nocc+nvir; itmp++)
                            det_str[itmp] = Refdet[itmp];
                        det_str[p] = 2;
                        det_str[q] = 2;
                        det_str[r] = 2;
                        det_str[t+nocc] = 1;
                        det_str[u+nocc] = 1;
                        det_str[v+nocc] = 1;
                        //parity = parity_ab_str(det_str, nocc+nvir);
                        parity = parity_ci_to_cc(p+q+r, 3, nocc);
                        // intermediate normalization of c3
                        t3 = parity * t3 / c0;
                        // disconnect: c3 -> t3
                        t3-= t1xt2aaa (p, q, r, t, u, v, nocc, nvir, t1, t2aa);
                        t3-= t1xt1xt1aaa (p, q, r, t, u, v, nocc, nvir, t1);
                        scale = 1.0;
                        t1t3c_priv[S(p,t,nvir)] += (e2ovov[De(q,u,r,v,nocc,nvir)]-e2ovov[De(r,u,q,v,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(q,t,nvir)] -= (e2ovov[De(p,u,r,v,nocc,nvir)]-e2ovov[De(r,u,p,v,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(r,t,nvir)] += (e2ovov[De(p,u,q,v,nocc,nvir)]-e2ovov[De(q,u,p,v,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(p,u,nvir)] -= (e2ovov[De(q,t,r,v,nocc,nvir)]-e2ovov[De(r,t,q,v,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(q,u,nvir)] += (e2ovov[De(p,t,r,v,nocc,nvir)]-e2ovov[De(r,t,p,v,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(r,u,nvir)] -= (e2ovov[De(p,t,q,v,nocc,nvir)]-e2ovov[De(q,t,p,v,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(p,v,nvir)] += (e2ovov[De(q,t,r,u,nocc,nvir)]-e2ovov[De(r,t,q,u,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(q,v,nvir)] -= (e2ovov[De(p,t,r,u,nocc,nvir)]-e2ovov[De(r,t,p,u,nocc,nvir)]) * t3 * scale;
                        t1t3c_priv[S(r,v,nvir)] += (e2ovov[De(p,t,q,u,nocc,nvir)]-e2ovov[De(q,t,p,u,nocc,nvir)]) * t3 * scale;
                    }
                    else if (strncmp(typ, "aab", 3) == 0 && fabs(t3) > numzero){
                        // aab record order in the file is p,q,t,u,r,v.
                        sscanf(line,"%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&v);
                        norm += 2.0*t3*t3;
                        p += nc;
                        q += nc;
                        r += nc;
                        t += - nocc + nc;
                        u += - nocc + nc;
                        v += - nocc + nc;
                        for (itmp = 0; itmp < nocc+nvir; itmp++)
                            det_str[itmp] = Refdet[itmp];
                        det_str[p] = 2;
                        det_str[q] = 2;
                        det_str[t+nocc] = 1;
                        det_str[u+nocc] = 1;
                        if (p != r && q != r) det_str[r] = 1;
                        else det_str[r] = 0;
                        if (t != v && u != v) det_str[v+nocc] = 2;
                        else det_str[v+nocc] = 3;
                        //parity = parity_ab_str(det_str, nocc+nvir);
                        parity = parity_ci_to_cc(p+q, 2, nocc);
                        parity *= parity_ci_to_cc(r, 1, nocc);
                        // intermediate normalization of c3
                        t3 = parity * t3 / c0;
                        // disconnect: c3 -> t3
                        t3-= t1xt2aab(p, q, r, t, u, v, nocc, nvir, t1, t2aa, t2ab);
                        t3-= t1xt1xt1aab(p, q, r, t, u, v, nocc, nvir, t1);
                        t1t3c_priv[S(r,v,nvir)] += (e2ovov[De(p,t,q,u,nocc,nvir)] - e2ovov[De(p,u,q,t,nocc,nvir)]) * t3;
                        // Degenerate index pairs carry reduced weight:
                        // 1.0 when both comparisons are strict, 0.5 when one is
                        // an equality, 0.25 when both are equalities.
                        scale = 1.0;
                        if (r<q && v<u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r<p && v<u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r<q && v<t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r<p && v<t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q<r && u<v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && u<v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q<r && t<v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && t<v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        scale = 0.5;
                        if (r==q && v<u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r==p && v<u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r==q && v<t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r==p && v<t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q==r && u<v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && u<v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q==r && t<v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && t<v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        if (r<q && v==u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r<p && v==u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r<q && v==t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r<p && v==t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q<r && u==v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && u==v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q<r && t==v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && t==v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        scale = 0.25;
                        if (r==q && v==u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r==p && v==u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r==q && v==t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r==p && v==t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q==r && u==v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && u==v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q==r && t==v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && t==v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        // Second permutation family (u/v roles swapped), same
                        // strict/equality weighting scheme.
                        scale = 1.0;
                        if (r<q && u<v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r<p && u<v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r<q && t<v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r<p && t<v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q<r && v<u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && v<u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q<r && v<t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && v<t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        scale = 0.5;
                        if (r==q && u<v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r==p && u<v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r==q && t<v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r==p && t<v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q==r && v<u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && v<u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q==r && v<t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && v<t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        if (r<q && u==v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r<p && u==v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r<q && t==v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r<p && t==v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q<r && v==u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && v==u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q<r && v==t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p<r && v==t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                        scale = 0.25;
                        if (r==q && u==v) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(r,v,q,u,nocc,nvir)] * t3 * scale;
                        if (r==p && u==v) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(r,v,p,u,nocc,nvir)] * t3 * scale;
                        if (r==q && t==v) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(r,v,q,t,nocc,nvir)] * t3 * scale;
                        if (r==p && t==v) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(r,v,p,t,nocc,nvir)] * t3 * scale;
                        if (q==r && v==u) t1t3c_priv[S(p,t,nvir)] += e2ovov[De(q,u,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && v==u) t1t3c_priv[S(q,t,nvir)] -= e2ovov[De(p,u,r,v,nocc,nvir)] * t3 * scale;
                        if (q==r && v==t) t1t3c_priv[S(p,u,nvir)] -= e2ovov[De(q,t,r,v,nocc,nvir)] * t3 * scale;
                        if (p==r && v==t) t1t3c_priv[S(q,u,nvir)] += e2ovov[De(p,t,r,v,nocc,nvir)] * t3 * scale;
                    }
                }
                fclose(fp);
            }
            else
            {
                // TODO: report fopen failure on "t3.<i>" — silently skipped for now
            }
        }
        // Merge thread-private contributions into the shared accumulator.
        #pragma omp critical
        {
            for (it=0; it< nocc; it++){
                for (at=0; at< nvir; at++){
                    t1t3c[S(it,at,nvir)] += t1t3c_priv[S(it,at,nvir)];
                }
            }
        }
        free(t1t3c_priv);   // outside the critical section: free needs no serialization
    }
    //printf (" 0SDT (T) = %f ( %f )\n", norm, norm-norm0SD);
}
// Accumulate aab triples (t3) contributions into the doubles residual
// t2_t3t4c, reading CI coefficients from "CIcoeff_shci.out" (comma-separated
// records "typ,p,q,t,u,r,v,coeff"; the first line is a header and skipped).
// Only "aab" records with |coeff| > numzero contribute.
//
// Parameters:
//   t2_t3t4c           [out] doubles-sized accumulator, indexed via D(...)
//   t1, t2aa, t2ab     [in]  CC amplitudes used to disconnect t3 from c3
//   tmp1, tmp2, tmp3   [in]  precontracted intermediates (Dtmp1/Dtmp2/S indexing)
//   nc, nocc, nvir     [in]  core / occupied / virtual orbital counts
//   numzero            [in]  screening threshold on |c3|
//   c0                 [in]  reference CI coefficient (intermediate normalization)
void t2t3c_shci(double *t2_t3t4c, double *t1, double *t2aa, double *t2ab, double *tmp1, double *tmp2, double *tmp3, const int nc, const int nocc, const int nvir, const double numzero, const double c0)
{
    int p, q, r, t, u, v, itmp, it, at;
    double t3 = 0.0, parity;   // t3 initialized so a malformed first record cannot read garbage
    // Reference determinant occupation string: 3 = doubly occupied, 0 = empty.
    // det_str is filled per record but only the (commented) parity_ab_str path
    // would consume it; parity comes from parity_ci_to_cc instead.
    uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
    for (itmp = 0; itmp < nocc+nvir; itmp++){
        if (itmp<nocc) Refdet[itmp] = 3;
        else Refdet[itmp] = 0;
    }
    FILE *fp = fopen("CIcoeff_shci.out", "r");
    if (fp) {
        char line[255], typ[4], tmpc[255];
        char *ptr;
        typ[0] = '\0';
        // Skip the header line.  The old code called fgets BEFORE checking fp,
        // dereferencing NULL when the file was missing.
        fgets(line, sizeof line, fp);
        // Drive the loop on fgets itself: the old !feof(fp) test only trips
        // after a failed read, so the final record was parsed (and counted)
        // twice.
        while ( fgets(line, sizeof line, fp) != NULL ){
            ptr = strtok(line, ",");
            it=0;
            while(ptr != NULL){
                if(it==0){
                    // Bounded copy: a token longer than 3 chars used to
                    // overflow typ[4]; now it just marks the record invalid.
                    if (strlen(ptr) < sizeof typ) strcpy(typ, ptr);
                    else typ[0] = '\0';
                }
                if (it>0 && strlen(typ)==3 && strncmp(typ, "aab", 3)==0){
                    strcpy(tmpc, ptr);     // safe: token comes from line[255]
                    // aab record order in the file is p,q,t,u,r,v,coeff.
                    if(it==1) p = atoi(tmpc);
                    if(it==2) q = atoi(tmpc);
                    if(it==3) t = atoi(tmpc);
                    if(it==4) u = atoi(tmpc);
                    if(it==5) r = atoi(tmpc);
                    if(it==6) v = atoi(tmpc);
                    if(it==7) t3= atof(tmpc);
                }
                if (it>7) break;
                ptr = strtok(NULL, ",");
                it++;
            }
            if (strlen(typ)==3 && strncmp(typ, "aab", 3) == 0 && fabs(t3) > numzero){
                // Shift file indices to internal ranges: occupied by +nc,
                // virtual by -nocc+nc.
                p += nc;
                q += nc;
                r += nc;
                t += - nocc + nc;
                u += - nocc + nc;
                v += - nocc + nc;
                for (itmp = 0; itmp < nocc+nvir; itmp++)
                    det_str[itmp] = Refdet[itmp];
                det_str[p] = 2;
                det_str[q] = 2;
                det_str[t+nocc] = 1;
                det_str[u+nocc] = 1;
                if (p != r && q != r) det_str[r] = 1;
                else det_str[r] = 0;
                if (t != v && u != v) det_str[v+nocc] = 2;
                else det_str[v+nocc] = 3;
                //parity = parity_ab_str(det_str, nocc+nvir);
                parity = parity_ci_to_cc(p+q, 2, nocc);
                parity *= parity_ci_to_cc(r, 1, nocc);
                // intermediate normalization of c3
                t3 = parity * t3 / c0;
                // disconnect: c3 -> t3
                t3-= t1xt2aab(p, q, r, t, u, v, nocc, nvir, t1, t2aa, t2ab);
                t3-= t1xt1xt1aab(p, q, r, t, u, v, nocc, nvir, t1);
                // Contract t3 with the occupied-index intermediate tmp1.
                for (it=0; it< nocc; it++){
                    t2_t3t4c[D(it,r,u,v,nocc,nvir)] -= tmp1[Dtmp1(q,it,p,t,nocc,nvir)] * t3;
                    t2_t3t4c[D(it,r,u,v,nocc,nvir)] += tmp1[Dtmp1(p,it,q,t,nocc,nvir)] * t3;
                    t2_t3t4c[D(it,r,t,v,nocc,nvir)] += tmp1[Dtmp1(q,it,p,u,nocc,nvir)] * t3;
                    t2_t3t4c[D(it,r,t,v,nocc,nvir)] -= tmp1[Dtmp1(p,it,q,u,nocc,nvir)] * t3;
                    t2_t3t4c[D(it,q,v,u,nocc,nvir)] -= tmp1[Dtmp1(r,it,p,t,nocc,nvir)] * t3;
                    t2_t3t4c[D(it,p,v,u,nocc,nvir)] += tmp1[Dtmp1(r,it,q,t,nocc,nvir)] * t3;
                    t2_t3t4c[D(it,q,v,t,nocc,nvir)] += tmp1[Dtmp1(r,it,p,u,nocc,nvir)] * t3;
                    t2_t3t4c[D(it,p,v,t,nocc,nvir)] -= tmp1[Dtmp1(r,it,q,u,nocc,nvir)] * t3;
                    t2_t3t4c[D(q,it,u,v,nocc,nvir)] -= tmp1[Dtmp1(r,it,p,t,nocc,nvir)] * t3;
                    t2_t3t4c[D(p,it,u,v,nocc,nvir)] += tmp1[Dtmp1(r,it,q,t,nocc,nvir)] * t3;
                    t2_t3t4c[D(q,it,t,v,nocc,nvir)] += tmp1[Dtmp1(r,it,p,u,nocc,nvir)] * t3;
                    t2_t3t4c[D(p,it,t,v,nocc,nvir)] -= tmp1[Dtmp1(r,it,q,u,nocc,nvir)] * t3;
                    t2_t3t4c[D(r,it,v,u,nocc,nvir)] -= tmp1[Dtmp1(q,it,p,t,nocc,nvir)] * t3;
                    t2_t3t4c[D(r,it,v,u,nocc,nvir)] += tmp1[Dtmp1(p,it,q,t,nocc,nvir)] * t3;
                    t2_t3t4c[D(r,it,v,t,nocc,nvir)] += tmp1[Dtmp1(q,it,p,u,nocc,nvir)] * t3;
                    t2_t3t4c[D(r,it,v,t,nocc,nvir)] -= tmp1[Dtmp1(p,it,q,u,nocc,nvir)] * t3;
                }
                // Contract t3 with the virtual-index intermediate tmp2.
                for (at = 0; at < nvir; at++){
                    t2_t3t4c[D(q,r,u,at,nocc,nvir)] += tmp2[Dtmp2(p,t,at,v,nvir)] * t3;
                    t2_t3t4c[D(p,r,u,at,nocc,nvir)] -= tmp2[Dtmp2(q,t,at,v,nvir)] * t3;
                    t2_t3t4c[D(q,r,t,at,nocc,nvir)] -= tmp2[Dtmp2(p,u,at,v,nvir)] * t3;
                    t2_t3t4c[D(p,r,t,at,nocc,nvir)] += tmp2[Dtmp2(q,u,at,v,nvir)] * t3;
                    t2_t3t4c[D(r,q,v,at,nocc,nvir)] += tmp2[Dtmp2(p,t,at,u,nvir)] * t3;
                    t2_t3t4c[D(r,p,v,at,nocc,nvir)] -= tmp2[Dtmp2(q,t,at,u,nvir)] * t3;
                    t2_t3t4c[D(r,q,v,at,nocc,nvir)] -= tmp2[Dtmp2(p,u,at,t,nvir)] * t3;
                    t2_t3t4c[D(r,p,v,at,nocc,nvir)] += tmp2[Dtmp2(q,u,at,t,nvir)] * t3;
                    t2_t3t4c[D(r,q,at,u,nocc,nvir)] += tmp2[Dtmp2(p,t,at,v,nvir)] * t3;
                    t2_t3t4c[D(r,p,at,u,nocc,nvir)] -= tmp2[Dtmp2(q,t,at,v,nvir)] * t3;
                    t2_t3t4c[D(r,q,at,t,nocc,nvir)] -= tmp2[Dtmp2(p,u,at,v,nvir)] * t3;
                    t2_t3t4c[D(r,p,at,t,nocc,nvir)] += tmp2[Dtmp2(q,u,at,v,nvir)] * t3;
                    t2_t3t4c[D(q,r,at,v,nocc,nvir)] += tmp2[Dtmp2(p,t,at,u,nvir)] * t3;
                    t2_t3t4c[D(p,r,at,v,nocc,nvir)] -= tmp2[Dtmp2(q,t,at,u,nvir)] * t3;
                    t2_t3t4c[D(q,r,at,v,nocc,nvir)] -= tmp2[Dtmp2(p,u,at,t,nvir)] * t3;
                    t2_t3t4c[D(p,r,at,v,nocc,nvir)] += tmp2[Dtmp2(q,u,at,t,nvir)] * t3;
                }
                // Scalar intermediate tmp3 contributions.
                t2_t3t4c[D(q,r,u,v,nocc,nvir)] += tmp3[S(p,t,nvir)] * t3;
                t2_t3t4c[D(p,r,u,v,nocc,nvir)] -= tmp3[S(q,t,nvir)] * t3;
                t2_t3t4c[D(q,r,t,v,nocc,nvir)] -= tmp3[S(p,u,nvir)] * t3;
                t2_t3t4c[D(p,r,t,v,nocc,nvir)] += tmp3[S(q,u,nvir)] * t3;
                t2_t3t4c[D(r,q,v,u,nocc,nvir)] += tmp3[S(p,t,nvir)] * t3;
                t2_t3t4c[D(r,p,v,u,nocc,nvir)] -= tmp3[S(q,t,nvir)] * t3;
                t2_t3t4c[D(r,q,v,t,nocc,nvir)] -= tmp3[S(p,u,nvir)] * t3;
                t2_t3t4c[D(r,p,v,t,nocc,nvir)] += tmp3[S(q,u,nvir)] * t3;
            }
        }
        fclose(fp);
    }
    else
    {
        // TODO: report fopen failure on CIcoeff_shci.out — silently skipped for now
    }
}
void t2t3c_dmrg_omp(double *t2_t3t4c, double *t1, double *t2aa, double *t2ab, double *tmp1, double *tmp2, double *tmp3, const int nc, const int nocc, const int nvir, const double numzero, const double c0)
{
    /* Accumulate into t2_t3t4c the contraction of "aab" triples amplitudes
     * (read from per-thread files "t3.<tid>" produced by a DMRG run) with the
     * intermediates tmp1/tmp2/tmp3.  Each OpenMP thread accumulates into a
     * private buffer which is reduced into t2_t3t4c under a critical section.
     *
     * Fixes vs. previous revision:
     *  - every file was fopen()ed twice, leaking one FILE* per thread;
     *  - the read loop tested feof(), which re-processes the final record
     *    once after EOF; the loop now tests fscanf()'s return values;
     *  - the thread id was sprintf()ed into a 4-byte buffer (overflow for
     *    ids >= 1000); the filename is now built with snprintf;
     *  - unused local `scale` removed; private buffer zeroed via calloc.
     */
    const int t2size = nocc*nocc*nvir*nvir;
#pragma omp parallel default(none) \
    shared(t1, t2aa, t2ab, tmp1, tmp2, tmp3, t2_t3t4c)
    {
        int p, q, r, t, u, v, itmp, it, jt, at, bt;
        double t3, parity;
        uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
        /* reference determinant: 3 = doubly occupied orbital, 0 = empty */
        for (itmp = 0; itmp < nocc+nvir; itmp++){
            if (itmp<nocc) Refdet[itmp] = 3;
            else Refdet[itmp] = 0;
        }
        /* thread-private accumulator (zero-initialized) */
        double *t2_t3t4c_priv = (double *)calloc(t2size, sizeof(double));
        int i;
#pragma omp for
        for (i=0; i<omp_get_num_threads(); i++){
            char line[255], typ[4];
            char filename[32];
            snprintf(filename, sizeof filename, "t3.%d", i);
            FILE *fp = fopen(filename, "r");  /* opened exactly once */
            if (fp) {
                /* stop on a short or failed read instead of feof() */
                while ( fscanf(fp, "%c%c%c,%254s\n", &(typ[0]), &(typ[1]), &(typ[2]), line) == 4
                        && fscanf(fp, "%lf\n", &t3) == 1 ){
                    if (strncmp(typ, "aab", 3) == 0 && fabs(t3) > numzero){
                        sscanf(line,"%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&v);
                        /* shift CAS-local labels to correlated-orbital labels */
                        p += nc;
                        q += nc;
                        r += nc;
                        t += - nocc + nc;
                        u += - nocc + nc;
                        v += - nocc + nc;
                        /* NOTE(review): det_str is written but never read --
                         * parity_ab_str() below is disabled.  Kept verbatim
                         * from the original for reference. */
                        for (itmp = 0; itmp < nocc+nvir; itmp++)
                            det_str[itmp] = Refdet[itmp];
                        det_str[p] = 1;
                        det_str[q] = 1;
                        det_str[t+nocc] = 2;
                        det_str[u+nocc] = 2;
                        if (p != r && q != r) det_str[r] = 2;
                        else det_str[r] = 0;
                        if (t != v && u != v) det_str[v+nocc] = 1;
                        else det_str[v+nocc] = 3;
                        //parity = parity_ab_str(det_str, nocc+nvir);
                        parity = parity_ci_to_cc(p+q, 2, nocc);
                        parity *= parity_ci_to_cc(r, 1, nocc);
                        /* intermediate normalization of c3 */
                        t3 = parity * t3 / c0;
                        /* remove disconnected t1*t2 and t1^3 pieces -> pure t3 */
                        t3-= t1xt2aab(p, q, r, t, u, v, nocc, nvir, t1, t2aa, t2ab);
                        t3-= t1xt1xt1aab(p, q, r, t, u, v, nocc, nvir, t1);
                        for (it=0; it< nocc; it++){
                            t2_t3t4c_priv[D(it,r,u,v,nocc,nvir)] -= tmp1[Dtmp1(q,it,p,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,r,u,v,nocc,nvir)] += tmp1[Dtmp1(p,it,q,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,r,t,v,nocc,nvir)] += tmp1[Dtmp1(q,it,p,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,r,t,v,nocc,nvir)] -= tmp1[Dtmp1(p,it,q,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,q,v,u,nocc,nvir)] -= tmp1[Dtmp1(r,it,p,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,p,v,u,nocc,nvir)] += tmp1[Dtmp1(r,it,q,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,q,v,t,nocc,nvir)] += tmp1[Dtmp1(r,it,p,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,p,v,t,nocc,nvir)] -= tmp1[Dtmp1(r,it,q,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(q,it,u,v,nocc,nvir)] -= tmp1[Dtmp1(r,it,p,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(p,it,u,v,nocc,nvir)] += tmp1[Dtmp1(r,it,q,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(q,it,t,v,nocc,nvir)] += tmp1[Dtmp1(r,it,p,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(p,it,t,v,nocc,nvir)] -= tmp1[Dtmp1(r,it,q,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(r,it,v,u,nocc,nvir)] -= tmp1[Dtmp1(q,it,p,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(r,it,v,u,nocc,nvir)] += tmp1[Dtmp1(p,it,q,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(r,it,v,t,nocc,nvir)] += tmp1[Dtmp1(q,it,p,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(r,it,v,t,nocc,nvir)] -= tmp1[Dtmp1(p,it,q,u,nocc,nvir)] * t3;
                        }
                        for (at = 0; at < nvir; at++){
                            t2_t3t4c_priv[D(q,r,u,at,nocc,nvir)] += tmp2[Dtmp2(p,t,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(p,r,u,at,nocc,nvir)] -= tmp2[Dtmp2(q,t,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(q,r,t,at,nocc,nvir)] -= tmp2[Dtmp2(p,u,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(p,r,t,at,nocc,nvir)] += tmp2[Dtmp2(q,u,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(r,q,v,at,nocc,nvir)] += tmp2[Dtmp2(p,t,at,u,nvir)] * t3;
                            t2_t3t4c_priv[D(r,p,v,at,nocc,nvir)] -= tmp2[Dtmp2(q,t,at,u,nvir)] * t3;
                            t2_t3t4c_priv[D(r,q,v,at,nocc,nvir)] -= tmp2[Dtmp2(p,u,at,t,nvir)] * t3;
                            t2_t3t4c_priv[D(r,p,v,at,nocc,nvir)] += tmp2[Dtmp2(q,u,at,t,nvir)] * t3;
                            t2_t3t4c_priv[D(r,q,at,u,nocc,nvir)] += tmp2[Dtmp2(p,t,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(r,p,at,u,nocc,nvir)] -= tmp2[Dtmp2(q,t,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(r,q,at,t,nocc,nvir)] -= tmp2[Dtmp2(p,u,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(r,p,at,t,nocc,nvir)] += tmp2[Dtmp2(q,u,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(q,r,at,v,nocc,nvir)] += tmp2[Dtmp2(p,t,at,u,nvir)] * t3;
                            t2_t3t4c_priv[D(p,r,at,v,nocc,nvir)] -= tmp2[Dtmp2(q,t,at,u,nvir)] * t3;
                            t2_t3t4c_priv[D(q,r,at,v,nocc,nvir)] -= tmp2[Dtmp2(p,u,at,t,nvir)] * t3;
                            t2_t3t4c_priv[D(p,r,at,v,nocc,nvir)] += tmp2[Dtmp2(q,u,at,t,nvir)] * t3;
                        }
                        t2_t3t4c_priv[D(q,r,u,v,nocc,nvir)] += tmp3[S(p,t,nvir)] * t3;
                        t2_t3t4c_priv[D(p,r,u,v,nocc,nvir)] -= tmp3[S(q,t,nvir)] * t3;
                        t2_t3t4c_priv[D(q,r,t,v,nocc,nvir)] -= tmp3[S(p,u,nvir)] * t3;
                        t2_t3t4c_priv[D(p,r,t,v,nocc,nvir)] += tmp3[S(q,u,nvir)] * t3;
                        t2_t3t4c_priv[D(r,q,v,u,nocc,nvir)] += tmp3[S(p,t,nvir)] * t3;
                        t2_t3t4c_priv[D(r,p,v,u,nocc,nvir)] -= tmp3[S(q,t,nvir)] * t3;
                        t2_t3t4c_priv[D(r,q,v,t,nocc,nvir)] -= tmp3[S(p,u,nvir)] * t3;
                        t2_t3t4c_priv[D(r,p,v,t,nocc,nvir)] += tmp3[S(q,u,nvir)] * t3;
                    }
                }
                fclose(fp);
            }
            else
            {
                /* per-thread amplitude file missing/unreadable; nothing to add */
            }
        }
        /* serial reduction of the thread-private buffers */
#pragma omp critical
        {
            for (it=0; it< nocc; it++){
                for (jt=0; jt< nocc; jt++){
                    for (at=0; at< nvir; at++){
                        for (bt=0; bt< nvir; bt++){
                            t2_t3t4c[D(it,jt,at,bt,nocc,nvir)] += t2_t3t4c_priv[D(it,jt,at,bt,nocc,nvir)];
                        }
                    }
                }
            }
        }
        free(t2_t3t4c_priv);
    }
}
void t2t3c_shci_omp(double *t2_t3t4c, double *t1, double *t2aa, double *t2ab, double *tmp1, double *tmp2, double *tmp3, const int nc, const int nocc, const int nvir, const double numzero, const double c0)
{
    /* Accumulate into t2_t3t4c the contraction of "aab" triples amplitudes
     * (read from per-thread files "t3.<tid>" produced by an SHCI run) with
     * the intermediates tmp1/tmp2/tmp3.  Each OpenMP thread accumulates into
     * a private buffer which is reduced into t2_t3t4c under a critical
     * section.  Differs from t2t3c_dmrg_omp only in the (currently unused)
     * det_str spin labels.
     *
     * Fixes vs. previous revision:
     *  - every file was fopen()ed twice, leaking one FILE* per thread;
     *  - the read loop tested feof(), which re-processes the final record
     *    once after EOF; the loop now tests fscanf()'s return values;
     *  - the thread id was sprintf()ed into a 4-byte buffer (overflow for
     *    ids >= 1000); the filename is now built with snprintf;
     *  - unused local `scale` removed; private buffer zeroed via calloc.
     */
    const int t2size = nocc*nocc*nvir*nvir;
#pragma omp parallel default(none) \
    shared(t1, t2aa, t2ab, tmp1, tmp2, tmp3, t2_t3t4c)
    {
        int p, q, r, t, u, v, itmp, it, jt, at, bt;
        double t3, parity;
        uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
        /* reference determinant: 3 = doubly occupied orbital, 0 = empty */
        for (itmp = 0; itmp < nocc+nvir; itmp++){
            if (itmp<nocc) Refdet[itmp] = 3;
            else Refdet[itmp] = 0;
        }
        /* thread-private accumulator (zero-initialized) */
        double *t2_t3t4c_priv = (double *)calloc(t2size, sizeof(double));
        int i;
#pragma omp for
        for (i=0; i<omp_get_num_threads(); i++){
            char line[255], typ[4];
            char filename[32];
            snprintf(filename, sizeof filename, "t3.%d", i);
            FILE *fp = fopen(filename, "r");  /* opened exactly once */
            if (fp) {
                /* stop on a short or failed read instead of feof() */
                while ( fscanf(fp, "%c%c%c,%254s\n", &(typ[0]), &(typ[1]), &(typ[2]), line) == 4
                        && fscanf(fp, "%lf\n", &t3) == 1 ){
                    if (strncmp(typ, "aab", 3) == 0 && fabs(t3) > numzero){
                        sscanf(line,"%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&v);
                        /* shift CAS-local labels to correlated-orbital labels */
                        p += nc;
                        q += nc;
                        r += nc;
                        t += - nocc + nc;
                        u += - nocc + nc;
                        v += - nocc + nc;
                        /* NOTE(review): det_str is written but never read --
                         * parity_ab_str() below is disabled.  Kept verbatim
                         * from the original for reference. */
                        for (itmp = 0; itmp < nocc+nvir; itmp++)
                            det_str[itmp] = Refdet[itmp];
                        det_str[p] = 2;
                        det_str[q] = 2;
                        det_str[t+nocc] = 1;
                        det_str[u+nocc] = 1;
                        if (p != r && q != r) det_str[r] = 1;
                        else det_str[r] = 0;
                        if (t != v && u != v) det_str[v+nocc] = 2;
                        else det_str[v+nocc] = 3;
                        //parity = parity_ab_str(det_str, nocc+nvir);
                        parity = parity_ci_to_cc(p+q, 2, nocc);
                        parity *= parity_ci_to_cc(r, 1, nocc);
                        /* intermediate normalization of c3 */
                        t3 = parity * t3 / c0;
                        /* remove disconnected t1*t2 and t1^3 pieces -> pure t3 */
                        t3-= t1xt2aab(p, q, r, t, u, v, nocc, nvir, t1, t2aa, t2ab);
                        t3-= t1xt1xt1aab(p, q, r, t, u, v, nocc, nvir, t1);
                        for (it=0; it< nocc; it++){
                            t2_t3t4c_priv[D(it,r,u,v,nocc,nvir)] -= tmp1[Dtmp1(q,it,p,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,r,u,v,nocc,nvir)] += tmp1[Dtmp1(p,it,q,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,r,t,v,nocc,nvir)] += tmp1[Dtmp1(q,it,p,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,r,t,v,nocc,nvir)] -= tmp1[Dtmp1(p,it,q,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,q,v,u,nocc,nvir)] -= tmp1[Dtmp1(r,it,p,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,p,v,u,nocc,nvir)] += tmp1[Dtmp1(r,it,q,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,q,v,t,nocc,nvir)] += tmp1[Dtmp1(r,it,p,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(it,p,v,t,nocc,nvir)] -= tmp1[Dtmp1(r,it,q,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(q,it,u,v,nocc,nvir)] -= tmp1[Dtmp1(r,it,p,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(p,it,u,v,nocc,nvir)] += tmp1[Dtmp1(r,it,q,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(q,it,t,v,nocc,nvir)] += tmp1[Dtmp1(r,it,p,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(p,it,t,v,nocc,nvir)] -= tmp1[Dtmp1(r,it,q,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(r,it,v,u,nocc,nvir)] -= tmp1[Dtmp1(q,it,p,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(r,it,v,u,nocc,nvir)] += tmp1[Dtmp1(p,it,q,t,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(r,it,v,t,nocc,nvir)] += tmp1[Dtmp1(q,it,p,u,nocc,nvir)] * t3;
                            t2_t3t4c_priv[D(r,it,v,t,nocc,nvir)] -= tmp1[Dtmp1(p,it,q,u,nocc,nvir)] * t3;
                        }
                        for (at = 0; at < nvir; at++){
                            t2_t3t4c_priv[D(q,r,u,at,nocc,nvir)] += tmp2[Dtmp2(p,t,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(p,r,u,at,nocc,nvir)] -= tmp2[Dtmp2(q,t,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(q,r,t,at,nocc,nvir)] -= tmp2[Dtmp2(p,u,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(p,r,t,at,nocc,nvir)] += tmp2[Dtmp2(q,u,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(r,q,v,at,nocc,nvir)] += tmp2[Dtmp2(p,t,at,u,nvir)] * t3;
                            t2_t3t4c_priv[D(r,p,v,at,nocc,nvir)] -= tmp2[Dtmp2(q,t,at,u,nvir)] * t3;
                            t2_t3t4c_priv[D(r,q,v,at,nocc,nvir)] -= tmp2[Dtmp2(p,u,at,t,nvir)] * t3;
                            t2_t3t4c_priv[D(r,p,v,at,nocc,nvir)] += tmp2[Dtmp2(q,u,at,t,nvir)] * t3;
                            t2_t3t4c_priv[D(r,q,at,u,nocc,nvir)] += tmp2[Dtmp2(p,t,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(r,p,at,u,nocc,nvir)] -= tmp2[Dtmp2(q,t,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(r,q,at,t,nocc,nvir)] -= tmp2[Dtmp2(p,u,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(r,p,at,t,nocc,nvir)] += tmp2[Dtmp2(q,u,at,v,nvir)] * t3;
                            t2_t3t4c_priv[D(q,r,at,v,nocc,nvir)] += tmp2[Dtmp2(p,t,at,u,nvir)] * t3;
                            t2_t3t4c_priv[D(p,r,at,v,nocc,nvir)] -= tmp2[Dtmp2(q,t,at,u,nvir)] * t3;
                            t2_t3t4c_priv[D(q,r,at,v,nocc,nvir)] -= tmp2[Dtmp2(p,u,at,t,nvir)] * t3;
                            t2_t3t4c_priv[D(p,r,at,v,nocc,nvir)] += tmp2[Dtmp2(q,u,at,t,nvir)] * t3;
                        }
                        t2_t3t4c_priv[D(q,r,u,v,nocc,nvir)] += tmp3[S(p,t,nvir)] * t3;
                        t2_t3t4c_priv[D(p,r,u,v,nocc,nvir)] -= tmp3[S(q,t,nvir)] * t3;
                        t2_t3t4c_priv[D(q,r,t,v,nocc,nvir)] -= tmp3[S(p,u,nvir)] * t3;
                        t2_t3t4c_priv[D(p,r,t,v,nocc,nvir)] += tmp3[S(q,u,nvir)] * t3;
                        t2_t3t4c_priv[D(r,q,v,u,nocc,nvir)] += tmp3[S(p,t,nvir)] * t3;
                        t2_t3t4c_priv[D(r,p,v,u,nocc,nvir)] -= tmp3[S(q,t,nvir)] * t3;
                        t2_t3t4c_priv[D(r,q,v,t,nocc,nvir)] -= tmp3[S(p,u,nvir)] * t3;
                        t2_t3t4c_priv[D(r,p,v,t,nocc,nvir)] += tmp3[S(q,u,nvir)] * t3;
                    }
                }
                fclose(fp);
            }
            else
            {
                /* per-thread amplitude file missing/unreadable; nothing to add */
            }
        }
        /* serial reduction of the thread-private buffers */
#pragma omp critical
        {
            for (it=0; it< nocc; it++){
                for (jt=0; jt< nocc; jt++){
                    for (at=0; at< nvir; at++){
                        for (bt=0; bt< nvir; bt++){
                            t2_t3t4c[D(it,jt,at,bt,nocc,nvir)] += t2_t3t4c_priv[D(it,jt,at,bt,nocc,nvir)];
                        }
                    }
                }
            }
        }
        free(t2_t3t4c_priv);
    }
}
//void t2t3c_shci(double *t2_t3t4c, double *t1, double *t2aa, double *t2ab, double *tmp1, double *tmp2, double *tmp3, double *tmp4, double *tmp5, const int nc, const int nocc, const int nvir, const double numzero, const double c0)
//{
// int p, q, r, t, u, v, itmp, it, jt, at, bt;
// double t3, parity, scale;
// uint8_t Refdet[nocc+nvir], det_str[nocc+nvir];
// for (itmp = 0; itmp < nocc+nvir; itmp++){
// if (itmp<nocc) Refdet[itmp] = 3;
// else Refdet[itmp] = 0;
// }
//
// double ****tmp;
// tmp = (double ****)malloc(sizeof(double ***) * nocc);
// for (it=0; it< nocc; it++){
// tmp[it] = (double ***)malloc(sizeof(double **) * nocc);
// for (jt=0; jt< nocc; jt++){
// tmp[it][jt] = (double **)malloc(sizeof(double *) * nvir);
// for (at=0; at< nvir; at++){
// tmp[it][jt][at] = (double *)malloc(sizeof(double) * nvir);
// }
// }
// }
//
// FILE *fp;
// char line[255], typ[4], tmpc[255];
// char *ptr;
// fp = fopen("CIcoeff_shci.out", "r");
// fgets(line, 255, fp);
//
//// //test
//// int kt;
//// printf("tmp3 in c\n");
//// for (it=0; it< nocc; it++)
//// for (at=0; at< nvir; at++)
//// for (bt=0; bt< nvir; bt++)
//// for (ct=0; ct< nvir; ct++)
//// printf("%d %d %d %d %15.8f \n",it,at,bt,ct,tmp3[Dtmp34(it,at,bt,ct,nocc,nvir)]);
//
// if (fp) {
// while ( !feof(fp) ){
// fgets(line, 255, fp);
// ptr = strtok(line, ",");
//
// it=0;
// while(ptr != NULL){
// if(it==0){
// strcpy(typ, ptr);
// }
// if (it>0 && strlen(typ)==3 && strncmp(typ, "aab", 3)==0){
// strcpy(tmpc, ptr);
// if(it==1) p = atoi(tmpc);
// if(it==2) q = atoi(tmpc);
// if(it==3) t = atoi(tmpc);
// if(it==4) u = atoi(tmpc);
// if(it==5) r = atoi(tmpc);
// if(it==6) v = atoi(tmpc);
// if(it==7) t3= atof(tmpc);
// }
// if (it>7) break;
// ptr = strtok(NULL, ",");
// it++;
// }
//
// if (strlen(typ)==3 && strncmp(typ, "aab", 3) == 0 && fabs(t3) > numzero){
// //printf("'%s, %d, %d, %d, %d, %d, %d, %15.8f'\n", typ, p,q,t,u,r,v,t3);
//
// p += nc;
// q += nc;
// r += nc;
// t += - nocc + nc;
// u += - nocc + nc;
// v += - nocc + nc;
//
// //lsh test
// if(!(p == 2 && q == 3 && r == 3 && \
// t == 0 && u == 1 && v == 1)) continue;
// //printf("c3 in OTF: %15.8f\n",t3);
//
// for (itmp = 0; itmp < nocc+nvir; itmp++)
// det_str[itmp] = Refdet[itmp];
// det_str[p] = 2;
// det_str[q] = 2;
// det_str[t+nocc] = 1;
// det_str[u+nocc] = 1;
//
// if (p != r && q != r) det_str[r] = 1;
// else det_str[r] = 0;
// if (t != v && u != v) det_str[v+nocc] = 2;
// else det_str[v+nocc] = 3;
// parity = parity_ab_str(det_str, nocc+nvir);
// parity *= parity_ci_to_cc(p+q, 2, nocc);
// parity *= parity_ci_to_cc(r, 1, nocc);
//
// // interm norm of c3
// t3 = parity * t3 / c0;
//
// // extract t3
// t3-= t1xt2aab(p, q, r, t, u, v, nocc, nvir, t1, t2aa, t2ab);
// t3-= t1xt1xt1aab(p, q, r, t, u, v, nocc, nvir, t1);
// //printf("t3 in OTF: %15.8f\n",t3);
//
//// for (it=0; it< nocc; it++)
//// for (jt=0; jt< nocc; jt++)
//// for (at=0; at< nvir; at++)
//// for (bt=0; bt< nvir; bt++)
//// tmp[it][jt][at][bt] = 0.0;
//
// for (it=0; it< nocc; it++){
//// t2_t3t4c[D(it,r,u,v,nocc,nvir)] += tmp1[Dtmp1(p,it,q,t,nocc,nvir)] * t3;
//// t2_t3t4c[D(it,r,t,v,nocc,nvir)] -= tmp1[Dtmp1(p,it,q,u,nocc,nvir)] * t3;
//// if (r<p) t2_t3t4c[D(it,q,v,u,nocc,nvir)] -= tmp1[Dtmp1(r,it,p,t,nocc,nvir)] * t3;
//// if (r<q) t2_t3t4c[D(it,p,v,u,nocc,nvir)] += tmp1[Dtmp1(r,it,q,t,nocc,nvir)] * t3;
//// if (r<p) t2_t3t4c[D(it,q,v,t,nocc,nvir)] += tmp1[Dtmp1(r,it,p,u,nocc,nvir)] * t3;
//// if (r<q) t2_t3t4c[D(it,p,v,t,nocc,nvir)] -= tmp1[Dtmp1(r,it,q,u,nocc,nvir)] * t3;
////
//// t2_t3t4c[D(r,it,u,v,nocc,nvir)] += tmp1[Dtmp1(p,it,q,t,nocc,nvir)] * t3;
//// t2_t3t4c[D(r,it,t,v,nocc,nvir)] -= tmp1[Dtmp1(p,it,q,u,nocc,nvir)] * t3;
//// if (r<p) t2_t3t4c[D(q,it,v,u,nocc,nvir)] -= tmp1[Dtmp1(r,it,p,t,nocc,nvir)] * t3;
//// if (r<q) t2_t3t4c[D(p,it,v,u,nocc,nvir)] += tmp1[Dtmp1(r,it,q,t,nocc,nvir)] * t3;
//// if (r<p) t2_t3t4c[D(q,it,v,t,nocc,nvir)] += tmp1[Dtmp1(r,it,p,u,nocc,nvir)] * t3;
//// if (r<q) t2_t3t4c[D(p,it,v,t,nocc,nvir)] -= tmp1[Dtmp1(r,it,q,u,nocc,nvir)] * t3;
////
//// scale = 0.5;
//// if (r==p) t2_t3t4c[D(it,q,v,u,nocc,nvir)] -= tmp1[Dtmp1(r,it,p,t,nocc,nvir)] * t3 * scale;
//// if (r==q) t2_t3t4c[D(it,p,v,u,nocc,nvir)] += tmp1[Dtmp1(r,it,q,t,nocc,nvir)] * t3 * scale;
//// if (r==p) t2_t3t4c[D(it,q,v,t,nocc,nvir)] += tmp1[Dtmp1(r,it,p,u,nocc,nvir)] * t3 * scale;
//// if (r==q) t2_t3t4c[D(it,p,v,t,nocc,nvir)] -= tmp1[Dtmp1(r,it,q,u,nocc,nvir)] * t3 * scale;
//// if (r==p) t2_t3t4c[D(q,it,v,u,nocc,nvir)] -= tmp1[Dtmp1(r,it,p,t,nocc,nvir)] * t3 * scale;
//// if (r==q) t2_t3t4c[D(p,it,v,u,nocc,nvir)] += tmp1[Dtmp1(r,it,q,t,nocc,nvir)] * t3 * scale;
//// if (r==p) t2_t3t4c[D(q,it,v,t,nocc,nvir)] += tmp1[Dtmp1(r,it,p,u,nocc,nvir)] * t3 * scale;
//// if (r==q) t2_t3t4c[D(p,it,v,t,nocc,nvir)] -= tmp1[Dtmp1(r,it,q,u,nocc,nvir)] * t3 * scale;
////
//// t2_t3t4c[D(it,r,u,v,nocc,nvir)] -= tmp2[Dtmp2(p,t,q,it,nocc,nvir)] * t3;
//// t2_t3t4c[D(it,r,t,v,nocc,nvir)] += tmp2[Dtmp2(p,u,q,it,nocc,nvir)] * t3;
//// if (p<r) t2_t3t4c[D(it,q,v,u,nocc,nvir)] -= tmp2[Dtmp2(p,t,r,it,nocc,nvir)] * t3;
//// if (q<r) t2_t3t4c[D(it,p,v,u,nocc,nvir)] += tmp2[Dtmp2(q,t,r,it,nocc,nvir)] * t3;
//// if (p<r) t2_t3t4c[D(it,q,v,t,nocc,nvir)] += tmp2[Dtmp2(p,u,r,it,nocc,nvir)] * t3;
//// if (q<r) t2_t3t4c[D(it,p,v,t,nocc,nvir)] -= tmp2[Dtmp2(q,u,r,it,nocc,nvir)] * t3;
////
//// t2_t3t4c[D(r,it,u,v,nocc,nvir)] -= tmp2[Dtmp2(p,t,q,it,nocc,nvir)] * t3;
//// t2_t3t4c[D(r,it,t,v,nocc,nvir)] += tmp2[Dtmp2(p,u,q,it,nocc,nvir)] * t3;
//// if (p<r) t2_t3t4c[D(q,it,v,u,nocc,nvir)] -= tmp2[Dtmp2(p,t,r,it,nocc,nvir)] * t3;
//// if (q<r) t2_t3t4c[D(p,it,v,u,nocc,nvir)] += tmp2[Dtmp2(q,t,r,it,nocc,nvir)] * t3;
//// if (p<r) t2_t3t4c[D(q,it,v,t,nocc,nvir)] += tmp2[Dtmp2(p,u,r,it,nocc,nvir)] * t3;
//// if (q<r) t2_t3t4c[D(p,it,v,t,nocc,nvir)] -= tmp2[Dtmp2(q,u,r,it,nocc,nvir)] * t3;
////
//// scale = 0.5;
//// if (p==r) t2_t3t4c[D(it,q,v,u,nocc,nvir)] -= tmp2[Dtmp2(p,t,r,it,nocc,nvir)] * t3 * scale;
//// if (q==r) t2_t3t4c[D(it,p,v,u,nocc,nvir)] += tmp2[Dtmp2(q,t,r,it,nocc,nvir)] * t3 * scale;
//// if (p==r) t2_t3t4c[D(it,q,v,t,nocc,nvir)] += tmp2[Dtmp2(p,u,r,it,nocc,nvir)] * t3 * scale;
//// if (q==r) t2_t3t4c[D(it,p,v,t,nocc,nvir)] -= tmp2[Dtmp2(q,u,r,it,nocc,nvir)] * t3 * scale;
//// if (p==r) t2_t3t4c[D(q,it,v,u,nocc,nvir)] -= tmp2[Dtmp2(p,t,r,it,nocc,nvir)] * t3 * scale;
//// if (q==r) t2_t3t4c[D(p,it,v,u,nocc,nvir)] += tmp2[Dtmp2(q,t,r,it,nocc,nvir)] * t3 * scale;
//// if (p==r) t2_t3t4c[D(q,it,v,t,nocc,nvir)] += tmp2[Dtmp2(p,u,r,it,nocc,nvir)] * t3 * scale;
//// if (q==r) t2_t3t4c[D(p,it,v,t,nocc,nvir)] -= tmp2[Dtmp2(q,u,r,it,nocc,nvir)] * t3 * scale;
//
// }
//
// for (at = 0; at < nvir; at++){
// t2_t3t4c[D(q,r,at,v,nocc,nvir)] += tmp3[Dtmp34(p,t,at,u,nvir)] * t3;
// t2_t3t4c[D(p,r,at,v,nocc,nvir)] -= tmp3[Dtmp34(q,t,at,u,nvir)] * t3;
// if (t<v) t2_t3t4c[D(r,q,at,u,nocc,nvir)] += tmp3[Dtmp34(p,t,at,v,nvir)] * t3;
// if (t<v) t2_t3t4c[D(r,p,at,u,nocc,nvir)] -= tmp3[Dtmp34(q,t,at,v,nvir)] * t3;
// if (u<v) t2_t3t4c[D(r,q,at,t,nocc,nvir)] -= tmp3[Dtmp34(p,u,at,v,nvir)] * t3;
// if (u<v) t2_t3t4c[D(r,p,at,t,nocc,nvir)] += tmp3[Dtmp34(q,u,at,v,nvir)] * t3;
//
// t2_t3t4c[D(q,r,v,at,nocc,nvir)] += tmp3[Dtmp34(p,t,at,u,nvir)] * t3;
// t2_t3t4c[D(p,r,v,at,nocc,nvir)] -= tmp3[Dtmp34(q,t,at,u,nvir)] * t3;
// if (t<v) t2_t3t4c[D(r,q,u,at,nocc,nvir)] += tmp3[Dtmp34(p,t,at,v,nvir)] * t3;
// if (t<v) t2_t3t4c[D(r,p,u,at,nocc,nvir)] -= tmp3[Dtmp34(q,t,at,v,nvir)] * t3;
// if (u<v) t2_t3t4c[D(r,q,t,at,nocc,nvir)] -= tmp3[Dtmp34(p,u,at,v,nvir)] * t3;
// if (u<v) t2_t3t4c[D(r,p,t,at,nocc,nvir)] += tmp3[Dtmp34(q,u,at,v,nvir)] * t3;
//
// scale = 0.5;
// if (t==v) t2_t3t4c[D(r,q,at,u,nocc,nvir)] += tmp3[Dtmp34(p,t,at,v,nvir)] * t3 * scale;
// if (t==v) t2_t3t4c[D(r,p,at,u,nocc,nvir)] -= tmp3[Dtmp34(q,t,at,v,nvir)] * t3 * scale;
// if (u==v) t2_t3t4c[D(r,q,at,t,nocc,nvir)] -= tmp3[Dtmp34(p,u,at,v,nvir)] * t3 * scale;
// if (u==v) t2_t3t4c[D(r,p,at,t,nocc,nvir)] += tmp3[Dtmp34(q,u,at,v,nvir)] * t3 * scale;
// if (t==v) t2_t3t4c[D(r,q,u,at,nocc,nvir)] += tmp3[Dtmp34(p,t,at,v,nvir)] * t3 * scale;
// if (t==v) t2_t3t4c[D(r,p,u,at,nocc,nvir)] -= tmp3[Dtmp34(q,t,at,v,nvir)] * t3 * scale;
// if (u==v) t2_t3t4c[D(r,q,t,at,nocc,nvir)] -= tmp3[Dtmp34(p,u,at,v,nvir)] * t3 * scale;
// if (u==v) t2_t3t4c[D(r,p,t,at,nocc,nvir)] += tmp3[Dtmp34(q,u,at,v,nvir)] * t3 * scale;
//
// t2_t3t4c[D(q,r,at,v,nocc,nvir)] -= tmp4[Dtmp34(p,u,at,t,nvir)] * t3;
// t2_t3t4c[D(p,r,at,v,nocc,nvir)] += tmp4[Dtmp34(q,u,at,t,nvir)] * t3;
// if (v<t) t2_t3t4c[D(r,q,at,u,nocc,nvir)] += tmp4[Dtmp34(p,t,at,v,nvir)] * t3;
// if (v<t) t2_t3t4c[D(r,p,at,u,nocc,nvir)] -= tmp4[Dtmp34(q,t,at,v,nvir)] * t3;
// if (v<u) t2_t3t4c[D(r,q,at,t,nocc,nvir)] -= tmp4[Dtmp34(p,u,at,v,nvir)] * t3;
// if (v<u) t2_t3t4c[D(r,p,at,t,nocc,nvir)] += tmp4[Dtmp34(q,u,at,v,nvir)] * t3;
//
// t2_t3t4c[D(q,r,v,at,nocc,nvir)] -= tmp4[Dtmp34(p,u,at,t,nvir)] * t3;
// t2_t3t4c[D(p,r,v,at,nocc,nvir)] += tmp4[Dtmp34(q,u,at,t,nvir)] * t3;
// if (v<t) t2_t3t4c[D(r,q,u,at,nocc,nvir)] += tmp4[Dtmp34(p,t,at,v,nvir)] * t3;
// if (v<t) t2_t3t4c[D(r,p,u,at,nocc,nvir)] -= tmp4[Dtmp34(q,t,at,v,nvir)] * t3;
// if (v<u) t2_t3t4c[D(r,q,t,at,nocc,nvir)] -= tmp4[Dtmp34(p,u,at,v,nvir)] * t3;
// if (v<u) t2_t3t4c[D(r,p,t,at,nocc,nvir)] += tmp4[Dtmp34(q,u,at,v,nvir)] * t3;
//
// scale = 0.5;
// if (v==t) t2_t3t4c[D(r,q,at,u,nocc,nvir)] += tmp4[Dtmp34(p,t,at,v,nvir)] * t3 * scale;
// if (v==t) t2_t3t4c[D(r,p,at,u,nocc,nvir)] -= tmp4[Dtmp34(q,t,at,v,nvir)] * t3 * scale;
// if (v==u) t2_t3t4c[D(r,q,at,t,nocc,nvir)] -= tmp4[Dtmp34(p,u,at,v,nvir)] * t3 * scale;
// if (v==u) t2_t3t4c[D(r,p,at,t,nocc,nvir)] += tmp4[Dtmp34(q,u,at,v,nvir)] * t3 * scale;
// if (v==t) t2_t3t4c[D(r,q,u,at,nocc,nvir)] += tmp4[Dtmp34(p,t,at,v,nvir)] * t3 * scale;
// if (v==t) t2_t3t4c[D(r,p,u,at,nocc,nvir)] -= tmp4[Dtmp34(q,t,at,v,nvir)] * t3 * scale;
// if (v==u) t2_t3t4c[D(r,q,t,at,nocc,nvir)] -= tmp4[Dtmp34(p,u,at,v,nvir)] * t3 * scale;
// if (v==u) t2_t3t4c[D(r,p,t,at,nocc,nvir)] += tmp4[Dtmp34(q,u,at,v,nvir)] * t3 * scale;
// }
//
//// t2_t3t4c[D(q,r,u,v,nocc,nvir)] += tmp5[S(p,t,nvir)] * t3;
//// t2_t3t4c[D(p,r,u,v,nocc,nvir)] -= tmp5[S(q,t,nvir)] * t3;
//// t2_t3t4c[D(q,r,t,v,nocc,nvir)] -= tmp5[S(p,u,nvir)] * t3;
//// t2_t3t4c[D(p,r,t,v,nocc,nvir)] += tmp5[S(q,u,nvir)] * t3;
//// t2_t3t4c[D(r,q,v,u,nocc,nvir)] += tmp5[S(p,t,nvir)] * t3;
//// t2_t3t4c[D(r,p,v,u,nocc,nvir)] -= tmp5[S(q,t,nvir)] * t3;
//// t2_t3t4c[D(r,q,v,t,nocc,nvir)] -= tmp5[S(p,u,nvir)] * t3;
//// t2_t3t4c[D(r,p,v,t,nocc,nvir)] += tmp5[S(q,u,nvir)] * t3;
//
// }
// }
// fclose(fp);
// }
// else
// {
// // error message
// }
//
//
//// for (it=0; it< nocc; it++)
//// for (jt=0; jt< nocc; jt++)
//// for (at=0; at< nvir; at++)
//// for (bt=0; bt< nvir; bt++)
//// tmp[it][jt][at][bt] = t2_t3t4c[D(it,jt,at,bt,nocc,nvir)] + t2_t3t4c[D(jt,it,at,bt,nocc,nvir)];
////
//// for (it=0; it< nocc; it++)
//// for (jt=0; jt< nocc; jt++)
//// for (at=0; at< nvir; at++)
//// for (bt=0; bt< nvir; bt++)
//// t2_t3t4c[D(it,jt,at,bt,nocc,nvir)] = tmp[it][jt][at][bt];
//
// for (it=0; it< nocc; it++){
// for (jt=0; jt< nocc; jt++){
// for (at=0; at< nvir; at++){
// free(tmp[it][jt][at]);
// }
// free(tmp[it][jt]);
// }
// free(tmp[it]);
// }
// free(tmp);
//
//}
void c1_to_t1_mem(double *t1, double *c1, int nocc_cas, int nvir_cas, int nvir_corr, int nocc_iact)
{
    /* Scatter the packed CI singles vector c1 into the t1 amplitude array.
     * c1 is ordered virtual-major with occupied indices descending; each
     * element lands at t1[(i + nocc_iact) * nvir_corr + a].  Elements of t1
     * not addressed by the loop are left untouched. */
    int src = 0;
    for (int a = 0; a < nvir_cas; ++a) {
        for (int i = nocc_cas - 1; i >= 0; --i, ++src) {
            t1[(i + nocc_iact) * nvir_corr + a] = c1[src];
        }
    }
}
void c2_to_t2_mem(double *t2aa, double *t2ab, double *c2aa, double *c2ab, double *t1, int nocc_cas, int nvir_cas, int nocc_corr, int nvir_corr, int nocc_iact, double numzero)
{
    /* Convert packed CI doubles coefficients into t2 amplitudes:
     * subtract the disconnected t1*t1 part and scatter into the full
     * (occ,occ,vir,vir) arrays.  Entries whose CI coefficient is at or
     * below numzero are skipped (their t2 slots are left untouched). */

    /* same-spin block: c2aa is packed over b>a, j>i with descending occupieds */
    int src = -1;
    for (int vb = 1; vb < nvir_cas; vb++) {
        for (int va = 0; va < vb; va++) {
            for (int oj = nocc_cas - 1; oj > 0; oj--) {
                for (int oi = oj - 1; oi >= 0; oi--) {
                    src++;
                    double amp = c2aa[src];
                    if (fabs(amp) <= numzero) continue;
                    amp -= t1xt1aa(oi + nocc_iact, oj + nocc_iact, va, vb, nocc_corr, nvir_corr, t1);
                    const int I = oi + nocc_iact;
                    const int J = oj + nocc_iact;
                    /* antisymmetric scatter over (I,J) and (va,vb) */
                    t2aa[((I * nocc_corr + J) * nvir_corr + va) * nvir_corr + vb] =  amp;
                    t2aa[((I * nocc_corr + J) * nvir_corr + vb) * nvir_corr + va] = -amp;
                    t2aa[((J * nocc_corr + I) * nvir_corr + va) * nvir_corr + vb] = -amp;
                    t2aa[((J * nocc_corr + I) * nvir_corr + vb) * nvir_corr + va] =  amp;
                }
            }
        }
    }

    /* opposite-spin block: c2ab is a full (ia) x (jb) matrix */
    int row = -1;
    for (int va = 0; va < nvir_cas; va++) {
        for (int oi = nocc_cas - 1; oi >= 0; oi--) {
            row++;
            int col = -1;
            for (int vb = 0; vb < nvir_cas; vb++) {
                for (int oj = nocc_cas - 1; oj >= 0; oj--) {
                    col++;
                    double amp = c2ab[row * nocc_cas * nvir_cas + col];
                    if (fabs(amp) <= numzero) continue;
                    amp -= t1xt1ab(oi + nocc_iact, oj + nocc_iact, va, vb, nocc_corr, nvir_corr, t1);
                    t2ab[(((oi + nocc_iact) * nocc_corr + oj + nocc_iact) * nvir_corr + va) * nvir_corr + vb] = amp;
                }
            }
        }
    }
}
void c2_to_t2_u_mem(double *t2aa, double *t2ab, double *t2bb, double *c2aa, double *c2ab, double *c2bb, double *t1a, double *t1b, int nocca_cas, int nvira_cas, int noccb_cas, int nvirb_cas, int nocca_corr, int nvira_corr, int noccb_corr, int nvirb_corr, int nocca_iact, int noccb_iact, double numzero)
{
    /* Unrestricted variant of c2_to_t2_mem: converts the packed alpha-alpha,
     * alpha-beta and beta-beta CI doubles into t2 amplitudes, subtracting the
     * disconnected t1*t1 pieces.  Coefficients at or below numzero are
     * skipped (their t2 slots are left untouched). */

    /* alpha-alpha: packed over b>a, j>i with descending occupieds */
    int src = -1;
    for (int vb = 1; vb < nvira_cas; vb++) {
        for (int va = 0; va < vb; va++) {
            for (int oj = nocca_cas - 1; oj > 0; oj--) {
                for (int oi = oj - 1; oi >= 0; oi--) {
                    src++;
                    double amp = c2aa[src];
                    if (fabs(amp) <= numzero) continue;
                    amp -= t1xt1aa(oi + nocca_iact, oj + nocca_iact, va, vb, nocca_corr, nvira_corr, t1a);
                    const int I = oi + nocca_iact;
                    const int J = oj + nocca_iact;
                    t2aa[((I * nocca_corr + J) * nvira_corr + va) * nvira_corr + vb] =  amp;
                    t2aa[((I * nocca_corr + J) * nvira_corr + vb) * nvira_corr + va] = -amp;
                    t2aa[((J * nocca_corr + I) * nvira_corr + va) * nvira_corr + vb] = -amp;
                    t2aa[((J * nocca_corr + I) * nvira_corr + vb) * nvira_corr + va] =  amp;
                }
            }
        }
    }

    /* alpha-beta: full (ia) x (jb) matrix */
    int row = -1;
    for (int va = 0; va < nvira_cas; va++) {
        for (int oi = nocca_cas - 1; oi >= 0; oi--) {
            row++;
            int col = -1;
            for (int vb = 0; vb < nvirb_cas; vb++) {
                for (int oj = noccb_cas - 1; oj >= 0; oj--) {
                    col++;
                    double amp = c2ab[row * noccb_cas * nvirb_cas + col];
                    if (fabs(amp) <= numzero) continue;
                    amp -= t1xt1ab_u(oi + nocca_iact, oj + noccb_iact, va, vb, nvira_corr, nvirb_corr, t1a, t1b);
                    t2ab[(((oi + nocca_iact) * noccb_corr + oj + noccb_iact) * nvira_corr + va) * nvirb_corr + vb] = amp;
                }
            }
        }
    }

    /* beta-beta: same packing as alpha-alpha, with beta dimensions */
    src = -1;
    for (int vb = 1; vb < nvirb_cas; vb++) {
        for (int va = 0; va < vb; va++) {
            for (int oj = noccb_cas - 1; oj > 0; oj--) {
                for (int oi = oj - 1; oi >= 0; oi--) {
                    src++;
                    double amp = c2bb[src];
                    if (fabs(amp) <= numzero) continue;
                    amp -= t1xt1aa(oi + noccb_iact, oj + noccb_iact, va, vb, noccb_corr, nvirb_corr, t1b);
                    const int I = oi + noccb_iact;
                    const int J = oj + noccb_iact;
                    t2bb[((I * noccb_corr + J) * nvirb_corr + va) * nvirb_corr + vb] =  amp;
                    t2bb[((I * noccb_corr + J) * nvirb_corr + vb) * nvirb_corr + va] = -amp;
                    t2bb[((J * noccb_corr + I) * nvirb_corr + va) * nvirb_corr + vb] = -amp;
                    t2bb[((J * noccb_corr + I) * nvirb_corr + vb) * nvirb_corr + va] =  amp;
                }
            }
        }
    }
}
/* Convert one in-memory "aab" CI triples coefficient into a connected t3
 * amplitude: intermediate-normalize by c0, then strip the disconnected
 * t1*t2 and t1^3 contributions. */
double c3tot3aab_mem(int i, int j, int k, int a, int b, int c, int nocc_corr, int nvir_corr, int nocc_cas, int nvir_cas, int nocc_iact, int nocc2, double *t1, double *t2aa, double *t2ab, double *c3aab, double c0)
{
    double amp = c3aab[DSc(i-nocc_iact, j-nocc_iact, k-nocc_iact, a, b, c, nocc_cas, nvir_cas, nocc2)] / c0;
    amp -= t1xt2aab(i, j, k, a, b, c, nocc_corr, nvir_corr, t1, t2aa, t2ab);
    amp -= t1xt1xt1aab(i, j, k, a, b, c, nocc_corr, nvir_corr, t1);
    return amp;
}
/* Convert one in-memory "aaa" CI triples coefficient into a connected t3
 * amplitude: intermediate-normalize by c0, then strip the disconnected
 * t1*t2 and t1^3 contributions. */
double c3tot3aaa_mem(int i, int j, int k, int a, int b, int c, int nocc_corr, int nvir_corr, int nocc_cas, int nvir_cas, int nocc_iact, int nocc3, double *t1, double *t2aa, double *c3aaa, double c0)
{
    double amp = c3aaa[Tc(i-nocc_iact, j-nocc_iact, k-nocc_iact, a, b, c, nocc3)] / c0;
    amp -= t1xt2aaa (i, j, k, a, b, c, nocc_corr, nvir_corr, t1, t2aa);
    amp -= t1xt1xt1aaa (i, j, k, a, b, c, nocc_corr, nvir_corr, t1);
    return amp;
}
/* Disconnected t1 * t3(aab) contribution to an "aabb" quadruple amplitude:
 * the sum of eight products of a single amplitude t1[S(o,v,nvir_corr)] with a
 * connected triple from c3tot3aab_mem.  The sign pattern alternates under
 * exchange within the (i,j)/(a,b) pair and within the (k,l)/(c,d) pair, as
 * seen from the +/- pairs below.
 * NOTE(review): index-pairing convention (which occupied goes with which
 * virtual) is inherited from the caller -- confirm against t2t4c usage. */
double t1xc3aabb_mem(int i, int j, int k, int l, int a, int b, int c, int d, int nocc_corr, int nvir_corr, int nocc_cas, int nvir_cas, int nocc_iact, int nocc2, double *t1, double *t2aa, double *t2ab, double *c3aab, double c0)
{
double t1xt3 = 0.0;
/* t1 from the (i,j)/(a,b) pair, t3 over the remaining five indices */
t1xt3 += t1[S(i, a, nvir_corr)] * c3tot3aab_mem(k, l, j, c, d, b, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 -= t1[S(i, b, nvir_corr)] * c3tot3aab_mem(k, l, j, c, d, a, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 -= t1[S(j, a, nvir_corr)] * c3tot3aab_mem(k, l, i, c, d, b, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 += t1[S(j, b, nvir_corr)] * c3tot3aab_mem(k, l, i, c, d, a, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
/* t1 from the (k,l)/(c,d) pair */
t1xt3 += t1[S(k, c, nvir_corr)] * c3tot3aab_mem(i, j, l, a, b, d, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 -= t1[S(k, d, nvir_corr)] * c3tot3aab_mem(i, j, l, a, b, c, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 -= t1[S(l, c, nvir_corr)] * c3tot3aab_mem(i, j, k, a, b, d, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 += t1[S(l, d, nvir_corr)] * c3tot3aab_mem(i, j, k, a, b, c, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
return t1xt3;
}
/* Disconnected t1 * t3 contribution to an "aaab" quadruple amplitude:
 * nine terms pair a same-spin t1 with an aab triple (signs alternate over
 * the occupied/virtual permutations), and one final term pairs the
 * opposite-spin t1[S(l,d)] with the fully same-spin aaa triple.
 * NOTE(review): the (k,c) term appears with a '+' where strict column
 * alternation would suggest '-'; this matches the original and is assumed
 * intentional -- confirm against the derivation if results look off. */
double t1xc3aaab_mem(int i, int j, int k, int l, int a, int b, int c, int d, int nocc_corr, int nvir_corr, int nocc_cas, int nvir_cas, int nocc_iact, int nocc2, int nocc3, double *t1, double *t2aa, double *t2ab, double *c3aaa, double *c3aab, double c0)
{
double t1xt3 = 0.0;
/* t1 from occupied i, j, k paired with virtuals a, b, c; aab triple over rest */
t1xt3 += t1[S(i, a, nvir_corr)] * c3tot3aab_mem(j, k, l, b, c, d, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 -= t1[S(i, b, nvir_corr)] * c3tot3aab_mem(j, k, l, a, c, d, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 += t1[S(i, c, nvir_corr)] * c3tot3aab_mem(j, k, l, a, b, d, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 -= t1[S(j, a, nvir_corr)] * c3tot3aab_mem(i, k, l, b, c, d, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 += t1[S(j, b, nvir_corr)] * c3tot3aab_mem(i, k, l, a, c, d, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 -= t1[S(j, c, nvir_corr)] * c3tot3aab_mem(i, k, l, a, b, d, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 += t1[S(k, a, nvir_corr)] * c3tot3aab_mem(i, j, l, b, c, d, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 -= t1[S(k, b, nvir_corr)] * c3tot3aab_mem(i, j, l, a, c, d, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1xt3 += t1[S(k, c, nvir_corr)] * c3tot3aab_mem(i, j, l, a, b, d, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
/* beta single paired with the aaa triple */
t1xt3 += t1[S(l, d, nvir_corr)] * c3tot3aaa_mem(i, j, k, a, b, c, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc3, t1, t2aa, c3aaa, c0);
return t1xt3;
}
/*
 * Accumulate the connected-quadruples (T4) contribution to the doubles
 * residual, reading DMRG CI quadruple coefficients on the fly from the
 * per-thread files "t4.<tid>" produced by an earlier pass.
 *
 * Each record is two text lines:
 *     <spin case "aabb" or "aaab">,<8 comma-separated orbital indices>
 *     <CI coefficient>
 * Records with |c4| <= numzero are skipped.  Each surviving coefficient is
 * converted to a cluster amplitude (intermediate normalization by c0,
 * CI->CC reordering parity, subtraction of the disconnected t1*c3, t2*t2,
 * t1*t1*t2 and t1^4 pieces) and contracted with the <ov|ov> integrals
 * e2ovov into a thread-private buffer, which is symmetrized into t2t4c
 * under a critical section.
 *
 * t2t4c      [in/out] doubles residual, D(i,j,a,b) layout
 * t1/t2aa/t2ab/c3aaa/c3aab  amplitudes and CI triples (intermediate norm)
 * e2ovov     two-electron ovov integrals, De(...) layout
 * nocc_iact  inactive-occupied offset added to CAS occupied indices
 * nocc_corr/nvir_corr  correlated occupied/virtual dimensions
 * nocc_cas/nvir_cas    CAS occupied/virtual dimensions
 * numzero    screening threshold on |c4|
 * c0         reference CI coefficient
 * norm       0SDT norm on entry; the 0SDTQ norm is printed on exit
 */
void t2t4c_dmrg_omp_otf_mem(double *t2t4c, double *t1, double *t2aa, double *t2ab, double *c3aaa, double *c3aab, double *e2ovov, const int nocc_iact, const int nocc_corr, const int nvir_corr, const int nocc_cas, const int nvir_cas, double numzero, const double c0, double norm)
{
    const int nocc2 = (int) nocc_cas*(nocc_cas-1)/2;
    const int nocc3 = (int) nocc_cas*(nocc_cas-1)*(nocc_cas-2)/6;
    double norm0SDT = norm;
    const int t2size = nocc_corr*nocc_corr*nvir_corr*nvir_corr;
    /* const scalar parameters (nocc_*, nvir_*, c0, t2size, ...) are
       predetermined shared, hence not listed despite default(none) */
#pragma omp parallel default(none) \
    shared(t1, t2aa, t2ab, c3aaa, c3aab, e2ovov, t2t4c, norm, numzero)
    {
    double t4, parity;
    int p, q, r, s, t, u, v, w, itmp, it, jt, at, bt;
    char typ[4], line[255];
    uint8_t Refdet[nocc_corr+nvir_corr], det_str[nocc_corr+nvir_corr];
    /* reference determinant occupation string:
       3 = doubly occupied below the Fermi level, 0 = empty above */
    for (itmp = 0; itmp < nocc_corr+nvir_corr; itmp++){
        if (itmp<nocc_corr) Refdet[itmp] = 3;
        else Refdet[itmp] = 0;
    }

    /* thread-private accumulation buffer, merged at the end */
    double *t2t4c_priv = (double *)malloc(sizeof(double) * t2size);
    for (it=0; it< t2size; it++){
        t2t4c_priv[it] = 0.0;
    }

    int i;
#pragma omp for reduction(+ : norm)
    for (i=0; i<omp_get_num_threads(); i++){
        char filename[32];
        snprintf(filename, sizeof filename, "t4.%d", i);  /* "t4.<tid>" */
        FILE *fp = fopen(filename, "r");
        if (fp) {
            /* Loop on the fscanf return counts rather than feof():
               feof() only turns true AFTER a failed read, so the old
               while(!feof) form reprocessed stale typ/t4 on a short or
               malformed record.  %254s bounds the read to line[255]. */
            while ( fscanf(fp, "%c%c%c%c,%254s\n", &(typ[0]), &(typ[1]), &(typ[2]), &(typ[3]), line) == 5 &&
                    fscanf(fp, "%lf\n", &t4) == 1 ){
                if (strncmp(typ, "aabb", 4) == 0 && fabs(t4) > numzero){
                    /* two alpha (p,q -> t,u) and two beta (r,s -> v,w)
                       excitations; map CAS indices to correlated space */
                    sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&s,&v,&w);
                    p += nocc_iact;
                    q += nocc_iact;
                    r += nocc_iact;
                    s += nocc_iact;
                    t += - nocc_cas;
                    u += - nocc_cas;
                    v += - nocc_cas;
                    w += - nocc_cas;
                    norm += t4*t4;
                    /* occupation-string bookkeeping (1 = alpha, 2 = beta,
                       3 = double); retained for parity debugging */
                    for (itmp = 0; itmp < nocc_corr+nvir_corr; itmp++)
                        det_str[itmp] = Refdet[itmp];
                    det_str[p] = 1;
                    det_str[q] = 1;
                    det_str[t+nocc_corr] = 2;
                    det_str[u+nocc_corr] = 2;
                    if (p != r && q != r) det_str[r] = 2;
                    else det_str[r] = 0;
                    if (p != s && q != s) det_str[s] = 2;
                    else det_str[s] = 0;
                    if (t != v && u != v) det_str[v+nocc_corr] = 1;
                    else det_str[v+nocc_corr] = 3;
                    if (t != w && u != w) det_str[w+nocc_corr] = 1;
                    else det_str[w+nocc_corr] = 3;
                    parity = parity_ci_to_cc(p+q, 2, nocc_corr);
                    parity *= parity_ci_to_cc(r+s, 2, nocc_corr);
                    /* intermediate normalization of c4 */
                    t4 = parity * t4 / c0;
                    /* remove disconnected contributions -> pure T4 */
                    t4-= t1xc3aabb_mem(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
                    t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t2aa, t2ab);
                    t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1, t2aa, t2ab);
                    t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1); // may have bug
                    /* contract with <ov|ov>: all index permutations/signs */
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(p,t,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,t,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,u,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(q,u,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,r,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,t,s,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,r,u,w,nocc_corr,nvir_corr)] += e2ovov[De(q,t,s,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,r,t,w,nocc_corr,nvir_corr)] += e2ovov[De(p,u,s,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,r,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,u,s,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,v,nocc_corr,nvir_corr)] -= e2ovov[De(p,t,r,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,v,nocc_corr,nvir_corr)] += e2ovov[De(q,t,r,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,v,nocc_corr,nvir_corr)] += e2ovov[De(p,u,r,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,v,nocc_corr,nvir_corr)] -= e2ovov[De(q,u,r,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,r,u,v,nocc_corr,nvir_corr)] += e2ovov[De(p,t,s,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,r,u,v,nocc_corr,nvir_corr)] -= e2ovov[De(q,t,s,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,r,t,v,nocc_corr,nvir_corr)] -= e2ovov[De(p,u,s,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,r,t,v,nocc_corr,nvir_corr)] += e2ovov[De(q,u,s,w,nocc_corr,nvir_corr)] * t4;
                }
                else if (strncmp(typ, "aaab", 4) == 0 && fabs(t4) > numzero){
                    /* three alpha (p,q,r -> t,u,v) and one beta (s -> w);
                       factor 2 accounts for the spin-flipped partner */
                    norm += 2.0*t4*t4;
                    sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v,&s,&w);
                    p += nocc_iact;
                    q += nocc_iact;
                    r += nocc_iact;
                    s += nocc_iact;
                    t += - nocc_cas;
                    u += - nocc_cas;
                    v += - nocc_cas;
                    w += - nocc_cas;
                    for (itmp = 0; itmp < nocc_corr+nvir_corr; itmp++)
                        det_str[itmp] = Refdet[itmp];
                    det_str[p] = 2;
                    det_str[q] = 2;
                    det_str[r] = 2;
                    det_str[t+nocc_corr] = 1;
                    det_str[u+nocc_corr] = 1;
                    det_str[v+nocc_corr] = 1;
                    if (p != s && q != s && r != s) det_str[s] = 1;
                    else det_str[s] = 0;
                    if (t != w && u != w && v != w) det_str[w+nocc_corr] = 2;
                    else det_str[w+nocc_corr] = 3;
                    parity = parity_ci_to_cc(p+q+r, 3, nocc_corr);
                    parity *= parity_ci_to_cc(s, 1, nocc_corr);
                    /* intermediate normalization of c4 */
                    t4 = parity * t4 / c0;
                    /* remove disconnected contributions -> pure T4 */
                    t4-= t1xc3aaab_mem (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, nocc3, t1, t2aa, t2ab, c3aaa, c3aab, c0);
                    t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t2aa, t2ab); // may have 1e-3 bug
                    t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1, t2aa, t2ab); // may have 1e-5 bug
                    t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1); // may have 1e-6 bug
                    t2t4c_priv[D(r,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(p,t,q,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(r,t,p,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(q,t,r,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,t,p,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,t,r,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,t,q,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(p,v,q,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(r,v,p,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(q,v,r,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,v,p,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,v,r,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,v,q,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(p,u,q,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(r,u,p,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(q,u,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,u,p,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,u,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,u,q,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,t,q,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,t,p,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,t,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(q,t,p,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(p,t,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(r,t,q,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,v,q,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,v,p,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,v,r,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(q,v,p,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(p,v,r,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(r,v,q,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,u,q,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,u,p,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,u,r,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(q,u,p,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(p,u,r,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(r,u,q,t,nocc_corr,nvir_corr)] * t4;
                }
            }
            fclose(fp);
        }
    }
    /* merge thread-private buffer into t2t4c, symmetrizing (ij,ab)<->(ji,ba) */
#pragma omp critical
    {
    for (it=0; it< nocc_corr; it++){
        for (jt=0; jt< nocc_corr; jt++){
            for (at=0; at< nvir_corr; at++){
                for (bt=0; bt< nvir_corr; bt++){
                    t2t4c[D(it,jt,at,bt,nocc_corr,nvir_corr)] += 0.5*(t2t4c_priv[D(it,jt,at,bt,nocc_corr,nvir_corr)]+t2t4c_priv[D(jt,it,bt,at,nocc_corr,nvir_corr)]);
                }
            }
        }
    }
    }
    /* each thread frees its own buffer; no need to serialize this */
    free(t2t4c_priv);
    }
    printf ("0SDTQ (Q) = %18.16f ( %18.16f )\n", norm, norm-norm0SDT);
}
/*
 * Accumulate the connected-quadruples (T4) contribution to the doubles
 * residual, reading SHCI quadruple coefficients on the fly from the
 * per-thread files "t4.<tid>" produced by an earlier pass.
 *
 * Each record is two text lines:
 *     <spin case "aabb" or "aaab">,<8 comma-separated orbital indices>
 *     <CI coefficient>
 * Records with |c4| <= numzero are skipped.  Each surviving coefficient is
 * converted to a cluster amplitude (intermediate normalization by c0,
 * CI->CC reordering parity, subtraction of the disconnected t1*c3, t2*t2,
 * t1*t1*t2 and t1^4 pieces) and contracted with the <ov|ov> integrals
 * e2ovov into a thread-private buffer, which is symmetrized into t2t4c
 * under a critical section.
 *
 * t2t4c      [in/out] doubles residual, D(i,j,a,b) layout
 * t1/t2aa/t2ab/c3aaa/c3aab  amplitudes and CI triples (intermediate norm)
 * e2ovov     two-electron ovov integrals, De(...) layout
 * nocc_iact  inactive-occupied offset added to CAS occupied indices
 * nocc_corr/nvir_corr  correlated occupied/virtual dimensions
 * nocc_cas/nvir_cas    CAS occupied/virtual dimensions
 * numzero    screening threshold on |c4|
 * c0         reference CI coefficient
 * norm       0SDT norm on entry; the 0SDTQ norm is printed on exit
 */
void t2t4c_shci_omp_otf_mem(double *t2t4c, double *t1, double *t2aa, double *t2ab, double *c3aaa, double *c3aab, double *e2ovov, const int nocc_iact, const int nocc_corr, const int nvir_corr, const int nocc_cas, const int nvir_cas, double numzero, const double c0, double norm)
{
    const int nocc2 = (int) nocc_cas*(nocc_cas-1)/2;
    const int nocc3 = (int) nocc_cas*(nocc_cas-1)*(nocc_cas-2)/6;
    double norm0SDT = norm;
    const int t2size = nocc_corr*nocc_corr*nvir_corr*nvir_corr;
    /* const scalar parameters (nocc_*, nvir_*, c0, t2size, ...) are
       predetermined shared, hence not listed despite default(none) */
#pragma omp parallel default(none) \
    shared(t1, t2aa, t2ab, c3aaa, c3aab, e2ovov, t2t4c, norm, numzero)
    {
    double t4, parity;
    int p, q, r, s, t, u, v, w, itmp, it, jt, at, bt;
    char typ[4], line[255];
    uint8_t Refdet[nocc_corr+nvir_corr], det_str[nocc_corr+nvir_corr];
    /* reference determinant occupation string:
       3 = doubly occupied below the Fermi level, 0 = empty above */
    for (itmp = 0; itmp < nocc_corr+nvir_corr; itmp++){
        if (itmp<nocc_corr) Refdet[itmp] = 3;
        else Refdet[itmp] = 0;
    }

    /* thread-private accumulation buffer, merged at the end */
    double *t2t4c_priv = (double *)malloc(sizeof(double) * t2size);
    for (it=0; it< t2size; it++){
        t2t4c_priv[it] = 0.0;
    }

    int i;
#pragma omp for reduction(+ : norm)
    for (i=0; i<omp_get_num_threads(); i++){
        char filename[32];
        snprintf(filename, sizeof filename, "t4.%d", i);  /* "t4.<tid>" */
        FILE *fp = fopen(filename, "r");
        if (fp) {
            /* Loop on the fscanf return counts rather than feof():
               feof() only turns true AFTER a failed read, so the old
               while(!feof) form reprocessed stale typ/t4 on a short or
               malformed record.  %254s bounds the read to line[255]. */
            while ( fscanf(fp, "%c%c%c%c,%254s\n", &(typ[0]), &(typ[1]), &(typ[2]), &(typ[3]), line) == 5 &&
                    fscanf(fp, "%lf\n", &t4) == 1 ){
                if (strncmp(typ, "aabb", 4) == 0 && fabs(t4) > numzero){
                    /* two alpha (p,q -> t,u) and two beta (r,s -> v,w)
                       excitations; map CAS indices to correlated space */
                    sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&s,&v,&w);
                    p += nocc_iact;
                    q += nocc_iact;
                    r += nocc_iact;
                    s += nocc_iact;
                    t += - nocc_cas;
                    u += - nocc_cas;
                    v += - nocc_cas;
                    w += - nocc_cas;
                    norm += t4*t4;
                    /* occupation-string bookkeeping (1 = alpha, 2 = beta,
                       3 = double); retained for parity debugging */
                    for (itmp = 0; itmp < nocc_corr+nvir_corr; itmp++)
                        det_str[itmp] = Refdet[itmp];
                    det_str[p] = 2;
                    det_str[q] = 2;
                    det_str[t+nocc_corr] = 1;
                    det_str[u+nocc_corr] = 1;
                    if (p != r && q != r) det_str[r] = 1;
                    else det_str[r] = 0;
                    if (p != s && q != s) det_str[s] = 1;
                    else det_str[s] = 0;
                    if (t != v && u != v) det_str[v+nocc_corr] = 2;
                    else det_str[v+nocc_corr] = 3;
                    if (t != w && u != w) det_str[w+nocc_corr] = 2;
                    else det_str[w+nocc_corr] = 3;
                    parity = parity_ci_to_cc(p+q, 2, nocc_corr);
                    parity *= parity_ci_to_cc(r+s, 2, nocc_corr);
                    /* intermediate normalization of c4 */
                    t4 = parity * t4 / c0;
                    /* remove disconnected contributions -> pure T4 */
                    t4-= t1xc3aabb_mem(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
                    t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t2aa, t2ab);
                    t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1, t2aa, t2ab);
                    t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1); // may have bug
                    /* contract with <ov|ov>: all index permutations/signs */
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(p,t,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,t,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,u,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(q,u,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,r,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,t,s,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,r,u,w,nocc_corr,nvir_corr)] += e2ovov[De(q,t,s,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,r,t,w,nocc_corr,nvir_corr)] += e2ovov[De(p,u,s,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,r,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,u,s,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,v,nocc_corr,nvir_corr)] -= e2ovov[De(p,t,r,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,v,nocc_corr,nvir_corr)] += e2ovov[De(q,t,r,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,v,nocc_corr,nvir_corr)] += e2ovov[De(p,u,r,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,v,nocc_corr,nvir_corr)] -= e2ovov[De(q,u,r,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,r,u,v,nocc_corr,nvir_corr)] += e2ovov[De(p,t,s,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,r,u,v,nocc_corr,nvir_corr)] -= e2ovov[De(q,t,s,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,r,t,v,nocc_corr,nvir_corr)] -= e2ovov[De(p,u,s,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,r,t,v,nocc_corr,nvir_corr)] += e2ovov[De(q,u,s,w,nocc_corr,nvir_corr)] * t4;
                }
                else if (strncmp(typ, "aaab", 4) == 0 && fabs(t4) > numzero){
                    /* three alpha (p,q,r -> t,u,v) and one beta (s -> w);
                       factor 2 accounts for the spin-flipped partner */
                    norm += 2.0*t4*t4;
                    sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v,&s,&w);
                    p += nocc_iact;
                    q += nocc_iact;
                    r += nocc_iact;
                    s += nocc_iact;
                    t += - nocc_cas;
                    u += - nocc_cas;
                    v += - nocc_cas;
                    w += - nocc_cas;
                    for (itmp = 0; itmp < nocc_corr+nvir_corr; itmp++)
                        det_str[itmp] = Refdet[itmp];
                    det_str[p] = 2;
                    det_str[q] = 2;
                    det_str[r] = 2;
                    det_str[t+nocc_corr] = 1;
                    det_str[u+nocc_corr] = 1;
                    det_str[v+nocc_corr] = 1;
                    if (p != s && q != s && r != s) det_str[s] = 1;
                    else det_str[s] = 0;
                    if (t != w && u != w && v != w) det_str[w+nocc_corr] = 2;
                    else det_str[w+nocc_corr] = 3;
                    parity = parity_ci_to_cc(p+q+r, 3, nocc_corr);
                    parity *= parity_ci_to_cc(s, 1, nocc_corr);
                    /* intermediate normalization of c4 */
                    t4 = parity * t4 / c0;
                    /* remove disconnected contributions -> pure T4 */
                    t4-= t1xc3aaab_mem (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, nocc3, t1, t2aa, t2ab, c3aaa, c3aab, c0);
                    t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t2aa, t2ab); // may have 1e-3 bug
                    t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1, t2aa, t2ab); // may have 1e-5 bug
                    t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1); // may have 1e-6 bug
                    t2t4c_priv[D(r,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(p,t,q,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(r,t,p,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(q,t,r,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,t,p,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,t,r,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,t,q,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(p,v,q,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(r,v,p,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(q,v,r,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,v,p,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,v,r,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,v,q,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(p,u,q,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(r,u,p,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(q,u,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,u,p,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,u,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,u,q,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,t,q,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,t,p,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,t,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(q,t,p,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(p,t,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(r,t,q,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,v,q,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,v,p,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,v,r,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(q,v,p,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(p,v,r,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(r,v,q,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,u,q,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,u,p,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,u,r,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(q,u,p,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(p,u,r,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(r,u,q,t,nocc_corr,nvir_corr)] * t4;
                }
            }
            fclose(fp);
        }
    }
    /* merge thread-private buffer into t2t4c, symmetrizing (ij,ab)<->(ji,ba) */
#pragma omp critical
    {
    for (it=0; it< nocc_corr; it++){
        for (jt=0; jt< nocc_corr; jt++){
            for (at=0; at< nvir_corr; at++){
                for (bt=0; bt< nvir_corr; bt++){
                    t2t4c[D(it,jt,at,bt,nocc_corr,nvir_corr)] += 0.5*(t2t4c_priv[D(it,jt,at,bt,nocc_corr,nvir_corr)]+t2t4c_priv[D(jt,it,bt,at,nocc_corr,nvir_corr)]);
                }
            }
        }
    }
    }
    /* each thread frees its own buffer; no need to serialize this */
    free(t2t4c_priv);
    }
    printf ("0SDTQ (Q) = %18.16f ( %18.16f )\n", norm, norm-norm0SDT);
}
/*
 * Serial variant: accumulate the connected-quadruples (T4) contribution to
 * the doubles residual, reading SHCI quadruple coefficients on the fly from
 * "CIcoeff_shci.out" (one header line, then two-line records of
 * "<spin case>,<8 indices>" followed by the coefficient).
 *
 * Records with |c4| <= numzero are skipped.  Each surviving coefficient is
 * converted to a cluster amplitude (intermediate normalization by c0,
 * CI->CC reordering parity, subtraction of the disconnected t1*c3, t2*t2,
 * t1*t1*t2 and t1^4 pieces) and contracted with the <ov|ov> integrals
 * e2ovov; the result is symmetrized into t2t4c.
 *
 * t2t4c      [in/out] doubles residual, D(i,j,a,b) layout
 * t1/t2aa/t2ab/c3aaa/c3aab  amplitudes and CI triples (intermediate norm)
 * e2ovov     two-electron ovov integrals, De(...) layout
 * nocc_iact  inactive-occupied offset added to CAS occupied indices
 * nocc_corr/nvir_corr  correlated occupied/virtual dimensions
 * nocc_cas/nvir_cas    CAS occupied/virtual dimensions
 * numzero    screening threshold on |c4|
 * c0         reference CI coefficient
 * norm       0SDT norm on entry; the 0SDTQ norm is printed on exit
 */
void t2t4c_shci_otf_mem(double *t2t4c, double *t1, double *t2aa, double *t2ab, double *c3aaa, double *c3aab, double *e2ovov, const int nocc_iact, const int nocc_corr, const int nvir_corr, const int nocc_cas, const int nvir_cas, const double numzero, const double c0, double norm)
{
    const int nocc2 = (int) nocc_cas*(nocc_cas-1)/2;
    const int nocc3 = (int) nocc_cas*(nocc_cas-1)*(nocc_cas-2)/6;
    double norm0SDT = norm;
    const int t2size = nocc_corr*nocc_corr*nvir_corr*nvir_corr;
    double t4, parity;
    int p, q, r, s, t, u, v, w, itmp, it, jt, at, bt;
    char typ[4], line[255];
    uint8_t Refdet[nocc_corr+nvir_corr], det_str[nocc_corr+nvir_corr];
    /* reference determinant occupation string:
       3 = doubly occupied below the Fermi level, 0 = empty above */
    for (itmp = 0; itmp < nocc_corr+nvir_corr; itmp++){
        if (itmp<nocc_corr) Refdet[itmp] = 3;
        else Refdet[itmp] = 0;
    }
    double *t2t4c_priv = (double *)malloc(sizeof(double) * t2size);
    for (it=0; it< t2size; it++){
        t2t4c_priv[it] = 0.0;
    }
    FILE *fp = fopen("CIcoeff_shci.out", "r");
    if (fp) {
        /* Skip the header line.  (Previously this read happened BEFORE the
           NULL check, crashing when the file was missing.)  The record loop
           is controlled by the fscanf return counts rather than feof(), so
           a short/malformed record terminates cleanly instead of
           reprocessing stale typ/t4 data; %254s bounds reads to line[255]. */
        if (fscanf(fp, "%254s\n", line) == 1) {
            while ( fscanf(fp, "%c%c%c%c,%254s\n", &(typ[0]), &(typ[1]), &(typ[2]), &(typ[3]), line) == 5 &&
                    fscanf(fp, "%lf\n", &t4) == 1 ){
                if (strncmp(typ, "aabb", 4) == 0 && fabs(t4) > numzero){
                    /* two alpha (p,q -> t,u) and two beta (r,s -> v,w)
                       excitations; map CAS indices to correlated space */
                    norm += t4*t4;
                    sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&s,&v,&w);
                    p += nocc_iact;
                    q += nocc_iact;
                    r += nocc_iact;
                    s += nocc_iact;
                    t += - nocc_cas;
                    u += - nocc_cas;
                    v += - nocc_cas;
                    w += - nocc_cas;
                    /* occupation-string bookkeeping (1 = alpha, 2 = beta,
                       3 = double); retained for parity debugging */
                    for (itmp = 0; itmp < nocc_corr+nvir_corr; itmp++)
                        det_str[itmp] = Refdet[itmp];
                    det_str[p] = 2;
                    det_str[q] = 2;
                    det_str[t+nocc_corr] = 1;
                    det_str[u+nocc_corr] = 1;
                    if (p != r && q != r) det_str[r] = 1;
                    else det_str[r] = 0;
                    if (p != s && q != s) det_str[s] = 1;
                    else det_str[s] = 0;
                    if (t != v && u != v) det_str[v+nocc_corr] = 2;
                    else det_str[v+nocc_corr] = 3;
                    if (t != w && u != w) det_str[w+nocc_corr] = 2;
                    else det_str[w+nocc_corr] = 3;
                    parity = parity_ci_to_cc(p+q, 2, nocc_corr);
                    parity *= parity_ci_to_cc(r+s, 2, nocc_corr);
                    /* intermediate normalization of c4 */
                    t4 = parity * t4 / c0;
                    /* remove disconnected contributions -> pure T4 */
                    t4-= t1xc3aabb_mem(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
                    t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t2aa, t2ab);
                    t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1, t2aa, t2ab);
                    t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1); // may have bug
                    /* contract with <ov|ov>: all index permutations/signs */
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(p,t,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,t,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,u,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(q,u,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,r,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,t,s,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,r,u,w,nocc_corr,nvir_corr)] += e2ovov[De(q,t,s,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,r,t,w,nocc_corr,nvir_corr)] += e2ovov[De(p,u,s,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,r,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,u,s,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,v,nocc_corr,nvir_corr)] -= e2ovov[De(p,t,r,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,v,nocc_corr,nvir_corr)] += e2ovov[De(q,t,r,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,v,nocc_corr,nvir_corr)] += e2ovov[De(p,u,r,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,v,nocc_corr,nvir_corr)] -= e2ovov[De(q,u,r,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,r,u,v,nocc_corr,nvir_corr)] += e2ovov[De(p,t,s,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,r,u,v,nocc_corr,nvir_corr)] -= e2ovov[De(q,t,s,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,r,t,v,nocc_corr,nvir_corr)] -= e2ovov[De(p,u,s,w,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,r,t,v,nocc_corr,nvir_corr)] += e2ovov[De(q,u,s,w,nocc_corr,nvir_corr)] * t4;
                }
                else if (strncmp(typ, "aaab", 4) == 0 && fabs(t4) > numzero){
                    /* three alpha (p,q,r -> t,u,v) and one beta (s -> w);
                       factor 2 accounts for the spin-flipped partner */
                    norm += 2.0*t4*t4;
                    sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v,&s,&w);
                    p += nocc_iact;
                    q += nocc_iact;
                    r += nocc_iact;
                    s += nocc_iact;
                    t += - nocc_cas;
                    u += - nocc_cas;
                    v += - nocc_cas;
                    w += - nocc_cas;
                    for (itmp = 0; itmp < nocc_corr+nvir_corr; itmp++)
                        det_str[itmp] = Refdet[itmp];
                    det_str[p] = 2;
                    det_str[q] = 2;
                    det_str[r] = 2;
                    det_str[t+nocc_corr] = 1;
                    det_str[u+nocc_corr] = 1;
                    det_str[v+nocc_corr] = 1;
                    if (p != s && q != s && r != s) det_str[s] = 1;
                    else det_str[s] = 0;
                    if (t != w && u != w && v != w) det_str[w+nocc_corr] = 2;
                    else det_str[w+nocc_corr] = 3;
                    parity = parity_ci_to_cc(p+q+r, 3, nocc_corr);
                    parity *= parity_ci_to_cc(s, 1, nocc_corr);
                    /* intermediate normalization of c4 */
                    t4 = parity * t4 / c0;
                    /* remove disconnected contributions -> pure T4 */
                    t4-= t1xc3aaab_mem (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, nocc3, t1, t2aa, t2ab, c3aaa, c3aab, c0);
                    t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t2aa, t2ab); // may have 1e-3 bug
                    t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1, t2aa, t2ab); // may have 1e-5 bug
                    t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1); // may have 1e-6 bug
                    t2t4c_priv[D(r,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(p,t,q,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(r,t,p,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(q,t,r,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,t,p,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,t,r,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,t,q,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(p,v,q,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(r,v,p,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(q,v,r,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,v,p,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,v,r,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,v,q,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(p,u,q,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(r,u,p,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(q,u,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,u,p,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,u,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,u,q,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,t,q,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,t,p,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,t,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(q,t,p,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(p,t,r,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,u,w,nocc_corr,nvir_corr)] += e2ovov[De(r,t,q,v,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,v,q,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,v,p,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,v,r,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(q,v,p,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(p,v,r,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,t,w,nocc_corr,nvir_corr)] += e2ovov[De(r,v,q,u,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(p,u,q,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(r,u,p,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,v,w,nocc_corr,nvir_corr)] -= e2ovov[De(q,u,r,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(r,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(q,u,p,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(q,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(p,u,r,t,nocc_corr,nvir_corr)] * t4;
                    t2t4c_priv[D(p,s,v,w,nocc_corr,nvir_corr)] += e2ovov[De(r,u,q,t,nocc_corr,nvir_corr)] * t4;
                }
            }
        }
        fclose(fp);
    }
    /* symmetrize (ij,ab)<->(ji,ba) into the output buffer */
    for (it=0; it< nocc_corr; it++){
        for (jt=0; jt< nocc_corr; jt++){
            for (at=0; at< nvir_corr; at++){
                for (bt=0; bt< nvir_corr; bt++){
                    t2t4c[D(it,jt,at,bt,nocc_corr,nvir_corr)] += 0.5*(t2t4c_priv[D(it,jt,at,bt,nocc_corr,nvir_corr)]+t2t4c_priv[D(jt,it,bt,at,nocc_corr,nvir_corr)]);
                }
            }
        }
    }
    free(t2t4c_priv);
    printf ("0SDTQ (Q) = %f ( %f )\n", norm, norm-norm0SDT);
}
/* Permutation-weighted factor for an alpha-alpha-alpha triple t3(ijk->abc).
 *
 * Builds 2*t3 plus disconnected contributions from singles (t1) and
 * alpha-alpha doubles (t2aa), summed over the 36 signed permutations of the
 * occupied indices (i,j,k) and virtual indices (a,b,c).  S(...) and D(...)
 * are the project's flat-index macros for the t1 and t2 arrays (defined
 * elsewhere in this file).
 *
 * Parameters:
 *   i, j, k - occupied orbital indices
 *   a, b, c - virtual orbital indices
 *   nocc    - occupied dimension used by S()/D()
 *   nvir    - virtual dimension used by S()/D()
 *   t3      - connected triple amplitude for this index tuple
 *   t1      - singles amplitudes, addressed via S()
 *   t2aa    - alpha-alpha doubles amplitudes, addressed via D()
 *
 * Returns 2*t3 + (t1*t2 permutation sum)/2 + (t1*t1*t1 permutation sum)/3.
 * NOTE: the code is machine-generated; the summation order is kept as-is to
 * preserve bit-for-bit floating-point results.
 */
double permut_value_xaaa(int i, int j, int k, int a, int b, int c, int nocc, int nvir, double t3, double *t1, double *t2aa)
{
double t3p = 2.0 * t3;
double t1t2p = 0.0, t1t1t1p = 0.0;
/* t1 * t2aa over all signed permutations of (i,j,k) x (a,b,c) */
t1t2p += t1[S(i, a, nvir)] * t2aa[D(j, k, b, c, nocc, nvir)];
t1t2p += t1[S(j, a, nvir)] * t2aa[D(k, i, b, c, nocc, nvir)];
t1t2p += t1[S(k, a, nvir)] * t2aa[D(i, j, b, c, nocc, nvir)];
t1t2p -= t1[S(j, a, nvir)] * t2aa[D(i, k, b, c, nocc, nvir)];
t1t2p -= t1[S(i, a, nvir)] * t2aa[D(k, j, b, c, nocc, nvir)];
t1t2p -= t1[S(k, a, nvir)] * t2aa[D(j, i, b, c, nocc, nvir)];
t1t2p += t1[S(i, b, nvir)] * t2aa[D(j, k, c, a, nocc, nvir)];
t1t2p += t1[S(j, b, nvir)] * t2aa[D(k, i, c, a, nocc, nvir)];
t1t2p += t1[S(k, b, nvir)] * t2aa[D(i, j, c, a, nocc, nvir)];
t1t2p -= t1[S(j, b, nvir)] * t2aa[D(i, k, c, a, nocc, nvir)];
t1t2p -= t1[S(i, b, nvir)] * t2aa[D(k, j, c, a, nocc, nvir)];
t1t2p -= t1[S(k, b, nvir)] * t2aa[D(j, i, c, a, nocc, nvir)];
t1t2p += t1[S(i, c, nvir)] * t2aa[D(j, k, a, b, nocc, nvir)];
t1t2p += t1[S(j, c, nvir)] * t2aa[D(k, i, a, b, nocc, nvir)];
t1t2p += t1[S(k, c, nvir)] * t2aa[D(i, j, a, b, nocc, nvir)];
t1t2p -= t1[S(j, c, nvir)] * t2aa[D(i, k, a, b, nocc, nvir)];
t1t2p -= t1[S(i, c, nvir)] * t2aa[D(k, j, a, b, nocc, nvir)];
t1t2p -= t1[S(k, c, nvir)] * t2aa[D(j, i, a, b, nocc, nvir)];
t1t2p -= t1[S(i, b, nvir)] * t2aa[D(j, k, a, c, nocc, nvir)];
t1t2p -= t1[S(j, b, nvir)] * t2aa[D(k, i, a, c, nocc, nvir)];
t1t2p -= t1[S(k, b, nvir)] * t2aa[D(i, j, a, c, nocc, nvir)];
t1t2p += t1[S(j, b, nvir)] * t2aa[D(i, k, a, c, nocc, nvir)];
t1t2p += t1[S(i, b, nvir)] * t2aa[D(k, j, a, c, nocc, nvir)];
t1t2p += t1[S(k, b, nvir)] * t2aa[D(j, i, a, c, nocc, nvir)];
t1t2p -= t1[S(i, a, nvir)] * t2aa[D(j, k, c, b, nocc, nvir)];
t1t2p -= t1[S(j, a, nvir)] * t2aa[D(k, i, c, b, nocc, nvir)];
t1t2p -= t1[S(k, a, nvir)] * t2aa[D(i, j, c, b, nocc, nvir)];
t1t2p += t1[S(j, a, nvir)] * t2aa[D(i, k, c, b, nocc, nvir)];
t1t2p += t1[S(i, a, nvir)] * t2aa[D(k, j, c, b, nocc, nvir)];
t1t2p += t1[S(k, a, nvir)] * t2aa[D(j, i, c, b, nocc, nvir)];
t1t2p -= t1[S(i, c, nvir)] * t2aa[D(j, k, b, a, nocc, nvir)];
t1t2p -= t1[S(j, c, nvir)] * t2aa[D(k, i, b, a, nocc, nvir)];
t1t2p -= t1[S(k, c, nvir)] * t2aa[D(i, j, b, a, nocc, nvir)];
t1t2p += t1[S(j, c, nvir)] * t2aa[D(i, k, b, a, nocc, nvir)];
t1t2p += t1[S(i, c, nvir)] * t2aa[D(k, j, b, a, nocc, nvir)];
t1t2p += t1[S(k, c, nvir)] * t2aa[D(j, i, b, a, nocc, nvir)];
/* t1 * t1 * t1 over the same signed permutations */
t1t1t1p += t1[S(i, a, nvir)] * t1[S(j, b, nvir)] * t1[S(k, c, nvir)];
t1t1t1p += t1[S(j, a, nvir)] * t1[S(k, b, nvir)] * t1[S(i, c, nvir)];
t1t1t1p += t1[S(k, a, nvir)] * t1[S(i, b, nvir)] * t1[S(j, c, nvir)];
t1t1t1p -= t1[S(j, a, nvir)] * t1[S(i, b, nvir)] * t1[S(k, c, nvir)];
t1t1t1p -= t1[S(i, a, nvir)] * t1[S(k, b, nvir)] * t1[S(j, c, nvir)];
t1t1t1p -= t1[S(k, a, nvir)] * t1[S(j, b, nvir)] * t1[S(i, c, nvir)];
t1t1t1p += t1[S(i, b, nvir)] * t1[S(j, c, nvir)] * t1[S(k, a, nvir)];
t1t1t1p += t1[S(j, b, nvir)] * t1[S(k, c, nvir)] * t1[S(i, a, nvir)];
t1t1t1p += t1[S(k, b, nvir)] * t1[S(i, c, nvir)] * t1[S(j, a, nvir)];
t1t1t1p -= t1[S(j, b, nvir)] * t1[S(i, c, nvir)] * t1[S(k, a, nvir)];
t1t1t1p -= t1[S(i, b, nvir)] * t1[S(k, c, nvir)] * t1[S(j, a, nvir)];
t1t1t1p -= t1[S(k, b, nvir)] * t1[S(j, c, nvir)] * t1[S(i, a, nvir)];
t1t1t1p += t1[S(i, c, nvir)] * t1[S(j, a, nvir)] * t1[S(k, b, nvir)];
t1t1t1p += t1[S(j, c, nvir)] * t1[S(k, a, nvir)] * t1[S(i, b, nvir)];
t1t1t1p += t1[S(k, c, nvir)] * t1[S(i, a, nvir)] * t1[S(j, b, nvir)];
t1t1t1p -= t1[S(j, c, nvir)] * t1[S(i, a, nvir)] * t1[S(k, b, nvir)];
t1t1t1p -= t1[S(i, c, nvir)] * t1[S(k, a, nvir)] * t1[S(j, b, nvir)];
t1t1t1p -= t1[S(k, c, nvir)] * t1[S(j, a, nvir)] * t1[S(i, b, nvir)];
t1t1t1p -= t1[S(i, b, nvir)] * t1[S(j, a, nvir)] * t1[S(k, c, nvir)];
t1t1t1p -= t1[S(j, b, nvir)] * t1[S(k, a, nvir)] * t1[S(i, c, nvir)];
t1t1t1p -= t1[S(k, b, nvir)] * t1[S(i, a, nvir)] * t1[S(j, c, nvir)];
t1t1t1p += t1[S(j, b, nvir)] * t1[S(i, a, nvir)] * t1[S(k, c, nvir)];
t1t1t1p += t1[S(i, b, nvir)] * t1[S(k, a, nvir)] * t1[S(j, c, nvir)];
t1t1t1p += t1[S(k, b, nvir)] * t1[S(j, a, nvir)] * t1[S(i, c, nvir)];
t1t1t1p -= t1[S(i, a, nvir)] * t1[S(j, c, nvir)] * t1[S(k, b, nvir)];
t1t1t1p -= t1[S(j, a, nvir)] * t1[S(k, c, nvir)] * t1[S(i, b, nvir)];
t1t1t1p -= t1[S(k, a, nvir)] * t1[S(i, c, nvir)] * t1[S(j, b, nvir)];
t1t1t1p += t1[S(j, a, nvir)] * t1[S(i, c, nvir)] * t1[S(k, b, nvir)];
t1t1t1p += t1[S(i, a, nvir)] * t1[S(k, c, nvir)] * t1[S(j, b, nvir)];
t1t1t1p += t1[S(k, a, nvir)] * t1[S(j, c, nvir)] * t1[S(i, b, nvir)];
t1t1t1p -= t1[S(i, c, nvir)] * t1[S(j, b, nvir)] * t1[S(k, a, nvir)];
t1t1t1p -= t1[S(j, c, nvir)] * t1[S(k, b, nvir)] * t1[S(i, a, nvir)];
t1t1t1p -= t1[S(k, c, nvir)] * t1[S(i, b, nvir)] * t1[S(j, a, nvir)];
t1t1t1p += t1[S(j, c, nvir)] * t1[S(i, b, nvir)] * t1[S(k, a, nvir)];
t1t1t1p += t1[S(i, c, nvir)] * t1[S(k, b, nvir)] * t1[S(j, a, nvir)];
t1t1t1p += t1[S(k, c, nvir)] * t1[S(j, b, nvir)] * t1[S(i, a, nvir)];
/* weights 1/2 and 1/3 compensate the double counting in the generated sums */
return t3p + t1t2p/2.0 + t1t1t1p/3.0;
}
/* Permutation-weighted factor for an alpha-alpha-beta triple t3(ij,k -> ab,c):
 * 2*t3 plus disconnected pieces built from singles (t1), alpha-alpha doubles
 * (t2aa) and alpha-beta doubles (t2ab).  (i,j|a,b) are the alpha pair
 * indices, (k|c) the beta ones; S()/D() are the file's flat-index macros. */
double permut_value_xaab(int i, int j, int k, int a, int b, int c, int nocc, int nvir, double t3, double *t1, double *t2aa, double *t2ab)
{
    const double doubled_t3 = 2.0 * t3;
    /* the beta single t1(k,c) multiplies every term of the aa and t1^3 parts */
    const double kc = t1[S(k, c, nvir)];

    /* antisymmetrized alpha-alpha doubles, scaled by t1(k,c) */
    double aa_part = 0.0;
    aa_part += t2aa[D(i, j, a, b, nocc, nvir)];
    aa_part -= t2aa[D(j, i, a, b, nocc, nvir)];
    aa_part -= t2aa[D(i, j, b, a, nocc, nvir)];
    aa_part += t2aa[D(j, i, b, a, nocc, nvir)];
    aa_part *= kc;

    /* alpha singles contracted with alpha-beta doubles */
    double ab_part = 0.0;
    ab_part += t1[S(i, a, nvir)] * t2ab[D(j, k, b, c, nocc, nvir)];
    ab_part -= t1[S(j, a, nvir)] * t2ab[D(i, k, b, c, nocc, nvir)];
    ab_part -= t1[S(i, b, nvir)] * t2ab[D(j, k, a, c, nocc, nvir)];
    ab_part += t1[S(j, b, nvir)] * t2ab[D(i, k, a, c, nocc, nvir)];

    /* pure singles product, antisymmetrized over the alpha pair */
    double singles_part = 0.0;
    singles_part += t1[S(i, a, nvir)] * t1[S(j, b, nvir)] * kc;
    singles_part -= t1[S(j, a, nvir)] * t1[S(i, b, nvir)] * kc;
    singles_part -= t1[S(i, b, nvir)] * t1[S(j, a, nvir)] * kc;
    singles_part += t1[S(j, b, nvir)] * t1[S(i, a, nvir)] * kc;

    return doubled_t3 + aa_part/2.0 + 2.0*ab_part + singles_part;
}
/* Permutation-weighted factor for an aabb quadruple t4(ij,kl -> ab,cd):
 * 2*t4 plus disconnected contributions assembled from singles (t1),
 * alpha-alpha doubles (t2aa), alpha-beta doubles (t2ab) and aab triples
 * (reconstructed on the fly from the CI coefficients c3aab via
 * c3tot3aab_mem).  (i,j|a,b) are the alpha indices, (k,l|c,d) the beta ones.
 *
 * NOTE(review): paab is accepted but never used in this body -- presumably a
 * leftover of an earlier interface; verify against the call sites.
 * NOTE(review): several partial sums are scaled up here (t1t3aab *= 4,
 * t2t2aa *= 16, t2aat1t1 *= 8, t1t1t1t1 *= 4) and scaled back down in the
 * return expression; the net factors appear intentional but redundant --
 * generated code, confirm before simplifying.
 */
double permut_value_xaabb(const int i, const int j, const int k, const int l,
const int a, const int b, const int c, const int d, const int nocc_corr,
const int nvir_corr, const int nocc_cas, const int nvir_cas,
const int nocc_iact, const int nocc2, const double t4, double *t1,
double *t2aa, double *t2ab, double *c3aab, double *paab, const double c0)
{
double t4p = 2.0 * t4;
double t1t3aab = 0.0, t2t2aa = 0.0, t2t2ab = 0.0;
double t2aat1t1 = 0.0, t2abt1t1 = 0.0, t1t1t1t1 = 0.0;
//lsh test
//printf ("p=%d, q=%d, r=%d, s=%d, t=%d, u=%d, v=%d, w=%d",i,j,k,l,a,b,c,d);
//t1t3aab: beta singles contracted with aab triples rebuilt from c3aab
double t3_ijk_abc, t3_ijl_abc, t3_ijk_abd, t3_ijl_abd;
t3_ijk_abc = c3tot3aab_mem(i, j, k, a, b, c, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t3_ijl_abc = c3tot3aab_mem(i, j, l, a, b, c, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t3_ijk_abd = c3tot3aab_mem(i, j, k, a, b, d, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t3_ijl_abd = c3tot3aab_mem(i, j, l, a, b, d, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1t3aab += t1[S(l, d, nvir_corr)] * t3_ijk_abc;
t1t3aab -= t1[S(k, d, nvir_corr)] * t3_ijl_abc;
t1t3aab -= t1[S(l, c, nvir_corr)] * t3_ijk_abd;
t1t3aab += t1[S(k, c, nvir_corr)] * t3_ijl_abd;
t1t3aab *= 4.0;
//t2t2aa: product of the alpha-alpha and beta-beta pair amplitudes
t2t2aa += t2aa[D(i, j, a, b, nocc_corr, nvir_corr)] * t2aa[D(k, l, c, d, nocc_corr, nvir_corr)];
t2t2aa *= 16.0;
//t2t2ab: all signed pairings of two alpha-beta doubles
t2t2ab += t2ab[D(i, k, a, c, nocc_corr, nvir_corr)] * t2ab[D(j, l, b, d, nocc_corr, nvir_corr)];
t2t2ab -= t2ab[D(j, k, a, c, nocc_corr, nvir_corr)] * t2ab[D(i, l, b, d, nocc_corr, nvir_corr)];
t2t2ab -= t2ab[D(i, k, b, c, nocc_corr, nvir_corr)] * t2ab[D(j, l, a, d, nocc_corr, nvir_corr)];
t2t2ab += t2ab[D(j, k, b, c, nocc_corr, nvir_corr)] * t2ab[D(i, l, a, d, nocc_corr, nvir_corr)];
t2t2ab -= t2ab[D(i, l, a, c, nocc_corr, nvir_corr)] * t2ab[D(j, k, b, d, nocc_corr, nvir_corr)];
t2t2ab += t2ab[D(j, l, a, c, nocc_corr, nvir_corr)] * t2ab[D(i, k, b, d, nocc_corr, nvir_corr)];
t2t2ab += t2ab[D(i, l, b, c, nocc_corr, nvir_corr)] * t2ab[D(j, k, a, d, nocc_corr, nvir_corr)];
t2t2ab -= t2ab[D(j, l, b, c, nocc_corr, nvir_corr)] * t2ab[D(i, k, a, d, nocc_corr, nvir_corr)];
t2t2ab -= t2ab[D(i, k, a, d, nocc_corr, nvir_corr)] * t2ab[D(j, l, b, c, nocc_corr, nvir_corr)];
t2t2ab += t2ab[D(j, k, a, d, nocc_corr, nvir_corr)] * t2ab[D(i, l, b, c, nocc_corr, nvir_corr)];
t2t2ab += t2ab[D(i, k, b, d, nocc_corr, nvir_corr)] * t2ab[D(j, l, a, c, nocc_corr, nvir_corr)];
t2t2ab -= t2ab[D(j, k, b, d, nocc_corr, nvir_corr)] * t2ab[D(i, l, a, c, nocc_corr, nvir_corr)];
t2t2ab += t2ab[D(i, l, a, d, nocc_corr, nvir_corr)] * t2ab[D(j, k, b, c, nocc_corr, nvir_corr)];
t2t2ab -= t2ab[D(j, l, a, d, nocc_corr, nvir_corr)] * t2ab[D(i, k, b, c, nocc_corr, nvir_corr)];
t2t2ab -= t2ab[D(i, l, b, d, nocc_corr, nvir_corr)] * t2ab[D(j, k, a, c, nocc_corr, nvir_corr)];
t2t2ab += t2ab[D(j, l, b, d, nocc_corr, nvir_corr)] * t2ab[D(i, k, a, c, nocc_corr, nvir_corr)];
// t2aat1t1: alpha-alpha doubles times two beta singles
t2aat1t1 += t2aa[D(i, j, a, b, nocc_corr, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(i, j, a, b, nocc_corr, nvir_corr)] * t1[S(l, c, nvir_corr)] * t1[S(k, d, nvir_corr)];
t2aat1t1 *= 8.0;
// t2abt1t1: alpha-beta doubles times one alpha and one beta single
t2abt1t1 += t2ab[D(i, k, a, c, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2abt1t1 -= t2ab[D(j, k, a, c, nocc_corr, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2abt1t1 -= t2ab[D(i, k, b, c, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2abt1t1 += t2ab[D(j, k, b, c, nocc_corr, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2abt1t1 -= t2ab[D(i, l, a, c, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(k, d, nvir_corr)];
t2abt1t1 += t2ab[D(j, l, a, c, nocc_corr, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(k, d, nvir_corr)];
t2abt1t1 += t2ab[D(i, l, b, c, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(k, d, nvir_corr)];
t2abt1t1 -= t2ab[D(j, l, b, c, nocc_corr, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(k, d, nvir_corr)];
t2abt1t1 -= t2ab[D(i, k, a, d, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(l, c, nvir_corr)];
t2abt1t1 += t2ab[D(j, k, a, d, nocc_corr, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(l, c, nvir_corr)];
t2abt1t1 += t2ab[D(i, k, b, d, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(l, c, nvir_corr)];
t2abt1t1 -= t2ab[D(j, k, b, d, nocc_corr, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(l, c, nvir_corr)];
t2abt1t1 += t2ab[D(i, l, a, d, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(k, c, nvir_corr)];
t2abt1t1 -= t2ab[D(j, l, a, d, nocc_corr, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(k, c, nvir_corr)];
t2abt1t1 -= t2ab[D(i, l, b, d, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(k, c, nvir_corr)];
t2abt1t1 += t2ab[D(j, l, b, d, nocc_corr, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(k, c, nvir_corr)];
//t1t1t1t1: product of four singles, antisymmetrized within each spin pair
t1t1t1t1 += t1[S(i, a, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(j, a, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(i, a, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(l, c, nvir_corr)] * t1[S(k, d, nvir_corr)];
t1t1t1t1 += t1[S(j, a, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(l, c, nvir_corr)] * t1[S(k, d, nvir_corr)];
t1t1t1t1 *= 4.0;
/* the divisors undo the scale factors applied to each partial sum above */
return t4p + t1t3aab/2.0 + t2t2aa/16.0 + t2t2ab/2.0 + t2aat1t1/4.0 + t2abt1t1 + t1t1t1t1/4.0;
}
/* Permutation-weighted factor for an aaab quadruple t4(ijk,l -> abc,d):
 * 2*t4 plus disconnected contributions from singles (t1), doubles (t2aa,
 * t2ab) and triples reconstructed from CI coefficients (c3aaa via
 * c3tot3aaa_mem, c3aab via c3tot3aab_mem).  (i,j,k|a,b,c) are the alpha
 * indices, (l|d) the beta ones.
 *
 * NOTE(review): paaa and paab are accepted but never used in this body --
 * presumably leftovers of an earlier interface; verify against call sites.
 * NOTE(review): this body is machine-generated; a few lines break the
 * otherwise regular permutation pattern (flagged inline below).  They may be
 * generator bugs -- verify against the generating script before changing.
 */
double permut_value_xaaab(const int i, const int j, const int k, const int l,
const int a, const int b, const int c, const int d, const int nocc_corr,
const int nvir_corr, const int nocc_cas, const int nvir_cas,
const int nocc_iact, const int nocc2, const int nocc3, const double t4, double *t1,
double *t2aa, double *t2ab, double *c3aaa, double *c3aab,
double *paaa, double *paab, const double c0)
{
double t4p = 2.0 * t4;
double t1t3aaa = 0.0, t1t3aab = 0.0, t2t2 = 0.0;
double t2aat1t1 = 0.0, t2abt1t1 = 0.0, t1t1t1t1 = 0.0;
//lsh test
//printf ("p=%d, q=%d, r=%d, s=%d, t=%d, u=%d, v=%d, w=%d",i,j,k,l,a,b,c,d);
//t1t3aaa: beta single times the aaa triple rebuilt from c3aaa
double t3aaa;
t3aaa = c3tot3aaa_mem(i, j, k, a, b, c, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc3, t1, t2aa, c3aaa, c0);
t1t3aaa = 2.0 * t1[S(l, d, nvir_corr)] * t3aaa;
//t1t3aab: alpha singles contracted with aab triples rebuilt from c3aab.
//NOTE(review): each term below appears twice (the sum is later divided by 2
//in the return); generated duplication, confirm before simplifying.
double t3_jkl_bcd, t3_ikl_bcd, t3_ijl_bcd;
double t3_jkl_acd, t3_ikl_acd, t3_ijl_acd;
double t3_jkl_abd, t3_ikl_abd, t3_ijl_abd;
t3_jkl_bcd = c3tot3aab_mem(j, k, l, b, c, d, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t3_ikl_bcd = c3tot3aab_mem(i, k, l, b, c, d, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t3_ijl_bcd = c3tot3aab_mem(i, j, l, b, c, d, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t3_jkl_acd = c3tot3aab_mem(j, k, l, a, c, d, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t3_ikl_acd = c3tot3aab_mem(i, k, l, a, c, d, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t3_ijl_acd = c3tot3aab_mem(i, j, l, a, c, d, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t3_jkl_abd = c3tot3aab_mem(j, k, l, a, b, d, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t3_ikl_abd = c3tot3aab_mem(i, k, l, a, b, d, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t3_ijl_abd = c3tot3aab_mem(i, j, l, a, b, d, nocc_corr, nvir_corr,
nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
t1t3aab += t1[S(i, a, nvir_corr)] * t3_jkl_bcd;
t1t3aab -= t1[S(j, a, nvir_corr)] * t3_ikl_bcd;
t1t3aab += t1[S(k, a, nvir_corr)] * t3_ijl_bcd;
t1t3aab -= t1[S(j, a, nvir_corr)] * t3_ikl_bcd;
t1t3aab += t1[S(i, a, nvir_corr)] * t3_jkl_bcd;
t1t3aab += t1[S(k, a, nvir_corr)] * t3_ijl_bcd;
t1t3aab -= t1[S(i, b, nvir_corr)] * t3_jkl_acd;
t1t3aab += t1[S(j, b, nvir_corr)] * t3_ikl_acd;
t1t3aab -= t1[S(k, b, nvir_corr)] * t3_ijl_acd;
t1t3aab += t1[S(j, b, nvir_corr)] * t3_ikl_acd;
t1t3aab -= t1[S(i, b, nvir_corr)] * t3_jkl_acd;
t1t3aab -= t1[S(k, b, nvir_corr)] * t3_ijl_acd;
t1t3aab += t1[S(i, c, nvir_corr)] * t3_jkl_abd;
t1t3aab -= t1[S(j, c, nvir_corr)] * t3_ikl_abd;
t1t3aab += t1[S(k, c, nvir_corr)] * t3_ijl_abd;
t1t3aab -= t1[S(j, c, nvir_corr)] * t3_ikl_abd;
t1t3aab += t1[S(i, c, nvir_corr)] * t3_jkl_abd;
t1t3aab += t1[S(k, c, nvir_corr)] * t3_ijl_abd;
t1t3aab -= t1[S(i, b, nvir_corr)] * t3_jkl_acd;
t1t3aab += t1[S(j, b, nvir_corr)] * t3_ikl_acd;
t1t3aab -= t1[S(k, b, nvir_corr)] * t3_ijl_acd;
t1t3aab += t1[S(j, b, nvir_corr)] * t3_ikl_acd;
t1t3aab -= t1[S(i, b, nvir_corr)] * t3_jkl_acd;
t1t3aab -= t1[S(k, b, nvir_corr)] * t3_ijl_acd;
t1t3aab += t1[S(i, a, nvir_corr)] * t3_jkl_bcd;
t1t3aab -= t1[S(j, a, nvir_corr)] * t3_ikl_bcd;
t1t3aab += t1[S(k, a, nvir_corr)] * t3_ijl_bcd;
t1t3aab -= t1[S(j, a, nvir_corr)] * t3_ikl_bcd;
t1t3aab += t1[S(i, a, nvir_corr)] * t3_jkl_bcd;
t1t3aab += t1[S(k, a, nvir_corr)] * t3_ijl_bcd;
t1t3aab += t1[S(i, c, nvir_corr)] * t3_jkl_abd;
t1t3aab -= t1[S(j, c, nvir_corr)] * t3_ikl_abd;
t1t3aab += t1[S(k, c, nvir_corr)] * t3_ijl_abd;
t1t3aab -= t1[S(j, c, nvir_corr)] * t3_ikl_abd;
t1t3aab += t1[S(i, c, nvir_corr)] * t3_jkl_abd;
t1t3aab += t1[S(k, c, nvir_corr)] * t3_ijl_abd;
//t2t2: alpha-alpha doubles paired with alpha-beta doubles
t2t2 += t2aa[D(i, j, a, b, nocc_corr, nvir_corr)] * t2ab[D(k, l, c, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(j, k, a, b, nocc_corr, nvir_corr)] * t2ab[D(i, l, c, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(k, i, a, b, nocc_corr, nvir_corr)] * t2ab[D(j, l, c, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(j, i, a, b, nocc_corr, nvir_corr)] * t2ab[D(k, l, c, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(i, k, a, b, nocc_corr, nvir_corr)] * t2ab[D(j, l, c, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(k, j, a, b, nocc_corr, nvir_corr)] * t2ab[D(i, l, c, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(i, j, b, c, nocc_corr, nvir_corr)] * t2ab[D(k, l, a, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(j, k, b, c, nocc_corr, nvir_corr)] * t2ab[D(i, l, a, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(k, i, b, c, nocc_corr, nvir_corr)] * t2ab[D(j, l, a, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(j, i, b, c, nocc_corr, nvir_corr)] * t2ab[D(k, l, a, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(i, k, b, c, nocc_corr, nvir_corr)] * t2ab[D(j, l, a, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(k, j, b, c, nocc_corr, nvir_corr)] * t2ab[D(i, l, a, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(i, j, c, a, nocc_corr, nvir_corr)] * t2ab[D(k, l, b, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(j, k, c, a, nocc_corr, nvir_corr)] * t2ab[D(i, l, b, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(k, i, c, a, nocc_corr, nvir_corr)] * t2ab[D(j, l, b, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(j, i, c, a, nocc_corr, nvir_corr)] * t2ab[D(k, l, b, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(i, k, c, a, nocc_corr, nvir_corr)] * t2ab[D(j, l, b, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(k, j, c, a, nocc_corr, nvir_corr)] * t2ab[D(i, l, b, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(i, j, b, a, nocc_corr, nvir_corr)] * t2ab[D(k, l, c, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(j, k, b, a, nocc_corr, nvir_corr)] * t2ab[D(i, l, c, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(k, i, b, a, nocc_corr, nvir_corr)] * t2ab[D(j, l, c, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(j, i, b, a, nocc_corr, nvir_corr)] * t2ab[D(k, l, c, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(i, k, b, a, nocc_corr, nvir_corr)] * t2ab[D(j, l, c, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(k, j, b, a, nocc_corr, nvir_corr)] * t2ab[D(i, l, c, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(i, j, a, c, nocc_corr, nvir_corr)] * t2ab[D(k, l, b, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(j, k, a, c, nocc_corr, nvir_corr)] * t2ab[D(i, l, b, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(k, i, a, c, nocc_corr, nvir_corr)] * t2ab[D(j, l, b, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(j, i, a, c, nocc_corr, nvir_corr)] * t2ab[D(k, l, b, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(i, k, a, c, nocc_corr, nvir_corr)] * t2ab[D(j, l, b, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(k, j, a, c, nocc_corr, nvir_corr)] * t2ab[D(i, l, b, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(i, j, c, b, nocc_corr, nvir_corr)] * t2ab[D(k, l, a, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(j, k, c, b, nocc_corr, nvir_corr)] * t2ab[D(i, l, a, d, nocc_corr, nvir_corr)];
t2t2 -= t2aa[D(k, i, c, b, nocc_corr, nvir_corr)] * t2ab[D(j, l, a, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(j, i, c, b, nocc_corr, nvir_corr)] * t2ab[D(k, l, a, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(i, k, c, b, nocc_corr, nvir_corr)] * t2ab[D(j, l, a, d, nocc_corr, nvir_corr)];
t2t2 += t2aa[D(k, j, c, b, nocc_corr, nvir_corr)] * t2ab[D(i, l, a, d, nocc_corr, nvir_corr)];
// t2aat1t1: alpha-alpha doubles times one alpha single and the beta single
t2aat1t1 += t2aa[D(i, j, a, b, nocc_corr, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(j, k, a, b, nocc_corr, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(i, j, a, b, nocc_corr, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(l, d, nvir_corr)]; // NOTE(review): pattern break -- rows above suggest D(k,i,...)*t1[S(j,c)]; verify generator
t2aat1t1 -= t2aa[D(j, i, a, b, nocc_corr, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(i, k, a, b, nocc_corr, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(k, j, a, b, nocc_corr, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(i, j, b, c, nocc_corr, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(j, k, b, c, nocc_corr, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(i, j, b, c, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(l, d, nvir_corr)]; // NOTE(review): same pattern break as above
t2aat1t1 -= t2aa[D(j, i, b, c, nocc_corr, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(i, k, b, c, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(k, j, b, c, nocc_corr, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(i, j, c, a, nocc_corr, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(j, k, c, a, nocc_corr, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(i, j, c, a, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(l, d, nvir_corr)]; // NOTE(review): same pattern break as above
t2aat1t1 -= t2aa[D(j, i, c, a, nocc_corr, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(i, k, c, a, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(k, j, c, a, nocc_corr, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(i, j, b, a, nocc_corr, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(j, k, b, a, nocc_corr, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(i, j, b, a, nocc_corr, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(l, d, nvir_corr)]; // NOTE(review): same pattern break as above
t2aat1t1 += t2aa[D(j, i, b, a, nocc_corr, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(i, k, b, a, nocc_corr, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(k, j, b, a, nocc_corr, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(i, j, a, c, nocc_corr, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(j, k, a, c, nocc_corr, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(i, j, a, c, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(l, d, nvir_corr)]; // NOTE(review): same pattern break as above
t2aat1t1 += t2aa[D(j, i, a, c, nocc_corr, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(i, k, a, c, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(k, j, a, c, nocc_corr, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(i, j, c, b, nocc_corr, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(j, k, c, b, nocc_corr, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 -= t2aa[D(i, j, c, b, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(l, d, nvir_corr)]; // NOTE(review): same pattern break as above
t2aat1t1 += t2aa[D(j, i, c, b, nocc_corr, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(i, k, c, b, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t2aat1t1 += t2aa[D(k, j, c, b, nocc_corr, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
// t2abt1t1: alpha-beta doubles times two alpha singles
t2abt1t1 += t2ab[D(i, l, a, d, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(k, c, nvir_corr)];
t2abt1t1 += t2ab[D(j, l, a, d, nocc_corr, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(i, c, nvir_corr)];
t2abt1t1 += t2ab[D(i, l, a, d, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(j, c, nvir_corr)]; // NOTE(review): occupied index j appears twice -- rows above suggest D(k,l,...)*t1[S(i,b)]*t1[S(j,c)]; verify generator
t2abt1t1 -= t2ab[D(j, l, a, d, nocc_corr, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(k, c, nvir_corr)];
t2abt1t1 -= t2ab[D(i, l, a, d, nocc_corr, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(j, c, nvir_corr)];
t2abt1t1 -= t2ab[D(k, l, a, d, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(i, c, nvir_corr)];
t2abt1t1 += t2ab[D(i, l, b, d, nocc_corr, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(k, a, nvir_corr)];
t2abt1t1 += t2ab[D(j, l, b, d, nocc_corr, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(i, a, nvir_corr)];
t2abt1t1 += t2ab[D(i, l, b, d, nocc_corr, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(j, a, nvir_corr)]; // NOTE(review): same duplicated-j pattern break as above
t2abt1t1 -= t2ab[D(j, l, b, d, nocc_corr, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(k, a, nvir_corr)];
t2abt1t1 -= t2ab[D(i, l, b, d, nocc_corr, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(j, a, nvir_corr)];
t2abt1t1 -= t2ab[D(k, l, b, d, nocc_corr, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(i, a, nvir_corr)];
t2abt1t1 += t2ab[D(i, l, c, d, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(k, b, nvir_corr)];
t2abt1t1 += t2ab[D(j, l, c, d, nocc_corr, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(i, b, nvir_corr)];
t2abt1t1 += t2ab[D(i, l, c, d, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(j, b, nvir_corr)]; // NOTE(review): same duplicated-j pattern break as above
t2abt1t1 -= t2ab[D(j, l, c, d, nocc_corr, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(k, b, nvir_corr)];
t2abt1t1 -= t2ab[D(i, l, c, d, nocc_corr, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(j, b, nvir_corr)];
t2abt1t1 -= t2ab[D(k, l, c, d, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(i, b, nvir_corr)];
t2abt1t1 -= t2ab[D(i, l, b, d, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(k, c, nvir_corr)];
t2abt1t1 -= t2ab[D(j, l, b, d, nocc_corr, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(i, c, nvir_corr)];
t2abt1t1 -= t2ab[D(i, l, b, d, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(j, c, nvir_corr)]; // NOTE(review): same duplicated-j pattern break as above
t2abt1t1 += t2ab[D(j, l, b, d, nocc_corr, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(k, c, nvir_corr)];
t2abt1t1 += t2ab[D(i, l, b, d, nocc_corr, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(j, c, nvir_corr)];
t2abt1t1 += t2ab[D(k, l, b, d, nocc_corr, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(i, c, nvir_corr)];
t2abt1t1 -= t2ab[D(i, l, a, d, nocc_corr, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(k, b, nvir_corr)];
t2abt1t1 -= t2ab[D(j, l, a, d, nocc_corr, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(i, b, nvir_corr)];
t2abt1t1 -= t2ab[D(i, l, a, d, nocc_corr, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(j, b, nvir_corr)]; // NOTE(review): same duplicated-j pattern break as above
t2abt1t1 += t2ab[D(j, l, a, d, nocc_corr, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(k, b, nvir_corr)];
t2abt1t1 += t2ab[D(i, l, a, d, nocc_corr, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(j, b, nvir_corr)];
t2abt1t1 += t2ab[D(k, l, a, d, nocc_corr, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(i, b, nvir_corr)];
t2abt1t1 -= t2ab[D(i, l, c, d, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(k, a, nvir_corr)];
t2abt1t1 -= t2ab[D(j, l, c, d, nocc_corr, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(i, a, nvir_corr)];
t2abt1t1 -= t2ab[D(i, l, c, d, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(j, a, nvir_corr)]; // NOTE(review): same duplicated-j pattern break as above
t2abt1t1 += t2ab[D(j, l, c, d, nocc_corr, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(k, a, nvir_corr)];
t2abt1t1 += t2ab[D(i, l, c, d, nocc_corr, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(j, a, nvir_corr)];
t2abt1t1 += t2ab[D(k, l, c, d, nocc_corr, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(i, a, nvir_corr)];
//t1t1t1t1: three alpha singles (all signed permutations) times the beta single
t1t1t1t1 += t1[S(i, a, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(j, a, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(k, a, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(j, a, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(i, a, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(k, a, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(i, b, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(j, b, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(k, b, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(j, b, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(i, b, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(k, b, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(i, c, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(j, c, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(k, c, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(j, c, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(i, c, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(k, c, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(i, b, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(j, b, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(k, b, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(j, b, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(i, b, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(k, b, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(i, a, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(j, a, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(k, a, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(j, a, nvir_corr)] * t1[S(i, c, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(i, a, nvir_corr)] * t1[S(k, c, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(k, a, nvir_corr)] * t1[S(j, c, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(i, c, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(j, c, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 -= t1[S(k, c, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(j, c, nvir_corr)] * t1[S(i, b, nvir_corr)] * t1[S(k, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(i, c, nvir_corr)] * t1[S(k, b, nvir_corr)] * t1[S(j, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
t1t1t1t1 += t1[S(k, c, nvir_corr)] * t1[S(j, b, nvir_corr)] * t1[S(i, a, nvir_corr)] * t1[S(l, d, nvir_corr)];
/* weights compensate the generated double counting of each partial sum */
return t4p + t1t3aaa + t1t3aab/2.0 + t2t2 + t2aat1t1/2.0 + t2abt1t1 + t1t1t1t1/3.0;
}
/* Accumulate the triples contribution to the SHCI energy denominator.
 *
 * Each OpenMP slot i reads a text dump file "t3.<i>" containing record
 * pairs of the form
 *     <typ: 3 chars>,<six comma-separated orbital indices>
 *     <amplitude>
 * where typ is "aaa" or "aab".  For every record above the screening
 * threshold numzero, the CI coefficient is turned into a connected CC t3
 * amplitude (parity sign via parity_ci_to_cc, intermediate normalization
 * by c0, subtraction of the disconnected t1*t2 and t1^3 pieces) and
 * t3 * permut_value_x???(...) is added to the reduction variable denom.
 *
 * Fixes vs. previous version:
 *  - fopen() was called twice on the same file, leaking one FILE* per slot.
 *  - while(!feof(fp)) with unchecked fscanf reprocessed the final record
 *    once more at EOF; the loop now checks the fscanf return values.
 *  - the unbounded %s read into line[255] is bounded with %254s.
 *  - the filename is built with one bounded snprintf instead of
 *    sprintf + strcat into fixed buffers.
 *
 * NOTE(review): denom is passed BY VALUE, so the reduced sum is discarded
 * when the function returns; callers presumably need a double* (or a return
 * value) to receive it -- verify against the call sites.  The signature is
 * left unchanged here to stay call-compatible.
 *
 * Parameters:
 *   t1, t2aa, t2ab - singles / doubles amplitude arrays (S()/D() indexed)
 *   nc             - frozen-core offset added to the occupied indices
 *   nocc, nvir     - occupied / virtual dimensions
 *   numzero        - amplitude screening threshold
 *   c0             - reference CI coefficient (intermediate normalization)
 *   denom          - accumulator (see NOTE above)
 */
void denom_t3_shci(double *t1, double *t2aa, double *t2ab, const int nc, const int nocc, const int nvir, const double numzero, const double c0, double denom)
{
#pragma omp parallel default(none) \
shared(t1, t2aa, t2ab, denom)
    {
        int p, q, r, t, u, v;
        double t3, parity;
        int i;
#pragma omp for reduction(+ : denom)
        for (i=0; i<omp_get_num_threads(); i++){
            char line[255], typ[4];
            double xaaa, xaab;
            char filename[32];
            /* one dump file per thread slot: "t3.<i>" */
            snprintf(filename, sizeof filename, "t3.%d", i);
            FILE *fp = fopen(filename, "r");
            if (fp) {
                /* read record pairs; stop on EOF or a malformed record
                 * (checking the return values avoids the classic !feof()
                 * bug of reprocessing the last record) */
                while (fscanf(fp, "%c%c%c,%254s\n", &(typ[0]), &(typ[1]), &(typ[2]), line) == 4
                       && fscanf(fp, "%lf\n", &t3) == 1){
                    if (strncmp(typ, "aaa", 3) == 0 && fabs(t3) > numzero){
                        sscanf(line,"%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v);
                        /* shift CAS-relative indices to correlated-orbital indices */
                        p += nc;
                        q += nc;
                        r += nc;
                        t += - nocc + nc;
                        u += - nocc + nc;
                        v += - nocc + nc;
                        parity = parity_ci_to_cc(p+q+r, 3, nocc);
                        /* interm norm of c3 */
                        t3 = parity * t3 / c0;
                        /* extract connected t3: remove disconnected pieces */
                        t3 -= t1xt2aaa (p, q, r, t, u, v, nocc, nvir, t1, t2aa);
                        t3 -= t1xt1xt1aaa (p, q, r, t, u, v, nocc, nvir, t1);
                        xaaa = permut_value_xaaa (p, q, r, t, u, v, nocc, nvir, t3, t1, t2aa);
                        denom += t3 * xaaa;
                    }
                    else if (strncmp(typ, "aab", 3) == 0 && fabs(t3) > numzero){
                        /* aab records store occupied p,q then virtual t,u, then beta r,v */
                        sscanf(line,"%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&v);
                        p += nc;
                        q += nc;
                        r += nc;
                        t += - nocc + nc;
                        u += - nocc + nc;
                        v += - nocc + nc;
                        parity = parity_ci_to_cc(p+q, 2, nocc);
                        parity *= parity_ci_to_cc(r, 1, nocc);
                        /* interm norm of c3 */
                        t3 = parity * t3 / c0;
                        /* extract connected t3 */
                        t3 -= t1xt2aab(p, q, r, t, u, v, nocc, nvir, t1, t2aa, t2ab);
                        t3 -= t1xt1xt1aab(p, q, r, t, u, v, nocc, nvir, t1);
                        xaab = permut_value_xaab (p, q, r, t, u, v, nocc, nvir, t3, t1, t2aa, t2ab);
                        denom += t3 * xaab;
                    }
                }
                fclose(fp);
            }
            else
            {
                /* missing dump file for this slot: warn but keep going */
                fprintf(stderr, "denom_t3_shci: cannot open %s\n", filename);
            }
        }
    }
}
void denom_t4_shci(double *t1, double *t2aa, double *t2ab, double *c3aaa, double *c3aab, double *paaa, double *paab, const int nocc_iact, const int nocc_corr, const int nvir_corr, const int nocc_cas, const int nvir_cas, const double numzero, const double c0, double denom)
{
/* Accumulate the quadruples (t4) contribution to the denominator from the
   per-thread SHCI dump files "t4.<tid>".  Each record is two lines:
   "typ,<8 indices>" (typ is "aabb" or "aaab") followed by the CI
   coefficient c4.
   NOTE(review): as in denom_t3_shci, `denom` is passed by value, so the
   accumulated result is invisible to the caller; the signature probably
   needs `double *denom`.  Left unchanged to preserve the interface. */
const int nocc2 = (int) nocc_cas*(nocc_cas-1)/2;
const int nocc3 = (int) nocc_cas*(nocc_cas-1)*(nocc_cas-2)/6;
#pragma omp parallel default(none) \
shared(t1, t2aa, t2ab, c3aaa, c3aab, paaa, paab, denom)
    {
    double t4, parity, xaaab, xaabb;
    int p, q, r, s, t, u, v, w;
    char typ[4], line[255];
    int i;
#pragma omp for reduction(+ : denom)
    for (i=0; i<omp_get_num_threads(); i++){
        char filename[32];
        /* bounded filename build replaces the old sprintf/strcat pair */
        snprintf(filename, sizeof(filename), "t4.%d", i);
        FILE *fp = fopen(filename, "r");
        if (fp) {
            /* fix: loop on fscanf success instead of !feof(fp), which
               re-processed a stale record at end of file */
            while ( fscanf(fp, "%c%c%c%c,%254s\n", &(typ[0]), &(typ[1]), &(typ[2]), &(typ[3]), line) == 5 &&
                    fscanf(fp, "%lf\n", &t4) == 1 ){
                if (strncmp(typ, "aabb", 4) == 0 && fabs(t4) > numzero){
                    sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&t,&u,&r,&s,&v,&w);
                    /* occupied: shift from CAS to correlated range;
                       virtual: shift by the number of CAS-occupied orbitals */
                    p += nocc_iact;
                    q += nocc_iact;
                    r += nocc_iact;
                    s += nocc_iact;
                    t += - nocc_cas;
                    u += - nocc_cas;
                    v += - nocc_cas;
                    w += - nocc_cas;
                    parity = parity_ci_to_cc(p+q, 2, nocc_corr);
                    parity *= parity_ci_to_cc(r+s, 2, nocc_corr);
                    // interm norm of c4
                    t4 = parity * t4 / c0;
                    // extract t4: remove disconnected contributions
                    t4-= t1xc3aabb_mem(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t1, t2aa, t2ab, c3aab, c0);
                    t4-= t2xt2aabb(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t2aa, t2ab);
                    t4-= t1xt1xt2aabb(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1, t2aa, t2ab);
                    t4-= t1xt1xt1xt1aabb(p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1); // may have bug
                    xaabb = permut_value_xaabb (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, t4, t1, t2aa, t2ab, c3aab, paab, c0);
                    denom += t4 * xaabb;
                }
                else if (strncmp(typ, "aaab", 4) == 0 && fabs(t4) > numzero){
                    /* note the index order differs from the aabb case */
                    sscanf(line,"%d,%d,%d,%d,%d,%d,%d,%d",&p,&q,&r,&t,&u,&v,&s,&w);
                    p += nocc_iact;
                    q += nocc_iact;
                    r += nocc_iact;
                    s += nocc_iact;
                    t += - nocc_cas;
                    u += - nocc_cas;
                    v += - nocc_cas;
                    w += - nocc_cas;
                    parity = parity_ci_to_cc(p+q+r, 3, nocc_corr);
                    parity *= parity_ci_to_cc(s, 1, nocc_corr);
                    // interm norm of c4
                    t4 = parity * t4 / c0;
                    // extract t4
                    t4-= t1xc3aaab_mem (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, nocc3, t1, t2aa, t2ab, c3aaa, c3aab, c0);
                    t4-= t2xt2aaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t2aa, t2ab); // may have 1e-3 bug
                    t4-= t1xt1xt2aaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1, t2aa, t2ab); // may have 1e-5 bug
                    t4-= t1xt1xt1xt1aaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, t1); // may have 1e-6 bug
                    xaaab = permut_value_xaaab (p, q, r, s, t, u, v, w, nocc_corr, nvir_corr, nocc_cas, nvir_cas, nocc_iact, nocc2, nocc3, t4, t1, t2aa, t2ab, c3aaa, c3aab, paaa, paab, c0);
                    denom += t4 * xaaab;
                }
            }
            fclose(fp);
        }
    }
    }
}
|
GB_binop__rdiv_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_int32)
// A*D function (colscale): GB (_AxD__rdiv_int32)
// D*A function (rowscale): GB (_DxB__rdiv_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_int32)
// C=scalar+B GB (_bind1st__rdiv_int32)
// C=scalar+B' GB (_bind1st_tran__rdiv_int32)
// C=A+scalar GB (_bind2nd__rdiv_int32)
// C=A'+scalar GB (_bind2nd_tran__rdiv_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_IDIV_SIGNED (bij, aij, 32)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_SIGNED (y, x, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_INT32 || GxB_NO_RDIV_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C += A+B with all three matrices dense; the loop body comes from the
// shared template, specialized by the GB_* macros defined above.
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// If this operator was disabled at compile time (see GB_DISABLE above),
// return GrB_NO_VALUE so the caller falls back to the generic worker.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// C += B where C is dense and B is sparse, using the pre-sliced tasks in
// B_ek_slicing; GrB_NO_VALUE selects the generic fallback when disabled.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
// C += b: accumulate the scalar b (type int32_t, passed via p_bwork) into
// the dense matrix C.  GrB_NO_VALUE selects the generic fallback when the
// operator is disabled.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// fix: an unreachable duplicate "return (GrB_SUCCESS) ;" that followed the
// block above was removed
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
// C = A*D: scale each column of A by the corresponding diagonal entry of D.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
// C = D*B: scale each row of B by the corresponding diagonal entry of D.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseAdd C=A+B (optionally masked by M); rdiv is applied only where both
// A and B have entries, per the add template.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult C=A.*B (method 08) for the case where C is sparse/hypersparse.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is defined as 0 above for rdiv, so only the unflipped
// branch below is compiled for this operator.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
// eWiseMult C<M>=A.*B (method 04): M sparse/hyper, A and B bitmap/full.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
// eWiseMult where the result C is held in bitmap form.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = rdiv (x, Bx [p]) for every entry present in B
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Bx = (int32_t *) Bx_input ;
int32_t x = (*((int32_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
    // only positions present in the bitmap Bb are computed
    if (GBB (Bb, p))
    {
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_SIGNED (bij, x, 32) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = rdiv (Ax [p], y) for every entry present in A
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
    // only positions present in the bitmap Ab are computed
    if (GBB (Ab, p))
    {
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_SIGNED (y, aij, 32) ;
    }
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (aij, x, 32) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this specialization
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_SIGNED (y, aij, 32) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (A', y): the transpose template applies GB_CAST_OP to each entry
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
coordinate_common.h | /*!
* Copyright 2018 by Contributors
* \author Rory Mitchell
*/
#pragma once
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <limits>
#include "xgboost/data.h"
#include "xgboost/parameter.h"
#include "./param.h"
#include "../gbm/gblinear_model.h"
#include "../common/random.h"
namespace xgboost {
namespace linear {
// Hyper-parameters for the coordinate-descent linear updater, declared via
// the DMLC parameter registration macros.
struct CoordinateParam : public XGBoostParameter<CoordinateParam> {
// cap on features considered per group by the 'thrifty' selector; 0 = all
int top_k;
DMLC_DECLARE_PARAMETER(CoordinateParam) {
DMLC_DECLARE_FIELD(top_k)
.set_lower_bound(0)
.set_default(0)
.describe("The number of top features to select in 'thrifty' feature_selector. "
"The value of zero means using all the features.");
}
};
/**
* \brief Calculate change in weight for a given feature. Applies l1/l2 penalty normalised by the
* number of training instances.
*
* \param sum_grad The sum gradient.
* \param sum_hess The sum hess.
* \param w The weight.
* \param reg_alpha Unnormalised L1 penalty.
* \param reg_lambda Unnormalised L2 penalty.
*
* \return The weight update.
*/
inline double CoordinateDelta(double sum_grad, double sum_hess, double w,
                              double reg_alpha, double reg_lambda) {
  // A vanishing second derivative gives no reliable Newton step.
  if (sum_hess < 1e-5f) return 0.0f;
  // Fold the L2 (ridge) penalty directly into the gradient and Hessian.
  const double grad_l2 = sum_grad + reg_lambda * w;
  const double hess_l2 = sum_hess + reg_lambda;
  // L1 soft-thresholding: choose the penalty sign from where the
  // unpenalised update would land, and never step past w = 0.
  const double raw_step = w - grad_l2 / hess_l2;
  return raw_step >= 0
             ? std::max(-(grad_l2 + reg_alpha) / hess_l2, -w)
             : std::min(-(grad_l2 - reg_alpha) / hess_l2, -w);
}
/**
* \brief Calculate update to bias.
*
* \param sum_grad The sum gradient.
* \param sum_hess The sum hess.
*
* \return The weight update.
*/
inline double CoordinateDeltaBias(double sum_grad, double sum_hess) {
  // Guard against a zero/negligible Hessian (e.g. every row flagged
  // invalid), which previously produced inf/NaN that would poison the
  // model weights; mirrors the 1e-5 threshold used in CoordinateDelta.
  if (sum_hess < 1e-5) return 0.0;
  return -sum_grad / sum_hess;
}
/**
* \brief Get the gradient with respect to a single feature.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param fidx The target feature.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for a given feature.
*/
inline std::pair<double, double> GetGradient(int group_idx, int num_group, int fidx,
                                             const std::vector<GradientPair> &gpair,
                                             DMatrix *p_fmat) {
  // Accumulate g_j * x_j and h_j * x_j^2 over every entry of column fidx.
  double grad_sum = 0.0;
  double hess_sum = 0.0;
  for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
    auto col = batch[fidx];
    const auto nnz = static_cast<bst_omp_uint>(col.size());
    for (bst_omp_uint j = 0; j < nnz; ++j) {
      const auto &entry = col[j];
      const auto &p = gpair[entry.index * num_group + group_idx];
      // A negative Hessian flags an excluded row; skip it.
      if (p.GetHess() >= 0.0f) {
        const bst_float v = entry.fvalue;
        grad_sum += p.GetGrad() * v;
        hess_sum += p.GetHess() * v * v;
      }
    }
  }
  return {grad_sum, hess_sum};
}
/**
* \brief Get the gradient with respect to a single feature. Row-wise multithreaded.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param fidx The target feature.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for a given feature.
*/
inline std::pair<double, double> GetGradientParallel(int group_idx, int num_group, int fidx,
                                                     const std::vector<GradientPair> &gpair,
                                                     DMatrix *p_fmat) {
  // Same computation as GetGradient, but rows are processed in parallel
  // with an OpenMP reduction over the two accumulators.
  double sum_grad = 0.0, sum_hess = 0.0;
  for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
    auto col = batch[fidx];
    const auto nnz = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
    for (bst_omp_uint j = 0; j < nnz; ++j) {
      const auto &entry = col[j];
      const auto &p = gpair[entry.index * num_group + group_idx];
      // Rows flagged with a negative Hessian are excluded from training.
      if (p.GetHess() >= 0.0f) {
        const bst_float v = entry.fvalue;
        sum_grad += p.GetGrad() * v;
        sum_hess += p.GetHess() * v * v;
      }
    }
  }
  return {sum_grad, sum_hess};
}
/**
* \brief Get the gradient with respect to the bias. Row-wise multithreaded.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for the bias.
*/
inline std::pair<double, double> GetBiasGradientParallel(int group_idx, int num_group,
                                                         const std::vector<GradientPair> &gpair,
                                                         DMatrix *p_fmat) {
  // The bias multiplies every row with coefficient 1, so the gradient is a
  // plain sum of (g, h) over all valid rows of the chosen output group.
  double sum_grad = 0.0, sum_hess = 0.0;
  const auto nrow = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
  for (bst_omp_uint i = 0; i < nrow; ++i) {
    const auto &p = gpair[i * num_group + group_idx];
    // Skip rows flagged invalid via a negative Hessian.
    if (p.GetHess() >= 0.0f) {
      sum_grad += p.GetGrad();
      sum_hess += p.GetHess();
    }
  }
  return {sum_grad, sum_hess};
}
/**
* \brief Updates the gradient vector with respect to a change in weight.
*
* \param fidx The feature index.
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param dw The change in weight.
* \param in_gpair The gradient vector to be updated.
* \param p_fmat The input feature matrix.
*/
inline void UpdateResidualParallel(int fidx, int group_idx, int num_group,
                                   float dw, std::vector<GradientPair> *in_gpair,
                                   DMatrix *p_fmat) {
  // A zero weight change leaves every residual untouched.
  if (dw == 0.0f) return;
  for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
    auto col = batch[fidx];
    const auto nnz = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
    for (bst_omp_uint j = 0; j < nnz; ++j) {
      auto &p = (*in_gpair)[col[j].index * num_group + group_idx];
      // Skip invalidated rows (negative Hessian); otherwise shift the
      // gradient by hess * feature_value * dw.
      if (p.GetHess() >= 0.0f) {
        p += GradientPair(p.GetHess() * col[j].fvalue * dw, 0);
      }
    }
  }
}
/**
* \brief Updates the gradient vector based on a change in the bias.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param dbias The change in bias.
* \param in_gpair The gradient vector to be updated.
* \param p_fmat The input feature matrix.
*/
inline void UpdateBiasResidualParallel(int group_idx, int num_group, float dbias,
                                       std::vector<GradientPair> *in_gpair,
                                       DMatrix *p_fmat) {
  // Nothing to propagate when the bias did not move.
  if (dbias == 0.0f) return;
  const auto nrow = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static)
  for (bst_omp_uint i = 0; i < nrow; ++i) {
    auto &g = (*in_gpair)[i * num_group + group_idx];
    // Only rows still participating in training (hess >= 0) are updated.
    if (g.GetHess() >= 0.0f) {
      g += GradientPair(g.GetHess() * dbias, 0);
    }
  }
}
/**
* \brief Abstract class for stateful feature selection or ordering
* in coordinate descent algorithms.
*/
class FeatureSelector {
public:
/*! \brief factory method */
static FeatureSelector *Create(int choice);
/*! \brief virtual destructor */
virtual ~FeatureSelector() = default;
/**
* \brief Setting up the selector state prior to looping through features.
*
* \param model The model.
* \param gpair The gpair.
* \param p_fmat The feature matrix.
* \param alpha Regularisation alpha.
* \param lambda Regularisation lambda.
* \param param A parameter with algorithm-dependent use.
*
* \note The default implementation is a no-op, so stateless selectors
* need not override it.
*/
virtual void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat,
float alpha, float lambda, int param) {}
/**
* \brief Select next coordinate to update.
*
* \param iteration The iteration in a loop through features
* \param model The model.
* \param group_idx Zero-based index of the group.
* \param gpair The gpair.
* \param p_fmat The feature matrix.
* \param alpha Regularisation alpha.
* \param lambda Regularisation lambda.
*
* \return The index of the selected feature. -1 indicates none selected.
*/
virtual int NextFeature(int iteration,
const gbm::GBLinearModel &model,
int group_idx,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) = 0;
};
/**
* \brief Deterministic selection by cycling through features one at a time.
*/
class CyclicFeatureSelector : public FeatureSelector {
 public:
  // Walk the features round-robin: 0, 1, ..., num_feature-1, 0, ...
  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    const auto nfeat = model.learner_model_param->num_feature;
    return iteration % nfeat;
  }
};
/**
* \brief Similar to Cyclic but with random feature shuffling prior to each update.
* \note Its randomness is controllable by setting a random seed.
*/
class ShuffleFeatureSelector : public FeatureSelector {
 public:
  void Setup(const gbm::GBLinearModel &model,
             const std::vector<GradientPair> &gpair,
             DMatrix *p_fmat, float alpha, float lambda, int param) override {
    // Lazily build the identity permutation on first use, then reshuffle
    // it with the process-wide (seedable) RNG before each round.
    if (feat_index_.empty()) {
      feat_index_.resize(model.learner_model_param->num_feature);
      std::iota(feat_index_.begin(), feat_index_.end(), 0);
    }
    std::shuffle(feat_index_.begin(), feat_index_.end(), common::GlobalRandom());
  }
  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    // Cycle through the shuffled ordering.
    const auto nfeat = model.learner_model_param->num_feature;
    return feat_index_[iteration % nfeat];
  }
 protected:
  // current permutation of feature indices
  std::vector<bst_uint> feat_index_;
};
/**
* \brief A random (with replacement) coordinate selector.
* \note Its randomness is controllable by setting a random seed.
*/
class RandomFeatureSelector : public FeatureSelector {
 public:
  // Draw a feature uniformly at random (with replacement) from the
  // process-wide RNG, so runs are reproducible given a fixed seed.
  int NextFeature(int iteration, const gbm::GBLinearModel &model,
                  int group_idx, const std::vector<GradientPair> &gpair,
                  DMatrix *p_fmat, float alpha, float lambda) override {
    const auto nfeat = model.learner_model_param->num_feature;
    return common::GlobalRandom()() % nfeat;
  }
};
/**
* \brief Select coordinate with the greatest gradient magnitude.
* \note It has O(num_feature^2) complexity. It is fully deterministic.
*
* \note It allows restricting the selection to top_k features per group with
* the largest magnitude of univariate weight change, by passing the top_k value
* through the `param` argument of Setup(). That would reduce the complexity to
* O(num_feature*top_k).
*/
class GreedyFeatureSelector : public FeatureSelector {
public:
void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda, int param) override {
top_k_ = static_cast<bst_uint>(param);
const bst_uint ngroup = model.learner_model_param->num_output_group;
// param <= 0 means "no top-k cap": consider every feature each round
if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
if (counter_.size() == 0) {
counter_.resize(ngroup);
gpair_sums_.resize(model.learner_model_param->num_feature * ngroup);
}
// reset per-group selection counters at the start of each round
for (bst_uint gid = 0u; gid < ngroup; ++gid) {
counter_[gid] = 0u;
}
}
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
// k-th selected feature for a group
auto k = counter_[group_idx]++;
// stop after either reaching top-K or going through all the features in a group
if (k >= top_k_ || counter_[group_idx] == model.learner_model_param->num_feature) return -1;
const int ngroup = model.learner_model_param->num_output_group;
const bst_omp_uint nfeat = model.learner_model_param->num_feature;
// Calculate univariate gradient sums
std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < nfeat; ++i) {
const auto col = batch[i];
const bst_uint ndata = col.size();
auto &sums = gpair_sums_[group_idx * nfeat + i];
for (bst_uint j = 0u; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * ngroup + group_idx];
// rows flagged with a negative Hessian are skipped
if (p.GetHess() < 0.f) continue;
sums.first += p.GetGrad() * v;
sums.second += p.GetHess() * v * v;
}
}
}
// Find a feature with the largest magnitude of weight change
int best_fidx = 0;
double best_weight_update = 0.0f;
for (bst_omp_uint fidx = 0; fidx < nfeat; ++fidx) {
auto &s = gpair_sums_[group_idx * nfeat + fidx];
float dw = std::abs(static_cast<bst_float>(
CoordinateDelta(s.first, s.second, model[fidx][group_idx], alpha, lambda)));
if (dw > best_weight_update) {
best_weight_update = dw;
best_fidx = fidx;
}
}
// NOTE(review): when no feature yields a non-zero update, feature 0 is
// returned rather than -1 -- confirm this fallback is intended.
return best_fidx;
}
protected:
// maximum number of features selected per group in one round
bst_uint top_k_;
// per-group count of features already selected this round
std::vector<bst_uint> counter_;
// per (group, feature) accumulated (gradient, hessian) sums
std::vector<std::pair<double, double>> gpair_sums_;
};
/**
* \brief Thrifty, approximately-greedy feature selector.
*
* \note Prior to cyclic updates, reorders features in descending magnitude of
* their univariate weight changes. This operation is multithreaded and is a
* linear complexity approximation of the quadratic greedy selection.
*
* \note It allows restricting the selection to top_k features per group with
* the largest magnitude of univariate weight change, by passing the top_k value
* through the `param` argument of Setup().
*/
class ThriftyFeatureSelector : public FeatureSelector {
public:
void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda, int param) override {
top_k_ = static_cast<bst_uint>(param);
// param <= 0 means "no top-k cap": rank and use all features
if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
const bst_uint ngroup = model.learner_model_param->num_output_group;
const bst_omp_uint nfeat = model.learner_model_param->num_feature;
if (deltaw_.size() == 0) {
deltaw_.resize(nfeat * ngroup);
sorted_idx_.resize(nfeat * ngroup);
counter_.resize(ngroup);
gpair_sums_.resize(nfeat * ngroup);
}
// Calculate univariate gradient sums
std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
for (const auto &batch : p_fmat->GetBatches<CSCPage>()) {
// column-parallel is usually faster than row-parallel
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < nfeat; ++i) {
const auto col = batch[i];
const bst_uint ndata = col.size();
for (bst_uint gid = 0u; gid < ngroup; ++gid) {
auto &sums = gpair_sums_[gid * nfeat + i];
for (bst_uint j = 0u; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * ngroup + gid];
// rows flagged with a negative Hessian are skipped
if (p.GetHess() < 0.f) continue;
sums.first += p.GetGrad() * v;
sums.second += p.GetHess() * v * v;
}
}
}
}
// rank by descending weight magnitude within the groups
std::fill(deltaw_.begin(), deltaw_.end(), 0.f);
std::iota(sorted_idx_.begin(), sorted_idx_.end(), 0);
bst_float *pdeltaw = &deltaw_[0];
for (bst_uint gid = 0u; gid < ngroup; ++gid) {
// Calculate univariate weight changes
for (bst_omp_uint i = 0; i < nfeat; ++i) {
auto ii = gid * nfeat + i;
auto &s = gpair_sums_[ii];
deltaw_[ii] = static_cast<bst_float>(CoordinateDelta(
s.first, s.second, model[i][gid], alpha, lambda));
}
// sort in descending order of deltaw abs values
auto start = sorted_idx_.begin() + gid * nfeat;
std::sort(start, start + nfeat,
[pdeltaw](size_t i, size_t j) {
return std::abs(*(pdeltaw + i)) > std::abs(*(pdeltaw + j));
});
counter_[gid] = 0u;
}
}
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
// k-th selected feature for a group
auto k = counter_[group_idx]++;
// stop after either reaching top-N or going through all the features in a group
if (k >= top_k_ || counter_[group_idx] == model.learner_model_param->num_feature) return -1;
// note that sorted_idx stores the "long" indices
const size_t grp_offset = group_idx * model.learner_model_param->num_feature;
return static_cast<int>(sorted_idx_[grp_offset + k] - grp_offset);
}
protected:
// maximum number of features selected per group in one round
bst_uint top_k_;
// univariate weight change per (group, feature), filled by Setup()
std::vector<bst_float> deltaw_;
// per-group feature ordering, descending |deltaw_|
std::vector<size_t> sorted_idx_;
// per-group count of features already selected this round
std::vector<bst_uint> counter_;
// per (group, feature) accumulated (gradient, hessian) sums
std::vector<std::pair<double, double>> gpair_sums_;
};
// Factory: map the selector enum to a freshly allocated instance.
// Aborts via LOG(FATAL) on an unknown choice; the trailing return only
// exists to satisfy the compiler.
inline FeatureSelector *FeatureSelector::Create(int choice) {
  switch (choice) {
    case kCyclic:  return new CyclicFeatureSelector();
    case kShuffle: return new ShuffleFeatureSelector();
    case kRandom:  return new RandomFeatureSelector();
    case kGreedy:  return new GreedyFeatureSelector();
    case kThrifty: return new ThriftyFeatureSelector();
    default:
      LOG(FATAL) << "unknown coordinate selector: " << choice;
  }
  return nullptr;
}
} // namespace linear
} // namespace xgboost
|
bfs_one_sided.c | /* Copyright (C) 2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
#include "common.h"
#include "oned_csr.h"
#include <mpi.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <limits.h>
static oned_csr_graph g;
/* Build the process-local 1-D CSR representation (stored in the file-static
 * `g`) from the raw edge tuples; all later BFS runs read from `g`. */
void make_graph_data_structure(const tuple_graph* const tg) {
  convert_graph_to_oned_csr(tg, &g);
}
/* Release the CSR graph built by make_graph_data_structure(). */
void free_graph_data_structure(void) {
  free_oned_csr_graph(&g);
}
/* Report whether this BFS implementation also fills in a depth (level) map.
 * The one-sided version produces only the predecessor map. */
int bfs_writes_depth_map(void) {
  const int writes_depth_map = 0;
  return writes_depth_map;
}
/* This BFS represents its queues as bitmaps and uses some data representation
* tricks to fit with the use of MPI one-sided operations. It is not much
* faster than the standard version on the machines I have tested it on, but
* systems that have good RDMA hardware and good MPI one-sided implementations
* might get better performance from it. This code might also be good to
* translate to UPC, Co-array Fortran, SHMEM, or GASNet since those systems are
* more designed for one-sided remote memory operations. */
/* Level-synchronous BFS from `root`, writing predecessors into `pred`.
 * Each level exposes pred2 and the next-level queue bitmap through MPI
 * windows; remote updates use MPI_Accumulate (MPI_MIN on predecessors,
 * MPI_BOR on queue bits), bracketed by MPI_Win_fence epochs. */
void run_bfs(int64_t root, int64_t* pred) {
  const size_t nlocalverts = g.nlocalverts;
  const int64_t nglobalverts = g.nglobalverts;

  /* Set up a second predecessor map so we can read from one and modify the
   * other. */
  int64_t* orig_pred = pred;
  int64_t* pred2 = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t));

  /* The queues (old and new) are represented as bitmaps.  Each bit in the
   * queue bitmap says to check elts_per_queue_bit elements in the predecessor
   * map for vertices that need to be visited.  In other words, the queue
   * bitmap is an overapproximation of the actual queue; because MPI_Accumulate
   * does not get any information on the result of the update, sometimes
   * elements are also added to the bitmap when they were actually already
   * black.  Because of this, the predecessor map needs to be checked to be
   * sure a given vertex actually needs to be processed. */
  const int elts_per_queue_bit = 4;
  const int ulong_bits = sizeof(unsigned long) * CHAR_BIT;
  int64_t queue_nbits = (nlocalverts + elts_per_queue_bit - 1) / elts_per_queue_bit;
  int64_t queue_nwords = (queue_nbits + ulong_bits - 1) / ulong_bits;
  unsigned long* queue_bitmap1 = (unsigned long*)xMPI_Alloc_mem(queue_nwords * sizeof(unsigned long));
  unsigned long* queue_bitmap2 = (unsigned long*)xMPI_Alloc_mem(queue_nwords * sizeof(unsigned long));
  memset(queue_bitmap1, 0, queue_nwords * sizeof(unsigned long));

  /* List of local vertices (used as sources in MPI_Accumulate). */
  int64_t* local_vertices = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t));
  {size_t i; for (i = 0; i < nlocalverts; ++i) local_vertices[i] = VERTEX_TO_GLOBAL(rank, i);}

  /* List of all bit masks for an unsigned long (used as sources in
   * MPI_Accumulate). */
  unsigned long masks[ulong_bits];
  {int i; for (i = 0; i < ulong_bits; ++i) masks[i] = (1UL << i);}

  /* Coding of predecessor map: */
  /* - White (not visited): INT64_MAX */
  /* - Grey (in queue): 0 .. nglobalverts-1 */
  /* - Black (done): -nglobalverts .. -1 */

  /* Set initial predecessor map. */
  {size_t i; for (i = 0; i < nlocalverts; ++i) pred[i] = INT64_MAX;}

  /* Mark root as grey and add it to the queue. */
  if (VERTEX_OWNER(root) == rank) {
    pred[VERTEX_LOCAL(root)] = root;
    queue_bitmap1[VERTEX_LOCAL(root) / elts_per_queue_bit / ulong_bits] |= (1UL << ((VERTEX_LOCAL(root) / elts_per_queue_bit) % ulong_bits));
  }

  /* Create MPI windows on the two predecessor arrays and the two queues. */
  MPI_Win pred_win, pred2_win, queue1_win, queue2_win;
  MPI_Win_create(pred, nlocalverts * sizeof(int64_t), sizeof(int64_t), MPI_INFO_NULL, MPI_COMM_WORLD, &pred_win);
  MPI_Win_create(pred2, nlocalverts * sizeof(int64_t), sizeof(int64_t), MPI_INFO_NULL, MPI_COMM_WORLD, &pred2_win);
  MPI_Win_create(queue_bitmap1, queue_nwords * sizeof(unsigned long), sizeof(unsigned long), MPI_INFO_NULL, MPI_COMM_WORLD, &queue1_win);
  MPI_Win_create(queue_bitmap2, queue_nwords * sizeof(unsigned long), sizeof(unsigned long), MPI_INFO_NULL, MPI_COMM_WORLD, &queue2_win);

  while (1) {
    int64_t i;
    /* Clear the next-level queue. */
    memset(queue_bitmap2, 0, queue_nwords * sizeof(unsigned long));

    /* The pred2 array is pred with all grey vertices changed to black. */
    memcpy(pred2, pred, nlocalverts * sizeof(int64_t));
    for (i = 0; i < (int64_t)nlocalverts; ++i) {
      if (pred2[i] >= 0 && pred2[i] < nglobalverts) pred2[i] -= nglobalverts;
    }

    /* Start one-sided operations for this level: open the exposure epoch on
     * the two windows that will receive remote accumulates. */
    MPI_Win_fence(MPI_MODE_NOPRECEDE, pred2_win);
    MPI_Win_fence(MPI_MODE_NOPRECEDE, queue2_win);

    /* Step through the words of the queue bitmap. */
    for (i = 0; i < queue_nwords; ++i) {
      unsigned long val = queue_bitmap1[i];
      int bitnum;
      /* Skip any that are all zero. */
      if (!val) continue;
      /* Scan the bits in the word. */
      for (bitnum = 0; bitnum < ulong_bits; ++bitnum) {
        size_t first_v_local = (size_t)((i * ulong_bits + bitnum) * elts_per_queue_bit);
        if (first_v_local >= nlocalverts) break;
        int bit = (int)((val >> bitnum) & 1);
        /* Skip any that are zero. */
        if (!bit) continue;
        /* Scan the queue elements corresponding to this bit. */
        int qelem_idx;
        for (qelem_idx = 0; qelem_idx < elts_per_queue_bit; ++qelem_idx) {
          size_t v_local = first_v_local + qelem_idx;
          if (v_local >= nlocalverts) continue;
          /* Since the queue is an overapproximation, check the predecessor map
           * to be sure this vertex is grey. */
          if (pred[v_local] >= 0 && pred[v_local] < nglobalverts) {
            size_t ei, ei_end = g.rowstarts[v_local + 1];
            /* Walk the incident edges. */
            for (ei = g.rowstarts[v_local]; ei < ei_end; ++ei) {
              int64_t w = g.column[ei];
              if (w == VERTEX_TO_GLOBAL(rank, v_local)) continue; /* Self-loop */
              /* Set the predecessor of the other edge endpoint (note use of
               * MPI_MIN and the coding of the predecessor map). */
              MPI_Accumulate(&local_vertices[v_local], 1, MPI_INT64_T, VERTEX_OWNER(w), VERTEX_LOCAL(w), 1, MPI_INT64_T, MPI_MIN, pred2_win);
              /* Mark the endpoint in the remote queue (note that the min may
               * not do an update, so the queue is an overapproximation in this
               * way as well). */
              MPI_Accumulate(&masks[((VERTEX_LOCAL(w) / elts_per_queue_bit) % ulong_bits)], 1, MPI_UNSIGNED_LONG, VERTEX_OWNER(w), VERTEX_LOCAL(w) / elts_per_queue_bit / ulong_bits, 1, MPI_UNSIGNED_LONG, MPI_BOR, queue2_win);
            }
          }
        }
      }
    }
    /* End one-sided operations. */
    MPI_Win_fence(MPI_MODE_NOSUCCEED, queue2_win);
    MPI_Win_fence(MPI_MODE_NOSUCCEED, pred2_win);

    /* Test if there are any elements in the next-level queue (globally); stop
     * if none. */
    int any_set = 0;
    for (i = 0; i < queue_nwords; ++i) {
      if (queue_bitmap2[i] != 0) {any_set = 1; break;}
    }
    MPI_Allreduce(MPI_IN_PLACE, &any_set, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
    if (!any_set) break;

    /* Swap queues and predecessor maps; each window is swapped together with
     * the buffer it was created on. */
    {MPI_Win temp = queue1_win; queue1_win = queue2_win; queue2_win = temp;}
    {unsigned long* temp = queue_bitmap1; queue_bitmap1 = queue_bitmap2; queue_bitmap2 = temp;}
    {MPI_Win temp = pred_win; pred_win = pred2_win; pred2_win = temp;}
    {int64_t* temp = pred; pred = pred2; pred2 = temp;}
  }
  MPI_Win_free(&pred_win);
  MPI_Win_free(&pred2_win);
  MPI_Win_free(&queue1_win);
  MPI_Win_free(&queue2_win);
  MPI_Free_mem(local_vertices);
  MPI_Free_mem(queue_bitmap1);
  MPI_Free_mem(queue_bitmap2);

  /* Clean up the predecessor map swapping since the surrounding code does not
   * allow the BFS to change the predecessor map pointer. */
  if (pred2 != orig_pred) {
    memcpy(orig_pred, pred2, nlocalverts * sizeof(int64_t));
    MPI_Free_mem(pred2);
  } else {
    MPI_Free_mem(pred);
  }

  /* Change from special coding of predecessor map to the one the benchmark
   * requires. */
  size_t i;
  for (i = 0; i < nlocalverts; ++i) {
    if (orig_pred[i] < 0) {
      orig_pred[i] += nglobalverts;
    } else if (orig_pred[i] == INT64_MAX) {
      orig_pred[i] = -1;
    }
  }
}
/* For each global vertex id in vertex_p[0..count), compute its owning rank
 * (into owner_p) and its index local to that rank (into local_p). */
void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p) {
  ptrdiff_t i;
#pragma omp parallel for
  for (i = 0; i < (ptrdiff_t)count; ++i) {
    const int64_t v = vertex_p[i];
    owner_p[i] = VERTEX_OWNER(v);
    local_p[i] = VERTEX_LOCAL(v);
  }
}
/* Inverse of the (owner, local) split: rebuild the global vertex id. */
int64_t vertex_to_global_for_pred(int v_rank, size_t v_local) {
  return VERTEX_TO_GLOBAL(v_rank, v_local);
}
/* Number of vertices owned by this rank (size of the local pred slice). */
size_t get_nlocalverts_for_pred(void) {
  return g.nlocalverts;
}
|
GB_split_full_template.c | //------------------------------------------------------------------------------
// GB_split_full_template: split a full matrix into a full tile
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// This method is not used when the matrices are iso.
{

    //--------------------------------------------------------------------------
    // get C and the tile A
    //--------------------------------------------------------------------------

    // cnz, cvlen, aistart, avstart, avlen, C_nthreads, and done are supplied
    // by the including file; A is the full source matrix and C is the tile
    // being extracted.
    const GB_CTYPE *restrict Ax = (GB_CTYPE *) A->x ;
    GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
    ASSERT (!A->iso) ;

    int64_t pC ;
    #pragma omp parallel for num_threads(C_nthreads) schedule(static)
    for (pC = 0 ; pC < cnz ; pC++)
    {
        // Map the linear tile position pC to (i,j) within the tile, then
        // offset by (aistart, avstart) to find the matching position pA in A.
        int64_t i = pC % cvlen ;
        int64_t j = pC / cvlen ;
        int64_t iA = aistart + i ;
        int64_t jA = avstart + j ;
        int64_t pA = iA + jA * avlen ;
        // Cx [pC] = Ax [pA] ;
        GB_COPY (pC, pA) ;
    }

    done = true ;
}
#undef GB_CTYPE
|
GB_binop__bget_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bget_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bget_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int64)
// C=scalar+B GB (_bind1st__bget_int64)
// C=scalar+B' GB (_bind1st_tran__bget_int64)
// C=A+scalar GB (_bind2nd__bget_int64)
// C=A'+scalar GB (_bind2nd_tran__bget_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_BITGET (aij, bij, int64_t, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITGET (x, y, int64_t, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_INT64 || GxB_NO_BGET_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bget_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B with the bget int64 operator, all three matrices dense;
    // the work is done by the shared template.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bget_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // C += B: accumulate a sparse B into a dense C, using the task slicing
    // (B_ek_slicing) computed by the caller.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bget_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b: accumulate a single scalar into every entry of a dense C.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return below, kept exactly as
    // emitted by the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bget_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the bget int64 operator;
    // workspace is declared here and released after the shared template runs.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bget_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult (method 08): C=A.*B with optional mask, C sparse/hypersparse.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bget_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // eWiseMult (method 02): A sparse/hyper, B bitmap/full.  GB_BINOP_FLIP is
    // 1 for this operator, so the flipxy branch below is active.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bget_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // eWiseMult (method 04): C<M>=A.*B with M sparse/hyper, A and B
    // bitmap/full; work is sliced over the mask M.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bget_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult: C=A.*B (optionally masked) where the result C is bitmap.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bget_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    // Cx [p] = bget (x, Bx [p]) for all p, skipping positions not present in
    // the bitmap Bb (GBB is the bitmap test; GBX reads the value).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITGET (aij, y, int64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bget_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = bget (Ax [p], y) for all p, skipping positions not present in
    // the bitmap Ab.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITGET (aij, y, int64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (x, aij, int64_t, 64) ; \
}
GrB_Info GB (_bind1st_tran__bget_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = bget (x, A'): transpose A while applying the operator with the
    // scalar bound to the first argument (via the GB_CAST_OP macro above).

    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (aij, y, int64_t, 64) ; \
}
GrB_Info GB (_bind2nd_tran__bget_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = bget (A', y): transpose A while applying the operator with the
    // scalar bound to the second argument (via the GB_CAST_OP macro above).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/animate.h"
#include "magick/animate.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImageChannel method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImageChannel(Image *image,
% const ChannelType channel,const MagickEvaluateOperator op,
% const double value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o op: A channel op.
%
% o value: the constant operand applied by the evaluate operator. %
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickPixelPacket **DestroyPixelThreadSet(const Image *images,
  MagickPixelPacket **pixels)
{
  /*
    Free the per-thread pixel rows allocated by AcquirePixelThreadSet(),
    then the row table itself; returns NULL for convenient assignment.
  */
  ssize_t
    row;

  size_t
    rows;

  assert(pixels != (MagickPixelPacket **) NULL);
  rows=MagickMax(GetImageListLength(images),
    (size_t) GetMagickResourceLimit(ThreadResource));
  for (row=(ssize_t) rows-1; row >= 0; row--)
    if (pixels[row] != (MagickPixelPacket *) NULL)
      pixels[row]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[row]);
  return((MagickPixelPacket **) RelinquishMagickMemory(pixels));
}
static MagickPixelPacket **AcquirePixelThreadSet(const Image *images)
{
  /*
    Allocate one row of MagickPixelPacket scratch space per worker thread
    (and at least one per image in the list).  Each row is wide enough for
    the widest image.  Returns NULL on allocation failure, after freeing any
    rows already allocated via DestroyPixelThreadSet().
  */
  const Image
    *next;

  MagickPixelPacket
    **pixels;

  register ssize_t
    i,
    j;

  size_t
    columns,
    rows;

  rows=MagickMax(GetImageListLength(images),
    (size_t) GetMagickResourceLimit(ThreadResource));
  pixels=(MagickPixelPacket **) AcquireQuantumMemory(rows,sizeof(*pixels));
  if (pixels == (MagickPixelPacket **) NULL)
    return((MagickPixelPacket **) NULL);
  /* Zero the table so a partial-failure cleanup can tell which rows exist. */
  (void) memset(pixels,0,rows*sizeof(*pixels));
  /* NOTE(review): columns starts at the list length, then is raised to the
     widest image — presumably to guarantee a minimum width; confirm. */
  columns=GetImageListLength(images);
  for (next=images; next != (Image *) NULL; next=next->next)
    columns=MagickMax(next->columns,columns);
  for (i=0; i < (ssize_t) rows; i++)
  {
    pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(columns,
      sizeof(**pixels));
    if (pixels[i] == (MagickPixelPacket *) NULL)
      return(DestroyPixelThreadSet(images,pixels));
    /* Initialize every packet to the images' pixel defaults. */
    for (j=0; j < (ssize_t) columns; j++)
      GetMagickPixelPacket(images,&pixels[i][j]);
  }
  return(pixels);
}
/* Return the larger of the two operands. */
static inline double EvaluateMax(const double x,const double y)
{
  return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
  /*
    qsort() comparator: orders MagickPixelPacket values by *decreasing*
    intensity (the brighter pixel sorts first).
  */
  const MagickPixelPacket
    *color_1,
    *color_2;

  int
    intensity;

  color_1=(const MagickPixelPacket *) x;
  color_2=(const MagickPixelPacket *) y;
  /* NOTE(review): intensities are truncated to int before subtracting, so
     pixels whose intensities differ by less than 1 compare as equal —
     presumably acceptable for this sort; confirm. */
  intensity=(int) MagickPixelIntensity(color_2)-(int)
    MagickPixelIntensity(color_1);
  return(intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  ApplyEvaluateOperator() applies evaluate operator 'op' with operand 'value'
  to the single quantum 'pixel' and returns the (unclamped) result as a
  MagickRealType.  'random_info' supplies entropy for the noise operators.
  Note that Mean, Median, and Sum merely accumulate here; the caller performs
  the final division or selection (see EvaluateImages()).  An undefined
  operator yields 0.0.
*/
static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info,
  const Quantum pixel,const MagickEvaluateOperator op,
  const MagickRealType value)
{
  MagickRealType
    result;

  register ssize_t
    i;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(MagickRealType) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(MagickRealType) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a
        positive result. It differs from % or fmod() which returns a
        'truncated modulus' result, where floor() is replaced by trunc()
        and could return a negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      /* value+0.5 rounds the operand before the bitwise AND */
      result=(MagickRealType) ((ssize_t) pixel & (ssize_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* a zero divisor is treated as 1.0 to avoid division by zero */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale*
        pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        GaussianNoise,value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        ImpulseNoise,value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      /* value is interpreted as a (truncated) shift count; each step
         doubles, so negative counts leave the pixel unchanged */
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result*=2.0;
      break;
    }
    case LogEvaluateOperator:
    {
      /* guard: near-black pixels (below MagickEpsilon after scaling)
         keep result at 0.0 rather than evaluating log near zero */
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value*
          pixel+1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(MagickRealType) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* accumulate only; the caller divides by the sample count */
      result=(MagickRealType) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      /* accumulate only; the caller sorts and selects the middle value */
      result=(MagickRealType) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(MagickRealType) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(MagickRealType) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      /* value+0.5 rounds the operand before the bitwise OR */
      result=(MagickRealType) ((ssize_t) pixel | (ssize_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        PoissonNoise,value);
      break;
    }
    case PowEvaluateOperator:
    {
      /* preserve the sign of negative (HDRI) pixels through pow() */
      if (pixel < 0)
        result=(MagickRealType) -(QuantumRange*pow((double) -(QuantumScale*
          pixel),(double) value));
      else
        result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel),
          (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      /* value is interpreted as a (truncated) shift count; each step halves */
      result=(MagickRealType) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result/=2.0;
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      /* accumulate squares only; the caller divides and takes sqrt() */
      result=((MagickRealType) pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(MagickRealType) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(MagickRealType) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 :
        QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange :
        pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
        UniformNoise,value);
      break;
    }
    case XorEvaluateOperator:
    {
      /* value+0.5 rounds the operand before the bitwise XOR */
      result=(MagickRealType) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
      break;
    }
  }
  return(result);
}
/*
  AcquireImageCanvas() clones a canvas large enough for every frame in the
  list: its dimensions are the maximum width and height found, and the clone
  template is the frame carrying the most channels (RGB, plus alpha if matte
  is set, plus black if CMYK) so no channel data is lost in the clone.
*/
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  const Image
    *canvas,
    *frame;

  size_t
    height,
    max_channels,
    width;

  canvas=images;
  width=images->columns;
  height=images->rows;
  max_channels=0;
  for (frame=images; frame != (Image *) NULL; frame=frame->next)
  {
    size_t
      channels;

    channels=3;
    if (frame->matte != MagickFalse)
      channels++;
    if (frame->colorspace == CMYKColorspace)
      channels++;
    if (channels > max_channels)
      {
        max_channels=channels;
        canvas=frame;
      }
    width=MagickMax(frame->columns,width);
    height=MagickMax(frame->rows,height);
  }
  return(CloneImage(canvas,width,height,MagickTrue,exception));
}
/*
  EvaluateImage() applies the evaluate operator 'op' with operand 'value' to
  every channel of the image; it is a convenience wrapper around
  EvaluateImageChannel() with CompositeChannels.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  return(EvaluateImageChannel(image,CompositeChannels,op,value,exception));
}
/*
  EvaluateImages() reduces a sequence of images to a single image by applying
  the evaluate operator across corresponding pixels of every frame (e.g. sum,
  mean, median, multiply).  A canvas sized and channeled to cover the whole
  list is returned, or NULL on failure.

  Fixes relative to the previous revision:
    * Median branch: the index (black) channel result was written to
      evaluate_indexes+i, where i equals the frame count after the inner
      loop — an out-of-bounds/wrong-location write.  It now writes to
      evaluate_indexes+x (the current column), matching the non-median
      branch.
    * The non-median progress counter is now incremented under
      "#pragma omp atomic" like the median branch, avoiding a data race on
      'progress' across OpenMP threads.
*/
MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag  "Evaluate/Image"

  CacheView
    *evaluate_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    **magick_restrict evaluate_pixels,
    zero;

  RandomInfo
    **magick_restrict random_info;

  size_t
    number_images;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  evaluate_pixels=AcquirePixelThreadSet(images);
  if (evaluate_pixels == (MagickPixelPacket **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  number_images=GetImageListLength(images);
  GetMagickPixelPacket(images,&zero);
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
      /*
        Median: per output pixel, gather the corresponding pixel from every
        frame, sort by intensity, and select the middle element.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView
          *image_view;

        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        register IndexPacket
          *magick_restrict evaluate_indexes;

        register MagickPixelPacket
          *evaluate_pixel;

        register PixelPacket
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) number_images; i++)
            evaluate_pixel[i]=zero;
          next=images;
          for (i=0; i < (ssize_t) number_images; i++)
          {
            register const IndexPacket
              *indexes;

            register const PixelPacket
              *p;

            image_view=AcquireVirtualCacheView(next,exception);
            p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
            if (p == (const PixelPacket *) NULL)
              {
                image_view=DestroyCacheView(image_view);
                break;
              }
            indexes=GetCacheViewVirtualIndexQueue(image_view);
            /*
              With op == MedianEvaluateOperator the operator adds onto the
              zeroed accumulator, so slot i receives frame i's pixel value.
            */
            evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id],
              GetPixelRed(p),op,evaluate_pixel[i].red);
            evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id],
              GetPixelGreen(p),op,evaluate_pixel[i].green);
            evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id],
              GetPixelBlue(p),op,evaluate_pixel[i].blue);
            evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id],
              GetPixelAlpha(p),op,evaluate_pixel[i].opacity);
            if (image->colorspace == CMYKColorspace)
              evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id],
                *indexes,op,evaluate_pixel[i].index);
            image_view=DestroyCacheView(image_view);
            next=GetNextImageInList(next);
          }
          /*
            i == number_images here (fewer only if a read failed); the
            middle element of the intensity-sorted set is the median.
          */
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red));
          SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green));
          SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue));
          SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity));
          if (image->colorspace == CMYKColorspace)
            SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
              evaluate_pixel[i/2].index));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
      /*
        All other operators: accumulate a whole row across frames, then apply
        any final normalization (mean, RMS, multiply rescale) before output.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView
          *image_view;

        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        register IndexPacket
          *magick_restrict evaluate_indexes;

        register ssize_t
          i,
          x;

        register MagickPixelPacket
          *evaluate_pixel;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
          evaluate_pixel[x]=zero;
        next=images;
        for (i=0; i < (ssize_t) number_images; i++)
        {
          register const IndexPacket
            *indexes;

          register const PixelPacket
            *p;

          image_view=AcquireVirtualCacheView(next,exception);
          p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
            exception);
          if (p == (const PixelPacket *) NULL)
            {
              image_view=DestroyCacheView(image_view);
              break;
            }
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            /*
              The first frame seeds the accumulator (Add), subsequent frames
              apply the requested operator against the running value.
            */
            evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id],
              GetPixelRed(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].red);
            evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id],
              GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].green);
            evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id],
              GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].blue);
            evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id],
              GetPixelAlpha(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].opacity);
            if (image->colorspace == CMYKColorspace)
              evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id],
                GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op,
                evaluate_pixel[x].index);
            p++;
          }
          image_view=DestroyCacheView(image_view);
          next=GetNextImageInList(next);
        }
        if (op == MeanEvaluateOperator)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            evaluate_pixel[x].red/=number_images;
            evaluate_pixel[x].green/=number_images;
            evaluate_pixel[x].blue/=number_images;
            evaluate_pixel[x].opacity/=number_images;
            evaluate_pixel[x].index/=number_images;
          }
        if (op == RootMeanSquareEvaluateOperator)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            evaluate_pixel[x].red=sqrt((double) evaluate_pixel[x].red/
              number_images);
            evaluate_pixel[x].green=sqrt((double) evaluate_pixel[x].green/
              number_images);
            evaluate_pixel[x].blue=sqrt((double) evaluate_pixel[x].blue/
              number_images);
            evaluate_pixel[x].opacity=sqrt((double) evaluate_pixel[x].opacity/
              number_images);
            evaluate_pixel[x].index=sqrt((double) evaluate_pixel[x].index/
              number_images);
          }
        if (op == MultiplyEvaluateOperator)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            register ssize_t
              j;

            /*
              Each multiply accumulated one extra QuantumRange factor;
              rescale once per multiplication performed.
            */
            for (j=0; j < (ssize_t) (number_images-1); j++)
            {
              evaluate_pixel[x].red*=(MagickRealType) QuantumScale;
              evaluate_pixel[x].green*=(MagickRealType) QuantumScale;
              evaluate_pixel[x].blue*=(MagickRealType) QuantumScale;
              evaluate_pixel[x].opacity*=(MagickRealType) QuantumScale;
              evaluate_pixel[x].index*=(MagickRealType) QuantumScale;
            }
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red));
          SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green));
          SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue));
          SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity));
          if (image->colorspace == CMYKColorspace)
            SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
              evaluate_pixel[x].index));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
  EvaluateImageChannel() applies an arithmetic, relational, or logical
  evaluate operator 'op' with operand 'value' to the selected channels of
  'image' in place, row by row.  Returns MagickTrue on success; errors are
  reported through 'exception'.  NOTE(review): the OpenMP clause gates on
  "key == ~0UL" — presumably multithreading is disabled when the random
  generator is seeded so noise operators stay reproducible; confirm against
  the magick_number_threads definition.
*/
MagickExport MagickBooleanType EvaluateImageChannel(Image *image,
  const ChannelType channel,const MagickEvaluateOperator op,const double value,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        result;

      if ((channel & RedChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelRed(q),op,value);
          /* Mean adds pixel+value in the operator; halve for the average */
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelRed(q,ClampToQuantum(result));
        }
      if ((channel & GreenChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelGreen(q),op,
            value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelGreen(q,ClampToQuantum(result));
        }
      if ((channel & BlueChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelBlue(q),op,
            value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelBlue(q,ClampToQuantum(result));
        }
      if ((channel & OpacityChannel) != 0)
        {
          /* without a matte channel, operate on raw opacity; otherwise on
             the alpha interpretation */
          if (image->matte == MagickFalse)
            {
              result=ApplyEvaluateOperator(random_info[id],GetPixelOpacity(q),
                op,value);
              if (op == MeanEvaluateOperator)
                result/=2.0;
              SetPixelOpacity(q,ClampToQuantum(result));
            }
          else
            {
              result=ApplyEvaluateOperator(random_info[id],GetPixelAlpha(q),
                op,value);
              if (op == MeanEvaluateOperator)
                result/=2.0;
              SetPixelAlpha(q,ClampToQuantum(result));
            }
        }
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelIndex(indexes+x),
            op,value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelIndex(indexes+x,ClampToQuantum(result));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): progress++ is not atomic here, unlike the median
           branch of EvaluateImages(); the count may under-report under
           OpenMP — confirm whether that is acceptable for progress only */
        proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a value to the image with an arithmetic, relational,
% or logical operator. Use these operations to lighten or darken an image, to
% increase or decrease contrast in an image, or to produce the "negative" of
% an image.
%
% The format of the FunctionImageChannel method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
% MagickBooleanType FunctionImageChannel(Image *image,
% const ChannelType channel,const MagickFunction function,
% const ssize_t number_parameters,const double *argument,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ApplyFunction() evaluates the selected channel function at a single quantum
  and returns the clamped result.  Parameters beyond those supplied assume
  the documented defaults; an undefined function yields black (0).
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  MagickRealType
    result;

  register ssize_t
    i;

  (void) exception;
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: coefficients are supplied highest order first (e.g.
        c0*x^3 + c1*x^2 + c2*x + c3) and evaluated by Horner's rule on the
        normalized pixel value.
      */
      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel + parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      /*
        Sinusoid: parameters are frequency, phase (degrees), amplitude, bias.
      */
      double
        amplitude,
        bias,
        frequency,
        phase;

      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(MagickRealType) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel + phase/360.0) )) + bias ) );
      break;
    }
    case ArcsinFunction:
    {
      /*
        Arcsin (pegged at the range limits for out-of-domain inputs):
        parameters are width, center, range, bias.
      */
      double
        bias,
        center,
        range,
        width;

      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=2.0/width*(QuantumScale*pixel - center);
      if (result <= -1.0)
        result=bias-range/2.0;
      else
        if (result >= 1.0)
          result=bias+range/2.0;
        else
          result=(MagickRealType) (range/MagickPI*asin((double) result)+bias);
      result*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      /*
        Arctan: parameters are slope, center, range, bias.
      */
      double
        bias,
        center,
        range,
        slope;

      slope=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double)
        result) + bias ) );
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}
/*
  FunctionImage() applies the channel function to every channel of the image;
  it is a convenience wrapper around FunctionImageChannel() with
  CompositeChannels.
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
  return(FunctionImageChannel(image,CompositeChannels,function,
    number_parameters,parameters,exception));
}
/*
  FunctionImageChannel() applies the channel function 'function' with the
  given parameters to the selected channels of 'image' in place.  When
  OpenCL acceleration succeeds the accelerated result is returned directly;
  otherwise the rows are processed (in parallel with OpenMP) via
  ApplyFunction().  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType FunctionImageChannel(Image *image,
  const ChannelType channel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
#define FunctionImageTag  "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* try the GPU path first; fall through to the CPU loop on failure */
  status=AccelerateFunctionImage(image,channel,function,number_parameters,
    parameters,exception);
  if (status != MagickFalse)
    return(status);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ApplyFunction(GetPixelRed(q),function,
          number_parameters,parameters,exception));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function,
          number_parameters,parameters,exception));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function,
          number_parameters,parameters,exception));
      if ((channel & OpacityChannel) != 0)
        {
          /* without a matte channel, operate on raw opacity; otherwise on
             the alpha interpretation */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function,
              number_parameters,parameters,exception));
          else
            SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function,
              number_parameters,parameters,exception));
        }
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),function,
          number_parameters,parameters,exception));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageChannelEntropy method is:
%
% MagickBooleanType GetImageChannelEntropy(const Image *image,
% const ChannelType channel,double *entropy,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageEntropy() returns the average entropy over all channels; it is a
  convenience wrapper around GetImageChannelEntropy() with CompositeChannels.
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  return(GetImageChannelEntropy(image,CompositeChannels,entropy,exception));
}
/*
  GetImageChannelEntropy() returns in '*entropy' the mean entropy of the
  channels selected by 'channel' (opacity only if the image has a matte
  channel; the black channel only for CMYK images).  Returns MagickFalse only
  if the statistics could not be computed.

  Fix: guard the final division — a channel mask that selects no applicable
  channel previously divided by zero, propagating Inf/NaN to the caller; the
  entropy is now reported as 0.0 in that case.
*/
MagickExport MagickBooleanType GetImageChannelEntropy(const Image *image,
  const ChannelType channel,double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  size_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].entropy=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[RedChannel].entropy;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[GreenChannel].entropy;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[BlueChannel].entropy;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[OpacityChannel].entropy;
      channels++;
    }
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].entropy+=
        channel_statistics[BlackChannel].entropy;
      channels++;
    }
  /* avoid divide-by-zero when no channel was selected */
  if (channels != 0)
    channel_statistics[CompositeChannels].entropy/=channels;
  *entropy=channel_statistics[CompositeChannels].entropy;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageChannelExtrema method is:
%
% MagickBooleanType GetImageChannelExtrema(const Image *image,
% const ChannelType channel,size_t *minima,size_t *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageExtrema() returns the extrema over all channels; it is a
  convenience wrapper around GetImageChannelExtrema() with CompositeChannels.
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  return(GetImageChannelExtrema(image,CompositeChannels,minima,maxima,
    exception));
}
/*
  GetImageChannelExtrema() returns the rounded minimum and maximum quantum
  values of the selected channels via GetImageChannelRange().
*/
MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image,
  const ChannelType channel,size_t *minima,size_t *maxima,
  ExceptionInfo *exception)
{
  double
    maximum,
    minimum;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageChannelRange(image,channel,&minimum,&maximum,exception);
  /* round outward-consistently: min up from -0.5, max down from +0.5 */
  *minima=(size_t) ceil(minimum-0.5);
  *maxima=(size_t) floor(maximum+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelKurtosis() returns the kurtosis and skewness of one or more
% image channels.
%
% The format of the GetImageChannelKurtosis method is:
%
% MagickBooleanType GetImageChannelKurtosis(const Image *image,
% const ChannelType channel,double *kurtosis,double *skewness,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageKurtosis() returns the kurtosis and skewness over all channels; it
  is a convenience wrapper around GetImageChannelKurtosis() with
  CompositeChannels.
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  return(GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness,
    exception));
}
/*
  GetImageChannelKurtosis() computes the excess kurtosis and skewness of the
  selected channels from the first four raw moments, accumulated over every
  pixel.  Returns MagickFalse if the pixel data could not be read; on a
  zero-variance image both outputs remain 0.0.

  Fix: the opacity-channel accumulation previously mixed GetPixelOpacity()
  and GetPixelAlpha() in the same products (e.g. sum_squares was
  opacity*alpha), while the mean used alpha — producing inconsistent moments
  and wrong kurtosis/skewness for the alpha channel.  All four moments now
  use the alpha value consistently, matching the mean.
*/
MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image,
  const ChannelType channel,double *kurtosis,double *skewness,
  ExceptionInfo *exception)
{
  double
    area,
    mean,
    standard_deviation,
    sum_squares,
    sum_cubes,
    sum_fourth_power;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *kurtosis=0.0;
  *skewness=0.0;
  area=0.0;
  mean=0.0;
  standard_deviation=0.0;
  sum_squares=0.0;
  sum_cubes=0.0;
  sum_fourth_power=0.0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          mean+=GetPixelRed(p);
          sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p);
          sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
          sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*
            GetPixelRed(p)*GetPixelRed(p);
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          mean+=GetPixelGreen(p);
          sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p);
          sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p);
          sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p)*GetPixelGreen(p);
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          mean+=GetPixelBlue(p);
          sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p);
          sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
          sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*
            GetPixelBlue(p)*GetPixelBlue(p);
          area++;
        }
      if ((channel & OpacityChannel) != 0)
        {
          double
            alpha;

          /* use the alpha interpretation consistently in all four moments */
          alpha=(double) GetPixelAlpha(p);
          mean+=alpha;
          sum_squares+=alpha*alpha;
          sum_cubes+=alpha*alpha*alpha;
          sum_fourth_power+=alpha*alpha*alpha*alpha;
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          double
            index;

          index=(double) GetPixelIndex(indexes+x);
          mean+=index;
          sum_squares+=index*index;
          sum_cubes+=index*index*index;
          sum_fourth_power+=index*index*index*index;
          area++;
        }
      p++;
    }
  }
  if (y < (ssize_t) image->rows)
    return(MagickFalse);
  if (area != 0.0)
    {
      mean/=area;
      sum_squares/=area;
      sum_cubes/=area;
      sum_fourth_power/=area;
    }
  standard_deviation=sqrt(sum_squares-(mean*mean));
  if (standard_deviation != 0.0)
    {
      /* excess kurtosis and skewness from raw moments about the mean */
      *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares-
        3.0*mean*mean*mean*mean;
      *kurtosis/=standard_deviation*standard_deviation*standard_deviation*
        standard_deviation;
      *kurtosis-=3.0;
      *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
      *skewness/=standard_deviation*standard_deviation*standard_deviation;
    }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMean() returns the mean and standard deviation of one or more
% image channels.
%
% The format of the GetImageChannelMean method is:
%
% MagickBooleanType GetImageChannelMean(const Image *image,
% const ChannelType channel,double *mean,double *standard_deviation,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageMean() returns the mean and standard deviation over all channels;
  it is a convenience wrapper around GetImageChannelMean() with
  CompositeChannels.
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  return(GetImageChannelMean(image,CompositeChannels,mean,standard_deviation,
    exception));
}
MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
  const ChannelType channel,double *mean,double *standard_deviation,
  ExceptionInfo *exception)
{
  /*
    Return the mean and standard deviation of the selected channels,
    averaged over the number of channels requested.
  */
  ChannelStatistics
    *channel_statistics;

  size_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].mean=0.0;
  channel_statistics[CompositeChannels].standard_deviation=0.0;
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[RedChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[RedChannel].standard_deviation;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[GreenChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[GreenChannel].standard_deviation;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlueChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlueChannel].standard_deviation;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      /*
        Opacity is reported as alpha (QuantumRange-opacity).
      */
      channel_statistics[CompositeChannels].mean+=
        (QuantumRange-channel_statistics[OpacityChannel].mean);
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[OpacityChannel].standard_deviation;
      channels++;
    }
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      /*
        Bug fix: accumulate the black-channel standard deviation here; the
        previous code added the composite accumulator to itself, doubling it.
      */
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlackChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlackChannel].standard_deviation;
      channels++;
    }
  if (channels != 0)
    {
      /*
        Guard against division by zero when the channel mask selects nothing.
      */
      channel_statistics[CompositeChannels].mean/=channels;
      channel_statistics[CompositeChannels].standard_deviation/=channels;
    }
  *mean=channel_statistics[CompositeChannels].mean;
  *standard_deviation=channel_statistics[CompositeChannels].standard_deviation;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageChannelMoments method is:
%
% ChannelMoments *GetImageChannelMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelMoments *GetImageChannelMoments(const Image *image,
  ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8

  /*
    Compute per-channel raw moments, centroid, ellipse parameters, and the
    eight Hu invariant moments.  Mpq is the raw moment of order (p+q) about
    the channel centroid (for p+q >= 2).
  */
  ChannelMoments
    *channel_moments;

  double
    M00[CompositeChannels+1],
    M01[CompositeChannels+1],
    M02[CompositeChannels+1],
    M03[CompositeChannels+1],
    M10[CompositeChannels+1],
    M11[CompositeChannels+1],
    M12[CompositeChannels+1],
    M20[CompositeChannels+1],
    M21[CompositeChannels+1],
    M22[CompositeChannels+1],
    M30[CompositeChannels+1];

  MagickPixelPacket
    pixel;

  PointInfo
    centroid[CompositeChannels+1];

  ssize_t
    channel,
    channels,
    y;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  length=CompositeChannels+1UL;
  channel_moments=(ChannelMoments *) AcquireQuantumMemory(length,
    sizeof(*channel_moments));
  if (channel_moments == (ChannelMoments *) NULL)
    return(channel_moments);
  (void) memset(channel_moments,0,length*sizeof(*channel_moments));
  (void) memset(centroid,0,sizeof(centroid));
  (void) memset(M00,0,sizeof(M00));
  (void) memset(M01,0,sizeof(M01));
  (void) memset(M02,0,sizeof(M02));
  (void) memset(M03,0,sizeof(M03));
  (void) memset(M10,0,sizeof(M10));
  (void) memset(M11,0,sizeof(M11));
  (void) memset(M12,0,sizeof(M12));
  (void) memset(M20,0,sizeof(M20));
  (void) memset(M21,0,sizeof(M21));
  (void) memset(M22,0,sizeof(M22));
  (void) memset(M30,0,sizeof(M30));
  GetMagickPixelPacket(image,&pixel);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute center of mass (centroid).
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      M00[RedChannel]+=QuantumScale*pixel.red;
      M10[RedChannel]+=x*QuantumScale*pixel.red;
      M01[RedChannel]+=y*QuantumScale*pixel.red;
      M00[GreenChannel]+=QuantumScale*pixel.green;
      M10[GreenChannel]+=x*QuantumScale*pixel.green;
      M01[GreenChannel]+=y*QuantumScale*pixel.green;
      M00[BlueChannel]+=QuantumScale*pixel.blue;
      M10[BlueChannel]+=x*QuantumScale*pixel.blue;
      M01[BlueChannel]+=y*QuantumScale*pixel.blue;
      if (image->matte != MagickFalse)
        {
          M00[OpacityChannel]+=QuantumScale*pixel.opacity;
          M10[OpacityChannel]+=x*QuantumScale*pixel.opacity;
          M01[OpacityChannel]+=y*QuantumScale*pixel.opacity;
        }
      if (image->colorspace == CMYKColorspace)
        {
          M00[IndexChannel]+=QuantumScale*pixel.index;
          M10[IndexChannel]+=x*QuantumScale*pixel.index;
          M01[IndexChannel]+=y*QuantumScale*pixel.index;
        }
      p++;
    }
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute center of mass (centroid); a near-zero mass defaults the
      centroid to the image center.
    */
    if (M00[channel] < MagickEpsilon)
      {
        M00[channel]+=MagickEpsilon;
        centroid[channel].x=(double) image->columns/2.0;
        centroid[channel].y=(double) image->rows/2.0;
        continue;
      }
    M00[channel]+=MagickEpsilon;
    centroid[channel].x=M10[channel]/M00[channel];
    centroid[channel].y=M01[channel]/M00[channel];
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute the image moments.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      M11[RedChannel]+=(x-centroid[RedChannel].x)*(y-
        centroid[RedChannel].y)*QuantumScale*pixel.red;
      M20[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*QuantumScale*pixel.red;
      M02[RedChannel]+=(y-centroid[RedChannel].y)*(y-
        centroid[RedChannel].y)*QuantumScale*pixel.red;
      M21[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*(y-centroid[RedChannel].y)*QuantumScale*
        pixel.red;
      M12[RedChannel]+=(x-centroid[RedChannel].x)*(y-
        centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
        pixel.red;
      M22[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*(y-centroid[RedChannel].y)*(y-
        centroid[RedChannel].y)*QuantumScale*pixel.red;
      M30[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*(x-centroid[RedChannel].x)*QuantumScale*
        pixel.red;
      M03[RedChannel]+=(y-centroid[RedChannel].y)*(y-
        centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
        pixel.red;
      M11[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
        centroid[GreenChannel].y)*QuantumScale*pixel.green;
      M20[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*QuantumScale*pixel.green;
      M02[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
        centroid[GreenChannel].y)*QuantumScale*pixel.green;
      M21[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*QuantumScale*
        pixel.green;
      M12[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
        centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
        pixel.green;
      M22[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*(y-
        centroid[GreenChannel].y)*QuantumScale*pixel.green;
      M30[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*(x-centroid[GreenChannel].x)*QuantumScale*
        pixel.green;
      M03[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
        centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
        pixel.green;
      M11[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
        centroid[BlueChannel].y)*QuantumScale*pixel.blue;
      M20[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*QuantumScale*pixel.blue;
      M02[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
        centroid[BlueChannel].y)*QuantumScale*pixel.blue;
      M21[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*QuantumScale*
        pixel.blue;
      M12[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
        centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
        pixel.blue;
      M22[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*(y-
        centroid[BlueChannel].y)*QuantumScale*pixel.blue;
      M30[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*(x-centroid[BlueChannel].x)*QuantumScale*
        pixel.blue;
      M03[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
        centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
        pixel.blue;
      if (image->matte != MagickFalse)
        {
          M11[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
            centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
          M20[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*QuantumScale*pixel.opacity;
          M02[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
            centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
          M21[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*
            QuantumScale*pixel.opacity;
          M12[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
            centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
            QuantumScale*pixel.opacity;
          M22[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*(y-
            centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
          M30[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*(x-centroid[OpacityChannel].x)*
            QuantumScale*pixel.opacity;
          M03[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
            centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
            QuantumScale*pixel.opacity;
        }
      if (image->colorspace == CMYKColorspace)
        {
          M11[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
            centroid[IndexChannel].y)*QuantumScale*pixel.index;
          M20[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*QuantumScale*pixel.index;
          M02[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
            centroid[IndexChannel].y)*QuantumScale*pixel.index;
          M21[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*
            QuantumScale*pixel.index;
          M12[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
            centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
            QuantumScale*pixel.index;
          M22[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*(y-
            centroid[IndexChannel].y)*QuantumScale*pixel.index;
          M30[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*(x-centroid[IndexChannel].x)*
            QuantumScale*pixel.index;
          M03[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
            centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
            QuantumScale*pixel.index;
        }
      p++;
    }
  }
  /*
    Fold the per-channel moments into the composite channel (averaged over
    the number of active channels).
  */
  channels=3;
  M00[CompositeChannels]+=(M00[RedChannel]+M00[GreenChannel]+M00[BlueChannel]);
  M01[CompositeChannels]+=(M01[RedChannel]+M01[GreenChannel]+M01[BlueChannel]);
  M02[CompositeChannels]+=(M02[RedChannel]+M02[GreenChannel]+M02[BlueChannel]);
  M03[CompositeChannels]+=(M03[RedChannel]+M03[GreenChannel]+M03[BlueChannel]);
  M10[CompositeChannels]+=(M10[RedChannel]+M10[GreenChannel]+M10[BlueChannel]);
  M11[CompositeChannels]+=(M11[RedChannel]+M11[GreenChannel]+M11[BlueChannel]);
  M12[CompositeChannels]+=(M12[RedChannel]+M12[GreenChannel]+M12[BlueChannel]);
  M20[CompositeChannels]+=(M20[RedChannel]+M20[GreenChannel]+M20[BlueChannel]);
  M21[CompositeChannels]+=(M21[RedChannel]+M21[GreenChannel]+M21[BlueChannel]);
  M22[CompositeChannels]+=(M22[RedChannel]+M22[GreenChannel]+M22[BlueChannel]);
  M30[CompositeChannels]+=(M30[RedChannel]+M30[GreenChannel]+M30[BlueChannel]);
  if (image->matte != MagickFalse)
    {
      channels+=1;
      M00[CompositeChannels]+=M00[OpacityChannel];
      M01[CompositeChannels]+=M01[OpacityChannel];
      M02[CompositeChannels]+=M02[OpacityChannel];
      M03[CompositeChannels]+=M03[OpacityChannel];
      M10[CompositeChannels]+=M10[OpacityChannel];
      M11[CompositeChannels]+=M11[OpacityChannel];
      M12[CompositeChannels]+=M12[OpacityChannel];
      M20[CompositeChannels]+=M20[OpacityChannel];
      M21[CompositeChannels]+=M21[OpacityChannel];
      M22[CompositeChannels]+=M22[OpacityChannel];
      M30[CompositeChannels]+=M30[OpacityChannel];
    }
  if (image->colorspace == CMYKColorspace)
    {
      channels+=1;
      M00[CompositeChannels]+=M00[IndexChannel];
      M01[CompositeChannels]+=M01[IndexChannel];
      M02[CompositeChannels]+=M02[IndexChannel];
      M03[CompositeChannels]+=M03[IndexChannel];
      M10[CompositeChannels]+=M10[IndexChannel];
      M11[CompositeChannels]+=M11[IndexChannel];
      M12[CompositeChannels]+=M12[IndexChannel];
      M20[CompositeChannels]+=M20[IndexChannel];
      M21[CompositeChannels]+=M21[IndexChannel];
      M22[CompositeChannels]+=M22[IndexChannel];
      M30[CompositeChannels]+=M30[IndexChannel];
    }
  M00[CompositeChannels]/=(double) channels;
  M01[CompositeChannels]/=(double) channels;
  M02[CompositeChannels]/=(double) channels;
  M03[CompositeChannels]/=(double) channels;
  M10[CompositeChannels]/=(double) channels;
  M11[CompositeChannels]/=(double) channels;
  M12[CompositeChannels]/=(double) channels;
  M20[CompositeChannels]/=(double) channels;
  M21[CompositeChannels]/=(double) channels;
  M22[CompositeChannels]/=(double) channels;
  M30[CompositeChannels]/=(double) channels;
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute elliptical angle, major and minor axes, eccentricity, &
      intensity.
    */
    channel_moments[channel].centroid=centroid[channel];
    channel_moments[channel].ellipse_axis.x=sqrt((2.0*
      PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])+
      sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_axis.y=sqrt((2.0*
      PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])-
      sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_angle=RadiansToDegrees(1.0/2.0*atan(2.0*
      M11[channel]*PerceptibleReciprocal(M20[channel]-M02[channel])));
    /*
      Bug fix: the quadrant adjustment below compared fabs() results against
      0.0 (never < 0.0, always >= 0.0), so the first branch was dead code and
      the magnitude tests were vacuous; compare against MagickEpsilon instead.
    */
    if (fabs(M11[channel]) < MagickEpsilon)
      {
        if ((fabs(M20[channel]-M02[channel]) >= MagickEpsilon) &&
            ((M20[channel]-M02[channel]) < 0.0))
          channel_moments[channel].ellipse_angle+=90.0;
      }
    else
      if (M11[channel] < 0.0)
        {
          if (fabs(M20[channel]-M02[channel]) >= MagickEpsilon)
            {
              if ((M20[channel]-M02[channel]) < 0.0)
                channel_moments[channel].ellipse_angle+=90.0;
              else
                channel_moments[channel].ellipse_angle+=180.0;
            }
        }
      else
        if ((fabs(M20[channel]-M02[channel]) >= MagickEpsilon) &&
            ((M20[channel]-M02[channel]) < 0.0))
          channel_moments[channel].ellipse_angle+=90.0;
    channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
      channel_moments[channel].ellipse_axis.y*
      channel_moments[channel].ellipse_axis.y*PerceptibleReciprocal(
      channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.x)));
    channel_moments[channel].ellipse_intensity=M00[channel]/
      (MagickPI*channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.y+MagickEpsilon);
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Normalize image moments: eta(p,q) = mu(p,q)/mu(0,0)^(1+(p+q)/2).
    */
    M10[channel]=0.0;
    M01[channel]=0.0;
    M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
    M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
    M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
    M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
    M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
    M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
    M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
    M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
    M00[channel]=1.0;
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute Hu invariant moments.
    */
    channel_moments[channel].I[0]=M20[channel]+M02[channel];
    channel_moments[channel].I[1]=(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
    channel_moments[channel].I[2]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
      (3.0*M21[channel]-M03[channel]);
    channel_moments[channel].I[3]=(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]);
    channel_moments[channel].I[4]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].I[5]=(M20[channel]-M02[channel])*
      ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
      (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
      4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
    channel_moments[channel].I[6]=(3.0*M21[channel]-M03[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].I[7]=M11[channel]*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
      (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
      (M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
  }
  /*
    If a pixel row could not be read, return NULL instead of partial results.
  */
  if (y < (ssize_t) image->rows)
    channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
  return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l P e r c e p t u a l H a s h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelPerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImageChannelPerceptualHash method is:
%
% ChannelPerceptualHash *GetImageChannelPerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MagickLog10() returns log10 of the magnitude of x, clamping magnitudes
  below Log10Epsilon to Log10Epsilon so the result is always finite.
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon  (1.0e-11)

  double
    magnitude;

  magnitude=fabs(x);
  if (magnitude < Log10Epsilon)
    magnitude=Log10Epsilon;
  return(log10(magnitude));
}
MagickExport ChannelPerceptualHash *GetImageChannelPerceptualHash(
  const Image *image,ExceptionInfo *exception)
{
  /*
    Build the perceptual hash: negated log10 of the Hu invariant moments of
    a lightly blurred copy of the image, in both sRGB and HCLp colorspaces.
    Returns NULL on any failure.
  */
  ChannelMoments
    *moments;

  ChannelPerceptualHash
    *perceptual_hash;

  Image
    *hash_image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    channel;

  /*
    Blur then transform to sRGB colorspace.
  */
  hash_image=BlurImage(image,0.0,1.0,exception);
  if (hash_image == (Image *) NULL)
    return((ChannelPerceptualHash *) NULL);
  hash_image->depth=8;
  status=TransformImageColorspace(hash_image,sRGBColorspace);
  if (status == MagickFalse)
    {
      /*
        Bug fix: hash_image was leaked on colorspace-transform failure.
      */
      hash_image=DestroyImage(hash_image);
      return((ChannelPerceptualHash *) NULL);
    }
  moments=GetImageChannelMoments(hash_image,exception);
  hash_image=DestroyImage(hash_image);
  if (moments == (ChannelMoments *) NULL)
    return((ChannelPerceptualHash *) NULL);
  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    CompositeChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    {
      /*
        Bug fix: moments was leaked on allocation failure.
      */
      moments=(ChannelMoments *) RelinquishMagickMemory(moments);
      return((ChannelPerceptualHash *) NULL);
    }
  for (channel=0; channel <= CompositeChannels; channel++)
    for (i=0; i < MaximumNumberOfImageMoments; i++)
      perceptual_hash[channel].P[i]=(-MagickLog10(moments[channel].I[i]));
  moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  /*
    Blur then transform to HCLp colorspace.
  */
  hash_image=BlurImage(image,0.0,1.0,exception);
  if (hash_image == (Image *) NULL)
    {
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  hash_image->depth=8;
  status=TransformImageColorspace(hash_image,HCLpColorspace);
  if (status == MagickFalse)
    {
      /*
        Bug fix: hash_image was leaked on colorspace-transform failure.
      */
      hash_image=DestroyImage(hash_image);
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  moments=GetImageChannelMoments(hash_image,exception);
  hash_image=DestroyImage(hash_image);
  if (moments == (ChannelMoments *) NULL)
    {
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  for (channel=0; channel <= CompositeChannels; channel++)
    for (i=0; i < MaximumNumberOfImageMoments; i++)
      perceptual_hash[channel].Q[i]=(-MagickLog10(moments[channel].I[i]));
  moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelRange() returns the range of one or more image channels.
%
% The format of the GetImageChannelRange method is:
%
% MagickBooleanType GetImageChannelRange(const Image *image,
% const ChannelType channel,double *minima,double *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,
  double *minima,double *maxima,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: range taken over the composite (all) channels.
  */
  status=GetImageChannelRange(image,CompositeChannels,minima,maxima,exception);
  return(status);
}
MagickExport MagickBooleanType GetImageChannelRange(const Image *image,
  const ChannelType channel,double *minima,double *maxima,
  ExceptionInfo *exception)
{
  /*
    Scan every pixel and record the smallest and largest value among the
    selected channels.  Returns MagickTrue only if every row was read.
  */
  MagickPixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Seed the range with sentinels so any pixel value narrows it.
  */
  *minima=MagickMaximumValue;
  *maxima=(-MagickMaximumValue);
  GetMagickPixelPacket(image,&pixel);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        {
          if ((double) pixel.red < *minima)
            *minima=(double) pixel.red;
          if ((double) pixel.red > *maxima)
            *maxima=(double) pixel.red;
        }
      if ((channel & GreenChannel) != 0)
        {
          if ((double) pixel.green < *minima)
            *minima=(double) pixel.green;
          if ((double) pixel.green > *maxima)
            *maxima=(double) pixel.green;
        }
      if ((channel & BlueChannel) != 0)
        {
          if ((double) pixel.blue < *minima)
            *minima=(double) pixel.blue;
          if ((double) pixel.blue > *maxima)
            *maxima=(double) pixel.blue;
        }
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        {
          /*
            Opacity participates as alpha (QuantumRange-opacity).
          */
          if ((double) (QuantumRange-pixel.opacity) < *minima)
            *minima=(double) (QuantumRange-pixel.opacity);
          if ((double) (QuantumRange-pixel.opacity) > *maxima)
            *maxima=(double) (QuantumRange-pixel.opacity);
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((double) pixel.index < *minima)
            *minima=(double) pixel.index;
          if ((double) pixel.index > *maxima)
            *maxima=(double) pixel.index;
        }
      p++;
    }
  }
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelStatistics() returns statistics for each channel in the
% image. The statistics include the channel depth, its minima, maxima, mean,
% standard deviation, kurtosis and skewness. You can access the red channel
% mean, for example, like this:
%
% channel_statistics=GetImageChannelStatistics(image,exception);
% red_mean=channel_statistics[RedChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageChannelStatistics method is:
%
% ChannelStatistics *GetImageChannelStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
double
area,
standard_deviation;
MagickPixelPacket
number_bins,
*histogram;
QuantumAny
range;
register ssize_t
i;
size_t
channels,
depth,
length;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
length=CompositeChannels+1UL;
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length,
sizeof(*channel_statistics));
histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1U,
sizeof(*histogram));
if ((channel_statistics == (ChannelStatistics *) NULL) ||
(histogram == (MagickPixelPacket *) NULL))
{
if (histogram != (MagickPixelPacket *) NULL)
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
if (channel_statistics != (ChannelStatistics *) NULL)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
(void) memset(channel_statistics,0,length*
sizeof(*channel_statistics));
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-MagickMaximumValue);
channel_statistics[i].minima=MagickMaximumValue;
}
(void) memset(histogram,0,(MaxMap+1U)*sizeof(*histogram));
(void) memset(&number_bins,0,sizeof(number_bins));
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
/*
Compute pixel statistics.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; )
{
if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[RedChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
{
channel_statistics[RedChannel].depth++;
continue;
}
}
if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[GreenChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
{
channel_statistics[GreenChannel].depth++;
continue;
}
}
if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlueChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
{
channel_statistics[BlueChannel].depth++;
continue;
}
}
if (image->matte != MagickFalse)
{
if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[OpacityChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelAlpha(p),range) == MagickFalse)
{
channel_statistics[OpacityChannel].depth++;
continue;
}
}
}
if (image->colorspace == CMYKColorspace)
{
if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlackChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
{
channel_statistics[BlackChannel].depth++;
continue;
}
}
}
if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima)
channel_statistics[RedChannel].minima=(double) GetPixelRed(p);
if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima)
channel_statistics[RedChannel].maxima=(double) GetPixelRed(p);
channel_statistics[RedChannel].sum+=GetPixelRed(p);
channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)*
GetPixelRed(p);
channel_statistics[RedChannel].sum_cubed+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
channel_statistics[RedChannel].sum_fourth_power+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima)
channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p);
if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima)
channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p);
channel_statistics[GreenChannel].sum+=GetPixelGreen(p);
channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)*
GetPixelGreen(p);
channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)*
GetPixelGreen(p)*GetPixelGreen(p);
channel_statistics[GreenChannel].sum_fourth_power+=(double)
GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p);
if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima)
channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p);
if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima)
channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p);
channel_statistics[BlueChannel].sum+=GetPixelBlue(p);
channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)*
GetPixelBlue(p);
channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)*
GetPixelBlue(p)*GetPixelBlue(p);
channel_statistics[BlueChannel].sum_fourth_power+=(double)
GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
histogram[ScaleQuantumToMap(GetPixelRed(p))].red++;
histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++;
histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++;
if (image->matte != MagickFalse)
{
if ((double) GetPixelAlpha(p) < channel_statistics[OpacityChannel].minima)
channel_statistics[OpacityChannel].minima=(double) GetPixelAlpha(p);
if ((double) GetPixelAlpha(p) > channel_statistics[OpacityChannel].maxima)
channel_statistics[OpacityChannel].maxima=(double) GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum+=GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum_squared+=(double)
GetPixelAlpha(p)*GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum_cubed+=(double)
GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p);
channel_statistics[OpacityChannel].sum_fourth_power+=(double)
GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p)*GetPixelAlpha(p);
histogram[ScaleQuantumToMap(GetPixelAlpha(p))].opacity++;
}
if (image->colorspace == CMYKColorspace)
{
if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima)
channel_statistics[BlackChannel].minima=(double)
GetPixelIndex(indexes+x);
if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima)
channel_statistics[BlackChannel].maxima=(double)
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_squared+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_cubed+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_fourth_power+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++;
}
x++;
p++;
}
}
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
double
area,
mean,
standard_deviation;
/*
Normalize pixel statistics.
*/
area=PerceptibleReciprocal((double) image->columns*image->rows);
mean=channel_statistics[i].sum*area;
channel_statistics[i].sum=mean;
channel_statistics[i].sum_squared*=area;
channel_statistics[i].sum_cubed*=area;
channel_statistics[i].sum_fourth_power*=area;
channel_statistics[i].mean=mean;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
standard_deviation=sqrt(channel_statistics[i].variance-(mean*mean));
area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)*
((double) image->columns*image->rows);
standard_deviation=sqrt(area*standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
}
for (i=0; i < (ssize_t) (MaxMap+1U); i++)
{
if (histogram[i].red > 0.0)
number_bins.red++;
if (histogram[i].green > 0.0)
number_bins.green++;
if (histogram[i].blue > 0.0)
number_bins.blue++;
if ((image->matte != MagickFalse) && (histogram[i].opacity > 0.0))
number_bins.opacity++;
if ((image->colorspace == CMYKColorspace) && (histogram[i].index > 0.0))
number_bins.index++;
}
area=PerceptibleReciprocal((double) image->columns*image->rows);
for (i=0; i < (ssize_t) (MaxMap+1U); i++)
{
/*
Compute pixel entropy.
*/
histogram[i].red*=area;
channel_statistics[RedChannel].entropy+=-histogram[i].red*
MagickLog10(histogram[i].red)*
PerceptibleReciprocal(MagickLog10((double) number_bins.red));
histogram[i].green*=area;
channel_statistics[GreenChannel].entropy+=-histogram[i].green*
MagickLog10(histogram[i].green)*
PerceptibleReciprocal(MagickLog10((double) number_bins.green));
histogram[i].blue*=area;
channel_statistics[BlueChannel].entropy+=-histogram[i].blue*
MagickLog10(histogram[i].blue)*
PerceptibleReciprocal(MagickLog10((double) number_bins.blue));
if (image->matte != MagickFalse)
{
histogram[i].opacity*=area;
channel_statistics[OpacityChannel].entropy+=-histogram[i].opacity*
MagickLog10(histogram[i].opacity)*
PerceptibleReciprocal(MagickLog10((double) number_bins.opacity));
}
if (image->colorspace == CMYKColorspace)
{
histogram[i].index*=area;
channel_statistics[IndexChannel].entropy+=-histogram[i].index*
MagickLog10(histogram[i].index)*
PerceptibleReciprocal(MagickLog10((double) number_bins.index));
}
}
/*
Compute overall statistics.
*/
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double)
channel_statistics[CompositeChannels].depth,(double)
channel_statistics[i].depth);
channel_statistics[CompositeChannels].minima=MagickMin(
channel_statistics[CompositeChannels].minima,
channel_statistics[i].minima);
channel_statistics[CompositeChannels].maxima=EvaluateMax(
channel_statistics[CompositeChannels].maxima,
channel_statistics[i].maxima);
channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum;
channel_statistics[CompositeChannels].sum_squared+=
channel_statistics[i].sum_squared;
channel_statistics[CompositeChannels].sum_cubed+=
channel_statistics[i].sum_cubed;
channel_statistics[CompositeChannels].sum_fourth_power+=
channel_statistics[i].sum_fourth_power;
channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean;
channel_statistics[CompositeChannels].variance+=
channel_statistics[i].variance-channel_statistics[i].mean*
channel_statistics[i].mean;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
area=PerceptibleReciprocal((double) image->columns*image->rows-1.0)*
((double) image->columns*image->rows);
standard_deviation=sqrt(area*standard_deviation*standard_deviation);
channel_statistics[CompositeChannels].standard_deviation=standard_deviation;
channel_statistics[CompositeChannels].entropy+=
channel_statistics[i].entropy;
}
channels=3;
if (image->matte != MagickFalse)
channels++;
if (image->colorspace == CMYKColorspace)
channels++;
channel_statistics[CompositeChannels].sum/=channels;
channel_statistics[CompositeChannels].sum_squared/=channels;
channel_statistics[CompositeChannels].sum_cubed/=channels;
channel_statistics[CompositeChannels].sum_fourth_power/=channels;
channel_statistics[CompositeChannels].mean/=channels;
channel_statistics[CompositeChannels].kurtosis/=channels;
channel_statistics[CompositeChannels].skewness/=channels;
channel_statistics[CompositeChannels].entropy/=channels;
i=CompositeChannels;
area=PerceptibleReciprocal((double) channels*image->columns*image->rows);
channel_statistics[i].variance=channel_statistics[i].sum_squared;
channel_statistics[i].mean=channel_statistics[i].sum;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
standard_deviation=sqrt(PerceptibleReciprocal((double) channels*
image->columns*image->rows-1.0)*channels*image->columns*image->rows*
standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
/*
Compute kurtosis & skewness statistics.
*/
standard_deviation=PerceptibleReciprocal(
channel_statistics[i].standard_deviation);
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
channel_statistics[i].mean*1.0*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation*standard_deviation)-3.0;
}
channel_statistics[CompositeChannels].mean=0.0;
channel_statistics[CompositeChannels].standard_deviation=0.0;
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[CompositeChannels].mean+=
channel_statistics[i].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[i].standard_deviation;
}
channel_statistics[CompositeChannels].mean/=(double) channels;
channel_statistics[CompositeChannels].standard_deviation/=(double) channels;
histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
if (y < (ssize_t) image->rows)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
% Image *PolynomialImageChannel(const Image *images,
% const size_t number_terms,const ChannelType channel,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o channel: the channel.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PolynomialImage() is a convenience front-end for PolynomialImageChannel()
  that evaluates the polynomial over the default channel set.
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
  return(PolynomialImageChannel(images,DefaultChannels,number_terms,terms,
    exception));
}
/*
  PolynomialImageChannel() returns an image in which each selected channel is
  the sum, over the image sequence, of coefficient*(v/QuantumRange)^degree,
  where the (coefficient,degree) pairs are taken consecutively from terms[].

  Fixes: the shared 'progress' counter is now updated inside an OpenMP
  critical section (it was previously incremented by all threads without
  synchronization), and the per-image term is hoisted out of the pixel loop.
*/
MagickExport Image *PolynomialImageChannel(const Image *images,
  const ChannelType channel,const size_t number_terms,const double *terms,
  ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"
  CacheView
    *polynomial_view;
  Image
    *image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickPixelPacket
    **magick_restrict polynomial_pixels,
    zero;
  ssize_t
    y;
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  /*
    One scratch row of accumulators per worker thread.
  */
  polynomial_pixels=AcquirePixelThreadSet(images);
  if (polynomial_pixels == (MagickPixelPacket **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(images,&zero);
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;
    const Image
      *next;
    const int
      id = GetOpenMPThreadId();
    register IndexPacket
      *magick_restrict polynomial_indexes;
    register MagickPixelPacket
      *polynomial_pixel;
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      i,
      x;
    size_t
      number_images;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    polynomial_indexes=GetCacheViewAuthenticIndexQueue(polynomial_view);
    polynomial_pixel=polynomial_pixels[id];
    for (x=0; x < (ssize_t) image->columns; x++)
      polynomial_pixel[x]=zero;
    next=images;
    number_images=GetImageListLength(images);
    for (i=0; i < (ssize_t) number_images; i++)
    {
      double
        coefficient,
        degree;
      register const IndexPacket
        *indexes;
      register const PixelPacket
        *p;
      /*
        Ignore trailing images that have no corresponding term.
      */
      if (i >= (ssize_t) number_terms)
        break;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      /*
        The (coefficient,degree) pair is constant for this image; hoist it
        out of the pixel loop.
      */
      coefficient=terms[i << 1];
      degree=terms[(i << 1)+1];
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          polynomial_pixel[x].red+=coefficient*pow(QuantumScale*p->red,degree);
        if ((channel & GreenChannel) != 0)
          polynomial_pixel[x].green+=coefficient*pow(QuantumScale*p->green,
            degree);
        if ((channel & BlueChannel) != 0)
          polynomial_pixel[x].blue+=coefficient*pow(QuantumScale*p->blue,
            degree);
        if ((channel & OpacityChannel) != 0)
          polynomial_pixel[x].opacity+=coefficient*pow(QuantumScale*
            (QuantumRange-p->opacity),degree);
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          polynomial_pixel[x].index+=coefficient*pow(QuantumScale*indexes[x],
            degree);
        p++;
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].red));
      SetPixelGreen(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].green));
      SetPixelBlue(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].blue));
      if (image->matte == MagickFalse)
        SetPixelOpacity(q,ClampToQuantum(QuantumRange-QuantumRange*
          polynomial_pixel[x].opacity));
      else
        SetPixelAlpha(q,ClampToQuantum(QuantumRange-QuantumRange*
          polynomial_pixel[x].opacity));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(polynomial_indexes+x,ClampToQuantum(QuantumRange*
          polynomial_pixel[x].index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /*
          'progress' is shared by the OpenMP team; serialize the update.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PolynomialImageChannel)
#endif
        proceed=SetImageProgress(images,PolynomialImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
% Image *StatisticImageChannel(const Image *image,
% const ChannelType channel,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Each neighborhood channel is tracked in a skip-list keyed by 16-bit pixel
  intensity (0..65535); node 65536 serves as the sentinel/header node.
*/
#define ListChannels 5
typedef struct _ListNode
{
  /* next[level]: forward links, one per skip-list level (max 9 levels);
     count: number of neighborhood pixels at this intensity;
     signature: generation stamp -- a node is "live" only when this matches
     the owning PixelList's signature (see ResetPixelList()). */
  size_t
    next[9],
    count,
    signature;
} ListNode;
typedef struct _SkipList
{
  /* Highest skip-list level currently in use (0..8). */
  ssize_t
    level;
  /* 65537 nodes: one per 16-bit intensity plus the sentinel at 65536. */
  ListNode
    *nodes;
} SkipList;
typedef struct _PixelList
{
  /* length: pixels per neighborhood (width*height);
     seed: pseudo-random state for skip-list level selection;
     signature: current generation stamp, bumped by ResetPixelList(). */
  size_t
    length,
    seed,
    signature;
  /* One skip-list per channel: red, green, blue, opacity, index. */
  SkipList
    lists[ListChannels];
} PixelList;
/*
  Release a pixel list along with the per-channel skip-list node arrays it
  owns.  Accepts NULL; always returns NULL for the caller to assign back.
*/
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  ssize_t
    channel;

  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  for (channel=0; channel < ListChannels; channel++)
  {
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      continue;
    pixel_list->lists[channel].nodes=(ListNode *) RelinquishAlignedMemory(
      pixel_list->lists[channel].nodes);
  }
  return((PixelList *) RelinquishMagickMemory(pixel_list));
}
/*
  Release the per-thread array of pixel lists created by
  AcquirePixelListThreadSet(); returns NULL.
*/
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  ssize_t
    n;

  assert(pixel_list != (PixelList **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (pixel_list[n] != (PixelList *) NULL)
      pixel_list[n]=DestroyPixelList(pixel_list[n]);
  }
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
register ssize_t
i;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) memset((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
for (i=0; i < ListChannels; i++)
{
pixel_list->lists[i].nodes=(ListNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->lists[i].nodes));
if (pixel_list->lists[i].nodes == (ListNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) memset(pixel_list->lists[i].nodes,0,65537UL*
sizeof(*pixel_list->lists[i].nodes));
}
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
/*
  Insert a new node for intensity 'color' (0..65535) into the skip-list of
  the given channel.  Node 65536 is the sentinel header from which every
  search starts.  The caller (InsertPixelList) has already verified that no
  live node exists for this color in the current generation.
*/
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
  const size_t color)
{
  register SkipList
    *list;
  register ssize_t
    level;
  size_t
    search,
    update[9];
  /*
    Initialize the node: stamp it with the list's current generation
    signature and give it a count of one pixel.
  */
  list=pixel_list->lists+channel;
  list->nodes[color].signature=pixel_list->signature;
  list->nodes[color].count=1;
  /*
    Determine where it belongs in the list: descend from the highest level,
    recording at each level the last node whose key is below 'color'.
  */
  search=65536UL;
  for (level=list->level; level >= 0; level--)
  {
    while (list->nodes[search].next[level] < color)
      search=list->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node (LCG step; each additional
    level requires the two tested bits to both be set, so levels grow with
    roughly 1/4 probability per step).
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  /* Cap the level at the maximum (8) and at most two above the current. */
  if (level > 8)
    level=8;
  if (level > (list->level+2))
    level=list->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > list->level)
  {
    list->level++;
    update[list->level]=65536UL;
  }
  /*
    Link the node into the skip-list, splicing it after its predecessor at
    every level from 'level' down to 0.
  */
  do
  {
    list->nodes[color].next[level]=list->nodes[update[level]].next[level];
    list->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
/*
  Store in 'pixel' the maximum intensity of each channel over the current
  neighborhood pixel list.
*/
static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  ssize_t
    channel;

  unsigned short
    channels[ListChannels];

  for (channel=0; channel < 5; channel++)
  {
    SkipList
      *list;

    size_t
      color,
      maximum;

    ssize_t
      tally;

    list=pixel_list->lists+channel;
    color=65536L;
    tally=0;
    maximum=list->nodes[color].next[0];
    for ( ; ; )
    {
      color=list->nodes[color].next[0];
      if (color > maximum)
        maximum=color;
      tally+=list->nodes[color].count;
      if (tally >= (ssize_t) pixel_list->length)
        break;
    }
    channels[channel]=(unsigned short) maximum;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Store in 'pixel' the arithmetic mean intensity of each channel over the
  current neighborhood pixel list.
*/
static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  ssize_t
    channel;

  unsigned short
    channels[ListChannels];

  for (channel=0; channel < 5; channel++)
  {
    MagickRealType
      total;

    SkipList
      *list;

    size_t
      color;

    ssize_t
      tally;

    list=pixel_list->lists+channel;
    color=65536L;
    tally=0;
    total=0.0;
    for ( ; ; )
    {
      color=list->nodes[color].next[0];
      total+=(MagickRealType) list->nodes[color].count*color;
      tally+=list->nodes[color].count;
      if (tally >= (ssize_t) pixel_list->length)
        break;
    }
    total/=pixel_list->length;
    channels[channel]=(unsigned short) total;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Store in 'pixel' the median intensity of each channel over the current
  neighborhood pixel list.
*/
static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;
  register ssize_t
    channel;
  size_t
    color;
  ssize_t
    count;
  unsigned short
    channels[ListChannels];
  /*
    Find the median value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    do
    {
      /* Walk the sorted level-0 chain until half of the neighborhood's
         pixels have been passed; 'color' then holds the median. */
      color=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    channels[channel]=(unsigned short) color;
  }
  /* NOTE(review): unlike the sibling Get*PixelList() helpers, this one
     reinitializes *pixel via GetMagickPixelPacket() before assigning the
     channel values -- confirm this asymmetry is intentional. */
  GetMagickPixelPacket((const Image *) NULL,pixel);
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Store in 'pixel' the minimum intensity of each channel over the current
  neighborhood pixel list.
*/
static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  ssize_t
    channel;

  unsigned short
    channels[ListChannels];

  for (channel=0; channel < 5; channel++)
  {
    SkipList
      *list;

    size_t
      color,
      minimum;

    ssize_t
      tally;

    list=pixel_list->lists+channel;
    tally=0;
    color=65536UL;
    minimum=list->nodes[color].next[0];
    for ( ; ; )
    {
      color=list->nodes[color].next[0];
      if (color < minimum)
        minimum=color;
      tally+=list->nodes[color].count;
      if (tally >= (ssize_t) pixel_list->length)
        break;
    }
    channels[channel]=(unsigned short) minimum;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Store in 'pixel' the mode (most frequent intensity) of each channel over
  the current neighborhood pixel list.
*/
static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;
  register ssize_t
    channel;
  size_t
    color,
    max_count,
    mode;
  ssize_t
    count;
  unsigned short
    channels[5];
  /*
    Make each pixel the 'predominant color' of the specified neighborhood.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    /* Seed with the sentinel node (65536), whose count is never written and
       so remains 0 from the initial memset -- any real node beats it. */
    mode=color;
    max_count=list->nodes[mode].count;
    count=0;
    do
    {
      color=list->nodes[color].next[0];
      if (list->nodes[color].count > max_count)
        {
          mode=color;
          max_count=list->nodes[mode].count;
        }
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) mode;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Store in 'pixel' the "non-peak" intensity of each channel: the median of
  the neighborhood, except that when the median is the first (or last) live
  node in the chain, its inward neighbor is used instead.
*/
static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  register SkipList
    *list;
  register ssize_t
    channel;
  size_t
    color,
    next,
    previous;
  ssize_t
    count;
  unsigned short
    channels[5];
  /*
    Finds the non peak value for each of the colors.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    next=list->nodes[color].next[0];
    count=0;
    do
    {
      /* Walk to the median, keeping the nodes on either side of it. */
      previous=color;
      color=next;
      next=list->nodes[color].next[0];
      count+=list->nodes[color].count;
    } while (count <= (ssize_t) (pixel_list->length >> 1));
    /* 65536 is the sentinel: shift off the median only when it sits at one
       end of the chain and an interior neighbor exists on the other side. */
    if ((previous == 65536UL) && (next != 65536UL))
      color=next;
    else
      if ((previous != 65536UL) && (next == 65536UL))
        color=previous;
    channels[channel]=(unsigned short) color;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Store in 'pixel' the root mean square intensity of each channel over the
  current neighborhood pixel list.
*/
static void GetRootMeanSquarePixelList(PixelList *pixel_list,
  MagickPixelPacket *pixel)
{
  ssize_t
    channel;

  unsigned short
    channels[ListChannels];

  for (channel=0; channel < 5; channel++)
  {
    MagickRealType
      total;

    SkipList
      *list;

    size_t
      color;

    ssize_t
      tally;

    list=pixel_list->lists+channel;
    color=65536L;
    tally=0;
    total=0.0;
    for ( ; ; )
    {
      color=list->nodes[color].next[0];
      total+=(MagickRealType) (list->nodes[color].count*color*color);
      tally+=list->nodes[color].count;
      if (tally >= (ssize_t) pixel_list->length)
        break;
    }
    total/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(total);
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Store in 'pixel' the standard deviation of each channel over the current
  neighborhood pixel list.

  Fix: the squared sum is now accumulated with a single multiply per node
  instead of the former O(count) loop that added color*color once per pixel.
  All intermediate values are integers below 2^53, so the double-precision
  result is bit-identical.
*/
static void GetStandardDeviationPixelList(PixelList *pixel_list,
  MagickPixelPacket *pixel)
{
  MagickRealType
    sum,
    sum_squared;
  register SkipList
    *list;
  register ssize_t
    channel;
  size_t
    color;
  ssize_t
    count;
  unsigned short
    channels[ListChannels];
  /*
    Find the standard-deviation value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;
    count=0;
    sum=0.0;
    sum_squared=0.0;
    do
    {
      color=list->nodes[color].next[0];
      sum+=(MagickRealType) list->nodes[color].count*color;
      /* count copies of color^2, folded into one double multiply. */
      sum_squared+=(MagickRealType) list->nodes[color].count*color*color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    sum_squared/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum));
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
/*
  Add one source pixel to the neighborhood's per-channel skip-lists.  For
  each channel the 16-bit intensity either bumps the count of an existing
  live node (signature matches the current generation) or inserts a new one.
*/
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
  const IndexPacket *indexes,PixelList *pixel_list)
{
  size_t
    signature;
  unsigned short
    index;
  index=ScaleQuantumToShort(GetPixelRed(pixel));
  signature=pixel_list->lists[0].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[0].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,0,index);
  index=ScaleQuantumToShort(GetPixelGreen(pixel));
  signature=pixel_list->lists[1].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[1].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,1,index);
  index=ScaleQuantumToShort(GetPixelBlue(pixel));
  signature=pixel_list->lists[2].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[2].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,2,index);
  index=ScaleQuantumToShort(GetPixelOpacity(pixel));
  signature=pixel_list->lists[3].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[3].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,3,index);
  if (image->colorspace == CMYKColorspace)
    index=ScaleQuantumToShort(GetPixelIndex(indexes));
  /* List 4 is deliberately updated even for non-CMYK images (reusing the
     opacity index computed above): every channel list must accumulate
     exactly pixel_list->length entries, because the Get*PixelList() walkers
     loop over all five channels until count reaches that length. */
  signature=pixel_list->lists[4].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[4].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,4,index);
}
/*
  Logically empty the pixel list: point every sentinel link back at itself,
  drop each channel list to level 0, and bump the generation signature so
  stale nodes are invalidated lazily (no per-node clearing required).
*/
static void ResetPixelList(PixelList *pixel_list)
{
  ssize_t
    channel;

  for (channel=0; channel < 5; channel++)
  {
    int
      level;

    ListNode
      *root;

    SkipList
      *list;

    list=pixel_list->lists+channel;
    list->level=0;
    root=list->nodes+65536UL;
    for (level=0; level < 9; level++)
      root->next[level]=65536UL;
  }
  pixel_list->seed=pixel_list->signature++;
}
/*
  StatisticImage() is a convenience front-end for StatisticImageChannel()
  that applies the neighborhood statistic to the default channel set.
*/
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
  return(StatisticImageChannel(image,DefaultChannels,type,width,height,
    exception));
}
/*
  StatisticImageChannel() replaces each pixel of the selected channels with
  the requested statistic (min / max / median / mode / ...) of the width x
  height neighborhood centered on it, using one skip-list set per thread.
*/
MagickExport Image *StatisticImageChannel(const Image *image,
  const ChannelType channel,const StatisticType type,const size_t width,
  const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"
  CacheView
    *image_view,
    *statistic_view;
  Image
    *statistic_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelList
    **magick_restrict pixel_list;
  size_t
    neighbor_height,
    neighbor_width;
  ssize_t
    y;
  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  statistic_image=CloneImage(image,0,0,MagickTrue,exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&statistic_image->exception);
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  /* A zero width or height requests an automatically sized kernel. */
  neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) :
    width;
  neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) :
    height;
  pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height);
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    register const IndexPacket
      *magick_restrict indexes;
    register const PixelPacket
      *magick_restrict p;
    register IndexPacket
      *magick_restrict statistic_indexes;
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    /*
      Fetch a window of neighbor_height rows that overhangs the image by
      half a neighborhood on each side; the virtual cache view synthesizes
      the out-of-bounds pixels.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),y-
      (ssize_t) (neighbor_height/2L),image->columns+neighbor_width,
      neighbor_height,exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      MagickPixelPacket
        pixel;
      register const IndexPacket
        *magick_restrict s;
      register const PixelPacket
        *magick_restrict r;
      register ssize_t
        u,
        v;
      r=p;
      s=indexes+x;
      /* Rebuild the skip-lists from the pixels of this neighborhood. */
      ResetPixelList(pixel_list[id]);
      for (v=0; v < (ssize_t) neighbor_height; v++)
      {
        for (u=0; u < (ssize_t) neighbor_width; u++)
          InsertPixelList(image,r+u,s+u,pixel_list[id]);
        /* Step to the same column in the next row of the padded window. */
        r+=image->columns+neighbor_width;
        s+=image->columns+neighbor_width;
      }
      GetMagickPixelPacket(image,&pixel);
      /* Seed 'pixel' with a nearby source pixel (flat offset
         neighbor_width*neighbor_height/2 from the window origin --
         presumably intended as the neighborhood center; verify) so
         channels not selected below keep sensible values. */
      SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+x+
        neighbor_width*neighbor_height/2,&pixel);
      switch (type)
      {
        case GradientStatistic:
        {
          MagickPixelPacket
            maximum,
            minimum;
          /* Gradient = per-channel |max - min| of the neighborhood. */
          GetMinimumPixelList(pixel_list[id],&pixel);
          minimum=pixel;
          GetMaximumPixelList(pixel_list[id],&pixel);
          maximum=pixel;
          pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
          pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
          pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
          pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
          if (image->colorspace == CMYKColorspace)
            pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
          break;
        }
        case MaximumStatistic:
        {
          GetMaximumPixelList(pixel_list[id],&pixel);
          break;
        }
        case MeanStatistic:
        {
          GetMeanPixelList(pixel_list[id],&pixel);
          break;
        }
        case MedianStatistic:
        default:
        {
          GetMedianPixelList(pixel_list[id],&pixel);
          break;
        }
        case MinimumStatistic:
        {
          GetMinimumPixelList(pixel_list[id],&pixel);
          break;
        }
        case ModeStatistic:
        {
          GetModePixelList(pixel_list[id],&pixel);
          break;
        }
        case NonpeakStatistic:
        {
          GetNonpeakPixelList(pixel_list[id],&pixel);
          break;
        }
        case RootMeanSquareStatistic:
        {
          GetRootMeanSquarePixelList(pixel_list[id],&pixel);
          break;
        }
        case StandardDeviationStatistic:
        {
          GetStandardDeviationPixelList(pixel_list[id],&pixel);
          break;
        }
      }
      /* Write back only the channels the caller selected. */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /* NOTE(review): 'progress' is shared across the OpenMP team but is
           incremented here without an atomic/critical section -- confirm
           whether a "#pragma omp critical" was intended here, as used in
           other MagickCore progress loops. */
        proceed=SetImageProgress(image,StatisticImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  if (status == MagickFalse)
    statistic_image=DestroyImage(statistic_image);
  return(statistic_image);
}
|
mkldnn_rnn_impl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_RNN_IMPL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_RNN_IMPL_H_
#if MXNET_USE_MKLDNN == 1
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mxnet/storage.h>
#include <algorithm>
#include <map>
#include <vector>
#include <utility>
#include <string>
#include "../../math_functions-inl.h"
#include "../../operator_common.h"
#include "../../rnn_impl.h"
#include "../../rnn-inl.h"
#include "mkldnn.hpp"
#include "./mkldnn_base-inl.h"
namespace mxnet {
namespace op {
// Map an MXNet RNN mode to the corresponding MKL-DNN cell algorithm and
// report, through the out-parameters, how many gates and states that cell
// kind uses. Aborts via LOG(FATAL) on an unknown mode.
static algorithm GetMKLDNNRNNAlgo(int mode,
                                  int* ngates,
                                  int* nstates) {
  algorithm cell_algo = algorithm::vanilla_rnn;
  if (mode == rnn_enum::kLstm) {
    *ngates = 4;
    *nstates = 2;
    cell_algo = algorithm::vanilla_lstm;
  } else if (mode == rnn_enum::kGru) {
    *ngates = 3;
    *nstates = 1;
    cell_algo = algorithm::vanilla_gru;
  } else if (mode == rnn_enum::kRnnRelu || mode == rnn_enum::kRnnTanh) {
    *ngates = 1;
    *nstates = 1;
    cell_algo = algorithm::vanilla_rnn;
  } else {
    LOG(FATAL) << "unsupported RNN mode:" << mode;
  }
  return cell_algo;
}
// Concatenate several source memory blobs into `dst` along `concat_dimension`
// using an MKL-DNN concat primitive, then submit the stream.
//
// `src_format`/`dst_format`  memory format tags of the sources / destination.
// `srcs_cds`                 per-source dimensions (one entry per source).
// `dst_cds`                  destination dimensions.
// `srcs_data`                raw data pointers, parallel to `srcs_cds`.
// `dst`                      pre-allocated destination memory.
//
// Perf fix: the containers were previously taken by value, copying every
// dims vector and pointer list on each call; they are now taken by const
// reference (binds to the braced temporaries used at call sites).
static void ConcatData(mkldnn::memory::format src_format,
                       mkldnn::memory::format dst_format,
                       const std::vector<mkldnn::memory::dims> &srcs_cds,
                       const mkldnn::memory::dims &dst_cds,
                       mkldnn::memory::data_type mkldnn_dtype,
                       int concat_dimension,
                       const std::vector<void*> &srcs_data,
                       const mkldnn::memory &dst) {
  auto cpu_engine = CpuEngine::Get()->get_engine();
  std::vector<mkldnn::memory::primitive_desc> srcs_pd;
  std::vector<mkldnn::memory> srcs;
  srcs_pd.reserve(srcs_cds.size());
  srcs.reserve(srcs_cds.size());
  for (size_t i = 0; i < srcs_cds.size(); i++) {
    auto desc = mkldnn::memory::desc(srcs_cds[i], mkldnn_dtype, src_format);
    auto mpd = mkldnn::memory::primitive_desc(desc, cpu_engine);
    srcs_pd.push_back(mpd);
    srcs.emplace_back(mpd, srcs_data[i]);  // wrap user data, no copy
  }
  std::vector<primitive::at> inputs;
  inputs.reserve(srcs.size());
  for (size_t i = 0; i < srcs.size(); i++) {
    inputs.push_back(srcs[i]);
  }
  auto dst_desc = mkldnn::memory::desc(dst_cds, mkldnn_dtype, dst_format);
  auto concat_pd = concat::primitive_desc(dst_desc, concat_dimension, srcs_pd);
  MKLDNNStream::Get()->RegisterPrim(concat(concat_pd, inputs, dst));
  MKLDNNStream::Get()->Submit();
}
// Cached MKL-DNN memory layout:
// the first layer's wx/wh followed by the wx/wh of the next L - 1 layers,
// plus hx and cx for all L layers, and the src/dst data/iter buffers, etc.
// Memory is prepared both before and after the reorder and concat steps.
// For the unidirectional case, layers are fused as 1 + (L - 1) when I != H.
// For the bidirectional case, data and back_data (weight, bias, iter, etc.)
// are fused per layer, so the first layer and the following layers must be
// identified separately.
// Compute the number of DType elements to reserve for the cached MKL-DNN
// memory described above.
//
// L = number of layers, D = directions (1 or 2), T = sequence length,
// N = batch size, I = input feature size, H = hidden size.
//
// The three RNN kinds share one formula that differs only in the number of
// gates per cell, so the previous per-mode copies of the expression are
// collapsed into a single gate-parameterized computation. The arithmetic is
// widened to size_t early to reduce the risk of intermediate int overflow
// for large models.
static size_t GetMKLDNNRNNCacheMemorySize(int L,
                                          int D,
                                          int T,
                                          int N,
                                          int I,
                                          int H,
                                          int mode) {
  int ngates = 0;
  switch (mode) {
    case rnn_enum::kLstm:
      ngates = 4;
      break;
    case rnn_enum::kGru:
      ngates = 3;
      break;
    case rnn_enum::kRnnRelu:
    case rnn_enum::kRnnTanh:
      ngates = 1;
      break;
    default:
      LOG(FATAL) << "unknown RNN mode " << mode;
      return 0;  // unreachable: LOG(FATAL) aborts; keeps old "size = 0" behavior
  }
  // Leading size_t factor widens each product before the int terms multiply in.
  const size_t g = static_cast<size_t>(ngates);
  const size_t size =
      2 * (g * D * (I + H) * H + g * (L - 1) * D * (D * H + H) * H +
      static_cast<size_t>(L) * D * 2 * N * H) +
      static_cast<size_t>(T) * N * D * H +
      g * L * 2 * D * H +
      static_cast<size_t>(L + 2) * D * 2 * N * H +
      g * 6 * D * (I + H + 2) * H +
      static_cast<size_t>(T) * N * I * 2;
  return size;
}
// Swap the reset-gate and update-gate weight blocks of a GRU in place.
// MXNet stores GRU gates as (reset, update, new) while MKL-DNN expects
// (update, reset, new); each gate block holds I * H values.
template <typename DType>
static void AdjustGruWeightGateOrder(DType* weight,
                                     const int I,
                                     const int H) {
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  const int block_size = I * H;
  DType* gate_reset = weight;
  DType* gate_update = weight + block_size;
  #pragma omp parallel for num_threads(omp_threads)
  for (int idx = 0; idx < block_size; idx++) {
    std::swap(gate_reset[idx], gate_update[idx]);
  }
}
// Swap the reset-gate and update-gate bias blocks of a GRU in place.
// MXNet stores GRU gates as (reset, update, new) while MKL-DNN expects
// (update, reset, new); each gate block holds H values.
template <typename DType>
static void AdjustGruBiasGateOrder(DType* bias,
                                   const int H) {
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  DType* gate_reset = bias;
  DType* gate_update = bias + H;
  #pragma omp parallel for num_threads(omp_threads)
  for (int idx = 0; idx < H; idx++) {
    std::swap(gate_reset[idx], gate_update[idx]);
  }
}
// Since MKL-DNN's fused RNN and MXNet's FusedRNN have different semantics,
// bidirectional networks are fused layer by layer, while unidirectional
// networks are computed as fused 1 + fused (L - 1) layers, or as fused L
// layers when I == H.
// Compute one bidirectional fused RNN layer with MKL-DNN primitives.
// Always builds the primitive with prop_kind::forward_inference
// (NOTE(review): `is_train` is accepted but never read here — confirm that is
// intentional).
// Layout tags (MKL-DNN 0.x): tnc = (time, batch, channel) data, ldigo/ldgoi
// weights, ldsnc hidden/cell state, ldgo bias; the literal "2" in the dims
// below is the direction dimension (forward + backward).
// `layer_index` selects the shared memory/primitive slot; `lvalue` > 0 means
// this call is for a layer past the first and its reordered weights are
// cached at slot `lvalue + 1`. `*has_cache` tells whether that one-time
// weight preparation has already been done.
template <typename DType>
static void MKLDNNRNNForwardSingleLayerBi(bool state_outputs,
const int T,
const int N,
const int I,
const int H,
DType* x_ptr,
mkldnn::memory *user_src_layer_memory,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* b_ptr,
DType* y_ptr,
DType* hy_ptr,
DType* cy_ptr,
std::vector<mkldnn::memory> *concat_weight_memory,
std::vector<mkldnn::memory> *concat_iter_memory,
std::vector<mkldnn::memory> *x_memory,
std::vector<mkldnn::memory> *hcx_memory,
std::vector<mkldnn::memory> *wx_memory,
std::vector<mkldnn::memory> *wh_memory,
std::vector<mkldnn::memory> *bias_memory,
std::vector<mkldnn::memory> *y_memory,
std::vector<mkldnn::memory> *hcy_memory,
std::vector<primitive> *rnn_forward_prim,
int layer_index,
bool *has_cache,
int lvalue,
int dtype,
bool is_train,
int mode) {
int ngates = 0, nstates = 0;
algorithm nalgorithm = GetMKLDNNRNNAlgo(mode, &ngates, &nstates);
mkldnn::memory::data_type mkldnn_dtype = get_mkldnn_type(dtype);
const int single_cell_size = N * H;
const int single_b_size = ngates * H;
// Forward and backward weight/bias sub-blocks inside MXNet's packed blobs.
DType* wx = w_ptr; //  ngates * H, I
DType* wh = w_ptr + I * H * ngates; //  ngates * H, H
DType* back_wx = w_ptr + ngates * H * (I + H);
DType* back_wh = back_wx + I * H * ngates;
DType* bx = b_ptr;
DType* bh = b_ptr + H * ngates;
DType* back_bx = b_ptr + single_b_size * 2;
DType* back_bh = back_bx + H * ngates;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
auto cpu_engine = CpuEngine::Get()->get_engine();
auto null_memory_ = null_memory(cpu_engine);
int offset1 = 0, offset2 = 0;
bool initialized = *has_cache;
mkldnn::memory::dims src_layer_tz = {T, N, I};
mkldnn::memory::dims dst_layer_tz = {T, N, 2 * H};
mkldnn::memory::dims weights_layer_tz = {1, 2, I, ngates, H};  // ldigo
mkldnn::memory::dims weights_layer_r_tz = {1, 1, I, ngates, H};  // ldigo for reorder
mkldnn::memory::dims weights_iter_tz = {1, 2, H, ngates, H};  // ldigo
mkldnn::memory::dims weights_iter_r_tz = {1, 1, H, ngates, H};  // ldigo for reorder
mkldnn::memory::dims bias_tz = {1, 2, ngates, H};
mkldnn::memory::dims src_iter_tz = {1, 2, nstates, N, H};  // ldsnc
mkldnn::memory::dims dst_iter_tz = {1, 2, nstates, N, H};  // ldsnc
// One-time weight preparation: fix GRU gate order, fuse forward/backward
// weights along the direction dimension, reorder to ldigo, and fold the two
// MXNet bias vectors into MKL-DNN's single bias.
if (!initialized) {
if (mode == rnn_enum::kGru) {
// MXNet GRU gate order is (reset, update, new); MKL-DNN expects
// (update, reset, new) — swap in place for both directions.
AdjustGruWeightGateOrder(wx, I, H);
AdjustGruWeightGateOrder(back_wx, I, H);
AdjustGruWeightGateOrder(wh, H, H);
AdjustGruWeightGateOrder(back_wh, H, H);
AdjustGruBiasGateOrder(bx, H);
AdjustGruBiasGateOrder(back_bx, H);
AdjustGruBiasGateOrder(bh, H);
AdjustGruBiasGateOrder(back_bh, H);
}
auto src_wx = (*concat_weight_memory)[2 * layer_index];
auto src_wh = (*concat_weight_memory)[2 * layer_index + 1];
std::vector<void*> srcs_data1;
srcs_data1.push_back(wx);
srcs_data1.push_back(back_wx);
// Fuse forward + backward input weights along the direction dim (dim 1).
ConcatData(mkldnn::memory::format::ldgoi, mkldnn::memory::format::ldgoi,
{weights_layer_r_tz, weights_layer_r_tz}, weights_layer_tz,
mkldnn_dtype, 1, srcs_data1, src_wx);
srcs_data1.clear();
srcs_data1.push_back(wh);
srcs_data1.push_back(back_wh);
ConcatData(mkldnn::memory::format::ldgoi, mkldnn::memory::format::ldgoi,
{weights_iter_r_tz, weights_iter_r_tz}, weights_iter_tz,
mkldnn_dtype, 1, srcs_data1, src_wh);
// Layers after the first cache their reordered weights at slot lvalue + 1.
int tmpvalue = 0;
if (lvalue > 0) {
tmpvalue = lvalue + 1;
}
// Reorder from MXNet-side ldgoi into MKL-DNN's preferred ldigo layout.
MKLDNNStream::Get()->RegisterPrim(reorder(src_wx, (*wx_memory)[tmpvalue]));
MKLDNNStream::Get()->RegisterPrim(reorder(src_wh, (*wh_memory)[tmpvalue]));
DType* user_bias = reinterpret_cast<DType *>
((*bias_memory)[tmpvalue].get_data_handle());
// MKL-DNN takes one bias per gate: fold bx + bh (forward) and
// back_bx + back_bh (backward) into the cached bias buffer.
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < single_b_size; j++) {
user_bias[j] = bx[j] + bh[j];
user_bias[single_b_size + j] = back_bx[j] + back_bh[j];
}
}
// Alias the shared layer slot onto this layer's cached weights/bias.
if (lvalue > 0) {
(*wx_memory)[layer_index].set_data_handle((*wx_memory)[lvalue + 1].get_data_handle());
(*wh_memory)[layer_index].set_data_handle((*wh_memory)[lvalue + 1].get_data_handle());
(*bias_memory)[layer_index].set_data_handle((*bias_memory)[lvalue + 1].get_data_handle());
}
auto src_layer_md = mkldnn::memory::desc(
{ src_layer_tz }, mkldnn_dtype, mkldnn::memory::format::tnc);
auto weight_layer_md = mkldnn::memory::desc(
{ weights_layer_tz }, mkldnn_dtype, mkldnn::memory::format::ldigo);
auto weight_iter_md = mkldnn::memory::desc(
{ weights_iter_tz }, mkldnn_dtype, mkldnn::memory::format::ldigo);
auto dst_layer_md = mkldnn::memory::desc(
{ dst_layer_tz }, mkldnn_dtype, mkldnn::memory::format::tnc);
auto dst_iter_md = mkldnn::memory::desc(
{ dst_iter_tz }, mkldnn_dtype, mkldnn::memory::format::ldsnc);
auto src_iter_md = mkldnn::memory::desc(
{src_iter_tz}, mkldnn_dtype, mkldnn::memory::format::ldsnc);
auto bias_md = mkldnn::memory::desc({bias_tz},
mkldnn_dtype, mkldnn::memory::format::ldgo);
// Build the ldsnc src_iter blob. For LSTM: concat hx + cx per direction,
// then fuse the two directions; other cells use hx directly.
auto user_src_iter_memory = (*concat_iter_memory)[2];
if (mode == rnn_enum::kLstm) {
std::vector<void*> srcs_data1;
srcs_data1.push_back(hx_ptr);
srcs_data1.push_back(cx_ptr);
auto tmp1_src_iter_memory = (*concat_iter_memory)[0];
ConcatData(mkldnn::memory::format::ldsnc, mkldnn::memory::format::ldsnc,
{{1, 1, 1, N, H}, {1, 1, 1, N, H}}, {1, 1, nstates, N, H}, mkldnn_dtype, 2,
srcs_data1, tmp1_src_iter_memory);
std::vector<void*> srcs_data2;
srcs_data2.push_back(hx_ptr + single_cell_size);
srcs_data2.push_back(cx_ptr + single_cell_size);
auto tmp2_src_iter_memory = (*concat_iter_memory)[1];
ConcatData(mkldnn::memory::format::ldsnc, mkldnn::memory::format::ldsnc,
{{1, 1, 1, N, H}, {1, 1, 1, N, H}}, {1, 1, nstates, N, H}, mkldnn_dtype, 2,
srcs_data2, tmp2_src_iter_memory);
std::vector<void*> srcs_data3;
srcs_data3.push_back(reinterpret_cast<DType *>(tmp1_src_iter_memory.get_data_handle()));
srcs_data3.push_back(reinterpret_cast<DType *>(tmp2_src_iter_memory.get_data_handle()));
ConcatData(mkldnn::memory::format::ldsnc, mkldnn::memory::format::ldsnc,
{{1, 1, nstates, N, H}, {1, 1, nstates, N, H}}, {1, 2, nstates, N, H},
mkldnn_dtype, 1, srcs_data3, user_src_iter_memory);
} else {
user_src_iter_memory.set_data_handle(hx_ptr);
}
(*hcx_memory)[layer_index].set_data_handle(user_src_iter_memory.get_data_handle());
rnn_cell::desc rnn_cell(nalgorithm,
mode == rnn_enum::kRnnRelu ? algorithm::eltwise_relu : algorithm::eltwise_tanh);
rnn_forward::desc layer_desc(prop_kind::forward_inference, rnn_cell,
rnn_direction::bidirectional_concat, src_layer_md,
src_iter_md, weight_layer_md, weight_iter_md,
bias_md, dst_layer_md, dst_iter_md);
auto prim_desc
= rnn_forward::primitive_desc(layer_desc, cpu_engine);
// The first layer reads the user input; deeper layers read the previous
// layer's output memory.
if (x_ptr && layer_index == 0) {
(*x_memory)[layer_index].set_data_handle(x_ptr);
} else {
(*x_memory)[layer_index].set_data_handle((*user_src_layer_memory).get_data_handle());
}
(*y_memory)[layer_index].set_data_handle(y_ptr);
// Create the primitive only once per layer slot; subsequent calls merely
// retarget the data handles of the cached memories.
if (rnn_forward_prim->size() <= (size_t)layer_index) {
primitive rnn_prim = rnn_forward(prim_desc, (*x_memory)[layer_index],
(*hcx_memory)[layer_index], (*wx_memory)[layer_index],
(*wh_memory)[layer_index], (*bias_memory)[layer_index],
(*y_memory)[layer_index],
(*hcy_memory)[layer_index], null_memory_);
rnn_forward_prim->push_back(rnn_prim);
}
MKLDNNStream::Get()->RegisterPrim((*rnn_forward_prim)[layer_index]);
MKLDNNStream::Get()->Submit();
// De-interleave the ldsnc destination states into MXNet's hy (and, for
// LSTM, cy) output buffers for both directions.
if (state_outputs) {
DType* dst_hcy = reinterpret_cast<DType *> ((*hcy_memory)[layer_index].get_data_handle());
if (mode == rnn_enum::kLstm) {
offset1 = nstates * single_cell_size;
offset2 = (nstates + 1) * single_cell_size;
#pragma omp parallel for num_threads(omp_threads)
for (int n = 0; n < single_cell_size; n++) {
hy_ptr[n] = dst_hcy[n];
hy_ptr[n + single_cell_size] = dst_hcy[n + offset1];
cy_ptr[n] = dst_hcy[n + single_cell_size];
cy_ptr[n + single_cell_size] = dst_hcy[n + offset2];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int n = 0; n < 2 * single_cell_size; n++) {
hy_ptr[n] = dst_hcy[n];
}
}
}
}
// Compute L stacked unidirectional RNN layers as one fused MKL-DNN primitive.
// Always builds the primitive with prop_kind::forward_inference
// (NOTE(review): `is_train` is accepted but never read here — confirm that is
// intentional).
// Precondition implied by the fused weight dims {L, 1, I, ngates, H}: when
// L > 1 every layer must have input size I == H.
// `layer_index` selects the shared memory/primitive slot (0 for the first
// fused group, 1 for the follow-up group used by MKLDNNRNNForward);
// `*has_cache` tells whether the one-time weight preparation was done.
template <typename DType>
static void MKLDNNRNNForwardUnidi(bool state_outputs,
const int L,
const int T,
const int N,
const int I,
const int H,
DType* x_ptr,
mkldnn::memory *user_src_layer_memory,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* b_ptr,
DType* y_ptr,
DType* hy_ptr,
DType* cy_ptr,
std::vector<mkldnn::memory> *concat_weight_memory,
std::vector<mkldnn::memory> *concat_iter_memory,
std::vector<mkldnn::memory> *x_memory,
std::vector<mkldnn::memory> *hcx_memory,
std::vector<mkldnn::memory> *wx_memory,
std::vector<mkldnn::memory> *wh_memory,
std::vector<mkldnn::memory> *bias_memory,
std::vector<mkldnn::memory> *y_memory,
std::vector<mkldnn::memory> *hcy_memory,
std::vector<primitive> *rnn_forward_prim,
int layer_index,
bool *has_cache,
int dtype,
bool is_train,
int mode) {
int ngates = 0, nstates = 0;
algorithm nalgorithm = GetMKLDNNRNNAlgo(mode, &ngates, &nstates);
mkldnn::memory::data_type mkldnn_dtype = get_mkldnn_type(dtype);
const int cell_size = N * H;
const int single_cell_size = N * H;
const int single_b_size = ngates * H;
int w_size = (I + H) * H * ngates;  // weights per layer in the packed blob
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
auto cpu_engine = CpuEngine::Get()->get_engine();
auto null_memory_ = null_memory(cpu_engine);
int offset1 = 0, offset2 = 0;
bool initialized = *has_cache;
// Fused dims: the leading L is the layer dimension, direction dim is 1.
mkldnn::memory::dims src_layer_tz = {T, N, I};
mkldnn::memory::dims dst_layer_tz = {T, N, H};
mkldnn::memory::dims weights_layer_tz = {L, 1, I, ngates, H};  // ldigo
mkldnn::memory::dims weights_iter_tz = {L, 1, H, ngates, H};  // ldigo
mkldnn::memory::dims bias_tz = {L, 1, ngates, H};
mkldnn::memory::dims src_iter_tz = {L, 1, nstates, N, H};  // ldsnc
mkldnn::memory::dims dst_iter_tz = {L, 1, nstates, N, H};  // ldsnc
mkldnn::memory::dims weights_layer_r_tz = {1, 1, I, ngates, H};  // ldigo for reorder
mkldnn::memory::dims weights_iter_r_tz = {1, 1, H, ngates, H};  // ldigo for reorder
auto weight_layer_md = mkldnn::memory::desc(
{ weights_layer_tz }, mkldnn_dtype, mkldnn::memory::format::ldigo);
auto weight_iter_md = mkldnn::memory::desc(
{ weights_iter_tz }, mkldnn_dtype, mkldnn::memory::format::ldigo);
auto src_layer_md = mkldnn::memory::desc(
{ src_layer_tz }, mkldnn_dtype, mkldnn::memory::format::tnc);
auto dst_layer_md = mkldnn::memory::desc(
{dst_layer_tz}, mkldnn_dtype, mkldnn::memory::format::tnc);
auto src_iter_md = mkldnn::memory::desc(
{src_iter_tz}, mkldnn_dtype, mkldnn::memory::format::ldsnc);
auto bias_md = mkldnn::memory::desc({bias_tz},
mkldnn_dtype, mkldnn::memory::format::ldgo);
auto dst_iter_md = mkldnn::memory::desc(
{dst_iter_tz}, mkldnn_dtype, mkldnn::memory::format::ldsnc);
// Stage the per-layer initial states; LSTM interleaves hx and cx into one
// ldsnc blob per layer, other cells alias hx directly.
for (int l = 0; l < L; l++) {
if (mode == rnn_enum::kLstm) {
std::vector<void*> srcs_data;
srcs_data.push_back(hx_ptr);
srcs_data.push_back(cx_ptr);
auto tmp_src_iter_memory = (*concat_iter_memory)[l + layer_index];
ConcatData(mkldnn::memory::format::ldsnc, mkldnn::memory::format::ldsnc,
{{1, 1, 1, N, H}, {1, 1, 1, N, H}}, {1, 1, nstates, N, H}, mkldnn_dtype,
2, srcs_data, tmp_src_iter_memory);
} else {
(*concat_iter_memory)[l + layer_index].set_data_handle(hx_ptr);
}
hx_ptr += cell_size;
if (mode == rnn_enum::kLstm) {
cx_ptr += cell_size;
}
}
// For L > 1, fuse the per-layer state blobs along the layer dimension.
auto user_src_iter_memory = null_memory_;
if (L == 1) {
user_src_iter_memory = (*concat_iter_memory)[layer_index];
} else {
user_src_iter_memory = (*concat_iter_memory)[L + layer_index];
std::vector<void*> src_l_data;
std::vector<mkldnn::memory::dims> src_l_dim;
for (int l = 0; l < L; l++) {
src_l_data.push_back(reinterpret_cast<DType *>
((*concat_iter_memory)[l + layer_index].get_data_handle()));
src_l_dim.push_back({1, 1, nstates, N, H});
}
ConcatData(mkldnn::memory::format::ldsnc, mkldnn::memory::format::ldsnc, src_l_dim,
{L, 1, nstates, N, H}, mkldnn_dtype, 0, src_l_data, user_src_iter_memory);
}
(*hcx_memory)[layer_index].set_data_handle(user_src_iter_memory.get_data_handle());
auto src_wx_f = (*concat_weight_memory)[2 * layer_index];
auto src_wh_f = (*concat_weight_memory)[2 * layer_index + 1];
std::vector<void*> srcs_data_x;
std::vector<void*> srcs_data_h;
std::vector<mkldnn::memory::dims> src_l_dim_x;
std::vector<mkldnn::memory::dims> src_l_dim_h;
// One-time weight preparation: fix GRU gate order, fuse all layers' weights
// along the layer dimension, reorder to ldigo, and fold the paired MXNet
// biases into MKL-DNN's single per-gate bias.
if (!initialized) {
if (L == 1) {
DType* wx = w_ptr;
DType* wh = w_ptr + I * H * ngates;
if (mode == rnn_enum::kGru) {
AdjustGruWeightGateOrder(wx, I, H);
AdjustGruWeightGateOrder(wh, H, H);
AdjustGruBiasGateOrder(b_ptr, H);
AdjustGruBiasGateOrder(b_ptr + H * ngates, H);
}
// Single layer: no concat needed, alias the user weights directly.
src_wx_f.set_data_handle(wx);
src_wh_f.set_data_handle(wh);
} else {
for (int l = 0; l < L; l++) {
DType* wx = w_ptr;
DType* wh = w_ptr + I * H * ngates;
DType* bx = b_ptr + l * ngates * H * 2;
DType* bh = b_ptr + l * ngates * H * 2 + H * ngates;
if (mode == rnn_enum::kGru) {
AdjustGruWeightGateOrder(wx, I, H);
AdjustGruWeightGateOrder(wh, H, H);
AdjustGruBiasGateOrder(bx, H);
AdjustGruBiasGateOrder(bh, H);
}
srcs_data_x.push_back(wx);
srcs_data_h.push_back(wh);
src_l_dim_x.push_back(weights_layer_r_tz);
src_l_dim_h.push_back(weights_iter_r_tz);
w_ptr = w_ptr + w_size;
}
ConcatData(mkldnn::memory::format::ldgoi, mkldnn::memory::format::ldgoi,
src_l_dim_x, weights_layer_tz, mkldnn_dtype, 0, srcs_data_x, src_wx_f);
ConcatData(mkldnn::memory::format::ldgoi, mkldnn::memory::format::ldgoi,
src_l_dim_h, weights_iter_tz, mkldnn_dtype, 0, srcs_data_h, src_wh_f);
}
MKLDNNStream::Get()->RegisterPrim(reorder(src_wx_f, (*wx_memory)[layer_index]));
MKLDNNStream::Get()->RegisterPrim(reorder(src_wh_f, (*wh_memory)[layer_index]));
DType* user_bias_f = reinterpret_cast<DType *> ((*bias_memory)[layer_index].get_data_handle());
// MKL-DNN takes one bias per gate: user_bias_f[l][g] = bx + bh of layer l.
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < L * single_b_size; j++) {
int k = j / single_b_size;
user_bias_f[j] = b_ptr[j + k * single_b_size] + b_ptr[j + k * single_b_size + single_b_size];
}
}
rnn_cell::desc rnn_cell(nalgorithm,
mode == rnn_enum::kRnnRelu ? algorithm::eltwise_relu : algorithm::eltwise_tanh);
rnn_forward::desc layer_desc(prop_kind::forward_inference, rnn_cell,
rnn_direction::unidirectional, src_layer_md,
src_iter_md, weight_layer_md, weight_iter_md,
bias_md, dst_layer_md, dst_iter_md);
auto prim_desc
= rnn_forward::primitive_desc(layer_desc, cpu_engine);
// The first fused group reads the user input; later groups read the
// previous group's output memory.
if (x_ptr && layer_index == 0) {
(*x_memory)[layer_index].set_data_handle(x_ptr);
} else {
(*x_memory)[layer_index].set_data_handle((*user_src_layer_memory).get_data_handle());
}
(*y_memory)[layer_index].set_data_handle(y_ptr);
// Create the primitive only once per layer slot; subsequent calls merely
// retarget the data handles of the cached memories.
if (rnn_forward_prim->size() <= (size_t)layer_index) {
primitive rnn_prim = rnn_forward(prim_desc, (*x_memory)[layer_index],
(*hcx_memory)[layer_index], (*wx_memory)[layer_index],
(*wh_memory)[layer_index], (*bias_memory)[layer_index],
(*y_memory)[layer_index],
(*hcy_memory)[layer_index], null_memory_);
rnn_forward_prim->push_back(rnn_prim);
}
MKLDNNStream::Get()->RegisterPrim((*rnn_forward_prim)[layer_index]);
MKLDNNStream::Get()->Submit();
// De-interleave the per-layer ldsnc destination states into MXNet's hy
// (and, for LSTM, cy) output buffers.
if (state_outputs) {
DType* dst_hcy = reinterpret_cast<DType *> ((*hcy_memory)[layer_index].get_data_handle());
if (mode == rnn_enum::kLstm) {
for (int l = 0; l < L; l++) {
offset1 = l * single_cell_size;
offset2 = l * nstates * single_cell_size;
#pragma omp parallel for num_threads(omp_threads)
for (int n = 0; n < single_cell_size; n++) {
hy_ptr[offset1 + n] = dst_hcy[offset2 + n];
cy_ptr[offset1 + n] = dst_hcy[offset2 + n + single_cell_size];
}
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int n = 0; n < L * single_cell_size; n++) {
hy_ptr[n] = dst_hcy[n];
}
}
}
}
// Top-level MKL-DNN RNN forward: choose a fusion strategy and advance the
// packed weight/bias/state pointers across layers.
//   * D == 1 and I == H : all L layers fused in one unidirectional call.
//   * D == 2            : layer-by-layer bidirectional calls.
//   * D == 1, I != H    : fused first layer, then fused remaining L - 1
//                         layers (whose input size is H).
// Sets *has_cache = true on exit so later calls skip weight preparation.
template <typename DType>
static void MKLDNNRNNForward(bool state_outputs,
const int L,
const int D,
const int T,
const int N,
const int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* b_ptr,
DType* y_ptr,
DType* hy_ptr,
DType* cy_ptr,
std::vector<mkldnn::memory> *concat_weight_memory,
std::vector<mkldnn::memory> *concat_iter_memory,
std::vector<mkldnn::memory> *x_memory,
std::vector<mkldnn::memory> *hcx_memory,
std::vector<mkldnn::memory> *wx_memory,
std::vector<mkldnn::memory> *wh_memory,
std::vector<mkldnn::memory> *bias_memory,
std::vector<mkldnn::memory> *y_memory,
std::vector<mkldnn::memory> *hcy_memory,
std::vector<primitive> *rnn_forward_prim,
bool *has_cache,
int dtype,
bool is_train,
int mode) {
int ngates = 0, nstates = 0;
GetMKLDNNRNNAlgo(mode, &ngates, &nstates);
// Packed-blob strides for one layer: bias and state sizes cover both
// directions when D == 2.
const int b_size = 2 * H * ngates * D;
const int cell_size = N * H * D;
// First layer
int w_size = (I + H) * H * ngates * D;
auto cpu_engine = CpuEngine::Get()->get_engine();
auto null_memory_ = null_memory(cpu_engine);
DType* tmpNull = NULL;
// when D = 1 and I == H, L layers can be fused together
if (D == 1 && I == H) {
MKLDNNRNNForwardUnidi(state_outputs, L, T, N, I, H, x_ptr, &null_memory_,
hx_ptr, cx_ptr, w_ptr, b_ptr, y_ptr, hy_ptr, cy_ptr, concat_weight_memory,
concat_iter_memory, x_memory, hcx_memory, wx_memory, wh_memory,
bias_memory, y_memory, hcy_memory, rnn_forward_prim,
0, has_cache, dtype, is_train, mode);
} else {
// First layer has input size I; it uses layer slot 0.
auto user_src_layer_memory_l = null_memory_;
if (D == 2) {
MKLDNNRNNForwardSingleLayerBi(state_outputs, T, N, I, H, x_ptr, &user_src_layer_memory_l,
hx_ptr, cx_ptr, w_ptr, b_ptr, y_ptr, hy_ptr, cy_ptr, concat_weight_memory,
concat_iter_memory, x_memory, hcx_memory, wx_memory, wh_memory,
bias_memory, y_memory, hcy_memory, rnn_forward_prim,
0, has_cache, 0, dtype, is_train, mode);
} else {
MKLDNNRNNForwardUnidi(state_outputs, 1, T, N, I, H, x_ptr, &user_src_layer_memory_l,
hx_ptr, cx_ptr, w_ptr, b_ptr, y_ptr, hy_ptr, cy_ptr, concat_weight_memory,
concat_iter_memory, x_memory, hcx_memory, wx_memory, wh_memory,
bias_memory, y_memory, hcy_memory, rnn_forward_prim,
0, has_cache, dtype, is_train, mode);
}
if (L > 1) {
// The first layer's output becomes the next layer's input.
user_src_layer_memory_l = (*y_memory)[0];
// go to next L - 1 layers.
// If D = 2, do it layer by layer. If D = 1, fused L - 1 layers
w_ptr += w_size;
b_ptr += b_size;
if (D == 2) {
// Deeper bidirectional layers see 2 * H input features; each layer l
// caches its weights via lvalue = l + 1 and runs in layer slot 1.
w_size = (H * D + H) * H * ngates * D;
for (int l = 0; l < L - 1; l++) {
if (state_outputs) {
hy_ptr += cell_size;
if (mode == rnn_enum::kLstm) {
cy_ptr += cell_size;
}
}
hx_ptr += cell_size;
if (mode == rnn_enum::kLstm) {
cx_ptr += cell_size;
}
MKLDNNRNNForwardSingleLayerBi(state_outputs, T, N, D * H, H, tmpNull,
&user_src_layer_memory_l, hx_ptr, cx_ptr, w_ptr, b_ptr, y_ptr, hy_ptr,
cy_ptr, concat_weight_memory, concat_iter_memory, x_memory,
hcx_memory, wx_memory, wh_memory, bias_memory,
y_memory, hcy_memory, rnn_forward_prim,
1, has_cache, l + 1, dtype, is_train, mode);
user_src_layer_memory_l = (*y_memory)[1];
w_ptr += w_size;
b_ptr += b_size;
}
}
if (D == 1) {
// Remaining L - 1 unidirectional layers, each with input size H,
// fused in one call using layer slot 1.
if (state_outputs) {
hy_ptr += cell_size;
if (mode == rnn_enum::kLstm) {
cy_ptr += cell_size;
}
}
w_size = (H + H) * H * ngates;
MKLDNNRNNForwardUnidi(state_outputs, L - 1, T, N, H, H, tmpNull, &user_src_layer_memory_l,
hx_ptr, cx_ptr, w_ptr, b_ptr, y_ptr, hy_ptr, cy_ptr, concat_weight_memory,
concat_iter_memory, x_memory, hcx_memory, wx_memory,
wh_memory, bias_memory, y_memory, hcy_memory,
rnn_forward_prim, 1, has_cache, dtype, is_train, mode);
}
}
}
// Weight reorder/concat results are now cached for subsequent forwards.
*has_cache = true;
}
// Entry point for MKL-DNN RNN inference: validate the mode and forward all
// arguments to MKLDNNRNNForward. All four supported cell kinds share the
// same implementation path; an unsupported mode aborts via LOG(FATAL).
// (Fix: the fatal message was missing the space before the mode value,
// unlike its sibling in GetMKLDNNRNNCacheMemorySize.)
template <typename DType>
static void MKLDNNRNNForwardInference(bool state_outputs,
                                      const int num_layers,
                                      const int direction,
                                      const int seq_length,
                                      const int batch_size,
                                      const int input_size,
                                      const int state_size,
                                      DType* x_ptr,
                                      DType* hx_ptr,
                                      DType* cx_ptr,
                                      DType* w_ptr,
                                      DType* b_ptr,
                                      DType* y_ptr,
                                      DType* hy_ptr,
                                      DType* cy_ptr,
                                      std::vector<mkldnn::memory>* concat_weight_memory,
                                      std::vector<mkldnn::memory>* concat_iter_memory,
                                      std::vector<mkldnn::memory> *x_memory,
                                      std::vector<mkldnn::memory> *hcx_memory,
                                      std::vector<mkldnn::memory> *wx_memory,
                                      std::vector<mkldnn::memory> *wh_memory,
                                      std::vector<mkldnn::memory> *bias_memory,
                                      std::vector<mkldnn::memory> *y_memory,
                                      std::vector<mkldnn::memory> *hcy_memory,
                                      std::vector<primitive> *rnn_forward_prim,
                                      bool *has_cache,
                                      int dtype,
                                      bool is_train,
                                      int mode) {
  switch (mode) {
    case rnn_enum::kLstm:
    case rnn_enum::kGru:
    case rnn_enum::kRnnTanh:
    case rnn_enum::kRnnRelu:
      MKLDNNRNNForward<DType>(state_outputs, num_layers, direction, seq_length,
                              batch_size, input_size, state_size, x_ptr, hx_ptr,
                              cx_ptr, w_ptr, b_ptr, y_ptr, hy_ptr, cy_ptr,
                              concat_weight_memory, concat_iter_memory, x_memory,
                              hcx_memory, wx_memory, wh_memory,
                              bias_memory, y_memory, hcy_memory, rnn_forward_prim,
                              has_cache, dtype, is_train, mode);
      break;
    default:
      LOG(FATAL) << "unknown RNN mode " << mode;
      break;
  }
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_MKLDNN == 1
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_RNN_IMPL_H_
|
residualbased_block_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Collaborators: Vicente Mataix
//
//
#if !defined(KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER
/* System includes */
#include <unordered_set>
/* External includes */
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "includes/key_hash.h"
#include "utilities/timer.h"
#include "utilities/openmp_utils.h"
#include "utilities/variable_utils.h"
#include "includes/kratos_flags.h"
#include "includes/lock_object.h"
#include "utilities/sparse_matrix_multiplication_utility.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBlockBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similar to the calculation of the total residual
* @tparam TSparseSpace The sparse system considered
* @tparam TDenseSpace The dense system considered
* @tparam TLinearSolver The linear solver considered
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedBlockBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/// Definition of the flags
KRATOS_DEFINE_LOCAL_FLAG( SILENT_WARNINGS );
// Scaling enum
enum class SCALING_DIAGONAL {NO_SCALING = 0, CONSIDER_NORM_DIAGONAL = 1, CONSIDER_MAX_DIAGONAL = 2, CONSIDER_PRESCRIBED_DIAGONAL = 3};
/// Definition of the pointer
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedBlockBuilderAndSolver);
/// Definition of the base class
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
/// The definition of the current class
typedef ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
// The size_t types
typedef std::size_t SizeType;
typedef std::size_t IndexType;
/// Definition of the classes from the base class
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
/// Additional definitions
typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType;
typedef Element::EquationIdVectorType EquationIdVectorType;
typedef Element::DofsVectorType DofsVectorType;
typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType;
/// DoF types definition
typedef Node<3> NodeType;
typedef typename NodeType::DofType DofType;
typedef typename DofType::Pointer DofPointerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
*/
explicit ResidualBasedBlockBuilderAndSolver() : BaseType()
{
}
/**
* @brief Default constructor. (with parameters)
*/
explicit ResidualBasedBlockBuilderAndSolver(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
    ) : BaseType(pNewLinearSystemSolver)
{
    // Merge the user-provided settings with the class defaults, then apply them
    const Parameters validated_parameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
    this->AssignSettings(validated_parameters);
}
/**
* @brief Default constructor.
*/
explicit ResidualBasedBlockBuilderAndSolver(typename TLinearSolver::Pointer pNewLinearSystemSolver)
    : BaseType(pNewLinearSystemSolver),
      mScalingDiagonal(SCALING_DIAGONAL::NO_SCALING) // no diagonal scaling unless configured via parameters
{
}
/** Destructor.
*/
~ResidualBasedBlockBuilderAndSolver() override = default;
/**
* @brief Create method
* @param pNewLinearSystemSolver The linear solver for the system of equations
* @param ThisParameters The configuration parameters
*/
typename BaseType::Pointer Create(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
    ) const override
{
    // Factory method: build a new builder-and-solver of this concrete type
    auto p_new_builder_and_solver = Kratos::make_shared<ClassType>(pNewLinearSystemSolver, ThisParameters);
    return p_new_builder_and_solver;
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Function to perform the build of the RHS. The vector could be sized as the total number
* of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param b The RHS vector
*/
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& A,
TSystemVectorType& b) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
// Getting the elements from the model
const int nelements = static_cast<int>(rModelPart.Elements().size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
// Contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
// Vector containing the localization in the system of the different terms
Element::EquationIdVectorType EquationId;
// Assemble all elements and conditions. Each thread keeps private local
// workspaces (firstprivate) so no reallocation or contention happens inside
// the loops.
double start_build = OpenMPUtils::GetCurrentTime();
#pragma omp parallel firstprivate(nelements,nconditions, LHS_Contribution, RHS_Contribution, EquationId )
{
// `nowait`: threads that finish the elements may start on the conditions
// immediately.
# pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; k++)
{
ModelPart::ElementsContainerType::iterator it = el_begin + k;
// Detect if the element is active or not. If the user did not make any
// choice the element is active by default
bool element_is_active = true;
if ((it)->IsDefined(ACTIVE))
element_is_active = (it)->Is(ACTIVE);
if (element_is_active)
{
// Calculate elemental contribution
pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
// Assemble the elemental contribution.
// NOTE(review): Assemble is invoked concurrently from several threads,
// so it must synchronize writes to A and b internally — confirm.
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
}
}
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; k++)
{
ModelPart::ConditionsContainerType::iterator it = cond_begin + k;
// Detect if the condition is active or not. If the user did not make any
// choice the condition is active by default
bool condition_is_active = true;
if ((it)->IsDefined(ACTIVE))
condition_is_active = (it)->Is(ACTIVE);
if (condition_is_active)
{
// Calculate the condition contribution
pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
// Assemble the condition contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
}
}
}
const double stop_build = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl;
KRATOS_CATCH("")
}
/**
 * @brief Function to perform the building of the LHS
 * @details Depending on the implementation chosen the size of the matrix could
 * be equal to the total number of Dofs or to the number of unrestrained dofs
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rA The LHS matrix
 */
void BuildLHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA
    ) override
{
    KRATOS_TRY
    KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
    // Getting the elements from the model
    const int nelements = static_cast<int>(rModelPart.Elements().size());
    // Getting the array of the conditions
    const int nconditions = static_cast<int>(rModelPart.Conditions().size());
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
    const auto it_elem_begin = rModelPart.ElementsBegin();
    const auto it_cond_begin = rModelPart.ConditionsBegin();
    // Contributions to the system (each thread gets its own copy via firstprivate)
    LocalSystemMatrixType lhs_contribution(0, 0);
    // Vector containing the localization in the system of the different terms
    Element::EquationIdVectorType equation_id;
    // Assemble all elements
    double start_build = OpenMPUtils::GetCurrentTime();
    #pragma omp parallel firstprivate(nelements, nconditions, lhs_contribution, equation_id )
    {
        // "nowait": threads may start the condition loop as soon as they
        // finish their share of elements.
        # pragma omp for schedule(guided, 512) nowait
        for (int k = 0; k < nelements; ++k) {
            auto it_elem = it_elem_begin + k;
            // Detect if the element is active or not. If the user did not make any choice the element is active by default
            bool element_is_active = true;
            if (it_elem->IsDefined(ACTIVE))
                element_is_active = it_elem->Is(ACTIVE);
            if (element_is_active) {
                // Calculate elemental contribution (LHS only)
                pScheme->CalculateLHSContribution(*it_elem, lhs_contribution, equation_id, r_current_process_info);
                // Assemble the elemental contribution
                AssembleLHS(rA, lhs_contribution, equation_id);
            }
        }
        #pragma omp for schedule(guided, 512)
        for (int k = 0; k < nconditions; ++k) {
            auto it_cond = it_cond_begin + k;
            // Detect if the condition is active or not. If the user did not make any choice the condition is active by default
            bool condition_is_active = true;
            if (it_cond->IsDefined(ACTIVE))
                condition_is_active = it_cond->Is(ACTIVE);
            if (condition_is_active)
            {
                // Calculate the condition contribution (LHS only)
                pScheme->CalculateLHSContribution(*it_cond, lhs_contribution, equation_id, r_current_process_info);
                // Assemble the condition contribution
                AssembleLHS(rA, lhs_contribution, equation_id);
            }
        }
    }
    const double stop_build = OpenMPUtils::GetCurrentTime();
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() >= 1) << "Build time LHS: " << stop_build - start_build << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 2) << "Finished parallel building LHS" << std::endl;
    KRATOS_CATCH("")
}
/**
 * @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
 * and "N" is the total number of degrees of freedom involved.
 * @details In the block builder every DoF is kept in the system, so this simply
 * delegates to the standard Build with a scratch RHS vector that is discarded.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 */
void BuildLHS_CompleteOnFreeRows(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A) override
{
    KRATOS_TRY
    // Scratch RHS, zero-initialized; only the assembled matrix is of interest
    TSystemVectorType scratch_rhs(A.size1(), 0.0);
    this->Build(pScheme, rModelPart, A, scratch_rhs);
    KRATOS_CATCH("")
}
/**
 * @brief This is a call to the linear system solver
 * @details Solves A*Dx = b when the RHS is non-trivial; otherwise Dx is zeroed.
 * If master-slave constraints are present (mT assembled), the solver result is
 * mapped back to the full set of DoFs through the relation matrix T.
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void SystemSolve(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b
    ) override
{
    KRATOS_TRY
    // An empty or all-zero RHS yields the trivial solution; skip the solver
    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.0;
    if (norm_b == 0.0) {
        TSparseSpace::SetToZero(Dx);
    } else {
        // Do solve
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }
    // If there are master-slave constraints, recover the solution of the
    // original (unconstrained) problem: Dx = T * Dx_solver
    if (mT.size1() != 0) {
        TSystemVectorType solver_solution = Dx;
        TSparseSpace::Mult(mT, solver_solution, Dx);
    }
    // Prints information about the current time
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
    KRATOS_CATCH("")
}
/**
 * @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
 * @details With master-slave constraints the reduced system is solved first and
 * the full solution is then recovered as rDx = T * Dx_modified.
 * @param rA The LHS matrix
 * @param rDx The Unknowns vector
 * @param rb The RHS vector
 * @param rModelPart The model part of the problem to solve
 */
virtual void SystemSolveWithPhysics(
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb,
    ModelPart& rModelPart
    )
{
    // No constraints: solve directly into rDx
    if (rModelPart.MasterSlaveConstraints().size() == 0) {
        InternalSystemSolveWithPhysics(rA, rDx, rb, rModelPart);
        return;
    }
    // Constrained case: solve for the modified unknowns, then recover the
    // solution of the original problem through the relation matrix T
    TSystemVectorType modified_dx(rb.size());
    InternalSystemSolveWithPhysics(rA, modified_dx, rb, rModelPart);
    TSparseSpace::Mult(mT, modified_dx, rDx);
}
/**
 * @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
 * @details If the RHS is empty or zero the solution is trivially zero and a
 * warning is emitted (unless SILENT_WARNINGS is set); otherwise the solver is
 * given any additional physical data it requests before solving.
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void InternalSystemSolveWithPhysics(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY
    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.0;
    if (norm_b == 0.0) {
        // Trivial RHS: nothing to solve
        TSparseSpace::SetToZero(Dx);
        KRATOS_WARNING_IF("ResidualBasedBlockBuilderAndSolver", mOptions.IsNot(SILENT_WARNINGS)) << "ATTENTION! setting the RHS to zero!" << std::endl;
    } else {
        // Provide physical data as needed (e.g. for smoothed-aggregation solvers)
        if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
        // Do solve
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }
    // Prints information about the current time
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
    KRATOS_CATCH("")
}
/**
 * @brief Function to perform the building and solving phase at the same time.
 * @details It is ideally the fastest and safer function to use when it is possible to solve
 * just after building. Sequence: Build -> ApplyConstraints (if any) ->
 * ApplyDirichletConditions -> SystemSolveWithPhysics.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void BuildAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    KRATOS_TRY
    Timer::Start("Build");
    Build(pScheme, rModelPart, A, b);
    Timer::Stop("Build");
    // Master-slave constraints modify both A and b before the Dirichlet BCs
    if(rModelPart.MasterSlaveConstraints().size() != 0) {
        Timer::Start("ApplyConstraints");
        ApplyConstraints(pScheme, rModelPart, A, b);
        Timer::Stop("ApplyConstraints");
    }
    ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
    const double start_solve = OpenMPUtils::GetCurrentTime();
    Timer::Start("Solve");
    SystemSolveWithPhysics(A, Dx, b, rModelPart);
    Timer::Stop("Solve");
    const double stop_solve = OpenMPUtils::GetCurrentTime();
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
    KRATOS_CATCH("")
}
/**
 * @brief Function to perform the building and solving phase at the same time Linearizing with the database at the old iteration
 * @details It is ideally the fastest and safer function to use when it is possible to solve just after building.
 * The stiffness is evaluated on the converged configuration of the previous step: the prediction
 * increment is temporarily undone, the system is built, and the prediction is then restored.
 * @param pScheme The pointer to the integration scheme
 * @param rModelPart The model part to compute
 * @param rA The LHS matrix of the system of equations
 * @param rDx The vector of unknowns
 * @param rb The RHS vector of the system of equations
 * @param MoveMesh tells if the update of the scheme needs to be performed when calling the Update of the scheme
 */
void BuildAndSolveLinearizedOnPreviousIteration(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb,
    const bool MoveMesh
    ) override
{
    KRATOS_INFO_IF("BlockBuilderAndSolver", this->GetEchoLevel() > 0)
        << "Linearizing on Old iteration" << std::endl;
    // Buffer size >= 2 is required because GetSolutionStepValue(1) (previous
    // step) is read below to undo the prediction
    KRATOS_ERROR_IF(rModelPart.GetBufferSize() == 1) << "BlockBuilderAndSolver: \n"
        << "The buffer size needs to be at least 2 in order to use \n"
        << "BuildAndSolveLinearizedOnPreviousIteration \n"
        << "current buffer size for modelpart: " << rModelPart.Name() << std::endl
        << "is :" << rModelPart.GetBufferSize()
        << " Please set IN THE STRATEGY SETTINGS "
        << " UseOldStiffnessInFirstIteration=false " << std::endl;
    // Temporarily free the fixed DoFs so the scheme Update below also moves
    // them; they are re-fixed before applying the Dirichlet conditions.
    DofsArrayType fixed_dofs;
    for(auto& r_dof : BaseType::mDofSet){
        if(r_dof.IsFixed()){
            fixed_dofs.push_back(&r_dof);
            r_dof.FreeDof();
        }
    }
    //TODO: Here we need to take the vector from other ones because
    //      We cannot create a trilinos vector without a communicator. To be improved!
    TSystemVectorType dx_prediction(rDx);
    TSystemVectorType rhs_addition(rb); //we know it is zero here, so we do not need to set it
    // Here we bring back the database to before the prediction,
    // but we store the prediction increment in dx_prediction.
    // The goal is that the stiffness is computed with the
    // converged configuration at the end of the previous step.
    const auto it_dof_begin = BaseType::mDofSet.begin();
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(BaseType::mDofSet.size()); ++i) {
        auto it_dof = it_dof_begin + i;
        // NOTE: this is initialized to MINUS the value of the dx prediction
        dx_prediction[it_dof->EquationId()] = -(it_dof->GetSolutionStepValue() - it_dof->GetSolutionStepValue(1));
    }
    // Use UpdateDatabase to bring back the solution to how it was at the end of the previous step
    pScheme->Update(rModelPart, BaseType::mDofSet, rA, dx_prediction, rb);
    if (MoveMesh) {
        VariableUtils().UpdateCurrentPosition(rModelPart.Nodes(),DISPLACEMENT,0);
    }
    // Build the system on the previous-step configuration
    this->Build(pScheme, rModelPart, rA, rb);
    // Put back the prediction into the database
    TSparseSpace::InplaceMult(dx_prediction, -1.0); //change sign to dx_prediction
    TSparseSpace::UnaliasedAdd(rDx, 1.0, dx_prediction);
    // Use UpdateDatabase to bring back the solution
    // to where it was taking into account BCs
    // it is done here so that constraints are correctly taken into account right after
    pScheme->Update(rModelPart, BaseType::mDofSet, rA, dx_prediction, rb);
    if (MoveMesh) {
        VariableUtils().UpdateCurrentPosition(rModelPart.Nodes(),DISPLACEMENT,0);
    }
    // Apply rb -= A*dx_prediction (linearization around the old configuration)
    TSparseSpace::Mult(rA, dx_prediction, rhs_addition);
    TSparseSpace::UnaliasedAdd(rb, -1.0, rhs_addition);
    // Restore the fixity freed above
    for(auto& dof : fixed_dofs)
        dof.FixDof();
    if (!rModelPart.MasterSlaveConstraints().empty()) {
        this->ApplyConstraints(pScheme, rModelPart, rA, rb);
    }
    this->ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
    this->SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
}
/**
 * @brief Corresponds to the previous, but the System's matrix is considered already built and only the RHS is built again
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rA The LHS matrix
 * @param rDx The Unknowns vector
 * @param rb The RHS vector
 */
void BuildRHSAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY
    BuildRHS(pScheme, rModelPart, rb);
    // Only the RHS side of the constraints needs re-application here,
    // since rA is assumed to be already built and constrained
    if(rModelPart.MasterSlaveConstraints().size() != 0) {
        Timer::Start("ApplyRHSConstraints");
        ApplyRHSConstraints(pScheme, rModelPart, rb);
        Timer::Stop("ApplyRHSConstraints");
    }
    ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
    const double start_solve = OpenMPUtils::GetCurrentTime();
    Timer::Start("Solve");
    SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
    Timer::Stop("Solve");
    const double stop_solve = OpenMPUtils::GetCurrentTime();
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
    KRATOS_CATCH("")
}
/**
 * @brief Function to perform the build of the RHS.
 * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones.
 * After assembly, the entries corresponding to fixed DoFs are zeroed (Dirichlet on the RHS).
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param b The RHS vector
 */
void BuildRHS(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& b) override
{
    KRATOS_TRY
    Timer::Start("BuildRHS");
    BuildRHSNoDirichlet(pScheme,rModelPart,b);
    const int ndofs = static_cast<int>(BaseType::mDofSet.size());
    // NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    // Zero out the RHS entries of fixed DoFs
    #pragma omp parallel for firstprivate(ndofs)
    for (int k = 0; k<ndofs; k++)
    {
        typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
        const std::size_t i = dof_iterator->EquationId();
        if (dof_iterator->IsFixed())
            b[i] = 0.0;
    }
    Timer::Stop("BuildRHS");
    KRATOS_CATCH("")
}
/**
 * @brief Builds the list of the DofSets involved in the problem by "asking" to each element
 * and condition its Dofs.
 * @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
 * way the matrix and RHS are built. DoFs are collected in per-thread unordered sets,
 * merged into a global set, then sorted into the final ordered DoF array.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 */
void SetUpDofSet(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart
    ) override
{
    KRATOS_TRY;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;
    // Gets the array of elements from the modeler
    ElementsArrayType& r_elements_array = rModelPart.Elements();
    const int number_of_elements = static_cast<int>(r_elements_array.size());
    DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations
    unsigned int nthreads = OpenMPUtils::GetNumThreads();
    typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of threads" << nthreads << "\n" << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl;
    // Global set containing every DoF of the system; filled by merging the
    // per-thread temporary sets built inside the parallel region below.
    // The reserve size (20 per element) is a heuristic guess.
    set_type dof_global_set;
    dof_global_set.reserve(number_of_elements*20);
    #pragma omp parallel firstprivate(dof_list, second_dof_list)
    {
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        // We create the temporary set and we reserve some space on it
        set_type dofs_tmp_set;
        dofs_tmp_set.reserve(20000);
        // Gets the array of elements from the modeler
        #pragma omp for schedule(guided, 512) nowait
        for (int i = 0; i < number_of_elements; ++i) {
            auto it_elem = r_elements_array.begin() + i;
            // Gets list of Dof involved on every element
            pScheme->GetDofList(*it_elem, dof_list, r_current_process_info);
            dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
        }
        // Gets the array of conditions from the modeler
        ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
        const int number_of_conditions = static_cast<int>(r_conditions_array.size());
        #pragma omp for schedule(guided, 512) nowait
        for (int i = 0; i < number_of_conditions; ++i) {
            auto it_cond = r_conditions_array.begin() + i;
            // Gets list of Dof involved on every condition
            pScheme->GetDofList(*it_cond, dof_list, r_current_process_info);
            dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
        }
        // Gets the array of constraints from the modeler
        auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
        const int number_of_constraints = static_cast<int>(r_constraints_array.size());
        #pragma omp for schedule(guided, 512) nowait
        for (int i = 0; i < number_of_constraints; ++i) {
            auto it_const = r_constraints_array.begin() + i;
            // Gets both slave and master Dof lists of every constraint
            it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
            dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
            dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end());
        }
        // We merge all the sets in one thread
        #pragma omp critical
        {
            dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
        }
    }
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl;
    DofsArrayType Doftemp;
    BaseType::mDofSet = DofsArrayType();
    Doftemp.reserve(dof_global_set.size());
    for (auto it= dof_global_set.begin(); it!= dof_global_set.end(); it++)
    {
        Doftemp.push_back( *it );
    }
    // Sort to obtain a deterministic DoF ordering (the unordered_set order is not)
    Doftemp.Sort();
    BaseType::mDofSet = Doftemp;
    // Throws an exception if there are no Degrees Of Freedom involved in the analysis
    KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;
    BaseType::mDofSetIsInitialized = true;
    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;
#ifdef KRATOS_DEBUG
    // If reactions are to be calculated, we check if all the dofs have reactions defined
    // This is to be done only in debug mode
    if (BaseType::GetCalculateReactionsFlag()) {
        for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
            KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
                << "Node : "<<dof_iterator->Id()<< std::endl
                << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
        }
    }
#endif
    KRATOS_CATCH("");
}
/**
 * @brief Organises the dofset in order to speed up the building phase
 * @details In the block builder every DoF (fixed or free) belongs to the system,
 * so the equation system size equals the total number of DoFs and each DoF
 * receives its position in mDofSet as equation id.
 * @param rModelPart The model part of the problem to solve
 */
void SetUpSystem(
    ModelPart& rModelPart
    ) override
{
    BaseType::mEquationSystemSize = BaseType::mDofSet.size();
    // ndofs already an int: no further cast needed in the loop condition
    const int ndofs = static_cast<int>(BaseType::mDofSet.size());
    #pragma omp parallel for
    for (int i = 0; i < ndofs; i++) {
        typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + i;
        // Equation id == position in the (sorted) DoF set
        dof_iterator->SetEquationId(i);
    }
}
/**
 * @brief Resizes and initializes the system matrix and vectors
 * @details Allocates pA/pDx/pb if they are still unset, resizes them to the
 * equation-system size, (re)builds the matrix sparsity pattern when needed and
 * zeroes Dx and b. Changing the equation system size mid-simulation is an error.
 * @param pScheme The integration scheme considered
 * @param pA Pointer to the LHS matrix
 * @param pDx Pointer to the unknowns vector
 * @param pb Pointer to the RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void ResizeAndInitializeVectors(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixPointerType& pA,
    TSystemVectorPointerType& pDx,
    TSystemVectorPointerType& pb,
    ModelPart& rModelPart
    ) override
{
    KRATOS_TRY
    // If a pointer is not initialized yet, initialize it to an empty object
    if (pA == nullptr) {
        TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
        pA.swap(pNewA);
    }
    if (pDx == nullptr) {
        TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
        pDx.swap(pNewDx);
    }
    if (pb == nullptr) {
        TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
        pb.swap(pNewb);
    }
    TSystemMatrixType& A = *pA;
    TSystemVectorType& Dx = *pDx;
    TSystemVectorType& b = *pb;
    // Resizing the system vectors and matrix
    if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag()) { // Matrix not initialized, or reshaping explicitly requested
        A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        ConstructMatrixStructure(pScheme, A, rModelPart);
    } else {
        // A size change is fatal: KRATOS_ERROR throws, so no recovery path exists
        // (the original dead resize/reconstruct after the throw has been removed)
        KRATOS_ERROR_IF(A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
            << "The equation system size has changed during the simulation. This is not permited." << std::endl;
    }
    if (Dx.size() != BaseType::mEquationSystemSize)
        Dx.resize(BaseType::mEquationSystemSize, false);
    TSparseSpace::SetToZero(Dx);
    if (b.size() != BaseType::mEquationSystemSize) {
        b.resize(BaseType::mEquationSystemSize, false);
    }
    TSparseSpace::SetToZero(b);
    // Build/refresh the sparsity pattern of the constraint relation matrix
    ConstructMasterSlaveConstraintsStructure(rModelPart);
    KRATOS_CATCH("")
}
/**
 * @brief Performs all the required operations that should be done (for each step) before solving the solution step.
 * @details Delegates to the base class, then initializes every master-slave constraint
 * (each constraint builds and caches its local T and C matrices and equation ids).
 * @param rModelPart The model part of the problem to solve
 * @param rA The LHS matrix
 * @param rDx The Unknowns vector
 * @param rb The RHS vector
 */
void InitializeSolutionStep(
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb) override
{
    KRATOS_TRY
    BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
    // Getting process info
    const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    // Computing constraints
    const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
    auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
    #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
    for (int k = 0; k < n_constraints; ++k) {
        auto it = constraints_begin + k;
        it->InitializeSolutionStep(r_process_info); // Here each constraint constructs and stores its T and C matrices. Also its equation slave_ids.
    }
    KRATOS_CATCH("")
}
/**
 * @brief Performs all the required operations that should be done (for each step) after solving the solution step.
 * @details Delegates to the base class, then finalizes every master-slave constraint.
 * Wrapped in KRATOS_TRY/KRATOS_CATCH for consistency with InitializeSolutionStep.
 * @param rModelPart The model part of the problem to solve
 * @param rA The LHS matrix
 * @param rDx The Unknowns vector
 * @param rb The RHS vector
 */
void FinalizeSolutionStep(
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb) override
{
    KRATOS_TRY
    BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb);
    // Getting process info
    const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    // Finalizing constraints
    const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
    const auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
    #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
    for (int k = 0; k < n_constraints; ++k) {
        auto it = constraints_begin + k;
        it->FinalizeSolutionStep(r_process_info);
    }
    KRATOS_CATCH("")
}
/**
 * @brief Calculates system reactions
 * @details The RHS (residual) is rebuilt without the Dirichlet zeroing and each
 * DoF's reaction is set to minus its residual entry.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix (unused here, kept for interface compatibility)
 * @param Dx The Unknowns vector (unused here)
 * @param b The RHS vector, overwritten with the fresh residual
 */
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b) override
{
    TSparseSpace::SetToZero(b);
    // Refresh RHS to have the correct reactions
    BuildRHSNoDirichlet(pScheme, rModelPart, b);
    const int ndofs = static_cast<int>(BaseType::mDofSet.size());
    // NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    #pragma omp parallel for firstprivate(ndofs)
    for (int k = 0; k<ndofs; k++) {
        typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;
        // Use std::size_t for the equation id (consistent with BuildRHS);
        // avoids narrowing the unsigned id into a signed int
        const std::size_t i = dof_iterator->EquationId();
        dof_iterator->GetSolutionStepReactionValue() = -b[i];
    }
}
/**
 * @brief Applies the dirichlet conditions. This operation may be very heavy or completely
 * unexpensive depending on the implementation chosen and on how the System Matrix is built.
 * @details For explanation of how it works for a particular implementation the user
 * should refer to the particular Builder And Solver chosen. Here: for each fixed DoF
 * the corresponding row and column of A are zeroed (keeping the diagonal) and the RHS
 * entry is zeroed; fully empty rows get a scaled value on the diagonal.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rA The LHS matrix
 * @param rDx The Unknowns vector
 * @param rb The RHS vector
 */
void ApplyDirichletConditions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    const std::size_t system_size = rA.size1();
    // scaling_factors[k] = 0 for fixed DoFs, 1 for free ones; indexed by DoF
    // position k, which equals the equation id (see SetUpSystem)
    Vector scaling_factors (system_size);
    const auto it_dof_iterator_begin = BaseType::mDofSet.begin();
    const int ndofs = static_cast<int>(BaseType::mDofSet.size());
    // NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
    #pragma omp parallel for firstprivate(ndofs)
    for (int k = 0; k<ndofs; k++) {
        auto it_dof_iterator = it_dof_iterator_begin + k;
        if (it_dof_iterator->IsFixed()) {
            scaling_factors[k] = 0.0;
        } else {
            scaling_factors[k] = 1.0;
        }
    }
    // Direct access to the CSR arrays of the system matrix
    double* Avalues = rA.value_data().begin();
    std::size_t* Arow_indices = rA.index1_data().begin();
    std::size_t* Acol_indices = rA.index2_data().begin();
    // The diagonal value considered for empty rows
    mScaleFactor = GetScaleNorm(rModelPart, rA);
    // Detect if there is a line of all zeros and set the diagonal to a 1 if this happens
    // NOTE(review): rA(k, k) on an empty row is a structural insertion into the
    // sparse matrix from inside a parallel region — assumed safe only if the
    // diagonal entry already exists in the pattern; confirm in ConstructMatrixStructure.
    #pragma omp parallel firstprivate(system_size)
    {
        std::size_t col_begin = 0, col_end = 0;
        bool empty = true;
        #pragma omp for
        for (int k = 0; k < static_cast<int>(system_size); ++k) {
            col_begin = Arow_indices[k];
            col_end = Arow_indices[k + 1];
            empty = true;
            for (std::size_t j = col_begin; j < col_end; ++j) {
                if(Avalues[j] != 0.0) {
                    empty = false;
                    break;
                }
            }
            if(empty) {
                rA(k, k) = mScaleFactor;
                rb[k] = 0.0;
            }
        }
    }
    #pragma omp parallel for firstprivate(system_size)
    for (int k = 0; k < static_cast<int>(system_size); ++k) {
        std::size_t col_begin = Arow_indices[k];
        std::size_t col_end = Arow_indices[k+1];
        const double k_factor = scaling_factors[k];
        if (k_factor == 0.0) {
            // Zero out the whole row, except the diagonal
            for (std::size_t j = col_begin; j < col_end; ++j)
                if (static_cast<int>(Acol_indices[j]) != k )
                    Avalues[j] = 0.0;
            // Zero out the RHS
            rb[k] = 0.0;
        } else {
            // Zero out the column which is associated with the zero'ed row
            for (std::size_t j = col_begin; j < col_end; ++j)
                if(scaling_factors[ Acol_indices[j] ] == 0 )
                    Avalues[j] = 0.0;
        }
    }
}
/**
 * @brief Applies the constraints with master-slave relation matrix (RHS only)
 * @details Computes rb = T^t * rb and zeroes the entries of active slave DoFs.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rb The RHS vector
 */
void ApplyRHSConstraints(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY
    if (rModelPart.MasterSlaveConstraints().size() != 0) {
        BuildMasterSlaveConstraints(rModelPart);
        // We compute the transposed matrix of the global relation matrix
        TSystemMatrixType T_transpose_matrix(mT.size2(), mT.size1());
        SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, mT, 1.0);
        // rb <- T^t * rb (condense the RHS onto the master DoFs)
        TSystemVectorType b_modified(rb.size());
        TSparseSpace::Mult(T_transpose_matrix, rb, b_modified);
        TSparseSpace::Copy(b_modified, rb);
        // Apply diagonal values on slaves: zero the RHS of every ACTIVE slave
        // (inactive slaves keep their entries)
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(mSlaveIds.size()); ++i) {
            const IndexType slave_equation_id = mSlaveIds[i];
            if (mInactiveSlaveDofs.find(slave_equation_id) == mInactiveSlaveDofs.end()) {
                rb[slave_equation_id] = 0.0;
            }
        }
    }
    KRATOS_CATCH("")
}
/**
 * @brief Applies the constraints with master-slave relation matrix
 * @details Computes rb = T^t * rb and rA = T^t * rA * T, then places the maximum
 * diagonal of rA on the diagonal of every active slave row (keeping the matrix
 * well conditioned) and zeroes the corresponding RHS entries.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rA The LHS matrix
 * @param rb The RHS vector
 */
void ApplyConstraints(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY
    if (rModelPart.MasterSlaveConstraints().size() != 0) {
        BuildMasterSlaveConstraints(rModelPart);
        // We compute the transposed matrix of the global relation matrix
        TSystemMatrixType T_transpose_matrix(mT.size2(), mT.size1());
        SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, mT, 1.0);
        // rb <- T^t * rb
        TSystemVectorType b_modified(rb.size());
        TSparseSpace::Mult(T_transpose_matrix, rb, b_modified);
        TSparseSpace::Copy(b_modified, rb);
        // rA <- T^t * rA * T, done in two sparse products
        TSystemMatrixType auxiliar_A_matrix(mT.size2(), rA.size2());
        SparseMatrixMultiplicationUtility::MatrixMultiplication(T_transpose_matrix, rA, auxiliar_A_matrix); //auxiliar = T_transpose * rA
        T_transpose_matrix.resize(0, 0, false); //free memory
        SparseMatrixMultiplicationUtility::MatrixMultiplication(auxiliar_A_matrix, mT, rA); //A = auxilar * T NOTE: here we are overwriting the old A matrix!
        auxiliar_A_matrix.resize(0, 0, false); //free memory
        // Diagonal value used for the (otherwise zero) slave rows
        const double max_diag = GetMaxDiagonal(rA);
        // Apply diagonal values on slaves
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(mSlaveIds.size()); ++i) {
            const IndexType slave_equation_id = mSlaveIds[i];
            if (mInactiveSlaveDofs.find(slave_equation_id) == mInactiveSlaveDofs.end()) {
                rA(slave_equation_id, slave_equation_id) = max_diag;
                rb[slave_equation_id] = 0.0;
            }
        }
    }
    KRATOS_CATCH("")
}
/**
 * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
 * @details Clears the base class storage and releases all constraint-related data
 * (relation matrix, constant vector, slave/master id lists, inactive-slave set).
 */
void Clear() override
{
    BaseType::Clear();
    // Release the constraint relation matrix and constant vector
    mT.resize(0,0,false);
    mConstantVector.resize(0,false);
    // Drop the cached slave/master bookkeeping
    mSlaveIds.clear();
    mMasterIds.clear();
    mInactiveSlaveDofs.clear();
}
/**
 * @brief This function is designed to be called once to perform all the checks needed
 * on the input provided. Checks can be "expensive" as the function is designed
 * to catch user's errors.
 * @param rModelPart The model part of the problem to solve
 * @return 0 all ok
 */
int Check(ModelPart& rModelPart) override
{
    KRATOS_TRY
    // No builder-specific checks are implemented here; always reports success
    return 0;
    KRATOS_CATCH("");
}
/**
 * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
 * @details The local defaults are recursively merged with the base class defaults,
 * so base settings missing here are still present in the returned object.
 * @return The default parameters
 */
Parameters GetDefaultParameters() const override
{
    Parameters default_parameters = Parameters(R"(
{
"name" : "block_builder_and_solver",
"block_builder" : true,
"diagonal_values_for_dirichlet_dofs" : "use_max_diagonal",
"silent_warnings" : false
})");
    // Getting base class default parameters
    const Parameters base_default_parameters = BaseType::GetDefaultParameters();
    default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
    return default_parameters;
}
/**
 * @brief Returns the name of the class as used in the settings (snake_case format)
 * @return The name of the class
 */
static std::string Name()
{
    static const std::string registered_name = "block_builder_and_solver";
    return registered_name;
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
    return "ResidualBasedBlockBuilderAndSolver";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
    // No internal data beyond the class name is printed.
    rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
TSystemMatrixType mT; /// This is matrix containing the global relation for the constraints
TSystemVectorType mConstantVector; /// This is vector containing the rigid movement of the constraint
std::vector<IndexType> mSlaveIds; /// The equation ids of the slaves
std::vector<IndexType> mMasterIds; /// The equation ids of the master
std::unordered_set<IndexType> mInactiveSlaveDofs; /// The set containing the inactive slave dofs
double mScaleFactor = 1.0; /// The manually set scale factor
SCALING_DIAGONAL mScalingDiagonal; /// We identify the scaling considered for the dirichlet dofs
Flags mOptions; /// Some flags used internally
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
 * @brief Assembles the system RHS vector without applying Dirichlet conditions.
 * @details Loops in parallel (OpenMP) over all elements and conditions, skips
 * the inactive ones (ACTIVE flag defined and false) and accumulates their local
 * RHS contributions into b through AssembleRHS (atomic adds).
 * @param pScheme The integration scheme used to compute the local contributions
 * @param rModelPart The model part to build from
 * @param b The RHS vector being assembled
 */
void BuildRHSNoDirichlet(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& b)
{
    KRATOS_TRY
    //Getting the Elements
    ElementsArrayType& pElements = rModelPart.Elements();
    //getting the array of the conditions
    ConditionsArrayType& ConditionsArray = rModelPart.Conditions();
    const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
    //contributions to the system
    // NOTE: LHS_Contribution is only resized below; no LHS is assembled here
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
    //vector containing the localization in the system of the different
    //terms
    Element::EquationIdVectorType EquationId;
    // assemble all elements
    const int nelements = static_cast<int>(pElements.size());
    #pragma omp parallel firstprivate(nelements, RHS_Contribution, EquationId)
    {
        #pragma omp for schedule(guided, 512) nowait
        for (int i=0; i<nelements; i++) {
            typename ElementsArrayType::iterator it = pElements.begin() + i;
            //detect if the element is active or not. If the user did not make any choice the element
            //is active by default
            bool element_is_active = true;
            if( (it)->IsDefined(ACTIVE) ) {
                element_is_active = (it)->Is(ACTIVE);
            }
            if(element_is_active) {
                //calculate elemental Right Hand Side Contribution
                pScheme->CalculateRHSContribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);
                //assemble the elemental contribution
                AssembleRHS(b, RHS_Contribution, EquationId);
            }
        }
        LHS_Contribution.resize(0, 0, false);
        RHS_Contribution.resize(0, false);
        // assemble all conditions
        const int nconditions = static_cast<int>(ConditionsArray.size());
        #pragma omp for schedule(guided, 512)
        for (int i = 0; i<nconditions; i++) {
            auto it = ConditionsArray.begin() + i;
            //detect if the condition is active or not. If the user did not make any choice the condition
            //is active by default
            bool condition_is_active = true;
            if( (it)->IsDefined(ACTIVE) ) {
                condition_is_active = (it)->Is(ACTIVE);
            }
            if(condition_is_active) {
                //calculate the condition contribution
                pScheme->CalculateRHSContribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);
                //assemble the condition contribution
                AssembleRHS(b, RHS_Contribution, EquationId);
            }
        }
    }
    KRATOS_CATCH("")
}
/**
 * @brief Builds the sparsity pattern of the constraint relation matrix T.
 * @details For every active master-slave constraint the master equation ids are
 * collected per slave row (thread-locally, then merged under per-row locks),
 * dofs with no masters are classified as master dofs, the diagonal is forced
 * into every row, and the CSR arrays of mT are filled with sorted, zeroed
 * entries. mConstantVector is resized accordingly.
 * @param rModelPart The model part holding the master-slave constraints
 */
virtual void ConstructMasterSlaveConstraintsStructure(ModelPart& rModelPart)
{
    if (rModelPart.MasterSlaveConstraints().size() > 0) {
        Timer::Start("ConstraintsRelationMatrixStructure");
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
        // Vector containing the localization in the system of the different terms
        DofsVectorType slave_dof_list, master_dof_list;
        // Constraint initial iterator
        const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin();
        std::vector<std::unordered_set<IndexType>> indices(BaseType::mDofSet.size());
        std::vector<LockObject> lock_array(indices.size());
        #pragma omp parallel firstprivate(slave_dof_list, master_dof_list)
        {
            Element::EquationIdVectorType slave_ids(3);
            Element::EquationIdVectorType master_ids(3);
            // Thread-local accumulation: slave equation id -> set of master ids
            std::unordered_map<IndexType, std::unordered_set<IndexType>> temp_indices;
            #pragma omp for schedule(guided, 512) nowait
            for (int i_const = 0; i_const < static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i_const) {
                auto it_const = it_const_begin + i_const;
                // Detect if the constraint is active or not. If the user did not make any choice the constraint
                // It is active by default
                bool constraint_is_active = true;
                if( it_const->IsDefined(ACTIVE) ) {
                    constraint_is_active = it_const->Is(ACTIVE);
                }
                if(constraint_is_active) {
                    it_const->EquationIdVector(slave_ids, master_ids, r_current_process_info);
                    // Slave DoFs
                    for (auto &id_i : slave_ids) {
                        temp_indices[id_i].insert(master_ids.begin(), master_ids.end());
                    }
                }
            }
            // Merging all the temporal indexes
            // BUGFIX: iterate over the actual map entries. The previous code
            // indexed the unordered_map with a loop counter (temp_indices[i]),
            // which default-inserted empty sets for rows 0..size-1 (mutating
            // the map while looping over its size) and never visited slave
            // rows whose equation id was >= temp_indices.size().
            for (auto& r_pair : temp_indices) {
                const IndexType slave_row = r_pair.first;
                lock_array[slave_row].SetLock();
                indices[slave_row].insert(r_pair.second.begin(), r_pair.second.end());
                lock_array[slave_row].UnSetLock();
            }
        }
        // Classify dofs: rows with no master entries are masters themselves
        mSlaveIds.clear();
        mMasterIds.clear();
        for (int i = 0; i < static_cast<int>(indices.size()); ++i) {
            if (indices[i].size() == 0) // Master dof!
                mMasterIds.push_back(i);
            else // Slave dof
                mSlaveIds.push_back(i);
            indices[i].insert(i); // Ensure that the diagonal is there in T
        }
        // Count the row sizes
        std::size_t nnz = 0;
        for (IndexType i = 0; i < indices.size(); ++i)
            nnz += indices[i].size();
        mT = TSystemMatrixType(indices.size(), indices.size(), nnz);
        mConstantVector.resize(indices.size(), false);
        double *Tvalues = mT.value_data().begin();
        IndexType *Trow_indices = mT.index1_data().begin();
        IndexType *Tcol_indices = mT.index2_data().begin();
        // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
        Trow_indices[0] = 0;
        for (int i = 0; i < static_cast<int>(mT.size1()); i++)
            Trow_indices[i + 1] = Trow_indices[i] + indices[i].size();
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(mT.size1()); ++i) {
            const IndexType row_begin = Trow_indices[i];
            const IndexType row_end = Trow_indices[i + 1];
            IndexType k = row_begin;
            for (auto it = indices[i].begin(); it != indices[i].end(); ++it) {
                Tcol_indices[k] = *it;
                Tvalues[k] = 0.0;
                k++;
            }
            indices[i].clear(); //deallocating the memory
            std::sort(&Tcol_indices[row_begin], &Tcol_indices[row_end]);
        }
        mT.set_filled(indices.size() + 1, nnz);
        Timer::Stop("ConstraintsRelationMatrixStructure");
    }
}
/**
 * @brief Fills the constraint relation matrix T and the constant vector.
 * @details For every active constraint the local transformation matrix and
 * constant vector are assembled into mT / mConstantVector (atomic adds).
 * Inactive slave dofs are collected; they and the master dofs get an identity
 * entry on the diagonal of T and a zero constant term.
 * @param rModelPart The model part holding the master-slave constraints
 */
virtual void BuildMasterSlaveConstraints(ModelPart& rModelPart)
{
    KRATOS_TRY
    TSparseSpace::SetToZero(mT);
    TSparseSpace::SetToZero(mConstantVector);
    // The current process info
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
    // Vector containing the localization in the system of the different terms
    DofsVectorType slave_dof_list, master_dof_list;
    // Contributions to the system
    Matrix transformation_matrix = LocalSystemMatrixType(0, 0);
    Vector constant_vector = LocalSystemVectorType(0);
    // Vector containing the localization in the system of the different terms
    Element::EquationIdVectorType slave_equation_ids, master_equation_ids;
    const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
    // We clear the set
    mInactiveSlaveDofs.clear();
    #pragma omp parallel firstprivate(transformation_matrix, constant_vector, slave_equation_ids, master_equation_ids)
    {
        // Thread-local collection of inactive slave dofs, merged at the end
        std::unordered_set<IndexType> auxiliar_inactive_slave_dofs;
        #pragma omp for schedule(guided, 512)
        for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
            auto it_const = rModelPart.MasterSlaveConstraints().begin() + i_const;
            // Detect if the constraint is active or not. If the user did not make any choice the constraint
            // It is active by default
            bool constraint_is_active = true;
            if (it_const->IsDefined(ACTIVE))
                constraint_is_active = it_const->Is(ACTIVE);
            if (constraint_is_active) {
                it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info);
                it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);
                for (IndexType i = 0; i < slave_equation_ids.size(); ++i) {
                    const IndexType i_global = slave_equation_ids[i];
                    // Assemble matrix row
                    AssembleRowContribution(mT, transformation_matrix, i_global, i, master_equation_ids);
                    // Assemble constant vector
                    const double constant_value = constant_vector[i];
                    double& r_value = mConstantVector[i_global];
                    #pragma omp atomic
                    r_value += constant_value;
                }
            } else { // Taking into account inactive constraints
                it_const->EquationIdVector(slave_equation_ids, master_equation_ids, r_current_process_info);
                auxiliar_inactive_slave_dofs.insert(slave_equation_ids.begin(), slave_equation_ids.end());
            }
        }
        // We merge all the sets in one thread
        #pragma omp critical
        {
            mInactiveSlaveDofs.insert(auxiliar_inactive_slave_dofs.begin(), auxiliar_inactive_slave_dofs.end());
        }
    }
    // Setting the master dofs into the T and C system
    for (auto eq_id : mMasterIds) {
        mConstantVector[eq_id] = 0.0;
        mT(eq_id, eq_id) = 1.0;
    }
    // Setting inactive slave dofs in the T and C system
    for (auto eq_id : mInactiveSlaveDofs) {
        mConstantVector[eq_id] = 0.0;
        mT(eq_id, eq_id) = 1.0;
    }
    KRATOS_CATCH("")
}
/**
 * @brief Builds the sparsity pattern (CSR graph) of the system matrix A.
 * @details The connectivity of every element and condition is collected into
 * per-row index sets (protected by one lock per equation), then compressed
 * into the CSR arrays of A with sorted column indices and zeroed values.
 * @param pScheme The scheme providing the equation ids
 * @param A The system matrix whose structure is created
 * @param rModelPart The model part to extract connectivities from
 */
virtual void ConstructMatrixStructure(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType& A,
    ModelPart& rModelPart)
{
    //filling with zero the matrix (creating the structure)
    Timer::Start("MatrixStructure");
    // Getting the elements from the model
    const int nelements = static_cast<int>(rModelPart.Elements().size());
    // Getting the array of the conditions
    const int nconditions = static_cast<int>(rModelPart.Conditions().size());
    const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
    ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin();
    ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
    const std::size_t equation_size = BaseType::mEquationSystemSize;
    // One lock per matrix row makes the set insertions below thread-safe
    std::vector< LockObject > lock_array(equation_size);
    std::vector<std::unordered_set<std::size_t> > indices(equation_size);
    #pragma omp parallel for firstprivate(equation_size)
    for (int iii = 0; iii < static_cast<int>(equation_size); iii++) {
        indices[iii].reserve(40);
    }
    Element::EquationIdVectorType ids(3, 0);
    #pragma omp parallel for firstprivate(nelements, ids)
    for (int iii=0; iii<nelements; iii++) {
        typename ElementsContainerType::iterator i_element = el_begin + iii;
        pScheme->EquationId(*i_element, ids, CurrentProcessInfo);
        for (std::size_t i = 0; i < ids.size(); i++) {
            lock_array[ids[i]].SetLock();
            auto& row_indices = indices[ids[i]];
            row_indices.insert(ids.begin(), ids.end());
            lock_array[ids[i]].UnSetLock();
        }
    }
    #pragma omp parallel for firstprivate(nconditions, ids)
    for (int iii = 0; iii<nconditions; iii++) {
        typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
        pScheme->EquationId(*i_condition, ids, CurrentProcessInfo);
        for (std::size_t i = 0; i < ids.size(); i++) {
            lock_array[ids[i]].SetLock();
            auto& row_indices = indices[ids[i]];
            row_indices.insert(ids.begin(), ids.end());
            lock_array[ids[i]].UnSetLock();
        }
    }
    //destroy locks
    lock_array = std::vector< LockObject >();
    //count the row sizes
    unsigned int nnz = 0;
    for (unsigned int i = 0; i < indices.size(); i++) {
        nnz += indices[i].size();
    }
    A = CompressedMatrixType(indices.size(), indices.size(), nnz);
    double* Avalues = A.value_data().begin();
    std::size_t* Arow_indices = A.index1_data().begin();
    std::size_t* Acol_indices = A.index2_data().begin();
    //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
    Arow_indices[0] = 0;
    for (int i = 0; i < static_cast<int>(A.size1()); i++) {
        Arow_indices[i+1] = Arow_indices[i] + indices[i].size();
    }
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(A.size1()); i++) {
        const unsigned int row_begin = Arow_indices[i];
        const unsigned int row_end = Arow_indices[i+1];
        unsigned int k = row_begin;
        for (auto it = indices[i].begin(); it != indices[i].end(); it++) {
            Acol_indices[k] = *it;
            Avalues[k] = 0.0;
            k++;
        }
        indices[i].clear(); //deallocating the memory
        std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
    }
    A.set_filled(indices.size()+1, nnz);
    Timer::Stop("MatrixStructure");
}
/// Assembles a local LHS/RHS pair into the global system.
/// The RHS entries are accumulated atomically; the matrix rows are scattered
/// through AssembleRowContribution (which also uses atomic adds).
void Assemble(
    TSystemMatrixType& A,
    TSystemVectorType& b,
    const LocalSystemMatrixType& LHS_Contribution,
    const LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
    )
{
    const unsigned int number_of_local_rows = LHS_Contribution.size1();
    for (unsigned int i_local = 0; i_local < number_of_local_rows; ++i_local) {
        const unsigned int i_global = EquationId[i_local];
        // Accumulate the RHS entry atomically, then scatter the matrix row.
        double& r_global_rhs_entry = b[i_global];
        const double& r_local_rhs_value = RHS_Contribution(i_local);
        #pragma omp atomic
        r_global_rhs_entry += r_local_rhs_value;
        AssembleRowContribution(A, LHS_Contribution, i_global, i_local, EquationId);
    }
}
//**************************************************************************
/// Assembles only the rows of a local LHS matrix into the global matrix rA.
void AssembleLHS(
    TSystemMatrixType& rA,
    const LocalSystemMatrixType& rLHSContribution,
    Element::EquationIdVectorType& rEquationId
    )
{
    const SizeType number_of_local_rows = rLHSContribution.size1();
    for (IndexType i_local = 0; i_local < number_of_local_rows; ++i_local)
        AssembleRowContribution(rA, rLHSContribution, rEquationId[i_local], i_local, rEquationId);
}
//**************************************************************************
/// Assembles a local RHS vector into the global vector b (atomic adds,
/// so it is safe inside an OpenMP parallel region).
void AssembleRHS(
    TSystemVectorType& b,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId
    )
{
    const unsigned int number_of_local_entries = RHS_Contribution.size();
    for (unsigned int i_local = 0; i_local < number_of_local_entries; ++i_local) {
        // ASSEMBLING THE SYSTEM VECTOR
        double& r_global_entry = b[EquationId[i_local]];
        const double& r_local_value = RHS_Contribution[i_local];
        #pragma omp atomic
        r_global_entry += r_local_value;
    }
}
/**
 * @brief Adds one local matrix row into the global CSR matrix A (atomic adds).
 * @details Exploits the fact that the column indices of each CSR row are
 * sorted: starting from the position of the previously found entry, the next
 * column is located by a short linear forward/backward scan instead of a
 * search over the whole row.
 * @param A The global system matrix (CSR, sparsity pattern already built)
 * @param Alocal The local contribution matrix
 * @param i Global row index
 * @param i_local Local row index inside Alocal
 * @param EquationId Global column ids of the local entries; every id must
 * already exist in the sparsity pattern of row i, otherwise the scans below
 * run past the row
 */
inline void AssembleRowContribution(TSystemMatrixType& A, const Matrix& Alocal, const unsigned int i, const unsigned int i_local, Element::EquationIdVectorType& EquationId)
{
    double* values_vector = A.value_data().begin();
    std::size_t* index1_vector = A.index1_data().begin();
    std::size_t* index2_vector = A.index2_data().begin();
    size_t left_limit = index1_vector[i];
    //find the first entry
    size_t last_pos = ForwardFind(EquationId[0],left_limit,index2_vector);
    size_t last_found = EquationId[0];
    double& r_a = values_vector[last_pos];
    const double& v_a = Alocal(i_local,0);
    #pragma omp atomic
    r_a += v_a;
    //now find all of the other entries
    size_t pos = 0;
    for (unsigned int j=1; j<EquationId.size(); j++) {
        unsigned int id_to_find = EquationId[j];
        // scan in the direction of the target relative to the last hit
        if(id_to_find > last_found) {
            pos = ForwardFind(id_to_find,last_pos+1,index2_vector);
        } else if(id_to_find < last_found) {
            pos = BackwardFind(id_to_find,last_pos-1,index2_vector);
        } else {
            pos = last_pos;
        }
        double& r = values_vector[pos];
        const double& v = Alocal(i_local,j);
        #pragma omp atomic
        r += v;
        last_found = id_to_find;
        last_pos = pos;
    }
}
/**
 * @brief This method returns the scale norm considering for scaling the diagonal
 * @param rModelPart The problem model part
 * @param rA The LHS matrix
 * @return The scale norm, selected according to mScalingDiagonal
 */
double GetScaleNorm(
    ModelPart& rModelPart,
    TSystemMatrixType& rA
    )
{
    switch (mScalingDiagonal) {
        case SCALING_DIAGONAL::NO_SCALING:
            return 1.0;
        case SCALING_DIAGONAL::CONSIDER_PRESCRIBED_DIAGONAL: {
            // The scale factor must have been stored by the user in the ProcessInfo
            const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
            KRATOS_ERROR_IF_NOT(r_current_process_info.Has(BUILD_SCALE_FACTOR)) << "Scale factor not defined at process info" << std::endl;
            return r_current_process_info.GetValue(BUILD_SCALE_FACTOR);
        }
        case SCALING_DIAGONAL::CONSIDER_NORM_DIAGONAL:
            // Diagonal norm averaged over the number of equations
            return GetDiagonalNorm(rA)/static_cast<double>(rA.size1());
        case SCALING_DIAGONAL::CONSIDER_MAX_DIAGONAL:
            return GetMaxDiagonal(rA);
        default:
            // Fallback: behave like CONSIDER_MAX_DIAGONAL
            return GetMaxDiagonal(rA);
    }
}
/**
 * @brief This method returns the diagonal norm considering for scaling the diagonal
 * @param rA The LHS matrix
 * @return The Euclidean (2-)norm of the diagonal of rA
 */
double GetDiagonalNorm(TSystemMatrixType& rA)
{
    double diagonal_norm = 0.0;
    // Hoist the loop bound and square with a plain multiplication instead of
    // std::pow(x, 2): pow is a general transcendental and needlessly slow
    // for integer exponents.
    const int system_size = static_cast<int>(TSparseSpace::Size1(rA));
    #pragma omp parallel for reduction(+:diagonal_norm)
    for(int i = 0; i < system_size; ++i) {
        const double diagonal_value = rA(i,i);
        diagonal_norm += diagonal_value * diagonal_value;
    }
    return std::sqrt(diagonal_norm);
}
/**
 * @brief This method returns the average of the extreme diagonal values
 * @param rA The LHS matrix
 * @return 0.5 * (max + min) of the absolute diagonal values of rA
 */
double GetAveragevalueDiagonal(TSystemMatrixType& rA)
{
    return 0.5 * (GetMaxDiagonal(rA) + GetMinDiagonal(rA));
}
/**
 * @brief This method returns the diagonal max value
 * @param rA The LHS matrix
 * @return The maximum absolute value on the diagonal of rA
 */
double GetMaxDiagonal(TSystemMatrixType& rA)
{
    // NOTE: an OpenMP max-reduction would be the natural choice here, but it
    // fails to compile in MSVC; a per-thread buffer is reduced manually instead.
    const int num_threads = OpenMPUtils::GetNumThreads();
    Vector max_vector(num_threads, 0.0);
    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
        const int id = OpenMPUtils::ThisThread();
        const double abs_value_ii = std::abs(rA(i,i));
        if (abs_value_ii > max_vector[id])
            max_vector[id] = abs_value_ii;
    }
    // Serial reduction over the per-thread maxima
    double max_diag = 0.0;
    for(int i = 0; i < num_threads; ++i) {
        max_diag = std::max(max_diag, max_vector[i]);
    }
    return max_diag;
}
/**
 * @brief This method returns the diagonal min value
 * @param rA The LHS matrix
 * @return The minimum absolute value on the diagonal of rA
 */
double GetMinDiagonal(TSystemMatrixType& rA)
{
    // NOTE: an OpenMP min-reduction would be the natural choice here, but it
    // fails to compile in MSVC; a per-thread buffer is reduced manually instead.
    const int num_threads = OpenMPUtils::GetNumThreads();
    Vector min_vector(num_threads, std::numeric_limits<double>::max());
    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(TSparseSpace::Size1(rA)); ++i) {
        const int id = OpenMPUtils::ThisThread();
        const double abs_value_ii = std::abs(rA(i,i));
        if (abs_value_ii < min_vector[id])
            min_vector[id] = abs_value_ii;
    }
    // Serial reduction over the per-thread minima
    double min_diag = std::numeric_limits<double>::max();
    for(int i = 0; i < num_threads; ++i) {
        min_diag = std::min(min_diag, min_vector[i]);
    }
    return min_diag;
}
/**
 * @brief This method assigns settings to member variables
 * @param ThisParameters Parameters that are assigned to the member variables
 */
void AssignSettings(const Parameters ThisParameters) override
{
    BaseType::AssignSettings(ThisParameters);
    // Setting flags: validate and translate the requested diagonal scaling
    const std::string& r_diagonal_values_for_dirichlet_dofs = ThisParameters["diagonal_values_for_dirichlet_dofs"].GetString();
    std::set<std::string> available_options_for_diagonal = {"no_scaling","use_max_diagonal","use_diagonal_norm","defined_in_process_info"};
    if (available_options_for_diagonal.find(r_diagonal_values_for_dirichlet_dofs) == available_options_for_diagonal.end()) {
        std::stringstream msg;
        msg << "Currently prescribed diagonal values for dirichlet dofs : " << r_diagonal_values_for_dirichlet_dofs << "\n";
        msg << "Admissible values for the diagonal scaling are : no_scaling, use_max_diagonal, use_diagonal_norm, or defined_in_process_info" << "\n";
        KRATOS_ERROR << msg.str() << std::endl;
    }
    // The first option will not consider any scaling (the diagonal values will be replaced with 1)
    if (r_diagonal_values_for_dirichlet_dofs == "no_scaling") {
        mScalingDiagonal = SCALING_DIAGONAL::NO_SCALING;
    } else if (r_diagonal_values_for_dirichlet_dofs == "use_max_diagonal") {
        mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_MAX_DIAGONAL;
    } else if (r_diagonal_values_for_dirichlet_dofs == "use_diagonal_norm") { // On this case the norm of the diagonal will be considered
        mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_NORM_DIAGONAL;
    } else { // Otherwise we will assume we impose a numerical value
        mScalingDiagonal = SCALING_DIAGONAL::CONSIDER_PRESCRIBED_DIAGONAL;
    }
    mOptions.Set(SILENT_WARNINGS, ThisParameters["silent_warnings"].GetBool());
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
 * @brief Appends candidate to v only if it is not already contained.
 * @details Replaces the hand-rolled linear scan with std::find, which states
 * the membership test directly (linear complexity is unchanged).
 * @param v The vector acting as a small set
 * @param candidate The value to insert
 */
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    if (std::find(v.begin(), v.end(), candidate) == v.end()) {
        v.push_back(candidate);
    }
}
//******************************************************************************************
//******************************************************************************************
/// Splits number_of_rows into number_of_threads contiguous chunks of equal
/// size; the last chunk absorbs any remainder from the integer division.
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    const int chunk_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    for (unsigned int i = 1; i < number_of_threads; i++)
        partitions[i] = partitions[i - 1] + chunk_size;
    partitions[number_of_threads] = number_of_rows;
}
/// Scans index_vector forward from position start until id_to_find is hit and
/// returns that position. Precondition: the id exists at or after start.
inline unsigned int ForwardFind(const unsigned int id_to_find,
                                const unsigned int start,
                                const size_t* index_vector)
{
    for (unsigned int pos = start; ; ++pos) {
        if (index_vector[pos] == id_to_find)
            return pos;
    }
}
/// Scans index_vector backward from position start until id_to_find is hit and
/// returns that position. Precondition: the id exists at or before start.
inline unsigned int BackwardFind(const unsigned int id_to_find,
                                 const unsigned int start,
                                 const size_t* index_vector)
{
    for (unsigned int pos = start; ; --pos) {
        if (index_vector[pos] == id_to_find)
            return pos;
    }
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBlockBuilderAndSolver */
///@}
///@name Type Definitions
///@{
// Here one should use the KRATOS_CREATE_LOCAL_FLAG, but it does not play nice with template parameters
template<class TSparseSpace, class TDenseSpace, class TLinearSolver>
const Kratos::Flags ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::SILENT_WARNINGS(Kratos::Flags::Create(0));
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
|
streamcluster_cl.h | /***********************************************
streamcluster_cl.h
: parallelized code of streamcluster
- original code from PARSEC Benchmark Suite
- parallelization with OpenCL API has been applied by
Jianbin Fang - j.fang@tudelft.nl
Delft University of Technology
Faculty of Electrical Engineering, Mathematics and Computer Science
Department of Software Technology
Parallel and Distributed Systems Group
on 15/03/2010
***********************************************/
#define THREADS_PER_BLOCK 256
#define MAXBLOCKS 65536
//#define PROFILE_TMP
/* Host-side mirror of a point; kept a plain struct so it can be mapped
 * directly to device memory. */
typedef struct {
    float weight;  /* weight of this point */
    long assign;   /* number of point where this one is assigned */
    float cost;    /* cost of that assignment, weight*distance */
} Point_Struct;
/* host memory analogous to device memory. These memories are allocated in the function,
 * but they are freed in the streamcluster.cpp. We cannot free them in the function as
 * the function is called repeatedly in streamcluster.cpp. */
float *work_mem_h;
float *coord_h;
float *gl_lower;
Point_Struct *p_h;
//std::optional<buffer<float, 1>> work_mem_d;
//std::optional<buffer<int, 1>> center_table_d;
//std::optional<buffer<char, 1>> switch_membership_d;
//std::optional<buffer<Point_Struct,1>> p_d;
//std::optional<buffer<float,1>> coord_d;
static int c; // counters
/* Print the given message on its own line to stdout and terminate the
 * program with exit status 1. */
void quit(char *message){
    puts(message); /* puts appends the trailing newline itself */
    exit(1);
}
/* pgain: computes the gain (cost saving) of opening a new center at point x.
 * The per-point cost table is evaluated on the device (OpenMP target offload;
 * the kernel body is textually included from kernel.h) and the open/close
 * decision logic runs on the host. On the first call (c == 0) the host
 * buffers are allocated and mapped to the device; they are freed later in
 * streamcluster.cpp, not here. The profiling accumulators (*serial,
 * *cpu_gpu_memcpy, *memcpy_back, *gpu_malloc, *kernel_time) are only updated
 * when PROFILE_TMP is defined.
 * Returns -gl_cost: positive when opening x saves cost, 0 otherwise. */
float pgain( long x, Points *points, float z, long int *numcenters,
    int kmax, bool *is_center, int *center_table, char *switch_membership,
    double *serial, double *cpu_gpu_memcpy, double *memcpy_back, double *gpu_malloc, double *kernel_time) {
    float gl_cost = 0;
    try{
#ifdef PROFILE_TMP
        double t1 = gettime();
#endif
        int K = *numcenters ; // number of centers
        int num = points->num; // number of points
        int dim = points->dim; // number of dimension
        kmax++;
        /***** build center index table 1*****/
        int count = 0;
        for( int i=0; i<num; i++){
            if( is_center[i] )
                center_table[i] = count++;
        }
#ifdef PROFILE_TMP
        double t2 = gettime();
        *serial += t2 - t1;
#endif
        /***** initial memory allocation and preparation for transfer : execute once -1 *****/
        if( c == 0 ) {
#ifdef PROFILE_TMP
            double t3 = gettime();
#endif
            coord_h = (float*) malloc( num * dim * sizeof(float)); // coordinates (host)
            gl_lower = (float*) malloc( kmax * sizeof(float) );
            work_mem_h = (float*) malloc ((kmax+1)*num*sizeof(float)); // not kmax*num*sizeof(float)
            p_h = (Point_Struct*)malloc(num*sizeof(Point_Struct)); //by cambine: not compatible with original Point
            // prepare mapping for point coordinates
            //--cambine: point coordinates are used on the device for computing distances
            for(int i=0; i<dim; i++){
                for(int j=0; j<num; j++)
                    coord_h[ (num*i)+j ] = points->p[j].coord[i];
            }
#ifdef PROFILE_TMP
            double t4 = gettime();
            *serial += t4 - t3;
#endif
            // allocate the device-side buffers once for the whole run
            #pragma omp target enter data map(alloc: coord_h[0:dim*num],\
            center_table[0:num],\
            work_mem_h[0:(kmax+1)*num], \
            switch_membership[0:num], \
            p_h[0:num])
#ifdef PROFILE_TMP
            double t5 = gettime();
            *gpu_malloc += t5 - t4;
#endif
            // copy coordinate to device memory
            #pragma omp target update to(coord_h[0:num*dim])
#ifdef PROFILE_TMP
            double t6 = gettime();
            *cpu_gpu_memcpy += t6 - t4;
#endif
        } // first iteration
#ifdef PROFILE_TMP
        double t100 = gettime();
#endif
        // refresh the host-side point table (weight/assign/cost change between calls)
        for(int i=0; i<num; i++){
            p_h[i].weight = ((points->p)[i]).weight;
            p_h[i].assign = ((points->p)[i]).assign;
            p_h[i].cost = ((points->p)[i]).cost;
        }
#ifdef PROFILE_TMP
        double t101 = gettime();
        *serial += t101 - t100;
#endif
#ifdef PROFILE_TMP
        double t7 = gettime();
#endif
        /***** memory transfer from host to device *****/
        #pragma omp target update to(p_h[0:num])
        #pragma omp target update to(center_table[0:num])
#ifdef PROFILE_TMP
        double t8 = gettime();
        *cpu_gpu_memcpy += t8 - t7;
#endif
        /***** kernel execution *****/
        /* Determine the number of thread blocks in the x- and y-dimension */
        const size_t smSize = 256; // WARNING: OpenMP does not support dynamic size
#ifdef PROFILE_TMP
        double t9 = gettime();
#endif
        // zero the device-side work buffers
        #pragma omp target teams distribute parallel for thread_limit(256)
        for (int i = 0; i < num; i++) switch_membership[i] = 0;
        #pragma omp target teams distribute parallel for thread_limit(256)
        for (int i = 0; i < num*(K+1); i++) work_mem_h[i] = 0;
        int work_group_size = THREADS_PER_BLOCK;
        int work_items = num;
        if(work_items%work_group_size != 0) //process situations that work_items cannot be divided by work_group_size
            work_items = work_items + (work_group_size-(work_items%work_group_size));
        // launch the cost kernel; its body is included from kernel.h
        #pragma omp target teams num_teams(work_items/work_group_size) thread_limit(work_group_size)
        {
            float coord_s_acc[smSize];
            #pragma omp parallel
            {
                #include "kernel.h"
            }
        }
#ifdef PROFILE_TMP
        double t10 = gettime();
        *kernel_time += t10 - t9;
#endif
        /***** copy back to host for CPU side work *****/
        #pragma omp target update from(switch_membership[0:num])
        #pragma omp target update from(work_mem_h[0:num*(K+1)])
#ifdef PROFILE_TMP
        double t11 = gettime();
        *memcpy_back += t11 - t10;
#endif
        /****** cpu side work *****/
        int numclose = 0;
        gl_cost = z;
        /* compute the number of centers to close if we are to open i */
        for(int i=0; i < num; i++){
            if( is_center[i] ) {
                // total saving for closing center i: z plus the per-point deltas
                float low = z;
                for( int j = 0; j < num; j++ )
                    low += work_mem_h[ j*(K+1) + center_table[i] ];
                gl_lower[center_table[i]] = low;
                if ( low > 0 ) {
                    numclose++;
                    work_mem_h[i*(K+1)+K] -= low;
                }
            }
            gl_cost += work_mem_h[i*(K+1)+K];
        }
        /* if opening a center at x saves cost (i.e. cost is negative) do so
        otherwise, do nothing */
        if ( gl_cost < 0 ) {
            for(int i=0; i<num; i++){
                bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ;
                // NOTE(review): compares against the character '1' -- confirm that
                // kernel.h stores the char '1' (and not the integer 1) into
                // switch_membership
                if ( (switch_membership[i]=='1') || close_center ) {
                    points->p[i].cost = points->p[i].weight * dist(points->p[i], points->p[x], points->dim);
                    points->p[i].assign = x;
                }
            }
            for(int i=0; i<num; i++){
                if( is_center[i] && gl_lower[center_table[i]] > 0 )
                    is_center[i] = false;
            }
            is_center[x] = true;
            *numcenters = *numcenters +1 - numclose;
        }
        else
            gl_cost = 0; // opening x brings no saving: report zero gain
#ifdef PROFILE_TMP
        double t12 = gettime();
        *serial += t12 - t11;
#endif
        c++;
    }
    catch(string msg){
        printf("--cambine:%s\n", msg.c_str());
        exit(-1);
    }
    catch(...){
        // any other exception is swallowed; gl_cost keeps its last value
        printf("--cambine: unknow reasons in pgain\n");
    }
#ifdef DEBUG
    FILE *fp = fopen("data_opencl.txt", "a");
    fprintf(fp,"%d, %f\n", c, gl_cost);
    fclose(fp);
#endif
    return -gl_cost;
}
|
GB_binop__minus_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int32)
// A*D function (colscale): GB (_AxD__minus_int32)
// D*A function (rowscale): GB (_DxB__minus_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int32)
// C=scalar+B GB (_bind1st__minus_int32)
// C=scalar+B' GB (_bind1st_tran__minus_int32)
// C=A+scalar GB (_bind2nd__minus_int32)
// C=A'+scalar GB (_bind2nd_tran__minus_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x - y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_INT32 || GxB_NO_MINUS_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; no mask, no typecasting.
// The per-entry work (cij += aij - bij for MINUS int32) is generated by the
// included template, which relies on the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// nthreads controls the OpenMP parallelism used inside the template
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; C is overwritten (no accumulate).
// The entry-wise operation cij = aij - bij comes from the GB_BINOP macro
// expanded inside the included template.
void GB (_Cdense_ewise3_noaccum__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: C(i,j) = C(i,j) - B(i,j) for each
// entry of B.  B_ek_slicing partitions B's entries into B_ntasks tasks run on
// B_nthreads threads.  Returns GrB_NO_VALUE if this kernel is disabled.
GrB_Info GB (_Cdense_accumB__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// the template applies the accumulator to C at the pattern of B
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar: every cij becomes cij - b.
// p_bwork points at the scalar, already typecast to int32_t by the caller.
GrB_Info GB (_Cdense_accumb__minus_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable — the block above always returns first (generated code)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: cij = aij - djj, where D is a diagonal matrix.
// C has the same pattern as A; only C->x is written here, via the template.
GrB_Info GB (_AxD__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template fills Cx in place; C's pattern was built by the caller
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: cij = dii - bij, where D is a diagonal matrix.
// Mirrors _AxD above but scales by row instead of by column.
GrB_Info GB (_DxB__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C=A+B, C<M>=A+B, or C<!M>=A+B with op MINUS int32.
// For eWiseAdd, entries present in only one of A/B are copied through; for
// eWiseUnion, the missing operand is replaced by alpha (for A) or beta (for B).
// C_to_M/C_to_A/C_to_B map C's vectors to those of M/A/B; TaskList describes
// the parallel decomposition built by the caller.
GrB_Info GB (_AaddB__minus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declared here is freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
// the alpha/beta scalars are only read in the eWiseUnion case
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse/hyper.  Only entries in the intersection of A and B are computed,
// with cij = aij - bij.  The task decomposition is supplied by the caller.
GrB_Info GB (_AemultB_08__minus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  flipxy requests z=f(y,x) instead of z=f(x,y); since
// GB_BINOP_FLIP is 0 for MINUS (the caller rewrites it as RMINUS instead),
// only the non-flipped branch below is compiled.
GrB_Info GB (_AemultB_02__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full.  The mask M drives the iteration; M_ek_slicing partitions
// M's entries across M_ntasks tasks on M_nthreads threads.
GrB_Info GB (_AemultB_04__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where C is held as a bitmap: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
// The mask (if any) may be sliced via M_ek_slicing; C_nthreads controls the
// parallelism over C's bitmap.
GrB_Info GB (_AemultB_bitmap__minus_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x - Bx [p]: bind the scalar x as the first operand of MINUS.
// Bb is B's bitmap (NULL-tolerant via the GBB macro); entries whose bitmap
// bit is clear are skipped and their Cx slot is left untouched.
GrB_Info GB (_bind1st__minus_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typecast the untyped pointers to the operator's int32_t views
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int32_t x = (*((int32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB is true for entries present in B (always true if Bb is NULL)
        if (GBB (Bb, p))
        {
            int32_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x - bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] - y: bind the scalar y as the second operand of MINUS.
// Ab is A's bitmap (NULL-tolerant via the GBB macro); entries whose bitmap
// bit is clear are skipped and their Cx slot is left untouched.
GrB_Info GB (_bind2nd__minus_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typecast the untyped pointers to the operator's int32_t views
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // GBB is true for entries present in A (always true if Ab is NULL)
        if (GBB (Ab, p))
        {
            int32_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij - y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x - aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x - aij via GB_CAST_OP
// (redefined just above).  GB_ATYPE is temporarily redefined because the
// transpose template treats A as the second operand of z=f(x,y); it is
// restored after the #include so later code sees the original definition.
GrB_Info GB (_bind1st_tran__minus_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - y) ; \
}
// C = op (A', y): transpose A and apply cij = aij - y via GB_CAST_OP
// (redefined just above).  Workspaces/A_slice carry the caller's parallel
// transpose decomposition.
GrB_Info GB (_bind2nd_tran__minus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_task_red_taskloop.c | // RUN: %libomp-compile-and-run
// Parsing error until gcc8:
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// Parsing error until clang11:
// UNSUPPORTED: clang-10, clang-9, clang-8, clang-7
// Missing GOMP_taskgroup_reduction_(un)register in LLVM/OpenMP
// Should be removed once the functions are implemented
// XFAIL: gcc-9, gcc-10
#include <stdio.h>
#include <omp.h>
int r;
// Deterministic per-task contribution: the sum of both task indices plus one,
// so the expected reduction total (res) can be computed by hand.
int work(int k, int l)
{
    int combined = k + l;
    return combined + 1;
}
// Spawns two tasks inside a taskgroup with task_reduction(+:r); each task
// participates in the reduction via in_reduction and adds work(i,0) or
// work(i,1) to the global r.  th_gen records the generating thread for the
// trace output only.
void bar(int i) {
#pragma omp taskgroup task_reduction(+:r)
{ int th_gen = omp_get_thread_num();
#pragma omp task in_reduction(+:r) firstprivate(i, th_gen)
{
r += work(i, 0);
printf("executing task (%d, 0), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen);
}
#pragma omp task in_reduction(+:r) firstprivate(i, th_gen)
{
r += work(i, 1);
printf("executing task (%d, 1), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen);
}
}
}
// Exercises nested reductions on the global r: bar(0) inside a taskgroup
// task_reduction, then bar(1..3) plus an extra in_reduction task (r += i)
// inside a taskloop reduction.  Always returns 0; the final value of r is
// checked against the precomputed res in main.
int foo() {
int i;
int th_gen = omp_get_thread_num();
#pragma omp taskgroup task_reduction(+:r)
{
bar(0);
}
printf("th %d passed bar0\n", th_gen);
#pragma omp taskloop reduction(+:r) firstprivate(th_gen)
for (i = 1; i < 4; ++i) {
bar(i);
printf("th %d (gen by th %d) passed bar%d in taskloop\n", omp_get_thread_num(), th_gen, i);
#pragma omp task in_reduction(+:r)
r += i;
}
return 0;
}
// res = 2*((1+2)+(2+3)+(3+4)+(4+5)+1+2+3) = 60
#define res 60
// Run foo() on a two-thread team; all tasks reduce into the global r,
// which must match the precomputed constant res for the test to pass.
int main()
{
    r = 0;
    #pragma omp parallel num_threads(2)
    foo();
    if (r != res) {
        printf("error r = %d (!= %d)\n", r, res);
        return 1;
    }
    return 0;
}
|
GB_unop__abs_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_uint32_uint32)
// op(A') function: GB (_unop_tran__abs_uint32_uint32)
// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply ABS to every entry of A.  For uint32 the op and
// the cast are both the identity (see GB_OP / GB_CAST above), so this is an
// element-wise copy.  Ab, if non-NULL, is A's bitmap: absent entries are
// skipped (C->b was already copied from A->b by the caller).
GrB_Info GB (_unop_apply__abs_uint32_uint32)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every slot 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // abs of an unsigned value is the value itself
            Cx [p] = Ax [p] ;
        }
    }
    else
    {
        // bitmap case, no transpose; copy only the entries present in A
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = Ax [p] ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while applying the (identity) ABS op,
// using the per-entry GB_CAST_OP defined above.  Workspaces/A_slice carry
// the caller's parallel transpose decomposition.
GrB_Info GB (_unop_tran__abs_uint32_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
remarks_parallel_in_multiple_target_state_machines.c | // RUN: %clang_cc1 -verify=host -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify=all,safe -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out
// RUN: %clang_cc1 -fexperimental-new-pass-manager -verify=all,safe -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out
// host-no-diagnostics
void baz(void) __attribute__((assume("omp_no_openmp")));
// Empty parallel region reached from several target regions (foo1, foo2,
// foo3 below), so the optimizer cannot specialize it to a single kernel.
// The remark comments are clang -verify directives and must stay as-is.
void bar1(void) {
#pragma omp parallel // #0
// all-remark@#0 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
// safe-remark@#0 {{Parallel region is used in unknown ways; will not attempt to rewrite the state machine.}}
// force-remark@#0 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__2_wrapper, kernel ID: <NONE>}}
{
}
}
// Same shape as bar1 but only called from foo2 and foo3; kept separate so the
// optimizer sees two distinct multi-caller parallel regions.
void bar2(void) {
#pragma omp parallel // #1
// all-remark@#1 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
// safe-remark@#1 {{Parallel region is used in unknown ways; will not attempt to rewrite the state machine.}}
// force-remark@#1 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__6_wrapper, kernel ID: <NONE>}}
{
}
}
// Target region with two directly-nested parallel regions (specializable)
// plus a call to bar1 (shared, not specializable) and to baz, which forces
// generic-mode execution.  Remark comments are clang -verify directives.
void foo1(void) {
#pragma omp target teams // #2
// all-remark@#2 {{Generic-mode kernel is executed with a customized state machine [3 known parallel regions] (good).}}
// all-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}}
// all-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__2_wrapper, kernel ID: __omp_offloading}}
{
baz(); // all-remark {{Kernel will be executed in generic-mode due to this potential side-effect, consider to add `__attribute__((assume("ompx_spmd_amenable")))` to the called function '_Z3bazv'.}}
#pragma omp parallel // #3
// all-remark@#3 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
// all-remark@#3 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}}
{
}
bar1();
#pragma omp parallel // #4
// all-remark@#4 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
// all-remark@#4 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__2_wrapper, kernel ID: __omp_offloading}}
{
}
}
}
// Like foo1, but additionally calls bar2 (twice) so the kernel sees four
// known parallel regions; only the two directly-nested ones are specialized.
void foo2(void) {
#pragma omp target teams // #5
// all-remark@#5 {{Generic-mode kernel is executed with a customized state machine [4 known parallel regions] (good).}}
// all-remark@#5 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__5_wrapper, kernel ID: __omp_offloading}}
// all-remark@#5 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__4_wrapper, kernel ID: __omp_offloading}}
{
baz(); // all-remark {{Kernel will be executed in generic-mode due to this potential side-effect, consider to add `__attribute__((assume("ompx_spmd_amenable")))` to the called function '_Z3bazv'.}}
#pragma omp parallel // #6
// all-remark@#6 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
// all-remark@#6 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__4_wrapper, kernel ID: __omp_offloading}}
{
}
bar1();
bar2();
#pragma omp parallel // #7
// all-remark@#7 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
// all-remark@#7 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__5_wrapper, kernel ID: __omp_offloading}}
{
}
bar1();
bar2();
}
}
// Structurally identical to foo2; a second such kernel ensures bar1/bar2 are
// genuinely multi-caller across target regions.
void foo3(void) {
#pragma omp target teams // #8
// all-remark@#8 {{Generic-mode kernel is executed with a customized state machine [4 known parallel regions] (good).}}
// all-remark@#8 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__7_wrapper, kernel ID: __omp_offloading}}
// all-remark@#8 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__8_wrapper, kernel ID: __omp_offloading}}
{
baz(); // all-remark {{Kernel will be executed in generic-mode due to this potential side-effect, consider to add `__attribute__((assume("ompx_spmd_amenable")))` to the called function '_Z3bazv'.}}
#pragma omp parallel // #9
// all-remark@#9 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
// all-remark@#9 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__7_wrapper, kernel ID: __omp_offloading}}
{
}
bar1();
bar2();
#pragma omp parallel // #10
// all-remark@#10 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}}
// all-remark@#10 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__8_wrapper, kernel ID: __omp_offloading}}
{
}
bar1();
bar2();
}
}
// SPMD-amenable constructs (combined / directly-nested) must NOT trigger the
// state-machine remarks checked above; no verify directives are attached.
void spmd(void) {
// Verify we do not emit the remarks above for "SPMD" regions.
#pragma omp target teams
#pragma omp parallel
{
}
#pragma omp target teams distribute parallel for
for (int i = 0; i < 100; ++i) {
}
}
// all-remark@* 5 {{OpenMP runtime call __kmpc_global_thread_num moved to beginning of OpenMP region}}
// all-remark@* 9 {{OpenMP runtime call __kmpc_global_thread_num deduplicated}}
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 *
 * Note: *y is normalized in place as a side effect of the borrow/carry
 * handling (callers here pass throwaway timestamps, so this is fine).
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow from the seconds field when x has fewer microseconds than y. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry into the seconds field when the usec difference exceeds 1s. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver: allocates the two time planes and the 13 coefficient arrays,
 * initializes them with reproducible random data, runs the 25-point
 * stencil TESTS times, and reports the minimum runtime.
 *
 * Fixes vs. the previous version:
 *  - `min(...)` did not exist (the file defines the MIN macro) — compile error.
 *  - Nx/Ny/Nz/Nt were read uninitialized when fewer arguments were given;
 *    they now have defaults.
 *  - Initialization loops started at 1 and only filled plane A[0], so the
 *    stencil read uninitialized ghost cells and an uninitialized A[1] plane
 *    on odd time steps; both planes are now fully initialized from index 0.
 *  - tile_size was leaked; it is freed before returning.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    /* Grid sizes include 8 ghost cells (4 per side) in each dimension.
     * Defaults apply when command-line arguments are omitted. */
    int Nx = 64 + 8, Ny = 64 + 8, Nz = 64 + 8, Nt = 4;
    if (argc > 3) {
        Nx = atoi(argv[1]) + 8;
        Ny = atoi(argv[2]) + 8;
        Nz = atoi(argv[3]) + 8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* allocate the two time planes: A[t%2][z][y][x] */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for (m = 0; m < 2; m++) {
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for (i = 0; i < Nz; i++) {
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for (j = 0; j < Ny; j++) {
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    /* 13 coefficient arrays: center + 4 axis-symmetric rings per axis */
    double ****coef = (double ****) malloc(sizeof(double***)*13);
    for (m = 0; m < 13; m++) {
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for (i = 0; i < Nz; i++) {
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for (j = 0; j < Ny; j++) {
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    /* tile size information, including extra element to decide the list length */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    /* The list is modified here before source-to-source transformations */
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 24;
    tile_size[3] = 256;
    tile_size[4] = -1;

    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;

    /* initialize both time planes and all coefficients with reproducible
     * pseudo-random data; loops start at 0 so every cell the stencil reads
     * (including ghost cells and the A[1] plane) is defined */
    srand(42);
    for (m = 0; m < 2; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    A[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
    #pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
        #pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    /* run the stencil TESTS times; keep the best (minimum) wall time */
    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        /* serial execution - Addition: 6 && Multiplication: 2 */
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] =
                            coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
                            coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
                            coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
                            coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
                            coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
                            coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
                            coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
                            coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
                            coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
                            coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
                            coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
                            coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
                            coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* use the MIN macro defined at the top of this file
         * (lowercase min() is undefined and did not compile) */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
    #pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free allocated arrays */
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    return 0;
}
|
bm2d.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <sys/time.h>
#include "homp.h"
#include "bm2d.h"
/* 2D/3D stencil computation, take a maxwin sized coefficient matrix, and apply stencil computation to a matrix
* The stencil could be cross-based, which only uses neighbors from one dimension stride (A[i-1][j][k], or square-based
* which use neighbors from multiple dimension stride (A[i-2][j-1][k]).
*/
#define DEFAULT_DIMSIZE 256
/* use the macro (SQUARE_STENCIL) from compiler to build two versions of the stencil
* 1. CROSS-based stencil, default, coefficient is an array of 4*maxwin+1, [0] is the center value, and then row and column values
* 2. SQUARE-based stencil, coefficient is a square matrix with one dimension of (2*maxwin+1)
*/
/* Print an n-by-m matrix stored row-major in A, one labelled element per
 * line, preceded by the given title.
 * Fix: i and j are `long`, so the format must use %ld — the previous %d was
 * undefined behavior on LP64 platforms where int and long differ in size. */
void print_array(char * title, char * name, REAL * A, long n, long m) {
    printf("%s:\n", title);
    long i, j;
    for (i = 0; i < n; i++) {
        for (j = 0; j < m; j++) {
            printf("%s[%ld][%ld]:%f\n", name, i, j, A[i * m + j]);
        }
        printf("\n");
    }
    printf("\n");
}
/* Fill A[0..N-1] with pseudo-random values uniformly drawn from
 * [lower, upper], using the process-wide rand() stream. */
void init_array(long N, REAL *A, REAL lower, REAL upper) {
    long idx;
    for (idx = 0; idx < N; idx++) {
        REAL frac = (REAL)rand() / (REAL)RAND_MAX;
        A[idx] = (REAL)(lower + frac * (upper - lower));
    }
}
// Accumulate the absolute difference between two (dimx+2*maxwin) x
// (dimy+2*maxwin) padded grids, counting only interior cells (the maxwin-wide
// halo is walked but excluded from the sum).  The per-cell relative-error
// check against `tolerance` is currently diagnostic only (its report is
// commented out).  Returns the accumulated absolute difference.
REAL check_accdiff(const REAL *output, const REAL *reference, const long dimx, const long dimy, const int maxwin, REAL tolerance){
int ix, iy;
REAL acc_diff = 0.0;
int count = 0;  // only used by the commented-out error report below
for (ix = -maxwin ; ix < dimx + maxwin ; ix++) {
for (iy = -maxwin ; iy < dimy + maxwin ; iy++) {
if (ix >= 0 && ix < dimx && iy >= 0 && iy < dimy) {
// Determine the absolute difference
REAL difference = fabs(*reference - *output);
acc_diff += difference;
REAL error;
// Determine the relative error
if (*reference != 0)
error = difference / *reference;
else
error = difference;
// Check the error is within the tolerance
//printf("Data at point (%d,%d)\t%f instead of %f\n", ix, iy, *output, *reference);
if (error > tolerance) {
// if (count++<16) printf("Data error at point (%d,%d)\t%f instead of %f\n", ix, iy, *output, *reference);
}
}
// both pointers advance over every padded cell, halo included
++output;
++reference;
}
}
return acc_diff;
}
void bm2d_seq(long n, long m, REAL *u, int maxwin, REAL *filter, int num_its);
void bm2d_omp(long n, long m, REAL *u, int maxwin, REAL *coeff, int num_its);
int main(int argc, char * argv[]) {
long n = DEFAULT_DIMSIZE;
long m = DEFAULT_DIMSIZE;
int maxwin = 10;
int num_its = 100;
fprintf(stderr,"Usage: jacobi [<n> <m> <maxwin> <num_its>]\n");
fprintf(stderr, "\tn - grid dimension in x direction, default: %d\n", n);
fprintf(stderr, "\tm - grid dimension in y direction, default: n if provided or %d\n", m);
fprintf(stderr, "\tmaxwin - Max search window size in radius, default: %d\n", maxwin);
fprintf(stderr, "\tnum_its - # iterations for iterative solver, default: %d\n", num_its);
if (argc == 2) { sscanf(argv[1], "%d", &n); m = n; }
else if (argc == 3) { sscanf(argv[1], "%d", &n); sscanf(argv[2], "%d", &m); }
else if (argc == 4) { sscanf(argv[1], "%d", &n); sscanf(argv[2], "%d", &m); sscanf(argv[3], "%d", &maxwin); }
else if (argc == 5) { sscanf(argv[1], "%d", &n); sscanf(argv[2], "%d", &m); sscanf(argv[3], "%d", &maxwin); sscanf(argv[4], "%d", &num_its); }
else {
/* the rest of arg ignored */
}
if (num_its%2==0) num_its++; /* make it odd so uold/u exchange easier */
long u_dimX = n+maxwin+maxwin;
long u_dimY = m+maxwin+maxwin;
long u_volumn = u_dimX*u_dimY;
int coeff_volumn;
coeff_volumn = (2*maxwin+1)*(2*maxwin+1); /* this is for square. Even the cross stencil that use only 4*maxwin +1, we will use the same square coeff simply */
// coeff_volumn = 4*maxwin+1;
REAL * u = (REAL *)malloc(sizeof(REAL)* u_volumn);
REAL * u_omp = (REAL *)malloc(sizeof(REAL)* u_volumn);
REAL * u_omp_mdev = (REAL *)omp_unified_malloc(sizeof(REAL)* u_volumn);
REAL * u_omp_mdev_iterate = (REAL *)omp_unified_malloc(sizeof(REAL)* u_volumn);
REAL *coeff = (REAL *) omp_unified_malloc(sizeof(REAL)*coeff_volumn);
srand(0);
init_array(u_volumn, u, 0.0, 1.0);
init_array(coeff_volumn, coeff, 0.0, 1.0);
memcpy(u_omp, u, sizeof(REAL)*u_volumn);
memcpy(u_omp_mdev, u, sizeof(REAL)*u_volumn);
memcpy(u_omp_mdev_iterate, u, sizeof(REAL)*u_volumn);
//print_array("coeff", "coeff", coeff, 2*maxwin+1, 2*maxwin+1);
//print_array("original", "u", u, u_dimX, u_dimY);
printf("serial execution\n");
REAL base_elapsed = read_timer_ms();
// bm2d_seq(n, m, u, maxwin, coeff, num_its);
base_elapsed = read_timer_ms() - base_elapsed;
//print_array("after stencil", "us", u, u_dimX, u_dimY);
printf("OMP execution\n");
REAL omp_elapsed = read_timer_ms();
int i;
int num_runs = 1;
// for (i=0;i<num_runs;i++) bm2d_omp(n, m, u_omp, maxwin, coeff, num_its);
omp_elapsed = (read_timer_ms() - omp_elapsed)/num_runs;
omp_init_devices();
// printf("OMP mdev execution\n");
printf("OMP mdev iterate execution\n");
REAL mdev_elapsed = 0.0;
int num_active_devs = omp_get_num_active_devices();
int targets[num_active_devs];
int num_targets = 1;
double (*bm2d_omp_mdev_function)(int ndevs, int *targets, long n, long m, REAL *u, int maxwin, REAL *coeff,
int num_its);
if (LOOP_DIST_POLICY == OMP_DIST_POLICY_BLOCK || LOOP_DIST_POLICY == OMP_DIST_POLICY_MODEL_1_AUTO || LOOP_DIST_POLICY == OMP_DIST_POLICY_MODEL_2_AUTO) {
bm2d_omp_mdev_function = bm2d_omp_mdev_iterate;
} else {
bm2d_omp_mdev_function = bm2d_omp_mdev;
}
#if 0
/* one HOSTCPU */
num_targets = omp_get_devices(OMP_DEVICE_HOSTCPU, targets, 1);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* one NVGPU */
num_targets = omp_get_devices(OMP_DEVICE_NVGPU, targets, 1);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* two NVGPU */
num_targets = omp_get_devices(OMP_DEVICE_NVGPU, targets, 2);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* three NVGPU */
num_targets = omp_get_devices(OMP_DEVICE_NVGPU, targets, 3);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* four NVGPU */
num_targets = omp_get_devices(OMP_DEVICE_NVGPU, targets, 4);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* one ITLMIC */
num_targets = omp_get_devices(OMP_DEVICE_ITLMIC, targets, 1);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* two ITLMIC */
num_targets = omp_get_devices(OMP_DEVICE_ITLMIC, targets, 2);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* one HOSTCPU and one NVGPU */
num_targets = omp_get_devices(OMP_DEVICE_HOSTCPU, targets, 1);
num_targets += omp_get_devices(OMP_DEVICE_NVGPU, targets+num_targets, 1);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* one HOSTCPU and one ITLMIC */
num_targets = omp_get_devices(OMP_DEVICE_HOSTCPU, targets, 1);
num_targets += omp_get_devices(OMP_DEVICE_ITLMIC, targets+num_targets, 1);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* one NVGPU and one ITLMIC */
num_targets = omp_get_devices(OMP_DEVICE_NVGPU, targets, 1);
num_targets += omp_get_devices(OMP_DEVICE_ITLMIC, targets+num_targets, 1);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* one HOSTCPU and two NVGPU */
num_targets = omp_get_devices(OMP_DEVICE_HOSTCPU, targets, 1);
num_targets += omp_get_devices(OMP_DEVICE_NVGPU, targets+num_targets, 2);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* one HOSTCPU and two ITLMIC */
num_targets = omp_get_devices(OMP_DEVICE_HOSTCPU, targets, 1);
num_targets += omp_get_devices(OMP_DEVICE_ITLMIC, targets+num_targets, 2);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* two NVGPU and two ITLMIC */
num_targets = omp_get_devices(OMP_DEVICE_NVGPU, targets, 2);
num_targets += omp_get_devices(OMP_DEVICE_ITLMIC, targets+num_targets, 2);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* four NVGPU and two ITLMIC */
num_targets = omp_get_devices(OMP_DEVICE_NVGPU, targets, 4);
num_targets += omp_get_devices(OMP_DEVICE_ITLMIC, targets+num_targets, 2);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* one CPU, two NVGPU and two ITLMIC */
num_targets = omp_get_devices(OMP_DEVICE_HOSTCPU, targets, 1);
num_targets += omp_get_devices(OMP_DEVICE_NVGPU, targets+num_targets, 2);
num_targets += omp_get_devices(OMP_DEVICE_ITLMIC, targets+num_targets, 2);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
/* one CPU, four NVGPU and two ITLMIC */
num_targets = omp_get_devices(OMP_DEVICE_HOSTCPU, targets, 1);
num_targets += omp_get_devices(OMP_DEVICE_NVGPU, targets+num_targets, 4);
num_targets += omp_get_devices(OMP_DEVICE_ITLMIC, targets+num_targets, 2);
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
#endif
/* run on all devices */
num_targets = num_active_devs;
for (i=0;i<num_active_devs;i++) targets[i] = i;
mdev_elapsed = bm2d_omp_mdev_function(num_targets, targets, n, m, u_omp_mdev_iterate, maxwin, coeff, num_its);
long flops = n*m*maxwin;
#ifdef SQUARE_SETNCIL
flops *= 8;
#else
flops *= 16;
#endif
printf("======================================================================================================\n");
printf("\tStencil 2D: %dx%d, stencil maxwin: %d, #iteratins: %d\n", n, m, maxwin, num_its);
printf("------------------------------------------------------------------------------------------------------\n");
printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
printf("------------------------------------------------------------------------------------------------------\n");
printf("base:\t\t%4f\t%4f \t\t%g\n", base_elapsed, flops / (1.0e-3 * base_elapsed), 0.0); //check_accdiff(u, u, u_dimX, u_dimY, maxwin, 1.0e-7));
printf("omp: \t\t%4f\t%4f \t\t%g\n", omp_elapsed, flops / (1.0e-3 * omp_elapsed), check_accdiff(u, u_omp, n, m, maxwin, 0.00001f));
// printf("omp_mdev: \t%4f\t%4f \t\t%g\n", mdev_elapsed, flops / (1.0e-3 * mdev_elapsed), check_accdiff(u, u_omp_mdev, n, m, maxwin, 0.00001f));
printf("omp_mdev_it: \t%4f\t%4f \t\t%g\n", mdev_elapsed, flops / (1.0e-3 * mdev_elapsed), check_accdiff(u, u_omp_mdev_iterate, n, m, maxwin, 0.00001f));
free(u);
free(u_omp);
omp_unified_free(u_omp_mdev);
omp_unified_free(u_omp_mdev_iterate);
omp_unified_free(coeff);
omp_fini_devices();
return 0;
}
/*
 * Sequential reference implementation: for each interior point, pick a random
 * per-point search radius in [0, maxwin-1] and apply a cross (or, with
 * SQUARE_SETNCIL, square) stencil of that radius, ping-ponging between u and
 * a heap copy each iteration.
 *
 * NOTE(review): the file's forward declaration is bm2d_seq() while this
 * definition is bm2d_seq_normal() -- confirm which name callers expect.
 */
void bm2d_seq_normal(long n, long m, REAL *u, int maxwin, REAL *coeff, int num_its) {
    long it; /* iteration */
    long u_dimX = n + 2 * maxwin; /* grid plus halo on both sides */
    long u_dimY = m + 2 * maxwin;
    int coeff_dimX = 2 * maxwin + 1;

    REAL *uold = (REAL*)malloc(sizeof(REAL) * u_dimX * u_dimY);
    memcpy(uold, u, sizeof(REAL) * u_dimX * u_dimY);
    coeff = coeff + coeff_dimX * maxwin + maxwin; /* let coeff point to the center element */
    /* Remember the heap buffer: u/uold swap each iteration, and because
     * num_its is forced odd by the caller the final data lands back in the
     * caller's buffer, so this is the one to free.  (The unused u_save local
     * was removed.) */
    REAL *uold_save = uold;

    for (it = 0; it < num_its; it++) {
        int ix, iy, ir;
        for (ix = 0; ix < n; ix++) {
            for (iy = 0; iy < m; iy++) {
                /* drand48() returns a value in [0.0, 1.0), so radius is in
                 * [0, maxwin-1] and the guard below can never fire; it is kept
                 * as a defensive no-op. */
                int radius = drand48() * maxwin;
                if (radius < 0) continue;
                int count = 4 * radius + 1; /* cross stencil point count */
#ifdef SQUARE_SETNCIL
                count = coeff_dimX * coeff_dimX;
#endif
                /* NOTE(review): the halo offset uses radius rather than
                 * maxwin -- verify this shifting center is intended. */
                int offset = (ix + radius) * u_dimY + radius + iy;
                REAL *temp_u = &u[offset];
                REAL *temp_uold = &uold[offset];
                REAL result = temp_uold[0] * coeff[0];
                /* 2/4 way loop unrolling */
                for (ir = 1; ir <= radius; ir++) {
                    result += coeff[ir] * temp_uold[ir];                         // horizontal right
                    result += coeff[-ir] * temp_uold[-ir];                       // horizontal left
                    result += coeff[-ir * coeff_dimX] * temp_uold[-ir * u_dimY]; // vertical up
                    result += coeff[ir * coeff_dimX] * temp_uold[ir * u_dimY];   // vertical bottom
#ifdef SQUARE_SETNCIL
                    /* Fixed: these four corner terms previously had stray ']'
                     * and missing ';' and failed to compile when
                     * SQUARE_SETNCIL was defined. */
                    result += coeff[-ir * coeff_dimX - ir] * temp_uold[-ir * u_dimY - ir]; // left upper corner
                    result += coeff[-ir * coeff_dimX + ir] * temp_uold[-ir * u_dimY + ir]; // right upper corner
                    result += coeff[ir * coeff_dimX - ir] * temp_uold[ir * u_dimY - ir];   // left bottom corner
                    result += coeff[ir * coeff_dimX + ir] * temp_uold[ir * u_dimY + ir];   // right bottom corner
#endif
                }
                *temp_u = result / count;
            }
        }
        /* ping-pong buffers for the next iteration */
        REAL *tmp = uold;
        uold = u;
        u = tmp;
        // if (it % 500 == 0)
        // printf("Finished %d iteration\n", it);
    } /* End iteration loop */
    free(uold_save);
}
#if 0
/* Disabled multi-device variant of the stencil.  It relies on non-standard
 * OpenMP extensions (device(*), dist_data, halo, halo_exchange) from a
 * research compiler; kept under #if 0 for reference only.  Unlike the
 * sequential version it uses a fixed radius of maxwin for every point. */
void bm2d_omp_mdev(long n, long m, REAL *u, int maxwin, REAL *coeff, int num_its) {
long it; /* iteration */
long u_dimX = n + 2 * maxwin;
long u_dimY = m + 2 * maxwin;
int coeff_dimX = 2 * maxwin + 1;
coeff = coeff + (2 * maxwin + 1) * maxwin + maxwin; /* let coeff point to the center element */
int count = 4*maxwin+1;
#ifdef SQUARE_SETNCIL
count = coeff_dimX * coeff_dimX;
#endif
/* uold should be simpliy allocated on the dev and then copy data from u, here we simplified the initialization */
REAL *uold = (REAL *) malloc(sizeof(REAL) * u_dimX * u_dimY);
memcpy(uold, u, sizeof(REAL)*u_dimX * u_dimY);
/* Distribute u and uold block-wise over the devices with a maxwin-deep halo;
 * coefficients are replicated. */
#pragma omp target data device(*) map(to:n, m, u_dimX, u_dimY, maxwin, coeff_center, coeff[coeff_dimX][coeff_dimX]) \
map(tofrom:u[u_dimX][u_dimY] dist_data(BLOCK,DUPLICATE) halo(maxwin,)) map(to:uold[u_dimX][u_dimY] dist_data(BLOCK,DUPLICATE) halo(maxwin,))
#pragma omp parallel shared(n, m, maxwin, coeff, num_its, u_dimX, u_dimY, coeff_dimX) private(it) firstprivate(u, uold) //num_threads(/* num of devices + number of cores */)
{
int ix, iy, ir;
/*
#pragma omp target device(*) dist_iteration(BLOCK)
#pragma omp for
for (ix = 0; ix < u_dimX; ix++) {
for (iy = 0; iy < u_dimY; iy++) {
uold[ix * u_dimY + iy] = u[ix * u_dimY + iy];
}
}
*/
for (it = 0; it < num_its; it++) {
#pragma omp target device(*) dist_iteration(BLOCK)
#pragma omp for
for (ix = 0; ix < n; ix++) {
REAL *temp_u = &u[(ix + maxwin) * u_dimY+maxwin];
REAL *temp_uold = &uold[(ix + maxwin) * u_dimY+maxwin];
for (iy = 0; iy < m; iy++) {
REAL result = temp_uold[0] * coeff[0];
/* 2/4 way loop unrolling */
for (ir = 1; ir <= maxwin; ir++) {
result += coeff[ir] * temp_uold[ir]; //horizontal right
result += coeff[-ir] * temp_uold[-ir]; // horizontal left
result += coeff[-ir * coeff_dimX] * temp_uold[-ir * u_dimY]; //vertical up
result += coeff[ir * coeff_dimX] * temp_uold[ir * u_dimY]; // vertical bottom
/* NOTE(review): the four corner terms below have stray ']' and missing ';'
 * -- they would not compile if both #if 0 were removed and SQUARE_SETNCIL
 * defined. */
#ifdef SQUARE_SETNCIL
result += coeff[-ir*coeff_dimX-ir] * temp_uold[-ir * u_dimY-ir] // left upper corner
result += coeff[-ir*coeff_dimX+ir] * temp_uold[-ir * u_dimY+ir] // right upper corner
result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimY]-ir] // left bottom corner
result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimY]+ir] // right bottom corner
#endif
}
*temp_u = result/count;
temp_u++;
temp_uold++;
}
}
/* refresh halo rows from the neighboring devices, then swap buffers */
#pragma omp halo_exchange(u);
REAL *tmp = uold;
uold = u;
u = tmp;
// if (it % 500 == 0)
// printf("Finished %d iteration by thread %d of %d\n", it, omp_get_thread_num(), omp_get_num_threads());
} /* End iteration loop */
}
free(uold);
}
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT.  Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is modified in place while normalizing, exactly as in the
 * classic GNU libc example this follows. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize *y so the final per-field subtraction yields a
     * non-negative tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        long carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += carry;
        y->tv_usec -= carry * 1000000;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        long carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += carry * 1000000;
    }

    /* tv_usec is certainly positive now. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative difference shows up purely in the seconds field. */
    return x->tv_sec < y->tv_sec ? 1 : 0;
}
/*
 * Order-2, 3D 25-point stencil benchmark driver: allocate the two time
 * planes and velocity field, run TESTS timed sweeps of the leapfrog update,
 * and report the best time via PRINT_RESULTS.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Defaults (interior + 2*4-point halo per axis).  The originals were read
     * uninitialized when fewer than 3 (resp. 4) arguments were given. */
    int Nx = 64 + 8, Ny = 64 + 8, Nz = 64 + 8, Nt = 40;
    if (argc > 3) {
        Nx = atoi(argv[1]) + 8;
        Ny = atoi(argv[2]) + 8;
        Nz = atoi(argv[3]) + 8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Allocate the two time planes A[0]/A[1] and the velocity field roc2.
     * The original malloc'ed roc2 with a bogus size and immediately
     * overwrote the pointer (a leak); allocate it once with the right size. */
    double ****A = (double ****) malloc(sizeof(double***) * 2);
    double ***roc2 = (double ***) malloc(sizeof(double**) * Nz);
    A[0] = (double ***) malloc(sizeof(double**) * Nz);
    A[1] = (double ***) malloc(sizeof(double**) * Nz);
    for (i = 0; i < Nz; i++) {
        A[0][i] = (double**) malloc(sizeof(double*) * Ny);
        A[1][i] = (double**) malloc(sizeof(double*) * Ny);
        roc2[i] = (double**) malloc(sizeof(double*) * Ny);
        for (j = 0; j < Ny; j++) {
            A[0][i][j] = (double*) malloc(sizeof(double) * Nx);
            A[1][i][j] = (double*) malloc(sizeof(double) * Nx);
            roc2[i][j] = (double*) malloc(sizeof(double) * Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 4;
    tile_size[3] = 512;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;
    // Initialize the full arrays.  The original loops started at index 1 and
    // never touched A[1], so the stencil below read uninitialized memory at
    // the boundaries and on the first time step; A[1] now starts at 0.
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    /* 8th-order central-difference coefficients */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        /* leapfrog: the (t+1) plane doubles as the (t-1) plane */
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (now including the top-level A and tile_size,
    // which previously leaked)
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    free(A);
    free(tile_size);
    return 0;
}
}
|
vdi_fmt_plug.c | /* VirtualBox (VDI) volume support to John The Ripper
*
* Written by JimF <jfoug at openwall.net> in 2015. No copyright
* is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the
* public domain is deemed null and void, then the software is
* Copyright (c) 2015 JimF and it is hereby released to the
* general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*
* information about this algorithm taken from:
* http://www.sinfocol.org/archivos/2015/07/VBOXDIECracker.phps
*/
#include "arch.h"
#if FMT_EXTERNS_H
extern struct fmt_main fmt_vdi;
#elif FMT_REGISTERS_H
john_register_one(&fmt_vdi);
#else
#include "xts.h"
#include <string.h>
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "crc32.h"
#include "johnswap.h"
#include "base64_convert.h"
#include "pbkdf2_hmac_sha256.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 16
#else
#define OMP_SCALE 4
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "memdbg.h"
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct vdi_salt)
#define SALT_ALIGN 4
#define BINARY_SIZE 32
#define BINARY_ALIGN 4
#define MAX_SALT_LEN 32
#define MAX_KEY_LEN 64
#define FORMAT_LABEL "vdi"
#define FORMAT_NAME "VirtualBox-VDI AES_XTS"
#define FORMAT_TAG "$vdi$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME " + AES_XTS"
#if SSE_GROUP_SZ_SHA256
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static unsigned char (*key_buffer)[PLAINTEXT_LENGTH + 1];
static unsigned char (*crypt_out)[MAX_SALT_LEN];
/* Self-test vectors: "$vdi$<cipher>$<kdf>$<it1>$<it2>$<keylen>$<saltlen>$
 * <salt1>$<salt2>$<encr_key>$<final>" followed by the known password. */
static struct fmt_tests tests[] = {
	// The 'jtr' test hashed were made with VirtualBox. The others were made with pass_gen.pl
	{"$vdi$aes-xts256$sha256$2000$2000$64$32$709f6df123f1ccb126ea1f3e565beb78d39cafdc98e0daa2e42cc43cef11f786$0340f137136ad54f59f4b24ef0bf35240e140dfd56bbc19ce70aee6575f0aabf$0a27e178f47a0b05a752d6e917b89ef4205c6ae76705c34858390f8afa6cf03a45d98fab53b76d8d1c68507e7810633db4b83501a2496b7e443eccb53dbc8473$7ac5f4ad6286406e84af31fd36881cf558d375ae29085b08e6f65ebfd15376ca", "jtr"},
	{"$vdi$aes-xts256$sha256$2000$2000$64$32$d72ee0aecd496b084117bb8d87f5e37de71973518a2ef992c895907a09b73b83$afb33e56a7f81b1e3db70f599b62ecf3d223405abb63bcf569bb29acab9c81a6$3b3769fd3cfaf8e11f67fdc9d54aed8c8962a769f3f66cb2b9cb8700c01a66e6b1c996fdee9727188c765bde224047b8ced7a9b5f5381e7ad7271a9cbf049fde$1c5bca64cbedd76802eddc3e6ffd834e8c1f1ff1157de6ae6feb3740051e2cfa", "password"},
	{"$vdi$aes-xts256$sha256$2000$2000$64$32$a4e4480927153ecbb7509afb8d49468e62c8bb22aaab458f4115bff63364de41$c69605220d1ed03618f0236a88e225db1ec69e7a95dbe63ee00778cc8b91424e$0a1de9c85452fafd19ceb0821a115c7fec6fab4ef51fc57fabc25bf973417684a78683267513923f88231a6efd2442ce9279f2a5614d4cfcb930b5ef413f34c3$d79ea5522ad79fc409bbcd1e8a2bb75e16a53e1eef940b4fe954cee1c2491fd2", "ripper"},
	{"$vdi$aes-xts256$sha256$2000$2000$64$32$450ce441592003821931e73ea314dcd0effff1b74b61a8fc4046573d0f448057$18c48e3d7677bc9471607cec83d036b963f23f7bb16f09ea438395b61dcf14d5$c4893bce14fa3a1f915004b9ec0fd6a7215ddebdd2ca4bc2b4ec164253c2f2319685a8f8245ec8e2d9e7a53c6aec5fd2d4ca7ba510ffc7456a72285d40ce7d35$793e58317b9bf6318d1b4cef1e05f5a8579a50fb7efde884ea68b096b7043aad", "john"},
	{"$vdi$aes-xts256$sha256$2000$2000$64$32$472476df7d16f80d612d4c9ff35678a2011605dc98b76b6d78632859c259d5d0$aa89f9bea1139da6ace97e13c823d713030fda0c8c55ad2fcea358746cc0b4cc$507aaf7c9e00b492042072a17b3975fc88e30e1d5927e63cb335c630b7b873e4c9af2df63c95b42896e15bb33c37c9f572a65f97441b3707ce5d81c521dfd30e$111004a8d9167b55ff5db510cc136f2bceacf4a9f50807742e2bbc110847174e", "really long password with ! stuff!!! ;)"},
	// some aes-128 samples They run exactly same speed as the AES-256 hashes.
	{"$vdi$aes-xts128$sha256$2000$2000$32$32$d3fd2bb27734f25918ac726717b192091253441c4bc71a814d0a6483e73325ea$ef560858b4c068bd8d994cdf038f51cb1b9f59335d72cb874e79a13c5b6aa84a$79ff000f7638d39b0d02ad08dfcede8740087e334e98022465a380bdf78fff13$302f4c4f58c0dee9676dfdaf3ada9e3d7ec4b5bfc7e6565c941f4ec7337368d4", "jtr"},
	{"$vdi$aes-xts128$sha256$2000$2000$32$32$16894e7496bac97bc467faa3efe5a3ba009e1591990c9422e4352bfb39ead4d6$00780af3703680b63239b61d0395e9ff673ee843d7a77d61541e0fdc096c49d1$72434a81a27bb1cd85be529600c3620e4eeed45d12f8ef337cc51c040306be7d$4a5b2129577289a8a0f6a93d7a578cd248d158bc70d6ab89f5ccf31704812e85", "blowhard"},
	{"$vdi$aes-xts128$sha256$2000$2000$32$32$4e9d103c944479a4e2b2e33d4757e11fc1a7263ba3b2e99d9ad4bc9aeb7f9337$ade43b6eb1d878f0a5532070fb81697a8164ff7b9798e35649df465068ae7e81$f1e443252c872e305eda848d05676a20af8df405262984b39baf0f0aa1b48247$2601e9e08d19ca20745a6a33f74259bdca06014455370b0bb6b79eb0c5e60581", "foobar"},
	{NULL}
};
/* Decoded salt/parameter record for one $vdi$ hash; filled by get_salt()
 * and consumed by crypt_all() via the file-global psalt. */
static struct vdi_salt {
	unsigned char salt1[MAX_SALT_LEN]; // salt for the first PBKDF2 pass (over the password)
	unsigned char salt2[MAX_SALT_LEN]; // salt for the second PBKDF2 pass (over the decrypted key)
	unsigned char encr[MAX_KEY_LEN];   // AES-XTS-encrypted key material from the volume header
	int crypt_type; // 1, 256, 384, 512 for the pbkdf2 algo (currently ONLY 256 implemented, so that is all we handle right now).
	int evp_type;   // 128 or 256 for AES-128XTS or AES-256XTS
	int rounds1;    // iteration count of the first PBKDF2 pass
	int rounds2;    // iteration count of the second PBKDF2 pass
	int keylen;     // length of encr in bytes
	int saltlen;    // length of salt1/salt2 in bytes
} *psalt; // currently active salt, set by set_salt()
/* Format init: scale the key batch to the OpenMP thread count and allocate
 * the per-candidate plaintext and result buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();
	/* min grows by the raw thread count, max by threads * OMP_SCALE */
	self->params.min_keys_per_crypt *= threads;
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	key_buffer = mem_calloc(self->params.max_keys_per_crypt, sizeof(*key_buffer));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
}
/* Format teardown: release the buffers allocated in init(), in reverse
 * order of allocation. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(key_buffer);
}
/*
 * Validate one $vdi$ ciphertext line from an untrusted input file.
 * Tokens, in order: cipher name, PBKDF2 hash name, two iteration counts,
 * key length, salt length, salt1, salt2, encrypted key, final result.
 * All hex fields must be lowercase and match the declared lengths, and no
 * trailing token may follow.  Returns 1 if well formed, 0 otherwise.
 * Works on a strdup'ed copy because strtokm() modifies its buffer.
 */
static int valid(char* ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr;
int keylen;
int saltlen;
char *p;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
ctcopy = strdup(ciphertext + FORMAT_TAG_LEN);
keeptr = ctcopy; /* remember the allocation; strtokm advances ctcopy */
if ((p = strtokm(ctcopy, "$")) == NULL) /* decr type*/
goto err;
if (strcmp(p, "aes-xts256") && strcmp(p, "aes-xts128"))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* pbkdf2 algo */
goto err;
//if (strcmp(p, "sha1") && strcmp(p, "sha256") && strcmp(p, "sha384") && strcmp(p, "sha512"))
if (strcmp(p, "sha256"))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* pbkdf2-1 iterations */
goto err;
if (!isdec(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* pbkdf2-2 iterations */
goto err;
if (!isdec(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* key length */
goto err;
if (!isdec(p))
goto err;
keylen = atoi(p);
if (keylen > MAX_KEY_LEN)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt length */
goto err;
if (!isdec(p))
goto err;
saltlen = atoi(p);
if (saltlen > MAX_SALT_LEN)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt1 */
goto err;
if (strlen(p) != saltlen * 2)
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt2 */
goto err;
if (strlen(p) != saltlen * 2)
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* encr_key */
goto err;
if (strlen(p) != keylen * 2)
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* final_result */
goto err;
if (strlen(p) != saltlen * 2)
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "$")) != NULL) /* reject trailing garbage */
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
/* Remember the salt chosen by the cracker loop; crypt_all() reads psalt. */
static void set_salt(void *salt)
{
psalt = salt;
}
/*
 * Parse a (already valid()-checked) ciphertext into a vdi_salt record.
 * Token order must match valid() exactly.  Hex fields are decoded with
 * base64_convert() in hex->raw mode.
 * NOTE(review): returns a pointer into a static buffer, so this is not
 * reentrant -- John is expected to copy SALT_SIZE bytes out; confirm.
 */
static void* get_salt(char *ciphertext)
{
static char buf[sizeof(struct vdi_salt)+4];
struct vdi_salt *s = (struct vdi_salt *)mem_align(buf, 4); /* honor SALT_ALIGN */
char *ctcopy, *keeptr;
char *p;
memset(buf, 0, sizeof(buf));
ctcopy = strdup(ciphertext + FORMAT_TAG_LEN);
keeptr = ctcopy;
p = strtokm(ctcopy, "$"); /* decr type*/
s->evp_type = !strcmp(p, "aes-xts128") ? 128 : 256;
p = strtokm(NULL, "$"); /* pbkdf2 algo */
s->crypt_type = 256; /* right now, we ONLY handle pbkdf2-sha256 */
p = strtokm(NULL, "$"); /* pbkdf2-1 iterations */
s->rounds1 = atoi(p);
p = strtokm(NULL, "$"); /* pbkdf2-2 iterations */
s->rounds2 = atoi(p);
p = strtokm(NULL, "$"); /* key length */
s->keylen = atoi(p);
p = strtokm(NULL, "$"); /* salt length */
s->saltlen = atoi(p);
p = strtokm(NULL, "$"); /* salt1 */
base64_convert(p, e_b64_hex, s->saltlen*2, s->salt1, e_b64_raw, s->saltlen, 0, 0);
p = strtokm(NULL, "$"); /* salt2 */
base64_convert(p, e_b64_hex, s->saltlen*2, s->salt2, e_b64_raw, s->saltlen, 0, 0);
p = strtokm(NULL, "$"); /* encr_key */
base64_convert(p, e_b64_hex, s->keylen*2, s->encr, e_b64_raw, s->keylen, 0, 0);
MEM_FREE(keeptr);
return s;
}
/*
 * Core hashing loop for a batch of candidates.  Per candidate:
 *   1. key = PBKDF2-SHA256(password, salt1, rounds1)
 *   2. Decr = AES-XTS-decrypt(encr key material) with that key
 *   3. crypt_out = PBKDF2-SHA256(Decr, salt2, rounds2)
 * Step 3's output is later compared against the stored "final result".
 * With SIMD (SSE_GROUP_SZ_SHA256) the PBKDF2 passes are computed for
 * SSE_GROUP_SZ_SHA256 candidates at a time; otherwise one at a time.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int i;
int inc=1;
const int count = *pcount;
#if SSE_GROUP_SZ_SHA256
inc = SSE_GROUP_SZ_SHA256; /* step by one SIMD group per iteration */
#endif
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < count; i += inc)
{
unsigned char key[MAX_KEY_LEN];
#if SSE_GROUP_SZ_SHA256
unsigned char Keys[SSE_GROUP_SZ_SHA256][MAX_KEY_LEN];
unsigned char Decr[SSE_GROUP_SZ_SHA256][MAX_KEY_LEN];
#else
unsigned char Decr[1][MAX_KEY_LEN];
int ksz = strlen((char *)key_buffer[i]);
#endif
int j;
#if SSE_GROUP_SZ_SHA256
int lens[SSE_GROUP_SZ_SHA256];
unsigned char *pin[SSE_GROUP_SZ_SHA256];
union {
unsigned char *pout[SSE_GROUP_SZ_SHA256];
unsigned char *poutc;
} x;
/* gather the SIMD group's passwords and output slots */
for (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {
lens[j] = strlen((char*)(key_buffer[i+j]));
pin[j] = key_buffer[i+j];
x.pout[j] = Keys[j];
}
pbkdf2_sha256_sse((const unsigned char **)pin, lens, psalt->salt1, psalt->saltlen, psalt->rounds1, &(x.poutc), psalt->keylen, 0);
#else
pbkdf2_sha256((const unsigned char*)key_buffer[i], ksz, psalt->salt1, psalt->saltlen, psalt->rounds1, key, psalt->keylen, 0);
#endif
for (j = 0; j < inc; ++j) {
#if SSE_GROUP_SZ_SHA256
memcpy(key, Keys[j], sizeof(key));
#endif
// Try to decrypt using AES
AES_XTS_decrypt(key, Decr[j], psalt->encr, psalt->keylen, psalt->evp_type);
}
/* second PBKDF2 pass over the decrypted key material */
#if SSE_GROUP_SZ_SHA256
for (j = 0; j < SSE_GROUP_SZ_SHA256; ++j) {
lens[j] = psalt->keylen;
pin[j] = Decr[j];
x.pout[j] = crypt_out[i+j];
}
pbkdf2_sha256_sse((const unsigned char **)pin, lens, psalt->salt2, psalt->saltlen, psalt->rounds2, &(x.poutc), psalt->saltlen, 0);
#else
pbkdf2_sha256(Decr[0], psalt->keylen, psalt->salt2, psalt->saltlen, psalt->rounds2, crypt_out[i], psalt->saltlen, 0);
#endif
}
return count;
}
/* Quick scan: does any candidate in the batch match the stored binary on
 * its first 4 bytes?  cmp_one() then confirms the full-length match. */
static int cmp_all(void *binary, int count)
{
	int i = 0;
	while (i < count) {
		if (memcmp(binary, crypt_out[i], 4) == 0)
			return 1;
		i++;
	}
	return 0;
}
/* Full-width comparison of one candidate's result against the stored binary. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* Nothing further to verify: cmp_one() already compared the full
 * BINARY_SIZE digest, so a candidate that reaches here is a match. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Store candidate plaintext 'key' in slot 'index' for the next crypt_all().
 * Use a bounded, NUL-terminating copy: the destination slot holds
 * PLAINTEXT_LENGTH + 1 bytes and the original unbounded strcpy() could
 * overflow it if a caller ever passed a longer candidate. */
static void set_key(char* key, int index)
{
	strnzcpy((char*)(key_buffer[index]), key, sizeof(key_buffer[index]));
}
/* Return the plaintext candidate previously stored at 'index' by set_key(). */
static char *get_key(int index)
{
return (char*)(key_buffer[index]);
}
/* Bucket hash for salt records: a simple *11 rolling hash over 64 bytes of
 * the record, starting at byte offset 40.
 * NOTE(review): given the struct layout (salt1[32], salt2[32], encr[64]),
 * offset 40 actually starts 8 bytes into salt2 and the 64-byte window spills
 * into encr -- deterministic and therefore valid as a hash, but confirm the
 * "salt stuff" comment matches the intent.  The local 'psalt' intentionally
 * shadows the file-global of the same name. */
static int salt_hash(void *salt)
{
unsigned v=0, i;
unsigned char *psalt = (unsigned char *)salt;
psalt += 40; // skips us to the salt stuff.
for (i = 0; i < 64; ++i) {
v *= 11;
v += psalt[i];
}
return v & (SALT_HASH_SIZE - 1);
}
/* Decode the final hex field of the ciphertext (everything after the last
 * '$') into a static byte buffer and return it; John copies BINARY_SIZE
 * bytes from the result. */
static void *binary(char *ciphertext) {
	static uint32_t full[MAX_SALT_LEN / 4]; /* uint32 backing gives BINARY_ALIGN */
	unsigned char *out = (unsigned char*)full;
	char *hex = strrchr(ciphertext, '$') + 1;
	base64_convert(hex, e_b64_hex, strlen(hex), out, e_b64_raw, MAX_SALT_LEN, 0, 0);
	return (void*)out;
}
/* Format descriptor wiring the functions above into John's plugin API. */
struct fmt_main fmt_vdi = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
"", // BENCHMARK_COMMENT
-1, // BENCHMARK_LENGTH
0, // PLAINTEXT_MIN_LENGTH
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL }, // tunable cost names
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
binary,
get_salt,
{ NULL }, // tunable cost functions
fmt_default_source,
{
fmt_default_binary_hash
},
salt_hash,
NULL, // salt_compare
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
ten_tusscher_2004_epi_S2_14.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_14.h"
/* Report the model's resting potential and ODE system size to the caller,
 * each only when the corresponding flag is set. */
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V; // resting membrane potential
if(get_neq)
cell_model->number_of_ode_equations = NEQ; // state count (17 entries in sv_sst below -- TODO confirm NEQ)
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5952182591768,0.00128266400523176,0.780370393090429,0.780208222766858,0.000174041905078485,0.485370727173588,0.00293466121399432,0.999998357055344,1.92482840573537e-08,1.88428105751378e-05,0.999770837182767,1.00699532179645,0.999993733315635,4.75139548173797e-05,0.266377866651071,10.2975786179389,139.536672800382};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
/* Advance num_steps ODE steps of size dt for each requested cell, in
 * parallel across cells.  cells_to_solve, when non-NULL, maps loop index i
 * to a cell id; otherwise cells 0..num_cells_to_solve-1 are solved densely.
 * NOTE(review): i is a signed int compared against num_cells_to_solve --
 * confirm that count's type cannot exceed INT_MAX. */
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i]; // explicit cell list
else
sv_id = i; // dense case: loop index is the cell id
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
/* Take one time step of length dt for a single cell's state vector sv.
 * RHS_cpu receives the current state and writes the updated state into its
 * output array (despite the rDY name, the values are assigned back directly,
 * not integrated here), which is then copied into sv. */
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);
    real state[NEQ], next[NEQ];
    int k;
    for (k = 0; k < NEQ; k++)
        state[k] = sv[k];
    RHS_cpu(state, next, stim_current, dt);
    for (k = 0; k < NEQ; k++)
        sv[k] = next[k];
}
// One explicit integration step of the ten Tusscher et al. 2004 human
// ventricular myocyte model (17 state variables).  Despite the name, rDY_
// receives the UPDATED state values, not time derivatives: gates use
// Rush-Larsen exponential updates, concentrations and the membrane voltage
// use forward Euler, so the caller can copy rDY_ straight back into sv.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Fitted per-model conductances/kinetics: these overwrite the baseline
// values assigned above (the assignments above end up dead stores).
real parameters []={14.5369194152843,0.000421161732329444,0.000123555730992675,0.000438546024943873,0.268273630830681,0.123585165023946,0.171035514336793,5.02847725301225,0.0110176202871206,1.84752137000130,1095.52052508604,0.000393152126659795,0.528629865494676,0.00975540076461500,0.00491948125354052,8.11442676720905e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
// Membrane currents and helper quantities computed below.
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Precomputed Rush-Larsen factors for the fCa and g gates (fixed taus).
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// Buffered Ca handling: solve the quadratic for the free concentration
// after the buffered update (analytic steady-state buffering).
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// NOTE(review): if none of EPI/ENDO/MCELL is defined at compile time,
// R_INF, S_INF, TAU_R and TAU_S are used uninitialized in the gate
// updates below -- confirm the build always defines exactly one of them.
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
// Rush-Larsen exponential updates: rDY_[1..10] receive the NEW gate
// values (not derivatives), integrated exactly over dt at fixed voltage.
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// The fCa and g gates may only move toward their steady state while the
// membrane is depolarized above -37 mV if that decreases them.
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
// Forward Euler step for the membrane potential; sItot is the total
// inward-positive current computed above.
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
colormap.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR M M AAA PPPP %
% C O O L O O R R MM MM A A P P %
% C O O L O O RRRR M M M AAAAA PPPP %
% C O O L O O R R M M A A P %
% CCCC OOO LLLLL OOO R R M M A A P %
% %
% %
% MagickCore Colormap Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We use linked-lists because splay-trees do not currently support duplicate
% key / value pairs (e.g. X11 green compliance and SVG green compliance).
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/client.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AcquireImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% The format of the AcquireImageColormap method is:
%
% MagickBooleanType AcquireImageColormap(Image *image,const size_t colors,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return the larger of the two size_t arguments.
*/
static inline size_t MagickMax(const size_t x,
  const size_t y)
{
  return(x < y ? y : x);
}
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
  const size_t colors,ExceptionInfo *exception)
{
  register ssize_t
    i;

  /*
    Allocate (or resize) the image colormap and initialize it to a linear
    gray ramp from 0 to QuantumRange.  At least two entries are always
    allocated.  Returns MagickTrue on success; on allocation failure the
    image falls back to DirectClass and MagickFalse is returned.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->colors=MagickMax(colors,2);
  if (image->colormap == (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,
      sizeof(*image->colormap));
  else
    image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
      image->colors,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    {
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    double
      pixel;

    /*
      Compute the gray ramp in floating-point.  The previous expression
      i*(QuantumRange/(image->colors-1)) performed the division in integer
      arithmetic, truncating the step so the last entry could fall short
      of QuantumRange and the ramp was unevenly spaced.
    */
    pixel=(double) i*QuantumRange/(image->colors-1);
    GetPixelInfo(image,image->colormap+i);
    image->colormap[i].alpha_trait=BlendPixelTrait;
    image->colormap[i].red=pixel;
    image->colormap[i].green=pixel;
    image->colormap[i].blue=pixel;
    image->colormap[i].alpha=OpaqueAlpha;
  }
  return(SetImageStorageClass(image,PseudoClass,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C y c l e C o l o r m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CycleColormap() displaces an image's colormap by a given number of
% positions. If you cycle the colormap a number of times you can produce
% a psychedelic effect.
%
% WARNING: this assumes an image's colormap is in a well-known and defined
% order. Currently ImageMagick has no way of setting that order.
%
% The format of the CycleColormapImage method is:
%
% MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o displace: displace the colormap this amount.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CycleColormapImage(Image *image,
  const ssize_t displace,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Rotate every pixel's colormap index by `displace` positions, modulo the
    colormap length, then refresh the pixel channels from the new entry.
    DirectClass images are first converted to a palette type.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == DirectClass)
    (void) SetImageType(image,PaletteType,exception);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *restrict q;

    ssize_t
      index;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Take the modulo in signed arithmetic: image->colors is a size_t,
        so the previous expression promoted a negative sum to unsigned,
        yielding a wrong index for negative displacements and leaving the
        index < 0 correction below unreachable.
      */
      index=((ssize_t) GetPixelIndex(image,q)+displace) %
        (ssize_t) image->colors;
      if (index < 0)
        index+=(ssize_t) image->colors;
      SetPixelIndex(image,(Quantum) index,q);
      SetPixelInfoPixel(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S o r t C o l o r m a p B y I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortColormapByIntensity() sorts the colormap of a PseudoClass image by
% decreasing color intensity.
%
% The format of the SortColormapByIntensity method is:
%
% MagickBooleanType SortColormapByIntensity(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: A pointer to an Image structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator: order colormap entries by decreasing intensity.
  Compare in floating-point and return only the sign of the difference;
  the previous (int) casts truncated fractional (HDRI) intensities and
  their subtraction could overflow int for large quantum depths.
*/
static int IntensityCompare(const void *x,const void *y)
{
  const PixelInfo
    *color_1,
    *color_2;

  double
    intensity;

  color_1=(const PixelInfo *) x;
  color_2=(const PixelInfo *) y;
  intensity=(double) GetPixelInfoIntensity(color_2)-
    (double) GetPixelInfoIntensity(color_1);
  if (intensity < 0.0)
    return(-1);
  if (intensity > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
// Sort a PseudoClass image's colormap by decreasing intensity and remap
// every pixel index to the sorted order.  No-op for non-PseudoClass images.
MagickExport MagickBooleanType SortColormapByIntensity(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
y;
unsigned short
*pixels;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
if (image->storage_class != PseudoClass)
return(MagickTrue);
/*
Allocate memory for pixel indexes.
*/
pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
sizeof(*pixels));
if (pixels == (unsigned short *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Assign index values to colormap entries.
The alpha channel is temporarily borrowed to record each entry's
original position so the permutation can be recovered after qsort.
NOTE(review): status is not written in this loop; the shared(status)
clause appears to be boilerplate -- confirm against the generator.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].alpha=(double) i;
/*
Sort image colormap by decreasing color popularity.
*/
qsort((void *) image->colormap,(size_t) image->colors,
sizeof(*image->colormap),IntensityCompare);
/*
Update image colormap indexes to sorted colormap order.
pixels[old_index] = new_index, recovered from the stashed alpha values.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
pixels[(ssize_t) image->colormap[i].alpha]=(unsigned short) i;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
index;
register ssize_t
x;
register Quantum
*restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
break;
}
// Remap each pixel's index through the old->new permutation and refresh
// its channel values from the re-sorted colormap entry.
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(Quantum) pixels[(ssize_t) GetPixelIndex(image,q)];
SetPixelIndex(image,index,q);
SetPixelInfoPixel(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (status == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
pixels=(unsigned short *) RelinquishMagickMemory(pixels);
return(status);
}
|
GB_binop__max_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_int16)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__max_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__max_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_int16)
// A*D function (colscale): GB (_AxD__max_int16)
// D*A function (rowscale): GB (_DxB__max_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__max_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__max_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_int16)
// C=scalar+B GB (_bind1st__max_int16)
// C=scalar+B' GB (_bind1st_tran__max_int16)
// C=A+scalar GB (_bind2nd__max_int16)
// C=A'+scalar GB (_bind2nd_tran__max_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_INT16 || GxB_NO_MAX_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Thin auto-generated wrapper: binds the int16_t MAX operator (via the
// GB_* macros defined above) into the shared dense C += A+B template.
void GB (_Cdense_ewise3_accum__max_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, using the int16_t MAX operator.
GrB_Info GB (_Cdense_ewise3_noaccum__max_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// Operator compiled out; the caller falls back to the generic kernel.
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with MAX.
GrB_Info GB (_Cdense_accumB__max_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with MAX.
GrB_Info GB (_Cdense_accumb__max_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns first.
// Harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, with MAX.
GrB_Info GB (_AxD__max_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, with MAX.
GrB_Info GB (_DxB__max_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the int16_t MAX operator.
GrB_Info GB (_AaddB__max_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Workspaces used by the template; released by GB_FREE_WORK below.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the int16_t MAX operator.
GrB_Info GB (_AemultB_01__max_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// MAX is commutative (GB_BINOP_FLIP is 0), so only the unflipped branch
// below is compiled in for this operator.
GrB_Info GB (_AemultB_02__max_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A, B are bitmap/full.
GrB_Info GB (_AemultB_03__max_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with a bitmap result: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__max_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = max (x, Bx [p]) for every entry present in the bitmap Bb,
    // binding the scalar x as the first operand.
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int16_t x = (*((int16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Bb, p))
        {
            int16_t bij = Bx [p] ;
            Cx [p] = GB_IMAX (x, bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = max (Ax [p], y) for every entry present in the bitmap Ab,
    // binding the scalar y as the second operand.
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            int16_t aij = Ax [p] ;
            Cx [p] = GB_IMAX (aij, y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
// C = op (x, A'): transpose A and apply max(x, aij) via GB_CAST_OP above.
GrB_Info GB (_bind1st_tran__max_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for the code that follows this function.
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
// C = op (A', y): transpose A and apply max(aij, y) via GB_CAST_OP above.
GrB_Info GB (_bind2nd_tran__max_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
multibit_fmt_plug.c | /*
* JtR format to crack password protected MultiBit Wallets.
*
* This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it
* is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* All credit goes to Christopher Gurnee for making this work possible.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_multibit;
#elif FMT_REGISTERS_H
john_register_one(&fmt_multibit);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 128
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "aes.h"
#include "md5.h"
#include "escrypt/crypto_scrypt.h"
#include "jumbo.h"
#include "memdbg.h"
#include "unicode.h"
#define FORMAT_NAME "MultiBit Wallet"
#define FORMAT_LABEL "multibit"
#define FORMAT_TAG "$multibit$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 AES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1001
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
// Self-test vectors: "$multibit$<type>*<hex fields>" lines with their known
// plaintexts, exercised by the format's built-in self test.
static struct fmt_tests multibit_tests[] = {
// Wallets created by MultiBit Classic 0.5.18
{"$multibit$1*0908a1bd44147709*c82b6d0409c1e46a4660ea6d4fa9ae12e4e234c98a71a51ced105c7e66a57ca3", "openwall"},
{"$multibit$1*2043ebb14b6d9670*24284a38a62b6a63fb0912ebc05aa9d26d6fd828134d20b9778d8d841f65f584", "openwall123"},
// MultiBit HD wallet 0.5.0
{"$multibit$2*081e3a1252c26731120d0d63783ae46f*8354d5b454e78fb15f81c9e6289ba9b8*081e3a1252c26731120d0d63783ae46f", "openwall"},
{NULL}
};
// Per-candidate plaintext buffers and crack flags, allocated in init()
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked, cracked_count;

// Parsed hash parameters (filled by get_salt, selected by set_salt)
static struct custom_salt {
	uint32_t type;           // 1 = MultiBit Classic, 2 = MultiBit HD
	unsigned char salt[16];  // type 1 only; only the first 8 bytes are used
	unsigned char block[32]; // type 1: two encrypted blocks; type 2: first 16 bytes used
	unsigned char iv[16];    // type 2 only
	unsigned char block2[16]; // type 2: block encrypted with the hard-coded IV
} *cur_salt;
// Allocate per-candidate buffers, scaling with the OpenMP thread count.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	// BUG FIX: omp_get_num_threads() returns 1 outside a parallel region,
	// so the per-thread scaling below never took effect. Use
	// omp_get_max_threads(), as other JtR formats do.
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
	cracked_count = self->params.max_keys_per_crypt;
}
// Release the buffers allocated in init()
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}
/*
 * Validate a "$multibit$type*..." ciphertext.
 * type 1 expects two hex fields (8-byte salt, 32-byte block pair);
 * type 2 expects three 16-byte hex fields (iv, block, block2).
 * Returns 1 if well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	// expected hex-digit length of each '*'-separated field, per type
	static const int type1_lens[] = { 8 * 2, 32 * 2 };
	static const int type2_lens[] = { 16 * 2, 16 * 2, 16 * 2 };
	const int *lens;
	int nfields, i, type, extra;
	char *copy, *keep, *field;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;

	copy = strdup(ciphertext);
	keep = copy;
	copy += TAG_LENGTH;

	if ((field = strtokm(copy, "*")) == NULL)	// type
		goto err;
	if (!isdec(field))
		goto err;
	type = atoi(field);
	if (type == 1) {
		lens = type1_lens;
		nfields = 2;
	} else if (type == 2) {
		lens = type2_lens;
		nfields = 3;
	} else {
		goto err;
	}

	// every remaining field must be pure hex of the expected length
	for (i = 0; i < nfields; i++) {
		if ((field = strtokm(NULL, "*")) == NULL)
			goto err;
		if (hexlenl(field, &extra) != lens[i] || extra)
			goto err;
	}

	MEM_FREE(keep);
	return 1;

err:
	MEM_FREE(keep);
	return 0;
}
// Decode len bytes (2*len hex digits) from p into out.
// Factored out of get_salt(), which previously repeated this loop 5 times.
static void multibit_decode_hex(const char *p, unsigned char *out, int len)
{
	int i;

	for (i = 0; i < len; i++)
		out[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
}

/*
 * Parse a ciphertext (already checked by valid()) into a custom_salt.
 * Returns a pointer to a static struct, per the usual JtR salt contract.
 */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;

	memset(&cs, 0, SALT_SIZE);
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "*");
	cs.type = atoi(p);
	p = strtokm(NULL, "*");
	if (cs.type == 1) {
		multibit_decode_hex(p, cs.salt, 8);
		p = strtokm(NULL, "*");
		multibit_decode_hex(p, cs.block, 32);
	} else if (cs.type == 2) {
		multibit_decode_hex(p, cs.iv, 16);
		p = strtokm(NULL, "*");
		multibit_decode_hex(p, cs.block, 16);
		p = strtokm(NULL, "*");
		multibit_decode_hex(p, cs.block2, 16);
	}
	MEM_FREE(keeptr);
	return &cs;
}
// Select the salt used by subsequent crypt_all() calls
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
// Store candidate password `key` at slot `index` (NUL-safe truncating copy)
static void multibit_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
// Return the stored candidate password for slot `index`
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Heuristic: does a decrypted block look like a bitcoinj protobuf
 * (newest Bitcoin for Android backup)? Mirrors btcrecover's check.
 * Returns 1 on a plausible match, 0 otherwise.
 */
static int is_bitcoinj_protobuf_data(unsigned char *block)
{
	int i;

	// expect tag byte 0x0a, a short length byte, then an "org." class name
	if (block[0] != '\x0a' || block[1] >= 128)
		return 0;
	if (strncmp((const char*)block + 2, "org.", 4) != 0)
		return 0;
	// require a lower-case alpha domain name of len >= 8 (e.g. "bitcoin."),
	// as btcrecover does
	for (i = 6; i < 14; i++) {
		unsigned char ch = block[i];

		if (ch != '.' && (ch < 'a' || ch > 'z'))
			return 0;
	}
	return 1; // success
}
/*
 * Approximate base58 check: accepts '1'-'9', 'A'-'Z' and 'a'-'z'.
 * NOTE: intentionally looser than the true base58 alphabet
 * [1-9A-HJ-NP-Za-km-z] — 'I', 'O' and 'l' are also accepted, matching the
 * original range-based test this format has always used.
 * Returns 1 if all `length` bytes pass, 0 otherwise.
 */
static int is_base58(unsigned char *buffer, int length)
{
	int i;

	for (i = 0; i < length; i++) {
		unsigned char ch = buffer[i];
		int ok = (ch >= '1' && ch <= '9') ||
		         (ch >= 'A' && ch <= 'Z') ||
		         (ch >= 'a' && ch <= 'z');

		if (!ok)
			return 0;
	}
	return 1; // success
}
// Fixed scrypt salt used by MultiBit HD (type 2), per btcrecover
static const unsigned char *salt_hardcoded = (unsigned char*)"\x35\x51\x03\x80\x75\xa3\xb0\xc5";
// Fixed IV MultiBit HD uses to encrypt the second block (block2)
static const unsigned char *iv_hardcoded = (unsigned char*)"\xa3\x44\x39\x1f\x53\x83\x11\xb3\x29\x54\x86\x16\xc4\x89\x72\x3e";
/*
 * Derive keys for all queued candidates and set cracked[] via plaintext
 * heuristics (base58 private key, KnC backup header, bitcoinj protobuf).
 *
 * FIXES vs. the previous version:
 *  - the for loop was inside #ifdef _OPENMP, so non-OpenMP builds ran the
 *    body exactly once (index 0); that only worked because
 *    MAX_KEYS_PER_CRYPT was 1 there. The loop is now unconditional.
 *  - the type-2 branch redeclared key[32]/iv[16], shadowing the outer
 *    buffers; the outer declarations are reused instead.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

	memset(cracked, 0, sizeof(cracked[0]) * cracked_count);

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char iv[16];
		unsigned char key[32];
		unsigned char outbuf[32 + 1];
		AES_KEY aes_decrypt_key;

		if (cur_salt->type == 1) {
			unsigned char c;
			MD5_CTX ctx;

			// Chained-MD5 KDF (EVP_BytesToKey style):
			// key = MD5(password . salt)
			MD5_Init(&ctx);
			MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
			MD5_Update(&ctx, cur_salt->salt, 8);
			MD5_Final(key, &ctx);
			// key + 16 = MD5(key[0..15] . password . salt)
			MD5_Init(&ctx);
			MD5_Update(&ctx, key, 16);
			MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
			MD5_Update(&ctx, cur_salt->salt, 8);
			MD5_Final(key + 16, &ctx);
			// iv = MD5(key[16..31] . password . salt)
			MD5_Init(&ctx);
			MD5_Update(&ctx, key + 16, 16);
			MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
			MD5_Update(&ctx, cur_salt->salt, 8);
			MD5_Final(iv, &ctx);

			outbuf[16] = 0; // NUL terminate for strncmp() below
			AES_set_decrypt_key(key, 256, &aes_decrypt_key);
			AES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);
			c = outbuf[0];
			if (c == 'L' || c == 'K' || c == '5' || c == 'Q' || c == '\x0a' || c == '#') {
				// Does it look like a base58 private key (MultiBit,
				// MultiDoge, or oldest-format Android key backup)? (btcrecover)
				if (c == 'L' || c == 'K' || c == '5' || c == 'Q') {
					// check if bytes are in base58 set [1-9A-HJ-NP-Za-km-z]
					if (is_base58(outbuf + 1, 15)) {
						// decrypt second block and re-check
						AES_cbc_encrypt(cur_salt->block + 16, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);
						cracked[index] = is_base58(outbuf, 16);
					} else {
						cracked[index] = 0;
					}
				} else {
					// Does it look like a KnC for Android key backup?
					if (strncmp((const char*)outbuf, "# KEEP YOUR PRIV", 8) == 0) // 8 should be enough
						cracked[index] = 1;
					// Does it look like a bitcoinj protobuf (newest
					// Bitcoin for Android backup)? (btcrecover)
					else if (is_bitcoinj_protobuf_data(outbuf))
						cracked[index] = 1;
				}
			}
		} else if (cur_salt->type == 2) {
			unsigned char outbuf2[16 + 1];
			UTF16 password[PLAINTEXT_LENGTH * 2 + 1];

			outbuf2[16] = 0;
			cracked[index] = 0;
			// key = scrypt(UTF-16BE(password) incl. trailing NUL,
			//              hard-coded salt, N=16384, r=8, p=1)
			enc_to_utf16_be(password, PLAINTEXT_LENGTH, (const unsigned char*)saved_key[index], strlen(saved_key[index]) + 1);
			crypto_scrypt((const unsigned char*)password, (strlen16(password) + 1) * 2, salt_hardcoded, 8, 16384, 8, 1, key, 32);
			// 1: first block, IV from the hash
			AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key);
			memcpy(iv, cur_salt->iv, 16);
			AES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);
			if (is_bitcoinj_protobuf_data(outbuf))
				cracked[index] = 1;
			// 2: second block, hard-coded IV
			AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key);
			memcpy(iv, iv_hardcoded, 16);
			AES_cbc_encrypt(cur_salt->block2, outbuf2, 16, &aes_decrypt_key, iv, AES_DECRYPT);
			if (is_bitcoinj_protobuf_data(outbuf2))
				cracked[index] = 1;
		}
	}

	return count;
}
// Was any candidate in this batch cracked?
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
// Per-candidate check: crypt_all() already decided via cracked[]
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
// Nothing further to verify: this format uses heuristic plaintext checks
// (hence FMT_NOT_EXACT in the descriptor below)
static int cmp_exact(char *source, int index)
{
	return 1;
}
// JtR format descriptor: parameters and method table wiring the functions
// above into the cracker core. FMT_NOT_EXACT because cracked[] is set from
// heuristic plaintext checks, so false positives are possible.
struct fmt_main fmt_multibit = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
		{ NULL },
		{ FORMAT_TAG },
		multibit_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,   // no binary: comparison is via cracked[]
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		multibit_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_binop__max_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__max_int32
// A.*B function (eWiseMult): GB_AemultB__max_int32
// A*D function (colscale): GB_AxD__max_int32
// D*A function (rowscale): GB_DxB__max_int32
// C+=B function (dense accum): GB_Cdense_accumB__max_int32
// C+=b function (dense accum): GB_Cdense_accumb__max_int32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__max_int32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__max_int32
// C=scalar+B GB_bind1st__max_int32
// C=scalar+B' GB_bind1st_tran__max_int32
// C=A+scalar GB_bind2nd__max_int32
// C=A'+scalar GB_bind2nd_tran__max_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMAX (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_INT32 || GxB_NO_MAX_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the loop body comes from
// the included template, using the GB_* macros defined above.
void GB_Cdense_ewise3_accum__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation)
GrB_Info GB_Cdense_ewise3_noaccum__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// The *_slice arrays describe how B's entries are partitioned across
// ntasks tasks (produced by GB_ek_slice — see GB_ek_slice.h).
GrB_Info GB_Cdense_accumB__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix
GrB_Info GB_Cdense_accumb__max_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (generated-code artifact): the block above already returned
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by the diagonal matrix D
GrB_Info GB_AxD__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // the template writes results directly into C's value array
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by the diagonal matrix D
GrB_Info GB_DxB__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // the template writes results directly into C's value array
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// GB_FREE_ALL releases the ek_slice workspaces; used by the error/exit
// paths inside the included template as well as the explicit call below
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

// eWiseAdd: C = A+B or C<M> = A+B, with int32 MAX as the "+" operator
GrB_Info GB_AaddB__max_int32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // workspace pointers; presumably allocated inside the template when
    // needed — freed unconditionally by GB_FREE_ALL below
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, with int32 MAX as the ".*" operator
GrB_Info GB_AemultB__max_int32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // workspace pointers freed by GB_FREE_ALL (defined above GB_AaddB)
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = GB_IMAX (x, Bx [p]) for every entry present per the bitmap Bb
// (GBB (Bb, p) tests presence); Cx and Bx may be aliased
GrB_Info GB_bind1st__max_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int32_t bij = Bx [p] ;
        Cx [p] = GB_IMAX (x, bij) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = GB_IMAX (Ax [p], y) for every entry present per the bitmap Ab;
// Cx and Ax may be aliased
GrB_Info GB_bind2nd__max_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = Ax [p] ;
        Cx [p] = GB_IMAX (aij, y) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name);
// GB_CAST_OP is consumed by the GB_unop_transpose.c template below
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = Ax [pA] ; \
    Cx [pC] = GB_IMAX (x, aij) ; \
}

// C = op (x, A'): transpose A and bind the scalar x as the first operand
GrB_Info GB_bind1st_tran__max_int32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE to its file-wide definition (generated-code artifact:
    // here it happens to be the same type)
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name);
// GB_CAST_OP is consumed by the GB_unop_transpose.c template below
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int32_t aij = Ax [pA] ; \
    Cx [pC] = GB_IMAX (aij, y) ; \
}

// C = op (A', y): transpose A and bind the scalar y as the second operand
GrB_Info GB_bind2nd_tran__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.