source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
openmp-unsupported.c | // RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core.builtin -fopenmp -verify %s
// expected-no-diagnostics
// Regression test: the static analyzer used to crash on OpenMP constructs
// (which it does not model). This only checks that analysis completes
// cleanly — hence the expected-no-diagnostics above.
void openmp_parallel_crash_test() {
#pragma omp parallel
;
#pragma omp parallel for
for (int i = 0; i < 8; ++i)
// nested loop with multiple init-declarators exercised a separate crash path
for (int j = 0, k = 0; j < 8; ++j)
;
}
|
dynamics.h | /*
* Copyright (C) 2016 Leo Fang <leofang@phy.duke.edu>
*
* This program is free software. It comes without any warranty,
* to the extent permitted by applicable law. You can redistribute
* it and/or modify it under the terms of the WTFPL, Version 2, as
* published by Sam Hocevar. See the accompanying LICENSE file or
* http://www.wtfpl.net/ for more details.
*/
#ifndef __DYNAMICS_H__
#define __DYNAMICS_H__
#include <math.h>
#include "grid.h"
extern double complex W; //declared in main.c
/* Update Jun 21, 2017:
* To utilize the C99 inline specifier, it is required to put the function definitions
* in the header file which is visible to all source files, and then to provide an
* "extern inline" declaration in one and only one source file (translation unit to be
* precise). The "extern inline" specifier there can be either replaced by "extern" or
* removed completely, but it may NOT be replaced by "inline" alone. For further
* discussions, see the references below:
* http://www.greenend.org.uk/rjk/tech/inline.html
* https://gcc.gnu.org/onlinedocs/gcc/Inline.html
* https://stackoverflow.com/questions/6312597/
* https://gustedt.wordpress.com/2010/11/29/myth-and-reality-about-inline-in-c99/
* http://www.drdobbs.com/the-new-c-inline-functions/184401540
*/
// this function computes the average of 4 points that form a square
// j and i are the array indices psi[j][i] of the upper right corner
// Average of the four psi values forming a square whose upper-right
// corner is psi[j][i]. Everything below t=0 (j<1) is identically zero.
inline double complex square_average(int j, int i, grid * simulation)
{
    if (j < 1)
        return 0; // psi vanishes before t=0

    double complex corner_sum = simulation->psi[j-1][i-1]
                              + simulation->psi[j-1][i]
                              + simulation->psi[j][i-1]
                              + simulation->psi[j][i];
    return corner_sum / 4.;
}
// this function computes the average of 2 points that form a bar
// j and i are the array indices psi[j][i] of the right end
// Average of the two psi values forming a horizontal bar whose right
// end is psi[j][i]. Everything below t=0 (j<0) is identically zero.
inline double complex bar_average(int j, int i, grid * simulation)
{
    if (j < 0)
        return 0; // psi vanishes before t=0

    double complex pair_sum = simulation->psi[j][i] + simulation->psi[j][i-1];
    return pair_sum / 2.;
}
// this function computes the exponential wavepacket with a sharp wavefront at x=-a
// update: argument x now refer to the "unit-less" coordinates, so "true x" = x * Delta
// Single-photon exponential wavepacket with a sharp wavefront at x=-a.
// x is the "unit-less" coordinate ("true x" = x * Delta); the amplitude
// is identically zero ahead of the wavefront.
inline double complex one_photon_exponential(double x, double k, double alpha, grid * simulation)
{
    // beyond the wavefront (x > -a in index units): no amplitude
    if (x > -simulation->nx/2)
        return 0;

    double a_g = alpha * simulation->Gamma;
    double complex exponent = I*k*x + 0.5*a_g*(x + 0.5*simulation->nx);
    return I * csqrt(a_g) * cexp(exponent * simulation->Delta);
}
//this function computes chi(x1, x2, 0)
//update: arguments x1 & x2 now refer to the "unit-less" coordinates, so "true x1" = x1 * Delta and so on
// Two-photon initial condition chi(x1, x2, 0).
// x1 & x2 are "unit-less" coordinates ("true x1" = x1 * Delta, etc.).
// init_cond == 1: plane waves; init_cond == 3: exponential wavepacket
// (product form for identical photons, symmetrized sum otherwise).
inline double complex two_photon_input(double x1, double x2, grid * simulation)
{
    if (simulation->init_cond == 1)
    {
        // two-photon plane waves
        return cexp( I * simulation->k * (x1+x2) * simulation->Delta);
    }

    if (simulation->init_cond == 3)
    {
        // two-photon exponential wavepacket
        if (simulation->identical_photons)
            return one_photon_exponential(x1, simulation->k, simulation->alpha, simulation)
                 * one_photon_exponential(x2, simulation->k, simulation->alpha, simulation);

        // distinguishable photons: symmetrized product, normalized by A/sqrt(2)
        return simulation->A / sqrt(2.) *
               ( one_photon_exponential(x1, simulation->k1, simulation->alpha1, simulation)
               * one_photon_exponential(x2, simulation->k2, simulation->alpha2, simulation)
               + one_photon_exponential(x2, simulation->k1, simulation->alpha1, simulation)
               * one_photon_exponential(x1, simulation->k2, simulation->alpha2, simulation) );
    }

    //TODO: add other different inputs here
    // bad input, but sanity_check ensures we never arrive here
    return 0;
}
// chi(j, x1, x2): two-photon amplitude at time step j.
// NOTE: here x1 & x2 are ARRAY INDICES into the grid, unlike
// two_photon_input, which takes unit-less coordinates (hence the
// origin_index shifts below). The local variable `chi` intentionally
// shadows the function name.
// Built from the initial condition propagated along the light cones,
// minus sqrt(Gamma)/2 times psi sampled at retarded index pairs; the
// 0.5 "regularization" half-weights apply when a coordinate sits
// exactly at x = -a or x = +a (presumably the emitter positions — the
// grid struct is defined elsewhere; confirm against grid.h).
inline double complex chi(int j, int x1, int x2, grid * simulation)
{
double complex chi = 0;
double complex temp = 0;
// half-weight on points coinciding with minus_a or plus_a
double regularization_x1 = (x1==simulation->minus_a_index || x1==simulation->plus_a_index?0.5:1.0);
double regularization_x2 = (x2==simulation->minus_a_index || x2==simulation->plus_a_index?0.5:1.0);
//initial condition chi(x1, x2, 0)
if(simulation->init_cond == 1 || simulation->init_cond == 3)
chi += two_photon_input(x1-simulation->origin_index-j, x2-simulation->origin_index-j, simulation);
// four retarded psi contributions, symmetric under x1 <-> x2; each is
// active only inside its light cone (j >= x - x_a)
if( x1>=simulation->minus_a_index && j>=x1-simulation->minus_a_index)
temp += regularization_x1 * simulation->psi[j-x1+simulation->minus_a_index][x2-x1+simulation->minus_a_index];
if( x1>=simulation->plus_a_index && j>=x1-simulation->plus_a_index)
temp -= regularization_x1 * simulation->psi[j-x1+simulation->plus_a_index][x2-x1+simulation->plus_a_index];
if( x2>=simulation->minus_a_index && j>=x2-simulation->minus_a_index)
temp += regularization_x2 * simulation->psi[j-x2+simulation->minus_a_index][x1-x2+simulation->minus_a_index];
if( x2>=simulation->plus_a_index && j>=x2-simulation->plus_a_index)
temp -= regularization_x2 * simulation->psi[j-x2+simulation->plus_a_index][x1-x2+simulation->plus_a_index];
chi -= sqrt(simulation->Gamma)/2.0 * temp;
return chi;
}
//this function calculates \int dx |\psi(x,t)|^2
//this function calculates \int dx |\psi(x,t)|^2 at t = j*Delta.
// Returns Lambda + Delta * (trapezoidal sum of |psi[j][i]|^2 over
// i in [minus_a_index, j + plus_a_index]), where Lambda is a
// closed-form term depending on init_cond:
//   init_cond == 2: exp(-alpha*Gamma*t) * |e1[j]|^2
//   init_cond == 3: identical photons: 2*exp(-alpha*Gamma*t)*|e0[j]|^2;
//                   otherwise an A^2-weighted combination of e0_1/e0_2
//                   with an interference cross term.
// The j==0 early returns give the exact initial norms (1 and 0).
inline double psi_square_integral(int j, grid * simulation)
{
double sum = 0;
double Lambda = 0;
double t = j*simulation->Delta;
switch(simulation->init_cond)
{
case 2: {// = e^(-alpha*Gamma*t)|e1(t)|^2 + \int_{-a}^\infty dx |psi(x,t)|^2
if(j==0) return 1.0; // exact norm at t=0
Lambda += exp(-simulation->alpha*simulation->Gamma*t);
Lambda *= pow(cabs(simulation->e1[j]), 2.0);
}
break;
case 3: {
if(j==0) return 0.0; // psi starts empty for this initial condition
if(simulation->identical_photons)
{
Lambda += exp(-simulation->alpha*simulation->Gamma*t);
Lambda *= 2.0 * pow(cabs(simulation->e0[j]), 2.0);
}
else
{
// distinguishable photons: two direct terms plus interference
Lambda += exp(-simulation->alpha1 * simulation->Gamma * t) * pow(cabs(simulation->e0_2[j]), 2.0);
Lambda += exp(-simulation->alpha2 * simulation->Gamma * t) * pow(cabs(simulation->e0_1[j]), 2.0);
double complex temp;
temp = I*sqrt(simulation->alpha1*simulation->alpha2)*simulation->Gamma*conj(simulation->e0_2[j])*simulation->e0_1[j]
*cexp( (I*(simulation->k1-simulation->k2)-0.5*(simulation->alpha1+simulation->alpha2)*simulation->Gamma) * t)
/(simulation->k1-simulation->k2+0.5*I*(simulation->alpha1+simulation->alpha2)*simulation->Gamma);
Lambda += 2.0 * creal(temp);
Lambda *= simulation->A * simulation->A;
}
}
break;
default: { /* bad input, but sanity_check ensures we never arrive here */ }
}
int xmax = j + simulation->plus_a_index;
//trapezoidal rule for x>=-a: half-weight endpoints, full-weight interior
sum += 0.5 * pow(cabs(simulation->psi[j][simulation->minus_a_index]), 2.0);
#if _OPENMP>=201307 //simd construct begins v4.0
#pragma omp parallel for simd reduction(+:sum) //reduction EXPERIMENTAL!!!
#else
#pragma omp parallel for reduction(+:sum) //reduction EXPERIMENTAL!!!
#endif
for(int i = simulation->minus_a_index+1; i<xmax; i++)
sum += pow(cabs(simulation->psi[j][i]), 2.0);
sum += 0.5 * pow(cabs(simulation->psi[j][xmax]), 2.0);
Lambda += simulation->Delta * sum;
return Lambda;
}
//this function calculates 2*\int_{-a}^{+a}dx1 \int_{+a}^{+L}dx2 |\chi(x1, x2, t)|^2
//the window L here is NOT the same as that in check_normalization and save_chi_map
//EXPERIMENTAL!!!!
//this function calculates 2*\int_{-a}^{+a}dx1 \int_{+a}^{+L}dx2 |\chi(x1, x2, t)|^2
//the window L here is NOT the same as that in check_normalization and save_chi_map
//EXPERIMENTAL!!!!
// Evaluated at t = j*Delta with a 2D trapezoidal rule (corner/edge
// points get half-weight per dimension); the collapse(2) pragma fuses
// both index loops for OpenMP scheduling.
inline double chi_square_double_integral(int j, grid * simulation)
{
//determine the map size
int L = simulation->Nx-simulation->nx;
double abs_chi_square = 0;
//compute the desired integral at t=j*Delta
//be careful: x1 & x2 here are array indices!!!
#pragma omp parallel for collapse(2), reduction(+:abs_chi_square)
for(int x2=simulation->plus_a_index; x2<=simulation->origin_index+L; x2++)
{
for(int x1=simulation->minus_a_index; x1<=simulation->plus_a_index; x1++)
{
double abs_chi = cabs(chi(j, x1, x2, simulation));
// trapezoidal weight: 0.5 per dimension on boundary points
double trapezoidal_2D = 1.0;
if(x1==simulation->minus_a_index || x1==simulation->plus_a_index)
trapezoidal_2D *= 0.5;
if(x2==simulation->origin_index+L || x2==simulation->plus_a_index)
trapezoidal_2D *= 0.5;
abs_chi_square += trapezoidal_2D*abs_chi*abs_chi;
}
}
// factor 2 from the x1 <-> x2 exchange symmetry; Delta^2 is the cell area
abs_chi_square *= 2.0 * simulation->Delta * simulation->Delta;
return abs_chi_square;
}
//this function calculates 2*\int_{-L}^{+L}dx2 |\chi(x1, x2, t)|^2
//the window L here is same as that in check_normalization and save_chi_map
// Computes 2*\int_{-L}^{+L} dx2 |chi(x1, x2, t)|^2 at t = j*Delta via
// the trapezoidal rule. The window L here matches the one used in
// check_normalization and save_chi_map.
inline double chi_square_single_integral(int j, int x1, grid * simulation)
{
    //determine the map size
    //int L = simulation->Nx-simulation->nx;
    int L = (int)ceil(simulation->Nx/2.0-simulation->nx/4.0);
    int lo = simulation->origin_index - L;
    int hi = simulation->origin_index + L;
    double total = 0;

    //compute the desired integral at t=j*Delta
    //be careful: x1 & x2 here are array indices!!!
    #pragma omp parallel for reduction(+:total)
    for (int x2 = lo; x2 <= hi; x2++)
    {
        // trapezoidal rule: endpoints get half weight
        double weight = (x2 == lo || x2 == hi) ? 0.5 : 1.0;
        double amplitude = cabs(chi(j, x1, x2, simulation));
        total += weight * amplitude * amplitude;
    }

    return 2.0 * simulation->Delta * total;
}
//this function calculates 2*\int_{-L}^{+L}dx2 \chi^*(x1, x2, t)\chi(-x1, x2, t)
//the window L here is same as that in check_normalization and save_chi_map
//this function calculates 2*\int_{-L}^{+L}dx2 \chi^*(x1, x2, t)\chi(-x1, x2, t)
//the window L here is same as that in check_normalization and save_chi_map
// NOTE(review): the OpenMP reduction below is on a double complex
// variable; C complex reductions require compiler support (GCC accepts
// this) — confirm for every target compiler.
inline double complex chi_square_single_integral_mirrored(int j, int x1, grid * simulation)
{
//determine the map size
//int L = simulation->Nx-simulation->nx;
int L = (int)ceil(simulation->Nx/2.0-simulation->nx/4.0);
int x1_m = 2*simulation->origin_index - x1; //mirrored position
double complex chi_product = 0;
//compute the desired integral at t=j*Delta
//be careful: x1 & x2 here are array indices!!!
#pragma omp parallel for reduction(+:chi_product)
for(int x2=simulation->origin_index-L; x2<=simulation->origin_index+L; x2++)
{
// trapezoidal rule: endpoints get half weight
double trapezoidal_1D = 0;
if(x2==simulation->origin_index-L || x2==simulation->origin_index+L)
trapezoidal_1D = 0.5;
else
trapezoidal_1D = 1.0;
chi_product += trapezoidal_1D*conj(chi(j, x1, x2, simulation))*chi(j, x1_m, x2, simulation);
}
chi_product *= 2.0 * simulation->Delta;
return chi_product;
}
//core of FDTD; orginally written in main()
//#ifndef __FDTD_PTHREAD_SUPPORT__
#if _OPENMP>=201307 //simd construct begins v4.0
#pragma omp declare simd uniform(simulation)
#endif
//#endif
// One FDTD update of psi[j][i] (time step j, spatial index i):
// free propagation + delayed feedback + four light-cone source terms +
// (for init_cond 1 or 3) the two-photon input, finally divided by the
// implicit-scheme prefactor (1/Delta + W/4). W is the global complex
// frequency declared above (defined in main.c). Points immediately
// outside the light cones are left strictly zero.
// The on_light_cone half-weights regularize points lying exactly on a cone.
inline void solver(int j, int i, grid * simulation)
{
//points (i) right next to the 1st light cone and in tile B1,
//and (ii) right next to the 2nd light cone
//should be strictly zero under any circumstances
if( ((j < simulation->nx) && (i==j+simulation->minus_a_index+1))
|| (i==j+simulation->plus_a_index+1) )
return;
//continue; //do nothing, as psi is already zero initailized
//#ifdef _OPENMP
//if( i<simulation->nx+1 || i>= simulation->Ntotal)
// return; //beyond the grid range
//#endif
//free propagation (decay included)
simulation->psi[j][i] = (1./simulation->Delta-0.25*W)*simulation->psi[j-1][i-1] \
-0.25*W*(simulation->psi[j-1][i]+simulation->psi[j][i-1]);
//delay term: psi(x-2a, t-2a)theta(t-2a)
if(j>simulation->nx)
simulation->psi[j][i] += 0.5*simulation->Gamma*square_average(j-simulation->nx, i-simulation->nx, simulation);
//left light cone No.1: psi(-x-2a, t-x-a)theta(x+a)theta(t-x-a)
if( (i>simulation->minus_a_index) && (j-i>=-simulation->minus_a_index) )
{
double on_light_cone = (j-i == -simulation->minus_a_index?0.5:1.0);
simulation->psi[j][i] -= 0.5*simulation->Gamma*bar_average(j-(i-simulation->origin_index)-simulation->nx/2, \
2*simulation->origin_index-i-simulation->nx+1, simulation)*on_light_cone;
}
//left light cone No.2: -psi(-x, t-x-a)theta(x+a)theta(t-x-a)
if( (i>simulation->minus_a_index) && (j-i>=-simulation->minus_a_index) )
{
double on_light_cone = (j-i == -simulation->minus_a_index?0.5:1.0);
simulation->psi[j][i] += 0.5*simulation->Gamma*bar_average(j-(i-simulation->origin_index)-simulation->nx/2, \
2*simulation->origin_index-i+1, simulation)*on_light_cone;
}
//right light cone No.1: psi(2a-x, t-x+a)theta(x-a)theta(t-x+a)
if( (i>simulation->plus_a_index) && (j-i>=-simulation->plus_a_index) )
{
double on_light_cone = (j-i == -simulation->plus_a_index?0.5:1.0);
simulation->psi[j][i] -= 0.5*simulation->Gamma*bar_average(j-(i-simulation->origin_index)+simulation->nx/2, \
2*simulation->origin_index-i+simulation->nx+1, simulation)*on_light_cone;
}
//right light cone No.2: -psi(-x, t-x+a)theta(x-a)theta(t-x+a)
if( (i>simulation->plus_a_index) && (j-i>=-simulation->plus_a_index) )
{
double on_light_cone = (j-i == -simulation->plus_a_index?0.5:1.0);
simulation->psi[j][i] += 0.5*simulation->Gamma*bar_average(j-(i-simulation->origin_index)+simulation->nx/2, \
2*simulation->origin_index-i+1, simulation)*on_light_cone;
}
//two-photon input: 2*( chi(x-t,-a-t, 0)-chi(x-t,a-t,0) )
if( (simulation->init_cond == 1 || simulation->init_cond == 3) \
&& j-i>=-simulation->minus_a_index ) //it's nonzero only when t-x-a>=0
{
double on_light_cone = (j-i == -simulation->minus_a_index?0.5:1.0);
//shift +0.5 due to Taylor expansion at the center of square
simulation->psi[j][i] += sqrt(simulation->Gamma) * on_light_cone \
* two_photon_input((i-simulation->origin_index)-j, -simulation->nx/2-j+0.5, simulation);
if(j>simulation->nx)
{
simulation->psi[j][i] -= sqrt(simulation->Gamma) * on_light_cone \
* two_photon_input((i-simulation->origin_index)-j, simulation->nx/2-j+0.5, simulation);
}
}
//prefactor of the implicit update (complex division)
simulation->psi[j][i] /= (1./simulation->Delta+0.25*W);
}
#endif
|
GB_unop__sqrt_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__sqrt_fc32_fc32
// op(A') function: GB_unop_tran__sqrt_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = csqrtf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = csqrtf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = csqrtf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SQRT || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = csqrtf (Ax [p]) for all p < anz, statically scheduled over
// nthreads OpenMP threads. Elementwise, so aliasing Cx == Ax is safe.
// Returns GrB_NO_VALUE when the operator is compiled out via GB_DISABLE.
// (File is auto-generated; comments added during review.)
GrB_Info GB_unop_apply__sqrt_fc32_fc32
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = csqrtf (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = csqrtf (A'): transpose and apply the unary operator. The actual
// loop body lives in the shared template GB_unop_transpose.c, driven by
// the GB_* macros defined above; GB_PHASE_2_OF_2 selects the phase that
// writes the output. Returns GrB_NO_VALUE when compiled out.
// (File is auto-generated; comments added during review.)
GrB_Info GB_unop_tran__sqrt_fc32_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
bounds.c | /******************************************************************************
* *
* BOUNDS.C *
* *
* PHYSICAL BOUNDARY CONDITIONS *
* *
******************************************************************************/
#include "decs.h"
void inflow_check(double *Pr, int ii, int jj, int type);
// Apply physical boundary conditions to the primitive variables in the
// ghost zones, one face at a time (X1L, X1R, X2L, X2R, X3L, X3R), with
// an MPI ghost-zone sync after each face. Each face's treatment is
// selected at compile time via the X??_GAS_BOUND macros; the `#if N? < NG`
// branches handle grids thinner than the ghost layer. For METRIC == MKS,
// inflow through the radial boundaries is additionally suppressed.
//
// BUG FIX: the X3R BC_OUTFLOW branch previously copied from
// kactive = NG (the *inner* active zone). The outer boundary must copy
// from the last active zone, kactive = N3 - 1 + NG, consistent with the
// X1R and X2R outflow branches.
void bound_prim(grid_prim_type prim) {
  timer_start(TIMER_BOUND);

  // ---- X1 lower (inner) boundary ----
  if (global_start[1] == 0) {
#pragma omp parallel for collapse(2)
    JSLOOP(0, N2 - 1) {
      KSLOOP(0, N3 - 1) {
        ISLOOP(-NG, -1) {
#if N1 < NG
          int iactive = NG;
          PLOOP prim[i][j][k][ip] = prim[iactive][j][k][ip];
          pflag[i][j][k] = pflag[iactive][j][k];
#else
          {
#if X1L_GAS_BOUND == BC_OUTFLOW
            int iactive = NG;
            PLOOP prim[i][j][k][ip] = prim[iactive][j][k][ip];
            pflag[i][j][k] = pflag[iactive][j][k];
            // rescale B by the ratio of metric factors g at the two zones
            double rescale_fac =
                ggeom[iactive][j][CENT].g / ggeom[i][j][CENT].g;
            prim[i][j][k][B1] *= rescale_fac;
            prim[i][j][k][B2] *= rescale_fac;
            prim[i][j][k][B3] *= rescale_fac;
#elif X1L_GAS_BOUND == BC_PROB
            bound_gas_prob_x1l(i, j, k, prim);
#elif X1L_GAS_BOUND != BC_PERIODIC
            printf("X1L_GAS_BOUND choice %i not supported\n", X1L_GAS_BOUND);
            exit(-1);
#endif
          }
#endif
        }
      }
    }
  } // global_start[1] == 0
  sync_mpi_boundaries_X1L(prim);

  // ---- X1 upper (outer) boundary ----
  if (global_stop[1] == N1TOT) {
#pragma omp parallel for collapse(2)
    JSLOOP(0, N2 - 1) {
      KSLOOP(0, N3 - 1) {
        ISLOOP(N1, N1 - 1 + NG) {
#if N1 < NG
          int iactive = N1 - 1 + NG;
          PLOOP prim[i][j][k][ip] = prim[iactive][j][k][ip];
          pflag[i][j][k] = pflag[iactive][j][k];
#else
          {
#if X1R_GAS_BOUND == BC_OUTFLOW
            int iactive = N1 - 1 + NG;
            PLOOP prim[i][j][k][ip] = prim[iactive][j][k][ip];
            pflag[i][j][k] = pflag[iactive][j][k];
            // rescale B by the ratio of metric factors g at the two zones
            double rescale_fac =
                ggeom[iactive][j][CENT].g / ggeom[i][j][CENT].g;
            prim[i][j][k][B1] *= rescale_fac;
            prim[i][j][k][B2] *= rescale_fac;
            prim[i][j][k][B3] *= rescale_fac;
#elif X1R_GAS_BOUND == BC_PROB
            bound_gas_prob_x1r(i, j, k, prim);
#elif X1R_GAS_BOUND != BC_PERIODIC
            printf("X1R_GAS_BOUND choice %i not supported\n", X1R_GAS_BOUND);
            exit(-1);
#endif
          }
#endif
        }
      }
    }
  } // global_stop[1] == N1TOT
  sync_mpi_boundaries_X1R(prim);

  // ---- X2 lower boundary ----
  if (global_start[2] == 0) {
#pragma omp parallel for collapse(2)
    ILOOPALL {
      KSLOOP(0, N3 - 1) {
        JSLOOP(-NG, -1) {
#if N2 < NG
          int jactive = NG;
          PLOOP prim[i][j][k][ip] = prim[i][jactive][k][ip];
          pflag[i][j][k] = pflag[i][jactive][k];
#else
          {
#if X2L_GAS_BOUND == BC_OUTFLOW
            int jactive = NG;
            PLOOP prim[i][j][k][ip] = prim[i][jactive][k][ip];
            pflag[i][j][k] = pflag[i][jactive][k];
#elif X2L_GAS_BOUND == BC_POLAR
            // reflect across the pole and flip the theta components
            int jactive = -j + 2 * NG - 1;
            PLOOP prim[i][j][k][ip] = prim[i][jactive][k][ip];
            pflag[i][j][k] = pflag[i][jactive][k];
            prim[i][j][k][U2] *= -1.;
            prim[i][j][k][B2] *= -1.;
#elif X2L_GAS_BOUND == BC_PROB
            printf("X2L_GAS_BOUND choice BC_PROB not supported\n");
            exit(-1);
#elif X2L_GAS_BOUND != BC_PERIODIC
            printf("X2L_GAS_BOUND choice %i not supported\n", X2L_GAS_BOUND);
            exit(-1);
#endif
          }
#endif
        }
      }
    }
  } // global_start[2] == 0
  sync_mpi_boundaries_X2L(prim);

  // ---- X2 upper boundary ----
  if (global_stop[2] == N2TOT) {
#pragma omp parallel for collapse(2)
    ILOOPALL {
      KSLOOP(0, N3 - 1) {
        JSLOOP(N2, N2 - 1 + NG) {
#if N2 < NG
          int jactive = N2 - 1 + NG;
          PLOOP prim[i][j][k][ip] = prim[i][jactive][k][ip];
          pflag[i][j][k] = pflag[i][jactive][k];
#else
          {
#if X2R_GAS_BOUND == BC_OUTFLOW
            int jactive = N2 - 1 + NG;
            PLOOP prim[i][j][k][ip] = prim[i][jactive][k][ip];
            pflag[i][j][k] = pflag[i][jactive][k];
#elif X2R_GAS_BOUND == BC_POLAR
            // reflect across the pole and flip the theta components
            int jactive = -j + 2 * (N2 + NG) - 1;
            PLOOP prim[i][j][k][ip] = prim[i][jactive][k][ip];
            pflag[i][j][k] = pflag[i][jactive][k];
            prim[i][j][k][U2] *= -1.;
            prim[i][j][k][B2] *= -1.;
#elif X2R_GAS_BOUND == BC_PROB
            printf("X2R_GAS_BOUND choice BC_PROB not supported\n");
            exit(-1);
#elif X2R_GAS_BOUND != BC_PERIODIC
            printf("X2R_GAS_BOUND choice %i not supported\n", X2R_GAS_BOUND);
            exit(-1);
#endif
          }
#endif
        }
      }
    }
  } // global_stop[2] == N2TOT
  sync_mpi_boundaries_X2R(prim);

  // ---- X3 lower boundary ----
  if (global_start[3] == 0) {
#pragma omp parallel for collapse(2)
    ILOOPALL {
      JLOOPALL {
        KSLOOP(-NG, -1) {
#if N3 < NG
          int kactive = NG;
          PLOOP prim[i][j][k][ip] = prim[i][j][kactive][ip];
          pflag[i][j][k] = pflag[i][j][kactive];
#else
          {
#if X3L_GAS_BOUND == BC_OUTFLOW
            int kactive = NG;
            PLOOP prim[i][j][k][ip] = prim[i][j][kactive][ip];
            pflag[i][j][k] = pflag[i][j][kactive];
#elif X3L_GAS_BOUND == BC_PROB
            printf("X3L_GAS_BOUND choice BC_PROB not supported\n");
            exit(-1);
#elif X3L_GAS_BOUND != BC_PERIODIC
            printf("X3L_GAS_BOUND choice %i not supported\n", X3L_GAS_BOUND);
            exit(-1);
#endif
          }
#endif
        }
      }
    }
  } // global_start[3] == 0
  sync_mpi_boundaries_X3L(prim);

  // ---- X3 upper boundary ----
  if (global_stop[3] == N3TOT) {
#pragma omp parallel for collapse(2)
    ISLOOP(-NG, N1 - 1 + NG) {
      JSLOOP(-NG, N2 - 1 + NG) {
        KSLOOP(N3, N3 - 1 + NG) {
#if N3 < NG
          int kactive = N3 - 1 + NG;
          PLOOP prim[i][j][k][ip] = prim[i][j][kactive][ip];
          pflag[i][j][k] = pflag[i][j][kactive];
#else
          {
#if X3R_GAS_BOUND == BC_OUTFLOW
            // FIX: copy from the *last* active zone (was kactive = NG,
            // the inner zone — a copy-paste error from the X3L block)
            int kactive = N3 - 1 + NG;
            PLOOP prim[i][j][k][ip] = prim[i][j][kactive][ip];
            pflag[i][j][k] = pflag[i][j][kactive];
#elif X3R_GAS_BOUND == BC_PROB
            printf("X3R_GAS_BOUND choice BC_PROB not supported\n");
            exit(-1);
#elif X3R_GAS_BOUND != BC_PERIODIC
            printf("X3R_GAS_BOUND choice %i not supported\n", X3R_GAS_BOUND);
            exit(-1);
#endif
          }
#endif
        }
      }
    }
  } // global_stop[3] == N3TOT
  sync_mpi_boundaries_X3R(prim);

#if METRIC == MKS
  if (global_start[1] == 0 && X1L_INFLOW == 0) {
    // Make sure there is no inflow at the inner boundary
#pragma omp parallel for collapse(2)
    ISLOOP(-NG, -1) {
      JLOOPALL {
        KLOOPALL { inflow_check(prim[i][j][k], i, j, 0); }
      }
    }
  }
  if (global_stop[1] == N1TOT && X1R_INFLOW == 0) {
    // Make sure there is no inflow at the outer boundary
#pragma omp parallel for collapse(2)
    ISLOOP(N1, N1 - 1 + NG) {
      JLOOPALL {
        KLOOPALL { inflow_check(prim[i][j][k], i, j, 1); }
      }
    }
  }
#endif

  timer_stop(TIMER_BOUND);
}
#if METRIC == MKS
// Remove any radial inflow from the primitives Pr at zone (ii, jj):
// type == 0 rejects u^1 > 0 at the inner boundary, type == 1 rejects
// u^1 < 0 at the outer boundary. The Lorentz factor is divided out,
// the radial velocity reset so the radial 4-velocity vanishes, then a
// recomputed Lorentz factor is folded back in.
void inflow_check(double *Pr, int ii, int jj, int type) {
  struct of_geom *geom;
  double ucon[NDIM];
  double alpha, beta1, gamma, vsq;
  geom = get_geometry(ii, jj, 0, CENT);
  ucon_calc(Pr, geom, ucon);
  if (((ucon[1] > 0.) && (type == 0)) || ((ucon[1] < 0.) && (type == 1))) {
    // Find gamma and remove it from primitives
    if (mhd_gamma_calc(Pr, geom, &gamma)) {
      fprintf(stderr, "\ninflow_check(): gamma failure\n");
      fail(FAIL_GAMMA);
    }
    Pr[U1] /= gamma;
    Pr[U2] /= gamma;
    Pr[U3] /= gamma;
    alpha = geom->alpha;
    beta1 = geom->gcon[0][1] * alpha * alpha;
    // Reset radial velocity so radial 4-velocity is zero
    Pr[U1] = beta1 / alpha;
    // Now find new gamma and put it back in
    vsq = 0.;
    for (int mu = 1; mu < NDIM; mu++) {
      for (int nu = 1; nu < NDIM; nu++) {
        vsq += geom->gcov[mu][nu] * Pr[U1 + mu - 1] * Pr[U1 + nu - 1];
      }
    }
    // clamp v^2 away from 0 (numerical noise) and below 1 (superluminal)
    if (fabs(vsq) < 1.e-13)
      vsq = 1.e-13;
    if (vsq >= 1.) {
      vsq = 1. - 1. / (GAMMAMAX * GAMMAMAX);
    }
    gamma = 1. / sqrt(1. - vsq);
    Pr[U1] *= gamma;
    Pr[U2] *= gamma;
    Pr[U3] *= gamma;
  }
}
// Fix fluxes at domain boundaries: clamp the radial mass flux so no
// material flows in through a no-inflow X1 boundary, and enforce the
// polar conditions on the X2 boundaries (antisymmetric B2 flux in
// F1/F3, zero F2) so no flux crosses the pole.
void fix_flux(grid_prim_type F1, grid_prim_type F2, grid_prim_type F3) {
  if (global_start[1] == 0 && X1L_INFLOW == 0) {
    // inner radial face: mass flux must be <= 0 (outflow only)
#pragma omp parallel for collapse(2)
    JLOOPALL {
      KLOOPALL {
        // JSLOOP(0, N2-1) {
        //  KSLOOP(0, N3-1) {
        F1[0 + NG][j][k][RHO] = MY_MIN(F1[0 + NG][j][k][RHO], 0.);
      }
    }
  }
  if (global_stop[1] == N1TOT && X1R_INFLOW == 0) {
    // outer radial face: mass flux must be >= 0 (outflow only)
#pragma omp parallel for collapse(2)
    JLOOPALL {
      KLOOPALL {
        // JSLOOP(0, N2-1) {
        //  KSLOOP(0, N3-1) {
        F1[N1 + NG][j][k][RHO] = MY_MAX(F1[N1 + NG][j][k][RHO], 0.);
      }
    }
  }
  if (global_start[2] == 0 && X2L_INFLOW == 0) {
    // lower pole: mirror B2 flux antisymmetrically, kill all F2 flux
#pragma omp parallel for collapse(2)
    ILOOPALL {
      KLOOPALL {
        // ISLOOP(0, N1-1) {
        //  KSLOOP(0, N3-1) {
        F1[i][-1 + NG][k][B2] = -F1[i][0 + NG][k][B2];
        F3[i][-1 + NG][k][B2] = -F3[i][0 + NG][k][B2];
        PLOOP F2[i][0 + NG][k][ip] = 0.;
      }
    }
  }
  if (global_stop[2] == N2TOT && X2R_INFLOW == 0) {
    // upper pole: mirror B2 flux antisymmetrically, kill all F2 flux
#pragma omp parallel for collapse(2)
    ILOOPALL {
      KLOOPALL {
        // ISLOOP(0, N1-1) {
        //  KSLOOP(0, N3-1) {
        F1[i][N2 + NG][k][B2] = -F1[i][N2 - 1 + NG][k][B2];
        F3[i][N2 + NG][k][B2] = -F3[i][N2 - 1 + NG][k][B2];
        PLOOP F2[i][N2 + NG][k][ip] = 0.;
      }
    }
  }
}
#endif // METRIC
#if RADIATION
// Apply boundary conditions to every superphoton at t + dt: walk each
// per-thread photon list, remove photons that leave the domain or fail
// error checks (updating the step_lost/step_tot/tracer_tot counters),
// move photons that crossed an MPI rank boundary onto the per-thread
// MPI lists, then merge those lists and exchange photons and reduced
// radiation fields across ranks.
void bound_superphotons(grid_prim_type P, double t, double dt) {
  timer_start(TIMER_BOUND);
  int step_lost_local = 0;
  int step_tot_local = 0;
  int tracer_tot_local = 0;
  /*double ephot_beg = 0.;
  int ns_beg = 0;
  #pragma omp parallel reduction(+:ephot_beg) reduction(+:ns_beg)
  {
    struct of_photon *ph = photon_lists[omp_get_thread_num()];
    while (ph != NULL) {
      ephot_beg -= ph->w*kphys_to_num*ph->Kcov[2][0];
      if (ph->w > 0.)
        ns_beg += 1;
      ph = ph->next;
    }
  }*/
  int n_to_send = 0;
#pragma omp parallel reduction(+:step_lost_local) \
    reduction(+:step_tot_local) reduction(+:n_to_send) \
    reduction(+:tracer_tot_local)
  {
    double X[NDIM], Kcov[NDIM], Kcon[NDIM];
    struct of_photon *ph, *head, *prev;
    // each thread owns its own photon list; no cross-thread sharing here
    ph = photon_lists[omp_get_thread_num()];
    prev = NULL;
    head = ph;
    while (ph != NULL) {
      int status = get_X_K_interp(ph, t + dt, P, X, Kcov, Kcon);
      // Test whether superphoton inactive at t = t + dt
      int active_bc = bound_rad_isactive(X, ph);
      bound_rad_transport(X, ph, 0);
      int active_error = rad_error_check(&ph);
      // Update global log variables
      // rad_boundary_log(ph);
      if (!active_bc || !active_error || status == SPH_INTERP_FAIL) {
        // photon left the domain or is invalid: drop it from the list
#if RADIATION == RADTYPE_NEUTRINOS
        record_lepton_flux(ph);
#endif
        step_lost_local++;
        step_tot_local--;
        if (ph->type == TYPE_TRACER)
          tracer_tot_local--;
        list_remove(&ph, &head, &prev);
        continue;
      }
      if (rad_mpi_transport(&ph, &head, &prev, X, active_bc)) {
        // moved onto the MPI send list; rad_mpi_transport advanced ph
        n_to_send++;
        continue;
      }
      prev = ph;
      ph = ph->next;
    }
    photon_lists[omp_get_thread_num()] = head;
  } // omp parallel
  step_lost += step_lost_local;
  step_tot += step_tot_local;
  tracer_tot += tracer_tot_local;
  // Reduce MPI-ready superphotons across threads
  struct of_photon *ph_mpi = NULL, *ph = NULL;
  for (int n = 0; n < nthreads; n++) {
    if (photon_mpi_lists[n] != NULL) {
      ph = photon_mpi_lists[n];
      while (ph != NULL) {
        swap_ph(&ph, &ph_mpi);
      }
      photon_mpi_lists[n] = NULL;
    }
  }
  sync_mpi_photons(&ph_mpi, P, t, dt);
  sync_radG();
  sync_Jrad();
  sync_radtype_vec(Nem_phys);
  sync_radtype_vec(Nabs_phys);
  timer_stop(TIMER_BOUND);
}
// Sanity check on a superphoton: returns 0 (inactive) when its weight
// is negligible or the time component of K_cov is positive; 1 otherwise.
int rad_error_check(struct of_photon **ph) {
  struct of_photon *p = *ph;
  return (p->w < SMALL || p->Kcov[2][0] > 0.) ? 0 : 1;
}
// Return 1 if superphoton should be communicated over MPI.
// When the photon's position X (at t + dt) lies outside this rank's
// domain in any decomposed dimension, the node *ph is unlinked from the
// per-thread photon list (updating *head/*prev accordingly) and pushed
// onto photon_mpi_lists[thread]; *ph is advanced to the next node so
// the caller can `continue` its traversal.
int rad_mpi_transport(struct of_photon **ph, struct of_photon **head,
    struct of_photon **prev, double X[NDIM], int active) {
  // Only transport superphotons active at X[0] = t + dt
  if (!active)
    return 0;
  // out of this rank's extent in any MPI-decomposed dimension?
  if (((X[1] < startx_proc[1] || X[1] > stopx_proc[1]) && N1CPU > 1) ||
      ((X[2] < startx_proc[2] || X[2] > stopx_proc[2]) && N2CPU > 1) ||
      ((X[3] < startx_proc[3] || X[3] > stopx_proc[3]) && N3CPU > 1)) {
    // splice *ph out of the traversal list and onto the MPI list
    struct of_photon *next = (*ph)->next;
    (*ph)->next = photon_mpi_lists[omp_get_thread_num()];
    photon_mpi_lists[omp_get_thread_num()] = *ph;
    if (*prev != NULL) {
      (*prev)->next = next;
      *ph = (*prev)->next;
    } else {
      // removed the head node
      (*head) = next;
      *ph = *head;
    }
    return 1;
  } else {
    return 0;
  }
}
#define DX2MIN (0.05)
// Replace superphoton with one headed away from polar boundary.
// For MKS coordinates, when the photon is within DX2MIN of either pole,
// flip the spatial components of K at the previous point and collapse
// the three stored geodesic points onto it, so the photon retraces its
// path away from the axis. Diagnostic state is printed before and after.
void polar_fix(double X[NDIM], struct of_photon *ph) {
#if METRIC == MKS
  if (X[2] < DX2MIN || X[2] > 1. - DX2MIN) {
    printf("POLAR FIXUP!\n");
    printf("BEFORE:\n");
    for (int mu = 0; mu < NDIM; mu++) {
      printf("[%i] X[][] = %e %e %e\n", mu, ph->X[0][mu], ph->X[1][mu],
          ph->X[2][mu]);
      printf("[%i] Kcon[][] = %e %e %e\n", mu, ph->Kcon[0][mu], ph->Kcon[1][mu],
          ph->Kcon[2][mu]);
      printf("[%i] Kcov[][] = %e %e %e\n", mu, ph->Kcov[0][mu], ph->Kcov[1][mu],
          ph->Kcov[2][mu]);
      printf("KDOTK = %e\n", ph->Kcon[2][0] * ph->Kcov[2][0] +
                                 ph->Kcon[2][1] * ph->Kcov[2][1] +
                                 ph->Kcon[2][2] * ph->Kcov[2][2] +
                                 ph->Kcon[2][3] * ph->Kcov[2][3]);
    }
    ph->t0 = ph->X[1][0];
    // reverse the spatial momentum at the middle stored point
    for (int mu = 1; mu < NDIM; mu++) {
      ph->Kcon[1][mu] *= -1.;
      ph->Kcov[1][mu] *= -1.;
    }
    // collapse all three stored geodesic points onto the reversed one
    for (int mu = 0; mu < NDIM; mu++) {
      ph->X[2][mu] = ph->X[1][mu];
      ph->Kcon[2][mu] = ph->Kcon[1][mu];
      ph->Kcov[2][mu] = ph->Kcov[1][mu];
      ph->X[0][mu] = ph->X[1][mu];
      ph->Kcon[0][mu] = ph->Kcon[1][mu];
      ph->Kcov[0][mu] = ph->Kcov[1][mu];
    }
    printf("AFTER:\n");
    for (int mu = 0; mu < NDIM; mu++) {
      printf("[%i] X[][] = %e %e %e\n", mu, ph->X[0][mu], ph->X[1][mu],
          ph->X[2][mu]);
      printf("[%i] Kcon[][] = %e %e %e\n", mu, ph->Kcon[0][mu], ph->Kcon[1][mu],
          ph->Kcon[2][mu]);
      printf("[%i] Kcov[][] = %e %e %e\n", mu, ph->Kcov[0][mu], ph->Kcov[1][mu],
          ph->Kcov[2][mu]);
      printf("KDOTK = %e\n", ph->Kcon[2][0] * ph->Kcov[2][0] +
                                 ph->Kcon[2][1] * ph->Kcov[2][1] +
                                 ph->Kcon[2][2] * ph->Kcov[2][2] +
                                 ph->Kcon[2][3] * ph->Kcov[2][3]);
    }
  }
#endif // METRIC
}
#undef DX2MIN
// If X[] (at t+dt) calls for boundary transport, modify both current and
// previous X^{\mu}, K_{\mu}.
// For each dimension whose corresponding boundary is BC_PERIODIC, wrap
// all NSUP stored geodesic positions to the opposite side of the domain.
// The wrap is applied locally when the dimension is not MPI-decomposed
// (N?CPU == 1) or when the caller says the photon was just transported.
void bound_rad_transport(
    double X[NDIM], struct of_photon *ph, int is_transported) {
  if (X[1] < startx_rad[1]) {
#if X1L_RAD_BOUND == BC_PERIODIC
    if (N1CPU == 1 || is_transported) {
      for (int n = 0; n < NSUP; n++) {
        ph->X[n][1] = stopx_rad[1] - (startx_rad[1] - ph->X[n][1]);
      }
    }
#endif
  }
  if (X[1] > stopx_rad[1]) {
#if X1R_RAD_BOUND == BC_PERIODIC
    if (N1CPU == 1 || is_transported) {
      for (int n = 0; n < NSUP; n++) {
        ph->X[n][1] = startx_rad[1] + (ph->X[n][1] - stopx_rad[1]);
      }
    }
#endif
  }
  if (X[2] < startx_rad[2]) {
#if X2L_RAD_BOUND == BC_PERIODIC
    if (N2CPU == 1 || is_transported) {
      for (int n = 0; n < NSUP; n++) {
        ph->X[n][2] = stopx_rad[2] - (startx_rad[2] - ph->X[n][2]);
      }
    }
#endif
  }
  if (X[2] > stopx_rad[2]) {
#if X2R_RAD_BOUND == BC_PERIODIC
    if (N2CPU == 1 || is_transported) {
      for (int n = 0; n < NSUP; n++) {
        ph->X[n][2] = startx_rad[2] + (ph->X[n][2] - stopx_rad[2]);
      }
    }
#endif
  }
  if (X[3] < startx_rad[3]) {
#if X3L_RAD_BOUND == BC_PERIODIC
    if (N3CPU == 1 || is_transported) {
      for (int n = 0; n < NSUP; n++) {
        ph->X[n][3] = stopx_rad[3] - (startx_rad[3] - ph->X[n][3]);
      }
    }
#endif
  }
  if (X[3] > stopx_rad[3]) {
#if X3R_RAD_BOUND == BC_PERIODIC
    if (N3CPU == 1 || is_transported) {
      for (int n = 0; n < NSUP; n++) {
        ph->X[n][3] = startx_rad[3] + (ph->X[n][3] - stopx_rad[3]);
      }
    }
#endif
  }
}
// Decide whether a superphoton at position X (evaluated at t + dt) is
// still active. Returns 1 if active, 0 if the photon should be removed
// (destroyed at an escape boundary, recorded at a camera boundary, or
// killed by the polar cut). Unsupported boundary choices abort.
//
// BUG FIX: the two X1R branches tested `X1R_RAD_BOUND == BC_REQUILIB`,
// a typo for BC_EQUILIB. An undefined identifier in #if evaluates to 0,
// so the branch actually tested X1R_RAD_BOUND == 0, and a genuine
// BC_EQUILIB setting fell through to the generic "not supported" error.
int bound_rad_isactive(double X[NDIM], struct of_photon *ph) {
  int active = 1;
  // Polar fix, to prevent challenging MPI transports and geodesic explosions
#if METRIC == MKS
  double th = th_of_X(X) * 180. / M_PI;
  // if (N3TOT > 1 && (th < 3. || th > 177.)) {
  if (th < 3. || th > 177.) {
    active = 0;
  }
#endif
  if (X[1] < startx_rad[1]) {
#if X1L_RAD_BOUND == BC_PERIODIC
#elif X1L_RAD_BOUND == BC_ESCAPE
    active = 0;
#elif X1L_RAD_BOUND == BC_EQUILIB
    printf("X1L_RAD_BOUND BC_EQUILIB not supported\n");
    exit(-1);
#elif X1L_RAD_BOUND == BC_CAMERA
    printf("X1L_RAD_BOUND BC_CAMERA not supported\n");
    exit(-1);
#else
    printf("X1L_RAD_BOUND %i not supported\n", X1L_RAD_BOUND);
    exit(-1);
#endif
  }
  if (X[1] > stopx_rad[1] && ph->type != TYPE_TRACER) {
#if X1R_RAD_BOUND == BC_PERIODIC
#elif X1R_RAD_BOUND == BC_ESCAPE
    active = 0;
#elif X1R_RAD_BOUND == BC_EQUILIB   // FIX: was BC_REQUILIB (undefined macro)
    printf("X1R_RAD_BOUND BC_EQUILIB not supported\n");
    exit(-1);
#elif X1R_RAD_BOUND == BC_CAMERA
#if METRIC == MINKOWSKI
    printf("BC_CAMERA not supported with MINKOWSKI METRIC\n");
    exit(-1);
#endif
    record_superphoton(X, ph);
    active = 0;
    // record_super_photon -- put energy into relevant bins, dont worry about
    // destroying superphoton
    // printf("X1R_RAD_BOUND BC_CAMERA not supported\n");
    // exit(-1);
#else
    printf("X1R_RAD_BOUND %i not supported\n", X1R_RAD_BOUND);
    exit(-1);
#endif
  }
  // Special case for tracers since their boundary is at Rout, not
  // Rout_rad
  if (X[1] > stopx[1] && ph->type == TYPE_TRACER) {
#if X1R_RAD_BOUND == BC_PERIODIC
#elif X1R_RAD_BOUND == BC_ESCAPE
    active = 0;
#elif X1R_RAD_BOUND == BC_EQUILIB   // FIX: was BC_REQUILIB (undefined macro)
    printf("X1R_RAD_BOUND BC_EQUILIB not supported\n");
    exit(-1);
#elif X1R_RAD_BOUND == BC_CAMERA
#if METRIC == MINKOWSKI
    printf("BC_CAMERA not supported with MINKOWSKI METRIC\n");
    exit(-1);
#endif
    active = 0;
#else
    printf("X1R_RAD_BOUND %i not supported\n", X1R_RAD_BOUND);
    exit(-1);
#endif
  }
  if (X[2] < startx_rad[2]) {
#if X2L_RAD_BOUND == BC_PERIODIC
#elif X2L_RAD_BOUND == BC_ESCAPE
    active = 0;
#elif X2L_RAD_BOUND == BC_EQUILIB
    printf("X2L_RAD_BOUND BC_EQUILIB not supported\n");
    exit(-1);
#elif X2L_RAD_BOUND == BC_CAMERA
    printf("X2L_RAD_BOUND BC_CAMERA not supported\n");
    exit(-1);
#else
    printf("X2L_RAD_BOUND %i not supported\n", X2L_RAD_BOUND);
    exit(-1);
#endif
  }
  if (X[2] > stopx_rad[2]) {
#if X2R_RAD_BOUND == BC_PERIODIC
#elif X2R_RAD_BOUND == BC_ESCAPE
    active = 0;
#elif X2R_RAD_BOUND == BC_EQUILIB
    printf("X2R_RAD_BOUND BC_EQUILIB not supported\n");
    exit(-1);
#elif X2R_RAD_BOUND == BC_CAMERA
    printf("X2R_RAD_BOUND BC_CAMERA not supported\n");
    exit(-1);
#else
    printf("X2R_RAD_BOUND %i not supported\n", X2R_RAD_BOUND);
    exit(-1);
#endif
  }
  if (X[3] < startx_rad[3]) {
#if X3L_RAD_BOUND == BC_PERIODIC
#elif X3L_RAD_BOUND == BC_ESCAPE
    active = 0;
#elif X3L_RAD_BOUND == BC_EQUILIB
    printf("X3L_RAD_BOUND BC_EQUILIB not supported\n");
    exit(-1);
#elif X3L_RAD_BOUND == BC_CAMERA
    printf("X3L_RAD_BOUND BC_CAMERA not supported\n");
    exit(-1);
#else
    printf("X3L_RAD_BOUND %i not supported\n", X3L_RAD_BOUND);
    exit(-1);
#endif
  }
  if (X[3] > stopx_rad[3]) {
#if X3R_RAD_BOUND == BC_PERIODIC
#elif X3R_RAD_BOUND == BC_ESCAPE
    active = 0;
#elif X3R_RAD_BOUND == BC_EQUILIB
    printf("X3R_RAD_BOUND BC_EQUILIB not supported\n");
    exit(-1);
#elif X3R_RAD_BOUND == BC_CAMERA
    printf("X3R_RAD_BOUND BC_CAMERA not supported\n");
    exit(-1);
#else
    printf("X3R_RAD_BOUND %i not supported\n", X3R_RAD_BOUND);
    exit(-1);
#endif
  }
  return active;
}
#endif // RADIATION
|
shape.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/*
* shape.h
*
* Created on: Dec 28, 2015
* Author: agibsonccc
*/
#ifndef SHAPE_H_
#define SHAPE_H_
#include <cstring>
#include <cstdio>
#include "../dll.h"
#include "../nd4jmalloc.h"
#include "../templatemath.h"
#include "../helpers/logger.h"
#include "../pointercast.h"
#include "../cnpy/cnpy.h"
#include <op_boilerplate.h>
// Global capacity limits used throughout the shape helpers.
#define MAX_DIMENSION 0x7fffffff
#define MAX_NUM_THREADS 1024
// Maximum supported ndarray rank.
#define MAX_RANK 32
// Length (in Nd4jLong elements) of a full shape-info buffer at MAX_RANK;
// matches shapeInfoLength's rank * 2 + 4 layout. Parenthesized so the macro
// expands safely inside larger expressions — the previous unparenthesized
// form turned `MAX_SHAPEINFOLENGTH * sizeof(Nd4jLong)` into
// `2*MAX_RANK + 4*sizeof(Nd4jLong)`, silently under-sizing buffers.
#define MAX_SHAPEINFOLENGTH (2 * MAX_RANK + 4)
#define MAX_COORD 3
#define PREALLOC_SIZE 33554432
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#include <helpers/sharedmem.h>
#endif
// INLINEDEF selects the inline specifier applied to the header-defined
// helpers below. Both branches currently expand to plain `inline`; the
// #ifdef split is presumably kept as a hook so CUDA (__CUDACC__) builds
// could diverge later (e.g. __forceinline__) —
// NOTE(review): confirm that intent before collapsing the conditional.
#ifdef __CUDACC__
#define INLINEDEF inline
#else
#define INLINEDEF inline
#endif
#include "../pairwise_util.h"
#include <stdint.h>
#include <array/ArrayOptions.h>
typedef unsigned int uint;
namespace shape {
/**
 * Shape information approximating
 * the information on an ndarray
 *
 * Bundles the metadata needed to address the elements of a single ndarray:
 * per-dimension extents, per-dimension strides, ordering tag, rank, buffer
 * offset and an element-wise stride.
 * NOTE(review): shape/stride are raw pointers and this header does not show
 * who owns them — presumably the caller keeps them alive; confirm before
 * freeing.
 */
struct ND4J_EXPORT ShapeInformation {
// All-defaults construction yields an "empty" record: null pointers,
// order 0 (unset), rank 0, zero offset and element-wise stride.
_CUDA_HD ShapeInformation(Nd4jLong *shape_ = nullptr, Nd4jLong *stride_ = nullptr, char order_ = 0, int rank_ = 0, int offset_ = 0, int elementWiseStride_ = 0)
: shape(shape_), stride(stride_), order(order_), rank(rank_), offset(offset_), elementWiseStride(elementWiseStride_)
{}
Nd4jLong *shape; // extent of each dimension; length == rank
Nd4jLong *stride; // per-dimension step, in elements; length == rank
char order; // memory-ordering tag (presumably 'c'/'f', cf. shapeBufferFortran; 0 = unset)
int rank; // number of dimensions
int offset; // offset of the first element within the backing buffer
int elementWiseStride; // uniform stride when the whole array can be walked linearly; see computeElementWiseStride
};
/**
 * Indexing information
 * for bounds checking
 *
 * Per-thread iteration window, presumably produced for kernel launches
 * (cf. the commented-out currentIndex() declaration further down) —
 * NOTE(review): no writer of this struct is visible in this header;
 * the exact inclusive/exclusive convention of the indices is unconfirmed.
 */
struct ND4J_EXPORT CurrentIndexing {
int numElementsPerThread; // how many elements each thread processes
int blockStartingIndex; // first linear index owned by the block
int startingThreadIndex; // first linear index owned by this thread
int endingThreadIndex; // last/one-past index for this thread — confirm convention at the writer
};
ND4J_EXPORT _CUDA_HD bool shapeEquals(const int shape1Rank, const Nd4jLong *shape1, const int shape2Rank, const Nd4jLong *shape2);
ND4J_EXPORT _CUDA_HD Nd4jLong* detachShape(Nd4jLong *originalShape);
ND4J_EXPORT _CUDA_HD Nd4jLong* copyShape(Nd4jLong *originalShape);
ND4J_EXPORT _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2);
ND4J_EXPORT _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2);
ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2);
ND4J_EXPORT _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1,Nd4jLong *stride2,int rank2);
ND4J_EXPORT _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD bool equalsStrict(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD bool haveSameOffsets(const Nd4jLong *shapeA, const Nd4jLong *shapeB);
ND4J_EXPORT _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim);
template <typename T>
ND4J_EXPORT _CUDA_HD void fill(T* buffer, T value, Nd4jLong length);
ND4J_EXPORT _CUDA_HD void traceNew(int id);
ND4J_EXPORT _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength);
ND4J_EXPORT _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);
ND4J_EXPORT _CUDA_HD bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShape, bool isFOrder);
ND4J_EXPORT _CUDA_HD bool reshapeCF(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, const bool isFOrder, Nd4jLong* newShapeInfo);
/**
* Get the shape info buffer
* for the given rank and shape.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer);
/**
* Get the shape info buffer
* for the given rank and shape.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output);
//ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *tmpBuffer);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer);
#ifdef __CUDACC__
template <typename T>
__device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager);
__device__ ND4J_EXPORT Nd4jLong *cuMalloc(Nd4jLong *buffer, long size);
#endif
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret);
ND4J_EXPORT _CUDA_HD void updateStrides(Nd4jLong *shape, const char order);
ND4J_EXPORT _CUDA_HD void updateStrides(const int rank, const Nd4jLong *shapeOnly, Nd4jLong *stridesOnly, const char order);
// check whether input dimensions are permuted, not permuted dimensions order have to be 0,....,rank-1
template <typename T>
ND4J_EXPORT _CUDA_HD bool isDimPermuted(const T* dimensions, const int dimSize);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);
/**
* Computes the standard packed array strides for a given shape.
*
* @param shape the shape of a matrix:
* @param startNum the start number for the strides
* @return the strides for a matrix of n dimensions
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum);
ND4J_EXPORT _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret);
/**
* @param toCopy the shape to copy
* @return a copy of the original struct
*/
ND4J_EXPORT _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy);
ND4J_EXPORT _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer);
ND4J_EXPORT _CUDA_HD bool isStrideSimple(const Nd4jLong* shapeInfo);
/**
 * copy-paste of the Java hasDefaultStridesForShape function
* check whether array is not permuted and has contiguous elements in memory
*/
ND4J_EXPORT _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo);
/**
* Compute the element wise stride
* for a given shape/stride configuration
* @param rank the rank of the shape/stride
* @param shape the shape
* @param stride the stride
* @param isFOrder 0 or 1 for whether the array is f
* ordered or not
* @return 0 if there is no element wise stride the
* element wise stride of reshape(1,length) otherwise
*/
ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder);
/**
* Compute the element wise stride
* for a given shape/stride configuration
* @param rank the rank of the shape/stride
* @param shape the shape
* @param stride the stride
* @param isFOrder 0 or 1 for whether the array is f
* ordered or not
* @return 0 if there is no element wise stride the
* element wise stride of reshape(1,length) otherwise
*/
ND4J_EXPORT _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder, Nd4jLong *dimension, int dimensionLength);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer);
/**
*
* @param length
* @param shape
* @param rearrange
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int* rearrange);
/**
* In place permute swap
* @param length
* @param shape
* @param rearrange
*/
ND4J_EXPORT _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int* rearrange);
ND4J_EXPORT _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);
ND4J_EXPORT _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int* rearrange, Nd4jLong *out);
ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const int *rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeBuffer, const Nd4jLong *rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange);
ND4J_EXPORT _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int* rearrange);
/**
* Rearrange the permute indexes
* according to which dimensions are specified.
*
* For example, dimension is implicitly:
* 0,1,2
*
* If you want to do a reduce along dimensions 0 and 1,
* you need to permute the indexes to be:
* 2,0,1
*
 * which will give us the ability to iterate along an element
* wise stride.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* createPermuteIndexes(int originalRank, int *dimension,int dimensionLength);
ND4J_EXPORT _CUDA_HD Nd4jLong* computeResultShape(Nd4jLong *originalShapeBuffer, int *dimension,int dimensionLength);
/**
* This method does inplace transpose of given shapeBuffer
*
* @param shapeBuffer
*/
ND4J_EXPORT _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer);
/**
* Get the ordering for the device
* @param length
* @param shape
* @param stride
* @param elementStride
* @return
*/
ND4J_EXPORT _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride);
/**
* Ensure that every value in the re arrange
* array is unique
* @param arr
* @param shape
* @param arrLength
* @param shapeLength
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength);
/**
* Permute the shape information
* @param info the shape information to permute
* @param rearrange the order to re arrange
* @param rank the rank of the rearrange array
*/
ND4J_EXPORT _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank);
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
 * @param rank the rank of the shape
*/
ND4J_EXPORT _CUDA_HD int isVector(Nd4jLong *shape, int rank);
/**
* When 1 dimension is the whole length of the
* array
*/
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank);
ND4J_EXPORT _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD int isVector(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim);
ND4J_EXPORT _CUDA_HD bool isCommonVector(const Nd4jLong *shapeInfo, int& posOfNonUnityDim);
ND4J_EXPORT _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo);
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
ND4J_EXPORT _CUDA_HD int isMatrix(Nd4jLong *shape, int rank);
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo);
/**
* Returns the shape portion of an information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy);
template <typename T>
ND4J_EXPORT _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
template <typename T>
ND4J_EXPORT _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to);
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
ND4J_EXPORT _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes);
/**
* Permute the given strides
* in the given rearrange order
* @param toPermute the buffer to permute
* @param shapeRank the length of the buffer to permute
* @param rearrange the rearrange order (must be 0 based indexes
* and all must be filled in)
* @return the rearranged array
*/
//ND4J_EXPORT _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, Nd4jLong *rearrange);
/**
* Return the slice (shape + 1 in pointer arithmetic)
* @param shape the shape to take the slice of
* @return the shape array - the first entry
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *slice(Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD int slices(Nd4jLong *shapeBuffer);
ND4J_EXPORT _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer);
/**
 * Returns the length (in Nd4jLong elements) of the
 * shape information buffer:
 * rank * 2 + 4
 * (consistent with MAX_SHAPEINFOLENGTH == 2 * MAX_RANK + 4; the old
 * comment said "rank * 2 + 3" in one place and "+ 4" in another).
 * @param rank the rank to get the shape
 * info length for
 * @return rank * 2 + 4
 */
ND4J_EXPORT _CUDA_HD int shapeInfoLength(int rank);
ND4J_EXPORT _CUDA_HD int shapeInfoLength(Nd4jLong* shapeInfo);
ND4J_EXPORT _CUDA_HD int shapeInfoLength(const Nd4jLong* shapeInfo);
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(int rank);
// NOTE: a second, byte-identical declaration of the overload below was
// removed — redeclaring it was legal but pure noise.
ND4J_EXPORT _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo);
/**
* Returns the rank portion of
* an information buffer
*/
ND4J_EXPORT _CUDA_HD int rank(const Nd4jLong *buffer);
ND4J_EXPORT _CUDA_HD int rank(const int *buffer);
ND4J_EXPORT _CUDA_HD int rank(const unsigned int *buffer);
/**
* Converts a raw int buffer of the layout:
* rank
* shape
* stride
* offset
* elementWiseStride
*
* where shape and stride are both straight int pointers
*/
ND4J_EXPORT _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer);
/**
* Returns the stride portion of an information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *stride(const Nd4jLong *buffer);
/**
* Compute the length of the given shape
*/
ND4J_EXPORT _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape);
ND4J_EXPORT _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape);
/***
* Returns the offset portion of an information buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong offset(Nd4jLong *buffer);
ND4J_EXPORT _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer);
/**
* Returns the ordering
* for this shape information buffer
*/
ND4J_EXPORT _CUDA_HD char order(const Nd4jLong *buffer);
/**
* Returns the element wise stride for this information
* buffer
*/
ND4J_EXPORT _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer);
/**
* Returns the element wise stride for this information
* buffer
* relative to a dimension and ordering for a reduction index
*/
ND4J_EXPORT _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong *buffer, int *dimension, int dimensionLength);
/**
* Returns whether
* the given shape info buffer
* represents a scalar shape
*/
ND4J_EXPORT _CUDA_HD int isScalar(Nd4jLong *info);
/**
* Returns whether
* the given shape information
* represents a scalar
* shape or not
*/
ND4J_EXPORT _CUDA_HD int isScalar(volatile ShapeInformation *info);
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD void removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *out);
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength);
/**
* Iterate over a given set of indexes
* the begin and end indexes are 0 based.
* 1 padding is automatically assumed for the ending.
*
* For example if you want to iterate over 0 to 4
* it will go to 4 rather than 3.
*
* indexes should be the indexes to exclude
* indexes length should be the length of indexes
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end);
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
//#ifdef __CUDACC__
// __device__
//#endif
// ND4J_EXPORT int tadOffset(shape::ShapeInformation *xInfo, int offset);
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ensureVectorShape(Nd4jLong *shape);
ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo();
ND4J_EXPORT _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret);
/**
* Generate an int buffer
* up to the given length
* at the specified increment
*
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to, int increment);
/**
* Range between from and two with an
* increment of 1
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* range(int from, int to);
/**
* Keep the given indexes
* in the data
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength);
/**
* Generate reverse copy of the data
* @param data
* @param length
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* reverseCopy(T *data, Nd4jLong length);
template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length);
template <typename T>
ND4J_EXPORT _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length);
template <typename T1, typename T2>
ND4J_EXPORT _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length);
/**
*
* @param arr1
* @param arr1Length
* @param arr2
* @param arr2Length
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length);
/**
*
* @param numArrays
* @param numTotalElements
* @param arr
* @param lengths
* @return
*/
template <typename T>
ND4J_EXPORT _CUDA_HD T* concat(int numArrays, int numTotalElements, Nd4jLong **arr, Nd4jLong *lengths);
/**
* Get the length per slice of the
* given shape and the dimension
* @param rank the rank of the shape
* @param shape the shape of to get
* the length per slice for
* @param dimension the dimension to
* get the length per slice for
* @param dimensionLength the length of the dimension array
* @return the length per slice of the given shape
* along the given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int *dimension, int dimensionLength);
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank,
int index,
Nd4jLong *shape,
Nd4jLong *tensorShape,
int tensorShapeLength,
int *dimension,
int dimensionLength);
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2);
/**
* Computes the tensor along dimension
* offset
* @param index the index to get the offset for the tad for
* @param rank the rank of the shapes and strides
* @param info the shape information to use for tad
* @param dimension the dimensions to use for computing the tensor along dimensions
*/
// ND4J_EXPORT _CUDA_HD int offset(int index,
// int rank,
// shape::ShapeInformation *info,
// Nd4jLong *dimension,
// int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(int rank,
volatile int length,
volatile Nd4jLong *shape,
int *dimension,
int dimensionLength);
/**
* Computes the number
* of tensors along
* a given dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength);
/**
* Returns the tensor along dimension
* for the given block index
* @param blockSize
* @param blockIdx
* @param i
* @return
*/
ND4J_EXPORT _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i);
/**
* Computes the number of tads per block
*
*/
ND4J_EXPORT _CUDA_HD int tadsPerBlock(int blockSize, int tads);
// ND4J_EXPORT _CUDA_HD Nd4jLong *tadShapeInfo(int index, Nd4jLong *xShapeInfo, Nd4jLong *dimension,
// int dimensionLength);
/**
* Returns a shape buffer
* for the shape information metadata.
*/
ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info);
ND4J_EXPORT _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret);
/**
* Returns the number of elements per thread
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int numElementsPerThread(int N);
/**
* Returns the block starting index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int blockStartingIndex(int N);
/**
* Returns the thread starting index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int threadStartingIndex(int N, int stride, int offset);
/**
* Returns the thread ending index
*/
//#ifdef __CUDACC__
// __device__
//#endif
// int threadEndingIndex(int N, int stride, int offset);
/**
* Returns indexing information
* for the current kernel invocation
*/
//#ifdef __CUDACC__
// __device__
//#endif
// CurrentIndexing *currentIndex(int N, int offset, int stride);
/** Given an linear index, element wise stride
* and the length of each tad
* map a linear index to a tad
* @param i the index to map
* @param the element wise stride for the tads
* @param numElementsPerTad the number of elements
* per tad
*/
ND4J_EXPORT _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad);
/**
* Map a tad to a
* reduction index.
* @param tadIndexForOriginal the original tad index for the
* split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
* @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
* @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
*/
ND4J_EXPORT _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
int tadsForOriginal);
/**
* Computes the number of tads
* per reduce index for the
* reduction tad.
*/
ND4J_EXPORT _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal);
/**
* Maps a linear index to a reduction index
* @param i the linear index to map
* @param elementWiseStride the element wise stride
* for the multiple problem
* @param tadNum the number of tads for the shrunken problem
* @param originalTadNum the tad number for the reduced version of the problem
*/
ND4J_EXPORT _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
int tadNum, int originalTadNum);
/**
* Returns the prod of the data
* up to the given length
*/
ND4J_EXPORT _CUDA_HD int prod(Nd4jLong *data, int length);
ND4J_EXPORT _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length);
/**
* Returns the rear most left over item not present in
* the dimension array. This assumes that the dimension array is sorted.
*
* For example, given a dimension array of:
* 0,2
*
* and
*
* 12,4,2,1 in data
*
* You end up with 1 (data[3])
* since the first item won't match
* the last item of the dimension array
*/
// ND4J_EXPORT _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data,int length,Nd4jLong *dimension,int dimensionLength);
/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape stride and given indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the double at the specified index
*/
ND4J_EXPORT _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, const Nd4jLong *shape, const Nd4jLong *stride, const Nd4jLong *indices,int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank);
ND4J_EXPORT _CUDA_HD Nd4jLong* createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices);
ND4J_EXPORT _CUDA_HD Nd4jLong *ind2sub(int rank, Nd4jLong *shape, Nd4jLong index);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD Nd4jLong* ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices);
/**
* Convert a linear index to
* the equivalent nd index
* @param shape the shape of the dimensions
* @param index the index to map
* @param numIndices the number of total indices (typically prod of shape(
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out);
/**
* Convert a linear index to
* the equivalent nd index.
* Infers the number of indices from the specified shape.
*
* @param shape the shape of the dimensions
* @param index the index to map
* @return the mapped indexes along each dimension
*/
ND4J_EXPORT _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong *out);
/**
* Convert the given index (such as 1,1)
* to a linear index
* @param shape the shape of the indexes to convert
* @param indices the index to convert
* @return the linear index given the shape
* and indices
*/
ND4J_EXPORT _CUDA_HD Nd4jLong sub2Ind(const int rank, const Nd4jLong *shape, const Nd4jLong *indices);
/**
* increment n-dimensional array by one iteration by changing coord appropriately
* for example we have array with shape {2, 3}:
* - if input coord = {0,1}, then output coord = {0,2}
* - if input coord = {0,2}, then output coord = {1,0}
* so the aim is to produce following subsequence of coord: {0,0}, {0,1}, {0,2}, {1,0}, {1,1}, {1,2}
*/
/* calculates an array buffer offset for given "index" using following formula: offset = coord_0*stride_0 + coord_1*stride_1 + ... + coord_{rank-1}*stride_{rank-1}
* arrLen - array length
*/
ND4J_EXPORT _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen);
// linear-index -> buffer-offset translation (general, order-aware, and mixed 32/64-bit variants)
ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen);
ND4J_EXPORT _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order);
ND4J_EXPORT _CUDA_HD Nd4jLong indexOffset(Nd4jLong index, const Nd4jLong* lShapeInfo, const uint* uShapeInfo, Nd4jLong arrLen, const bool useUnsigned);
/**
 * Compute the real linear indices for the given shape and stride
 */
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride);
/**
 * Compute the real linear indices for the
 * given shape buffer. Shape, stride and rank are derived
 * from the buffer
 */
ND4J_EXPORT _CUDA_HD Nd4jLong *computeIndices( Nd4jLong *shapeBuffer);
/**
 * Convert a linear index to
 * the equivalent nd index
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the product of the shape)
 * @return the mapped indexes along each dimension
 */
ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices,Nd4jLong *out);
/**
 * Convert a linear index to
 * the equivalent nd index; the number of indices is
 * inferred from the shape info.
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension
 */
ND4J_EXPORT _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index,Nd4jLong *out);
// debug printing helpers
ND4J_EXPORT _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, Nd4jLong *shapeInfo);
ND4J_EXPORT _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, Nd4jLong *shape, Nd4jLong *strides);
ND4J_EXPORT _CUDA_HD void printIntArray(const Nd4jLong *arr, const int length);
ND4J_EXPORT _CUDA_HD void printIntArray(const int *arr, const int length);
ND4J_EXPORT _CUDA_HD void printArray(float *arr,int length);
template<typename T>
ND4J_EXPORT _CUDA_HD void printArray(T *arr,int length, const char *message);
// shape-buffer construction from npy (numpy file format) metadata
ND4J_EXPORT _CUDA_HD Nd4jLong* shapeBufferOfNpy(int rank, unsigned int *shape,bool fortranOrder);
ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr);
// ND4J_EXPORT _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer);
// this function checks the consistency of dimensions with array rank (negative dimensions, too large dimensions, too big number of dimensions)
// it also sorts the input array of dimensions; this operation is also necessary for creating a TAD object
ND4J_EXPORT _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions);
// function calculates linear index of array min, min is sub-array of max, index to be returned is min-array's index and corresponds to maxIdx of max array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr);
// function calculates absolute offset of min array, min is sub-array of max, offset to be returned corresponds to maxIdx of max array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD Nd4jLong subArrayOffset(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr);
// max array is outer for min array, min array is sub-array of max array
// function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs)
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD void maxIndToMinInd(Nd4jLong* maxIdxs, Nd4jLong* minIdxs, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr);
// calculate indexes of max-array, these output indexes correspond to one minIdx index of min-array which is sub-array of max-array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD int outerArrayIndexes(Nd4jLong* maxIdxs, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr);
// calculate offsets of max-array, these output offsets correspond to one minIdx index of min-array which is sub-array of max-array
// dimsToExclude - should be sorted in increasing order
ND4J_EXPORT _CUDA_HD int outerArrayOffsets(Nd4jLong* maxOffsets, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude = nullptr);
// calculates offsets for numOfSubArrs sub-arrays, shape in this context means dimensions excluded from outer array
// rank is equal to size of shape
ND4J_EXPORT void calcSubArrOffsets(const Nd4jLong numOfSubArrs, const int rank, const Nd4jLong* shape, const Nd4jLong* strides, Nd4jLong* subArrOffsets);
ND4J_EXPORT _CUDA_HD void shapeOldScalar(nd4j::DataType dtype, Nd4jLong* const buffer, const char order);
//END HEADERS
//BEGIN IMPLEMENTATIONS
#ifdef __CUDACC__
/**
 * Device-side scratch allocation for coordinate buffers.
 * Small requests are served by device-heap `new`; larger requests fall back to a
 * slot in the preallocated global `buffer`, or device `malloc` when the slot
 * would overrun PREALLOC_SIZE.
 * NOTE(review): the comment below says "shared memory", but the live code path
 * heap-allocates (the shared-memory call is commented out) — confirm intent.
 */
template <typename T>
__device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size, UnifiedSharedMemory *manager) {
    // if we go for 3 dimensions coord space or below - just use shared memory for that
    if (size <= MAX_COORD * 4) {
        Nd4jLong *ptr = new Nd4jLong[size / 4];//manager->getSharedCoordBuffer() + (threadIdx.x * MAX_COORD);
        return ptr;
    } else {
        // otherwise go to preallocated global memory :(
        int tid = blockIdx.x * blockDim.x + threadIdx.x;
        if (tid * size > PREALLOC_SIZE - size) {
            // preallocated region exhausted for this thread — fall back to device malloc
            return (Nd4jLong *) malloc(size);
        } else {
            // carve this thread's slot out of the preallocated buffer
            Nd4jLong *ret = buffer;
            ret += (tid * size);
            return ret;
        }
    }
}
#endif
#ifdef __CUDACC__
/**
 * BEWARE: THIS METHOD DOES NOT CHECK ALLOCATION BOUNDARIES
 * Returns this thread's slot inside a caller-provided buffer
 * (no allocation is performed; offset is threadIdx.x * size).
 */
__device__ INLINEDEF Nd4jLong *cuMalloc(Nd4jLong *buffer, long size) {
    Nd4jLong *ret = buffer;
    ret += (threadIdx.x * size);
    return ret;
}
#endif
/**
 * Number of elements in one TAD (tensor along dimension):
 * the product of the shape extents listed in `dimension`.
 */
INLINEDEF _CUDA_HD int tadLength(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    // single-dimension fast path: the TAD length is just that extent
    if (dimensionLength == 1)
        return shape::shapeOf(shapeInfo)[dimension[0]];

    // multiply together every extent whose axis appears in the dimension list
    int length = 1;
    for (int axis = 0; axis < shape::rank(shapeInfo); axis++)
        for (int d = 0; d < dimensionLength; d++)
            if (axis == dimension[d])
                length *= shape::shapeOf(shapeInfo)[dimension[d]];
    return length;
}
/**
 * Element-wise stride of a TAD (disregarding order): given the innermost
 * (last sorted) dimension, this is that dimension's stride.
 *
 * For a single dimension this is simply the corresponding stride entry.
 * Example, c-order shape/stride:
 *   shape  2,2,3,2
 *   stride 12,6,2,1
 * -> dimension 3 gives 1, dimension 0 gives 12, dimensions {2,3} give 1
 * (the multi-dimensional {2,3} case reduces to the singular 3 case).
 *
 * This applies to the dimension(s) that ultimately end up removed;
 * it may not preserve TAD ordering but is usable for reductions.
 */
INLINEDEF _CUDA_HD int tadElementWiseStride(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    // delegate to the reduction-index stride computation
    auto ews = reductionIndexElementWiseStride(shapeInfo, dimension, dimensionLength);
    return ews;
}
// true when both shapes have the same rank and identical extents
INLINEDEF _CUDA_HD bool shapeEquals(const int shape1Rank, const Nd4jLong *shape1, const int shape2Rank, const Nd4jLong *shape2) {
    if (shape1Rank != shape2Rank)
        return false;
    for (int d = 0; d < shape1Rank; d++)
        if (shape1[d] != shape2[d])
            return false;
    return true;
}

// shape equality directly on two full shape-info buffers
INLINEDEF _CUDA_HD bool shapeEquals(const Nd4jLong *shapeInfo1, const Nd4jLong *shapeInfo2) {
    return shape::shapeEquals(shape::rank(shapeInfo1), shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo1)), shape::rank(shapeInfo2), shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo2)));
}

// element-wise equality of two arrays of the same rank
INLINEDEF _CUDA_HD bool strideEquals(int shape1Rank,Nd4jLong *shape1,int shape2Rank,Nd4jLong *shape2) {
    if (shape1Rank != shape2Rank)
        return false;
    for (int d = 0; d < shape1Rank; d++)
        if (shape1[d] != shape2[d])
            return false;
    return true;
}

// stride equality directly on two full shape-info buffers
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *shapeInfo1,Nd4jLong *shapeInfo2) {
    return shape::strideEquals(shape::rank(shapeInfo1), shape::stride(shapeInfo1), shape::rank(shapeInfo2), shape::stride(shapeInfo2));
}

// stride equality from raw stride arrays plus explicit ranks
INLINEDEF _CUDA_HD bool strideEquals(Nd4jLong *stride1,int rank1 , Nd4jLong *stride2, int rank2) {
    if (rank1 != rank2)
        return false;
    for (int d = 0; d < rank1; d++)
        if (stride1[d] != stride2[d])
            return false;
    return true;
}
/**
 * Builds the shape buffer of a reduction result: removes the reduced
 * dimensions from the original shape, then pads the outcome back to a
 * proper 2-d (vector) shape. Caller owns the returned buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong *computeResultShape(Nd4jLong *originalShapeBuffer, int* dimension,int dimensionLength) {
    Nd4jLong *newShape;
    int newRank;
    // 2147483647 (INT_MAX) is the sentinel for "reduce over the whole array"
    if (dimensionLength == 1 && dimension[0] == 2147483647) {
        newShape = new Nd4jLong[2];
        newShape[0] = 1;
        newShape[1] = 1;
        newRank = 2;
    } else {
        newShape = shape::removeIndex<Nd4jLong, int>(shape::shapeOf(originalShapeBuffer), dimension, shape::shapeInfoLength(shape::rank(originalShapeBuffer)), dimensionLength);
        newRank = shape::rank(originalShapeBuffer) - dimensionLength;
    }
    // ensure the result is a proper 2-d vector shape
    if (newRank == 1) {
        // reducing dim 0 yields a row vector {1, n}; otherwise a column vector {n, 1}
        Nd4jLong kept = newShape[0];
        Nd4jLong first = (dimension[0] == 0) ? 1 : kept;
        Nd4jLong second = (dimension[0] == 0) ? kept : 1;
        delete[] newShape;
        newShape = new Nd4jLong[2]{first, second};
        newRank = 2;
    } else if (newRank == 0) {
        // full reduction collapses to a {1, 1} scalar shape
        delete[] newShape;
        newShape = new Nd4jLong[2]{1, 1};
        newRank = 2;
    }
    auto result = shape::shapeBuffer(newRank, nd4j::ArrayOptions::dataType(originalShapeBuffer), newShape);
    delete[] newShape;
    return result;
}
/**
 * Fills `buffer` with a shape-info whose shape/stride are the requested
 * dimensions of `shapeInfo` (padded to rank 2 for a single dimension).
 * Only rank, shape, stride and order fields are written.
 *
 * Fix: the original matrix case had two byte-identical branches for
 * dimension[0]==0 and dimension[0]!=0 — collapsed into one path.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride, Nd4jLong *buffer) {
    Nd4jLong *theShape = shape::shapeOf(shapeInfo);
    Nd4jLong *theStride = shape::stride(shapeInfo);
    // a single dimension is padded to rank 2
    const int rank = dimensionLength == 1 ? 2 : dimensionLength;
    Nd4jLong *ret = buffer;
    ret[0] = rank;
    Nd4jLong *retShape = shape::shapeOf(ret);
    Nd4jLong *retStride = shape::stride(ret);

    if (dimensionLength == 1) {
        if (shape::isMatrix(theShape, shape::rank(shapeInfo))) {
            // matrix input: {dim extent, 1} regardless of which dimension was picked
            retShape[0] = theShape[dimension[0]];
            retShape[1] = 1;
            retStride[0] = theStride[dimension[0]];
            retStride[1] = 1;
        } else {
            // vector/other input: pad as {1, dim extent}
            retShape[0] = 1;
            retShape[1] = theShape[dimension[0]];
            retStride[0] = 1;
            retStride[1] = theStride[dimension[0]];
        }
    } else {
        // gather the selected dimensions (optionally reversing stride order)
        Nd4jLong *newIndexes = dimension;
        if (reverseCopyStride)
            shape::reverseCopyTo(theStride, retStride, newIndexes, rank);
        else
            shape::copyTo(rank, theStride, retStride, newIndexes);
        shape::copyTo(rank, theShape, retShape, newIndexes);
    }
    // preserve the source ordering flag
    ret[shape::shapeInfoLength(rank) - 1] = shape::order(shapeInfo);
    return ret;
}
// allocating overload: creates the output buffer, then delegates
INLINEDEF _CUDA_HD Nd4jLong *shapeInfoOnlyShapeAndStride(Nd4jLong *shapeInfo, Nd4jLong *dimension, int dimensionLength,bool reverseCopyStride) {
    const int rank = dimensionLength == 1 ? 2 : dimensionLength;
    traceNew(4);
    auto out = new Nd4jLong[shape::shapeInfoLength(rank)];
    return shapeInfoOnlyShapeAndStride(shapeInfo, dimension, dimensionLength, reverseCopyStride, out);
}

// allocating overload: creates the output buffer, then delegates
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank) {
    traceNew(5);
    auto out = new Nd4jLong[shape::shapeInfoLength(rank)];
    return createShapeInfo(shape, stride, rank, out);
}

// writes [rank, shape..., stride...] into the caller-provided buffer
INLINEDEF _CUDA_HD Nd4jLong * createShapeInfo(Nd4jLong *shape, Nd4jLong *stride, int rank, Nd4jLong *buffer) {
    buffer[0] = rank;
    Nd4jLong *shapeDst = shape::shapeOf(buffer);
    Nd4jLong *strideDst = shape::stride(buffer);
    for (int d = 0; d < rank; d++) {
        shapeDst[d] = shape[d];
        strideDst[d] = stride[d];
    }
    return buffer;
}
/**
 * Computes the standard packed array strides (Fortran/column-major order)
 * for a given shape, starting from `startNum`. Caller owns the result.
 *
 * Fix: the running product is now accumulated in Nd4jLong — the original
 * `int st` overflowed (signed overflow, UB) once the product of dimension
 * sizes exceeded INT_MAX.
 *
 * @param shape the shape of a matrix
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum) {
    // vectors always get unit strides {1, 1}
    if (isVector(shape, rank)) {
        traceNew(5);
        Nd4jLong *ret = new Nd4jLong[2];
        for (int i = 0; i < 2; i++)
            ret[i] = 1;
        return ret;
    }
    traceNew(6);
    Nd4jLong *stride = new Nd4jLong[rank];
    Nd4jLong st = startNum;  // Nd4jLong accumulator: avoids int overflow
    for (int j = 0; j < rank; j++) {
        stride[j] = st;
        st *= shape[j];
    }
    return stride;
}
/**
 * Same as calcStridesFortran above, but writes into a caller-provided
 * buffer `ret` (must hold at least `rank` entries, or 2 for vectors).
 * Fix: Nd4jLong accumulator replaces the overflow-prone `int st`.
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, int startNum, Nd4jLong *ret) {
    if (isVector(shape, rank)) {
        for (int i = 0; i < 2; i++)
            ret[i] = 1;
        return ret;
    }
    Nd4jLong st = startNum;  // Nd4jLong accumulator: avoids int overflow
    for (int j = 0; j < rank; j++) {
        ret[j] = st;
        st *= shape[j];
    }
    return ret;
}
/**
 * Computes the standard packed array strides (C/row-major order)
 * for a given shape, starting from `startNum`. Caller owns the result.
 *
 * Fix: the running product is now accumulated in Nd4jLong — the original
 * `int st` overflowed (signed overflow, UB) once the product of dimension
 * sizes exceeded INT_MAX.
 *
 * @param shape the shape of a matrix
 * @param startNum the start number for the strides
 * @return the strides for a matrix of n dimensions
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum) {
    traceNew(7);
    Nd4jLong *stride = new Nd4jLong[rank];
    if (rank == 1) {
        stride[0] = 1;
        return stride;
    }
    Nd4jLong st = startNum;  // Nd4jLong accumulator: avoids int overflow
    for (int j = rank - 1; j >= 0; j--) {
        stride[j] = st;
        st *= shape[j];
    }
    return stride;
}
/**
 * Same as calcStrides above, but writes into a caller-provided buffer
 * `ret` (must hold at least `rank` entries).
 * Fix: Nd4jLong accumulator replaces the overflow-prone `int st`.
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStrides(Nd4jLong *shape, int rank, int startNum, Nd4jLong* ret) {
    if (rank == 1) {
        ret[0] = 1;
        return ret;
    }
    Nd4jLong st = startNum;  // Nd4jLong accumulator: avoids int overflow
    for (int j = rank - 1; j >= 0; j--) {
        ret[j] = st;
        st *= shape[j];
    }
    return ret;
}
/**
 * Convenience overloads: compute packed strides with the conventional
 * start value of 1, in Fortran (column-major) or C (row-major) order,
 * either allocating the result or filling a caller-provided buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank) {
    return calcStridesFortran(shape, rank, 1);
}

INLINEDEF _CUDA_HD Nd4jLong * calcStridesFortran(Nd4jLong *shape, int rank, Nd4jLong* ret) {
    return calcStridesFortran(shape, rank, 1, ret);
}

INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank) {
    return calcStrides(shape, rank, 1);
}

INLINEDEF _CUDA_HD Nd4jLong* calcStrides(Nd4jLong *shape, int rank, Nd4jLong* ret) {
    return calcStrides(shape, rank, 1, ret);
}
//////////////////////////////////////////////////////////////////////
// Recomputes the stride section of a full shape-info buffer in place,
// according to `order` ('c' = row-major, otherwise column-major),
// and resets the trailing ews/order fields.
// Layout assumed: [rank, shape[0..rank-1], stride[0..rank-1], ..., ews, order]
INLINEDEF _CUDA_HD void updateStrides(Nd4jLong *shapeInfo, const char order) {
    int rank = shapeInfo[0];
    int doubleRank = 2*rank;  // index of the last stride slot
    if (rank > 0) {
        if (order == 'c') {
            shapeInfo[doubleRank] = 1; // set unity as last stride for c order
            for (int j = 1; j < rank; ++j) {
                // stride[k] = stride[k+1] * shape[k+1], walking backwards
                shapeInfo[doubleRank - j] = shapeInfo[doubleRank - j + 1] * shapeInfo[rank + 1 - j];
            }
        } else {
            shapeInfo[rank + 1] = 1; // set unity as first stride for f order
            for (int j = rank + 1; j < doubleRank; ++j) {
                // stride[k] = stride[k-1] * shape[k-1], walking forwards
                shapeInfo[j + 1] = shapeInfo[j] * shapeInfo[j - rank];
            }
        }
    }
    // set last 2 elements in shapeInfo: element-wise stride and order char
    shapeInfo[doubleRank + 2] = 1;
    shapeInfo[doubleRank + 3] = (int)order;
}
//////////////////////////////////////////////////////////////////////
// Fills `stridesOnly` with packed strides for `shapeOnly`, according to
// `order` ('c' = row-major, anything else = column-major). No-op for rank <= 0.
INLINEDEF _CUDA_HD void updateStrides(const int rank, const Nd4jLong *shapeOnly, Nd4jLong *stridesOnly, const char order) {
    if (rank <= 0)
        return;
    if (order == 'c') {
        // c order: innermost stride is 1, walk from the tail backwards
        stridesOnly[rank - 1] = 1;
        for (int i = rank - 2; i >= 0; --i)
            stridesOnly[i] = stridesOnly[i + 1] * shapeOnly[i + 1];
    } else {
        // f order: first stride is 1, walk forwards
        stridesOnly[0] = 1;
        for (int i = 1; i < rank; ++i)
            stridesOnly[i] = stridesOnly[i - 1] * shapeOnly[i - 1];
    }
}
// check whether input dimensions are permuted; a non-permuted dimension list
// is strictly increasing (0,...,rank-1 ordering)
template <typename T>
INLINEDEF _CUDA_HD bool isDimPermuted(const T* dimensions, const Nd4jLong dimSize ) {
    bool permuted = false;
    for (Nd4jLong k = 0; k + 1 < dimSize; ++k) {
        if (dimensions[k] > dimensions[k + 1]) {
            permuted = true;
            break;
        }
    }
    return permuted;
}
/**
 * Deep-copies a ShapeInformation struct, including its shape and stride
 * arrays. Caller owns the returned struct and its arrays.
 * @param toCopy the shape information to copy
 * @return a copy of the original struct
 */
INLINEDEF _CUDA_HD ShapeInformation *shapeCopy( ShapeInformation *toCopy) {
    auto dup = new ShapeInformation;
    traceNew(8);
    dup->shape = new Nd4jLong[toCopy->rank];
    memcpy(dup->shape, toCopy->shape, toCopy->rank * sizeof(Nd4jLong));
    traceNew(9);
    dup->stride = new Nd4jLong[toCopy->rank];
    memcpy(dup->stride, toCopy->stride, toCopy->rank * sizeof(Nd4jLong));
    dup->order = toCopy->order;
    dup->rank = toCopy->rank;
    dup->offset = toCopy->offset;
    dup->elementWiseStride = toCopy->elementWiseStride;
    return dup;
}
// Computes a single element-wise stride for the whole array, i.e. the step
// to walk all elements linearly, by attempting to collapse the shape into
// a flat 1 x length view (algorithm adapted from NumPy's reshape logic).
// Returns 0 when the strides are not contiguous enough for such a collapse.
// NOTE(review): np/op are `int` — presumably assumes total length fits in
// int; confirm for very large arrays.
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder) {
    if (rank == 0)
        return 1;
    // a vector is trivially walkable by its last stride
    if(shape::isVector(shape,rank)) {
        return stride[rank - 1];
    }
    else {
        int oldnd;
        Nd4jLong *oldDims = shape::copyOf(rank, shape);
        Nd4jLong *oldStrides = shape::copyOf(rank, stride);
        int np, op, last_stride;
        int oldStart, oldStop, ok, newStart, newStop, nk;
        traceNew(10);
        auto newStrides = new Nd4jLong[rank];
        oldnd = 0;
        //set the shape to be 1 x length
        int newShapeRank = 2;
        auto newShape = new Nd4jLong[newShapeRank];
        newShape[0] = 1;
        newShape[1] = shape::prodLong(shape, rank);
        /*
         * Remove axes with dimension 1 from the old array. They have no effect
         * but would need special cases since their strides do not matter.
         */
        for (oldStart = 0; oldStart < rank; oldStart++) {
            if (shape[oldStart] != 1) {
                oldDims[oldnd] = shape[oldStart];
                oldStrides[oldnd] = stride[oldStart];
                oldnd++;
            }
        }
        // total element counts of the new and old views must agree
        np = 1;
        for (newStart = 0; newStart < newShapeRank; newStart++) {
            np *= newShape[newStart];
        }
        op = 1;
        for (oldStart = 0; oldStart < oldnd; oldStart++) {
            op *= oldDims[oldStart];
        }
        if (np != op) {
            /* different total sizes; no hope */
            delete[] newStrides;
            delete[] newShape;
            delete[] oldStrides;
            delete[] oldDims;
            return 0;
        }
        if (np == 0) {
            /* the current code does not handle 0-sized arrays, so give up */
            delete[] newStrides;
            delete[] newShape;
            delete[] oldStrides;
            delete[] oldDims;
            return 0;
        }
        /* oldStart to oldStop and newStart to newStop give the axis ranges currently worked with */
        oldStart = 0;
        oldStop = 1;
        newStart = 0;
        newStop = 1;
        while (newStart < newShapeRank && oldStart < oldnd) {
            // grow one of the two axis windows until their element counts match
            np = newShape[newStart];
            op = oldDims[oldStart];
            while (np != op) {
                if (np < op) {
                    /* Misses trailing 1s, these are handled later */
                    np *= newShape[newStop++];
                } else {
                    op *= oldDims[oldStop++];
                }
            }
            /* Check whether the original axes can be combined */
            for (ok = oldStart; ok < oldStop - 1; ok++) {
                if (isFOrder) {
                    if (oldStrides[ok + 1] != oldDims[ok] * oldStrides[ok]) {
                        /* not contiguous enough */
                        delete[] newStrides;
                        delete[] newShape;
                        delete[] oldStrides;
                        delete[] oldDims;
                        return 0;
                    }
                } else {
                    /* C order */
                    if (oldStrides[ok] != oldDims[ok + 1] * oldStrides[ok + 1]) {
                        /* not contiguous enough */
                        delete[] newStrides;
                        delete[] newShape;
                        delete[] oldStrides;
                        delete[] oldDims;
                        return 0;
                    }
                }
            }
            /* Calculate new strides for all axes currently worked with */
            if (isFOrder) {
                newStrides[newStart] = oldStrides[oldStart];
                for (nk = newStart + 1; nk < newStop; nk++) {
                    newStrides[nk] = newStrides[nk - 1] * newShape[nk - 1];
                }
            } else {
                /* C order */
                newStrides[newStop - 1] = oldStrides[oldStop - 1];
                for (nk = newStop - 1; nk > newStart; nk--) {
                    newStrides[nk - 1] = newStrides[nk] * newShape[nk];
                }
            }
            newStart = newStop++;
            oldStart = oldStop++;
        }
        /*
         * Set strides corresponding to trailing 1s of the new shape.
         */
        if (newStart >= 1) {
            last_stride = newStrides[newStart - 1];
        } else {
            last_stride = stride[rank - 1];
        }
        if (isFOrder) {
            if (newStart >= 1)
                last_stride *= newShape[newStart - 1];
        }
        for (nk = newStart; nk < newShapeRank; nk++) {
            newStrides[nk] = last_stride;
        }
        //returns the last element of the new stride array
        int ret = last_stride;
        delete[] newStrides;
        delete[] newShape;
        delete[] oldStrides;
        delete[] oldDims;
        return ret;
    }
}
// dimension-restricted variant: only the single-dimension case yields a
// usable element-wise stride here; everything else reports 0 (unknown)
INLINEDEF _CUDA_HD int computeElementWiseStride(int rank, Nd4jLong *shape, Nd4jLong *stride, int isFOrder,
Nd4jLong *dimension, int dimensionLength) {
    return (dimensionLength == 1) ? stride[dimension[0]] : 0;
}
/**
 * Builds a full shape-info buffer (c order) for the given rank/shape,
 * tagging it with the requested data type. Caller owns the result.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape) {
    Nd4jLong *stride = shape::calcStrides(shape, rank);
    traceNew(11);
    auto info = new shape::ShapeInformation();
    info->shape = shape;
    info->stride = stride;
    info->offset = 0;
    info->rank = rank;
    info->order = 'c';
    info->elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
    auto result = shape::toShapeBuffer(info);
    // intermediate stride array and struct are no longer needed
    delete[] stride;
    delete info;
    nd4j::ArrayOptions::setDataType(result, dtype);
    return result;
}
/**
 * This is a special method: it returns ONLY a 2D shape buffer,
 * written into the caller-provided `buffer` (no allocation).
 *
 * This method is used only for SoftMax.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBuffer(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *buffer) {
    // strides live on the stack; MAX_RANK bounds the rank
    Nd4jLong stride[MAX_RANK];
    shape::calcStrides(shape, rank, stride);

    shape::ShapeInformation info;
    info.shape = shape;
    info.stride = stride;
    info.offset = 0;
    info.rank = rank;
    info.order = 'c';
    info.elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);

    shape::toShapeBuffer(&info, buffer);
    nd4j::ArrayOptions::setDataType(buffer, dtype);
    return buffer;
}
/**
 * Builds a full shape-info buffer (f order) for the given rank/shape,
 * tagging it with the requested data type. Caller owns the result.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape) {
    auto stride = shape::calcStridesFortran(shape, rank);
    traceNew(12);
    auto info = new shape::ShapeInformation();
    info->shape = shape;
    info->stride = stride;
    info->offset = 0;
    info->rank = rank;
    info->order = 'f';
    info->elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);
    auto result = shape::toShapeBuffer(info);
    // intermediate stride array and struct are no longer needed
    delete[] stride;
    delete info;
    nd4j::ArrayOptions::setDataType(result, dtype);
    return result;
}
// f-order variant writing into a caller-provided output buffer (no allocation)
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferFortran(int rank, nd4j::DataType dtype, Nd4jLong *shape, Nd4jLong *output) {
    Nd4jLong stride[MAX_RANK];
    shape::calcStridesFortran(shape, rank, stride);

    shape::ShapeInformation info;
    info.shape = shape;
    info.stride = stride;
    info.offset = 0;
    info.rank = rank;
    info.order = 'f';
    info.elementWiseStride = shape::computeElementWiseStride(rank, shape, stride, 0);

    shape::toShapeBuffer(&info, output);
    nd4j::ArrayOptions::setDataType(output, dtype);
    return output;
}
/**
 * Compute the real linear buffer offsets for every element of the given
 * shape/stride. Caller owns the returned array of `prod(shape)` entries.
 *
 * Fix: the loop counter is now Nd4jLong — the original `int i` overflowed
 * (UB) when the total element count exceeded INT_MAX.
 */
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(int rank, Nd4jLong *shape, Nd4jLong *stride) {
    Nd4jLong length = shape::prodLong(shape, rank);
    traceNew(13);
    Nd4jLong *ret = new Nd4jLong[length];
    for (Nd4jLong i = 0; i < length; i++) {
        // unflatten i into coordinates, then fold them back through the strides
        Nd4jLong *idx = shape::ind2sub(rank, shape, i);
        ret[i] = shape::getOffset(0, shape, stride, idx, rank);
        delete[] idx;
    }
    return ret;
}

/**
 * Same as above, deriving rank/shape/stride from a full shape buffer.
 */
INLINEDEF _CUDA_HD Nd4jLong *computeIndices(Nd4jLong *shapeBuffer) {
    return computeIndices(shape::rank(shapeBuffer), shape::shapeOf(shapeBuffer), shape::stride(shapeBuffer));
}
/**
 * Convert an nd index (such as 1,1) to a linear (c-order) index.
 * @param rank number of dimensions
 * @param shape the shape of the indexes to convert
 * @param indices the index to convert
 * @return the linear index given the shape and indices
 */
INLINEDEF _CUDA_HD Nd4jLong sub2Ind(const int rank, const Nd4jLong *shape, const Nd4jLong *indices) {
    // start from the innermost coordinate and accumulate outward
    Nd4jLong linear = indices[rank - 1];
    Nd4jLong step = 1;
    for (int d = rank - 2; d >= 0; --d) {
        step *= shape[d + 1];
        linear += step * indices[d];
    }
    return linear;
}

// fill a buffer with a constant value (vectorizable loop)
template <typename T>
INLINEDEF _CUDA_HD void fill(T* buffer, T value, Nd4jLong length) {
    PRAGMA_OMP_SIMD
    for (int i = 0; i < length; i++)
        buffer[i] = value;
}
/**
 * Convert a linear index to the equivalent nd index (allocating variant).
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the product of the shape)
 * @return the mapped indexes along each dimension (caller owns the array)
 */
INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) {
    auto coords = new Nd4jLong[rank];
    ind2sub(rank, shape, index, numIndices, coords);
    return coords;
}

/**
 * Convert a linear index to the equivalent nd index.
 * Infers the number of indices from the specified shape.
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension (caller owns the array)
 */
INLINEDEF _CUDA_HD Nd4jLong* ind2sub(int rank, Nd4jLong *shape, Nd4jLong index) {
    return ind2sub(rank, shape, index, shape::prodLong(shape, rank));
}
/**
 * Convert a linear index to the equivalent nd index, writing into `ret`.
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the product of the shape)
 * @param ret output buffer of `rank` coordinates
 *
 * Fix: the divisor is now Nd4jLong — the original `int denom` silently
 * truncated numIndices for arrays with more than INT_MAX elements,
 * producing wrong coordinates.
 */
INLINEDEF _CUDA_HD void ind2sub(int rank, Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *ret) {
    Nd4jLong denom = numIndices;
    for (int i = rank - 1; i >= 0; i--) {
        denom /= shape[i];
        ret[i] = index / denom;
        index %= denom;
    }
}
/**
 * Convert a linear index to the equivalent nd index, writing into `out`.
 * Infers the number of indices from the specified shape.
 * @param shape the shape of the dimensions
 * @param index the index to map
 */
INLINEDEF _CUDA_HD void ind2sub(int rank,Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) {
    auto total = shape::prodLong(shape, rank);
    ind2sub(rank, shape, index, total, out);
}
/**
 * Convert a linear index to the equivalent nd index, c-order
 * (allocating variant).
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param numIndices the number of total indices (typically the product of the shape)
 * @return the mapped indexes along each dimension (caller owns the array)
 */
INLINEDEF _CUDA_HD Nd4jLong * ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong numIndices) {
    auto coords = new Nd4jLong[rank];
    ind2subC(rank, shape, index, numIndices, coords);
    return coords;
}

/**
 * Convert a linear index to the equivalent nd index, c-order.
 * Infers the number of indices from the specified shape.
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @return the mapped indexes along each dimension (caller owns the array)
 */
INLINEDEF _CUDA_HD Nd4jLong *ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index) {
    return ind2subC(rank, shape, index, shape::prodLong(shape, rank));
}
/**
 * Convert a linear index to the equivalent nd index, c-order,
 * writing into `ret`.
 * @param shape the shape of the dimensions
 * @param index the index to map
 * @param arrLen the number of total indices (typically the product of the shape)
 */
INLINEDEF _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong arrLen, Nd4jLong *ret) {
    // peel off one axis at a time, from the outermost inward
    for (int d = 0; d < rank; d++) {
        arrLen /= shape[d];
        ret[d] = (arrLen > 0) ? index / arrLen : 0;
        if (arrLen > 0)
            index %= arrLen;
    }
}

/**
 * Convert a linear index to the equivalent nd index, c-order,
 * writing into `out`. Infers the number of indices from the shape.
 * @param shape the shape of the dimensions
 * @param index the index to map
 */
INLINEDEF _CUDA_HD void ind2subC(const int rank, const Nd4jLong *shape, Nd4jLong index, Nd4jLong *out) {
    auto total = shape::prodLong(shape, rank);
    ind2subC(rank, shape, index, total, out);
}
//////////////////////////////////////////////////////////////////////
// Maps a linear element index to its buffer offset for an arbitrary
// shape-info; contiguous c-order buffers take a multiply-only fast path.
INLINEDEF _CUDA_HD Nd4jLong getIndexOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen) {
    const Nd4jLong rank = shapeInfo[0];
    const Nd4jLong ews = shapeInfo[rank + rank + 2];
    // fast path: positive element-wise stride plus c ordering
    if (ews > 0 && order(shapeInfo) == 'c')
        return (ews == 1) ? index : ews * index;

    // general path: unflatten axis by axis, accumulating stride offsets
    Nd4jLong offset = 0;
    for (Nd4jLong dim = 1; dim <= rank; ++dim) {
        arrLen /= shapeInfo[dim];
        if (arrLen > 0 && shapeInfo[dim] > 1) {
            offset += (index / arrLen) * shapeInfo[dim + rank];
            index %= arrLen;
        }
    }
    return offset;
}
// 32-bit variant of getIndexOffset for shape-info stored as uint
INLINEDEF _CUDA_HD uint getIndexOffset(uint index, const uint *shapeInfo, uint arrLen) {
    const uint rank = shapeInfo[0];
    const uint ews = shapeInfo[rank + rank + 2];
    // 99 is the ASCII code of 'c' — the c-ordering marker in shapeInfo
    if (ews > 0 && shapeInfo[rank + rank + 3] == 99)
        return (ews == 1) ? index : ews * index;

    uint offset = 0;
    for (uint dim = 1; dim <= rank; ++dim) {
        arrLen /= shapeInfo[dim];
        if (arrLen > 0 && shapeInfo[dim] > 1) {
            offset += (index / arrLen) * shapeInfo[dim + rank];
            index %= arrLen;
        }
    }
    return offset;
}
// dispatches to the 32-bit offset computation when the caller declares it safe
INLINEDEF _CUDA_HD Nd4jLong indexOffset(Nd4jLong index, const Nd4jLong* lShapeInfo, const uint* uShapeInfo, Nd4jLong arrLen, const bool useUnsigned) {
    return useUnsigned
        ? static_cast<Nd4jLong>(getIndexOffset(static_cast<uint>(index), uShapeInfo, static_cast<uint>(arrLen)))
        : getIndexOffset(index, lShapeInfo, arrLen);
}
//////////////////////////////////////////////////////////////////////
// Like getIndexOffset, but the traversal order is supplied explicitly:
// 'c' walks axes outermost-first, anything else walks innermost-first.
INLINEDEF _CUDA_HD Nd4jLong getIndexOrderOffset(Nd4jLong index, const Nd4jLong *shapeInfo, Nd4jLong arrLen, const char order) {
    const Nd4jLong rank = *shapeInfo;
    Nd4jLong offset = 0;
    if (order == 'c') {
        for (Nd4jLong dim = 1; dim <= rank; ++dim) {
            arrLen /= shapeInfo[dim];
            if (arrLen > 0 && shapeInfo[dim] > 1) {
                offset += (index / arrLen) * shapeInfo[dim + rank];
                index %= arrLen;
            }
        }
    } else {
        for (Nd4jLong dim = rank; dim >= 1; --dim) {
            arrLen /= shapeInfo[dim];
            if (arrLen > 0 && shapeInfo[dim] > 1) {
                offset += (index / arrLen) * shapeInfo[dim + rank];
                index %= arrLen;
            }
        }
    }
    return offset;
}
/**
 * Convert a linear index to the equivalent nd index, honoring the
 * ordering flag stored in the shape-info ('f' vs c order).
 * @param shapeInfo full shape-info buffer
 * @param index the index to map
 * @param numIndices the number of total indices (typically the product of the shape)
 * @param out output buffer of rank coordinates
 */
INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong numIndices, Nd4jLong *out) {
    const int rank = shape::rank(shapeInfo);
    Nd4jLong *shapeArr = shape::shapeOf(shapeInfo);
    if (shape::order(shapeInfo) == 'f')
        shape::ind2sub(rank, shapeArr, index, numIndices, out);
    else
        shape::ind2subC(rank, shapeArr, index, numIndices, out);
}

/**
 * Order-aware ind2sub, with the index count inferred from the shape-info.
 * @param shapeInfo full shape-info buffer
 * @param index the index to map
 * @param out output buffer of rank coordinates
 */
INLINEDEF _CUDA_HD void ind2subOrder(Nd4jLong *shapeInfo, Nd4jLong index, Nd4jLong *out) {
    ind2subOrder(shapeInfo, index, shape::length(shapeInfo), out);
}
/**
 * Returns a newly-allocated copy of `shape` with entries reordered by
 * `rearrange` (out[i] = shape[rearrange[i]]). Caller owns the result.
 * @param length number of entries
 * @param shape source array
 * @param rearrange permutation indexes
 */
INLINEDEF _CUDA_HD Nd4jLong *doPermuteSwap(int length, Nd4jLong *shape, int *rearrange) {
    traceNew(16);
    auto permuted = new Nd4jLong[length];
    for (int k = 0; k < length; k++)
        permuted[k] = shape[rearrange[k]];
    return permuted;
}
/**
 * In-place permutation of *shape by `rearrange` ((*shape)[i] becomes the
 * old (*shape)[rearrange[i]]). No-ops for length 1, total extent < 2, or a
 * consecutive-ascending rearrange.
 *
 * Fix: removed the unreachable `else if (length == 1)` branch — length 1
 * already returns at the top — and the duplicated *shape dereference.
 *
 * @param length number of entries
 * @param shape pointer to the array to permute in place
 * @param rearrange permutation indexes
 */
INLINEDEF _CUDA_HD void doPermuteSwap(int length, Nd4jLong **shape, int *rearrange) {
    // a single dimension needs no permute
    if (length == 1)
        return;

    Nd4jLong *shapeDeref = *shape;
    // shapes like {1}, {1,1}, ... (total extent < 2) need no permute
    if (shape::prodLong(shapeDeref, length) < 2)
        return;

    // consecutive-ascending rearrange means nothing to do
    bool inOrder = true;
    for (int i = 0; i < length - 1; i++)
        inOrder = inOrder && rearrange[i] + 1 == rearrange[i + 1];
    if (inOrder)
        return;

    // fast path: length 2 is just a swap of the two entries
    if (length == 2) {
        auto first = shapeDeref[0];
        shapeDeref[0] = shapeDeref[1];
        shapeDeref[1] = first;
        return;
    }

    // general case: permute through a temporary copy
    auto temp = new Nd4jLong[length];
    memcpy(temp, shapeDeref, sizeof(Nd4jLong) * length);
    for (int i = 0; i < length; i++)
        shapeDeref[i] = temp[rearrange[i]];
    delete[] temp;
}
// permutes shapeBuffer into `out` (copies first when the buffers differ)
INLINEDEF _CUDA_HD void permuteShapeBufferInPlace(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *out) {
    if (out != shapeBuffer) {
        auto bytes = sizeof(Nd4jLong) * shape::shapeInfoLength(shape::rank(shapeBuffer));
        memcpy(out, shapeBuffer, bytes);
    }
    doPermuteShapeBuffer(shape::rank(shapeBuffer), shapeBuffer, rearrange, out);
}

// allocating variant: returns a permuted copy of shapeBuffer (caller owns it)
INLINEDEF _CUDA_HD Nd4jLong *permuteShapeBuffer(Nd4jLong *shapeBuffer, int* rearrange) {
    auto infoLen = shape::shapeInfoLength(shape::rank(shapeBuffer));
    auto permuted = shape::copyOf(infoLen, shapeBuffer);
    doPermuteShapeBuffer(permuted, rearrange);
    return permuted;
}
// Permutes a full shape-info buffer in place by `rearrange`
// (Nd4jLong-index variant): shape and stride entries are reordered,
// ews is reset to 0 (unknown) and the order flag is recomputed.
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const Nd4jLong *rearrange) {
    const int rank = shape::rank(shapeInfo);
    //check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute
    if(prodLong(shape::shapeOf(shapeInfo), rank) < 2)
        return;
    // check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well
    bool isPermutNecessary = false;
    for(int i = 0; i < rank; ++i)
        if(rearrange[i] != i) {
            isPermutNecessary = true;
            break;
        }
    if(!isPermutNecessary)
        return;
    // check whether rearrange contains correct indexes
    for(int i = 0; i < rank; ++i)
        if(rearrange[i] >= rank || rearrange[i] < 0) {
            // invalid permutation: report and leave shapeInfo untouched
            printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
            return;
        }
    // if everything is ok then perform permute: copy, then scatter back
    auto temp = new Nd4jLong[shape::shapeInfoLength(rank)];
    memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank));
    for (int i = 0; i < rank; ++i) {
        shapeInfo[i + 1] = temp[rearrange[i] + 1];              // shape entry
        shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank]; // stride entry
    }
    shapeInfo[2 * rank + 2] = 0; // ews is unknown after a permute
    shapeInfo[2 * rank + 3] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo),1); // order
    delete[] temp;
}
// int* overload of doPermuteShapeInfo; behavior mirrors the Nd4jLong* overload
// above (the trailing writes use shapeInfoLength(rank)-2/-1, which equal
// 2*rank+2 and 2*rank+3 respectively).
INLINEDEF _CUDA_HD void doPermuteShapeInfo(Nd4jLong *shapeInfo, const int* rearrange) {
const int rank = shape::rank(shapeInfo);
//check whether shape is like {1} or {1,1} or {1,1,1,1,...} - in this case we don't need permute
if(prodLong(shape::shapeOf(shapeInfo), rank) < 2)
return;
// check whether rearrange is like {0,1,2,3,...} - in this case we don't need permute as well
bool isPermutNecessary = false;
for(int i = 0; i < rank; ++i)
if(rearrange[i] != i) {
isPermutNecessary = true;
break;
}
if(!isPermutNecessary)
return;
// check whether rearrange contains correct indexes
for(int i = 0; i < rank; ++i)
if(rearrange[i] >= rank || rearrange[i] < 0) {
printf("shape::doPermuteShapeInfo function failed: rearrange indexes are incorrect !\n");
return;
}
// if everything is ok then perform permute
// snapshot first so already-swapped entries are not re-read
auto temp = new Nd4jLong[shape::shapeInfoLength(rank)];
memcpy(temp, shapeInfo, sizeof(Nd4jLong) * shape::shapeInfoLength(rank));
for (int i = 0; i < rank; ++i) {
shapeInfo[i + 1] = temp[rearrange[i] + 1];
shapeInfo[i + 1 + rank] = temp[rearrange[i] + 1 + rank];
}
// ews invalidated; order recomputed from the permuted strides
shapeInfo[shapeInfoLength(rank) - 2] = 0;
shapeInfo[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape::shapeOf(shapeInfo),shape::stride(shapeInfo), 1);
delete[] temp;
}
// Permutes shape and stride of shapeBuffer in place via doPermuteSwap,
// then zeroes ews and recomputes the order character. Scalars are a no-op.
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer,int *rearrange) {
//no swapping needs to happen
if(shape::isScalar(shapeBuffer)) {
return;
}
Nd4jLong *shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = shape::rank(shapeRef);
// shape/stride point INTO shapeBuffer, so the swaps mutate it directly
Nd4jLong *shape = shape::shapeOf(shapeRef);
Nd4jLong *stride = shape::stride(shapeRef);
shape::doPermuteSwap(rearrageRank,&shape,rearrange);
shape::doPermuteSwap(rearrageRank,&stride,rearrange);
// ews is no longer valid after permutation; order is derived from new strides
shapeRef[shapeInfoLength(rearrageRank) - 2] = 0;
shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1);
// doPermuteShapeBuffer(shapeBuffer, rearrange); // possible fix of integer overflow issue when strides are too large
}
/*
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) {
auto shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = shape::rank(shapeRef);
auto shape = shape::shapeOf(shapeRef);
auto stride = shape::stride(shapeRef);
shape::copyOf(rearrageRank,rearrange, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&shape, tmpBuffer);
shape::copyOf(rearrageRank,rearrange, tmpBuffer);
shape::doPermuteSwap(rearrageRank,&stride,tmpBuffer);
shapeRef[shapeInfoLength(rearrageRank) - 2] = 0;
shapeRef[shape::shapeInfoLength(rearrageRank) - 1] = shape::getOrder(rearrageRank,shape,stride,1);
}
*/
// Rank-supplied variant: permutes shape and stride of shapeBuffer in place,
// then writes order and a zero ews into the trailing metadata slots.
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank,Nd4jLong *shapeBuffer, int *rearrange) {
Nd4jLong *shapeRef = shapeBuffer;
//rank of the rearrange array == rank of shape buffer
int rearrageRank = rank;
Nd4jLong *shape = shape::shapeOf(shapeRef);
Nd4jLong *stride = shape::stride(shapeRef);
// NOTE(review): rearrange is heap-copied before each swap, yet the 2-arg
// overload above passes rearrange to doPermuteSwap twice without copying —
// confirm whether doPermuteSwap actually mutates its rearrange argument;
// if not, both copies here are unnecessary allocations
auto rearrangeCopy1 = shape::copyOf(rearrageRank, rearrange);
shape::doPermuteSwap(rearrageRank,&shape,rearrangeCopy1);
delete[] rearrangeCopy1;
auto rearrangeCopy2 = shape::copyOf(rearrageRank,rearrange);
shape::doPermuteSwap(rearrageRank, &stride, rearrangeCopy2);
// order recomputed from permuted strides; ews invalidated
shapeBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank,shape,stride,1);
shapeBuffer[shape::shapeInfoLength(rank) - 2] = 0;
delete[] rearrangeCopy2;
}
// Permutes shapeBuffer's shape/stride into tmpBuffer (out-of-place unless the
// two alias), then refreshes order and invalidates ews on the result.
//
// Fixes two defects in the original:
//  1. copyOf was called with length `rank`, copying only the first `rank`
//     words of a shape-info buffer that holds shapeInfoLength(rank) words,
//     so a distinct tmpBuffer received a truncated copy.
//  2. the swaps were applied to shapeBuffer (the input) rather than
//     tmpBuffer (the output), mutating the caller's source buffer; see
//     permuteShapeBufferInPlace, which expects `out` to hold the result.
INLINEDEF _CUDA_HD void doPermuteShapeBuffer(int rank, Nd4jLong *shapeBuffer, int *rearrange, Nd4jLong *tmpBuffer) {
    if (shapeBuffer != tmpBuffer) {
        // copy the FULL shape-info buffer, not just the first `rank` words
        shape::copyOf(shape::shapeInfoLength(rank), shapeBuffer, tmpBuffer);
    }
    // operate on the output buffer; the swaps mutate it through these pointers
    auto shape = shape::shapeOf(tmpBuffer);
    auto stride = shape::stride(tmpBuffer);
    shape::doPermuteSwap(rank, &shape, rearrange);
    shape::doPermuteSwap(rank, &stride, rearrange);
    // ews is no longer meaningful after a permute; order follows new strides
    tmpBuffer[shapeInfoLength(rank) - 2] = 0;
    tmpBuffer[shape::shapeInfoLength(rank) - 1] = shape::getOrder(rank, shape, stride, 1);
}
// Builds a permutation that rotates the leading (originalRank -
// dimensionLength) axes behind the trailing dimensionLength axes.
// Caller owns (delete[]) the returned array.
INLINEDEF _CUDA_HD Nd4jLong *createPermuteIndexes(int originalRank, int *dimension,int dimensionLength) {
    const int delta = originalRank - dimensionLength;
    traceNew(17);
    auto indexes = new Nd4jLong[originalRank];
    for (int i = 0; i < originalRank; i++)
        indexes[i] = (i < delta) ? i + dimensionLength : i - delta;
    return indexes;
}
/**
* Get the ordering for the device
* @param length
* @param shape
* @param stride
* @param elementStride
* @return
*/
/**
* Determine the memory ordering of an array from its shape and strides:
* 'c' = C (row-major) contiguous, 'f' = Fortran (column-major) contiguous,
* 'a' = both (e.g. vectors), 'c' also returned when neither applies.
* @param length rank of the shape
* @param shape dimension extents
* @param stride per-dimension strides
* @param elementStride base stride of a single element
* @return the order character
*/
INLINEDEF _CUDA_HD char getOrder(int length, Nd4jLong *shape, Nd4jLong *stride, int elementStride) {
    // C-contiguity: strides must equal the running product of trailing extents
    int cContiguous = 1;
    int sd = 1;
    for (int i = length - 1; i >= 0; --i) {
        int dim = shape[i];
        if (stride[i] != sd) {
            cContiguous = 0;
            break;
        }
        if (dim == 0)
            break; // zero extent: trivially contiguous so far
        sd *= dim;
    }
    // Fortran-contiguity: strides must equal the running product of leading extents
    int isFortran = 1;
    sd = elementStride;
    for (int i = 0; i < length; ++i) {
        int dim = shape[i];
        if (stride[i] != sd)
            isFortran = 0;
        if (dim == 0)
            break;
        sd *= dim;
    }
    if (isFortran && cContiguous)
        return 'a';
    if (isFortran)
        return 'f';
    return 'c';
}
/**
* Ensure that every value in the re arrange
* array is unique
* @param arr
* @param shape
* @param arrLength
* @param shapeLength
* @return
*/
/**
* Validates that arr is a proper permutation: its length matches shapeLength,
* every entry is in [0, arrLength), and no entry repeats.
* @return 1 when valid, -1 otherwise
*/
template <typename T>
INLINEDEF _CUDA_HD int checkArrangeArray(T *arr, int arrLength, int shapeLength) {
    if (arrLength != shapeLength)
        return -1;
    // range check
    for (int i = 0; i < arrLength; i++)
        if (arr[i] < 0 || arr[i] >= arrLength)
            return -1;
    // uniqueness check (pairwise; ranks are small so O(n^2) is fine)
    for (int i = 0; i < arrLength; i++)
        for (int j = i + 1; j < arrLength; j++)
            if (arr[i] == arr[j])
                return -1;
    return 1;
}
// Debug hook invoked at each heap-allocation site; `id` identifies the call
// site. The logging is currently compiled out, so this is a no-op.
INLINEDEF _CUDA_HD void traceNew(int id) {
//printf("new happened: [%i]\n", id);
#ifndef __CUDACC__
//fflush(stdout);
#endif
}
/**
* Permute the shape information
* @param info the shape information to permute
* @param rearrange the order to re arrange
* @param rank the rank of the rearrange array
*/
/**
* Permute the shape information in place.
* @param info the shape information to permute
* @param rearrange the order to re arrange
* @param rank the rank of the rearrange array
*
* Fix: the original called checkArrangeArray but ignored its result, so an
* invalid permutation (duplicate or out-of-range indexes) was applied anyway.
* Now an invalid rearrange leaves *info untouched, matching the error style
* of doPermuteShapeInfo.
*/
INLINEDEF _CUDA_HD void permute(ShapeInformation **info, int *rearrange, int rank) {
    ShapeInformation *infoDeref = *info;
    if (checkArrangeArray(rearrange, rank, rank) < 0) {
        printf("shape::permute function failed: rearrange indexes are incorrect !\n");
        return;
    }
    shape::doPermuteSwap(rank, &infoDeref->shape, rearrange);
    shape::doPermuteSwap(rank, &infoDeref->stride, rearrange);
    // order depends on the (now permuted) strides
    infoDeref->order = getOrder(rank,
                                infoDeref->shape,
                                infoDeref->stride,
                                infoDeref->elementWiseStride);
}
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
/**
* Returns whether the given shape is a vector:
* rank 1 always is; rank 2 qualifies when either extent equals 1.
* @param shape the shape of the array
* @param rank the rank of the shape
*/
INLINEDEF _CUDA_HD int isVector(Nd4jLong *shape, int rank) {
    switch (rank) {
        case 1:
            return 1;
        case 2:
            return (shape[0] == 1 || shape[1] == 1) ? 1 : 0;
        default:
            // rank 0 and rank > 2 are never vectors
            return 0;
    }
}
// True when shapeInfo has rank > 2 but exactly one extent differs from 1
// ("vector-like"). posOfNonUnityDim receives the 0-based axis of the last
// non-unit extent seen (only meaningful when one exists).
INLINEDEF _CUDA_HD bool isLikeVector(Nd4jLong *shapeInfo, int& posOfNonUnityDim) {
    int nonUnitCount = 0;
    for (int axis = 1; axis <= shapeInfo[0]; ++axis) {
        if (shapeInfo[axis] != 1) {
            ++nonUnitCount;
            posOfNonUnityDim = axis - 1;
        }
    }
    return nonUnitCount == 1 && shapeInfo[0] > 2;
}
// True when shapeInfo has exactly one non-unit extent, or is a length-1 array
// of positive rank (trivially a vector; posOfNonUnityDim is left unset in
// that early-return case, matching original behavior).
INLINEDEF _CUDA_HD bool isCommonVector(const Nd4jLong *shapeInfo, int& posOfNonUnityDim) {
    if (rank(shapeInfo) > 0 && length(shapeInfo) == 1)
        return true;
    int nonUnitCount = 0;
    for (int axis = 1; axis <= shapeInfo[0]; ++axis) {
        if (shapeInfo[axis] != 1) {
            ++nonUnitCount;
            posOfNonUnityDim = axis - 1;
        }
    }
    return nonUnitCount == 1;
}
// Deep-copies a shape-info buffer onto the heap; caller owns (delete[]) it.
INLINEDEF _CUDA_H Nd4jLong* detachShape(Nd4jLong *originalShape) {
    const auto len = shape::shapeInfoLength(originalShape);
    auto detached = new Nd4jLong[len];
    memcpy(detached, originalShape, shape::shapeInfoByteLength(originalShape));
    return detached;
}
// Heap copy of a full shape-info buffer (identical to detachShape);
// caller owns (delete[]) the result.
INLINEDEF _CUDA_H Nd4jLong* copyShape(Nd4jLong *originalShape) {
    auto duplicate = new Nd4jLong[shape::shapeInfoLength(originalShape)];
    memcpy(duplicate, originalShape, shape::shapeInfoByteLength(originalShape));
    return duplicate;
}
// Shape-info overload: delegates to the (shape, rank) form.
INLINEDEF _CUDA_HD int isVector(const Nd4jLong *shapeInfo) {
    auto extents = shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo));
    return isVector(extents, shape::rank(shapeInfo));
}
// A row vector is a vector whose first extent equals 1.
INLINEDEF _CUDA_HD bool isRowVector(const Nd4jLong *shapeInfo) {
    if (shape::isVector(shapeInfo) != 1)
        return false;
    return shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo))[0] == 1;
}
// A column vector is a vector whose first extent is NOT 1.
INLINEDEF _CUDA_HD bool isColumnVector(Nd4jLong *shapeInfo) {
    if (shape::isVector(shapeInfo) != 1)
        return false;
    return shape::shapeOf(shapeInfo)[0] != 1;
}
// Returns 1 if any single extent equals the total element count (i.e. all
// other extents are 1), 0 otherwise.
// Fix: the original recomputed shape::prod(shape, rank) on every loop
// iteration; the product is loop-invariant, so it is hoisted out.
INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shape, int rank) {
    const auto totalLength = shape::prod(shape, rank);
    for (int i = 0; i < rank; i++) {
        if (shape[i] == totalLength)
            return 1;
    }
    return 0;
}
// Shape-info overload: delegates to the (shape, rank) form.
INLINEDEF _CUDA_HD int oneDimEqualToLength(Nd4jLong *shapeInfo) {
    auto extents = shape::shapeOf(shapeInfo);
    return oneDimEqualToLength(extents, shape::rank(shapeInfo));
}
/**
* Returns whether the
* given shape is a vector or not
* @param shape the shape of the array
* @param rank the rank of the shape
*/
/**
* Returns whether the given shape is a matrix:
* exactly rank 2 with both extents greater than 1.
* @param shape the shape of the array
* @param rank the rank of the shape
*
* Fix: the original read shape[1] for any rank <= 2, which is an
* out-of-bounds read for rank-0/rank-1 shape arrays; ranks other than 2
* are never matrices, so they now return 0 without touching the array.
*/
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shape, int rank) {
    if (rank != 2)
        return 0;
    if (shape[0] == 1 || shape[1] == 1)
        return 0; // degenerate dimension: that's a vector, not a matrix
    return 1;
}
// Shape-info overload: delegates to the (shape, rank) form.
INLINEDEF _CUDA_HD int isMatrix(Nd4jLong *shapeInfo) {
    auto extents = shape::shapeOf(shapeInfo);
    return isMatrix(extents, shape::rank(shapeInfo));
}
/**
* Returns the shape portion of an information
* buffer
*/
/**
* Returns the shape portion of an information buffer
* (extents start at slot 1, right after the rank word).
*/
INLINEDEF _CUDA_HD Nd4jLong *shapeOf(Nd4jLong *buffer) {
    return &buffer[1];
}
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
/**
* Return a heap-allocated copy of a buffer; the caller must free
* (delete[]) the result.
*/
template <typename T>
INLINEDEF _CUDA_HD T *copyOf(Nd4jLong length, T *toCopy) {
    traceNew(18);
    T *fresh = new T[length];
    return copyOf(length, toCopy, fresh);
}
// Raw byte-copy of `length` elements into caller-provided storage `ret`,
// which is returned for convenience.
template <typename T>
INLINEDEF _CUDA_HD T* copyOf(Nd4jLong length, T *toCopy, T *ret) {
    memcpy(ret, toCopy, length * sizeof(T));
    return ret;
}
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
// Copies `length` elements from `from` into `to` (no allocation).
template <typename T>
INLINEDEF _CUDA_HD void copyTo(Nd4jLong length, T *from, T *to) {
    memcpy(to, from, length * sizeof(T));
}
/**
* Return a copy of a buffer.
* This buffer allocates memory
* that must be freed elsewhere.
*/
// Gather copy: to[i] = from[indexes[i]] for each of the `length` positions.
INLINEDEF _CUDA_HD void copyTo(int length, Nd4jLong *from, Nd4jLong *to, Nd4jLong *indexes) {
    for (int i = 0; i < length; i++)
        to[i] = from[indexes[i]];
}
/**
* Permute the given strides
* in the given rearrange order
* @param toPermute the buffer to permute
* @param shapeRank the length of the buffer to permute
* @param rearrange the rearrange order (must be 0 based indexes
* and all must be filled in)
* @return the rearranged array
*/
/*
INLINEDEF _CUDA_HD Nd4jLong *permutedStrides(Nd4jLong *toPermute, int shapeRank, int *rearrange) {
Nd4jLong *strideCopy = copyOf(shapeRank, toPermute);
checkArrangeArray(rearrange, shapeRank, shapeRank);
Nd4jLong *newStride = doPermuteSwap(shapeRank, strideCopy, rearrange);
delete[] strideCopy;
return newStride;
}
*/
/**
* Return the slice (shape + 1 in pointer arithmetic)
* @param shape the shape to take the slice of
* @return the shape array - the first entry
*/
/**
* Return the slice (shape + 1 in pointer arithmetic):
* the shape array with its first entry dropped.
*/
INLINEDEF _CUDA_HD Nd4jLong *slice(Nd4jLong *shape) {
    return &shape[1];
}
// Number of slices equals the extent of the first dimension.
INLINEDEF _CUDA_HD int slices(Nd4jLong *shapeBuffer) {
    auto firstDim = shape::shapeOf(shapeBuffer)[0];
    return static_cast<int>(firstDim);
}
// Builds a new shape-info buffer describing slice `sliceIdx` of shapeBuffer
// (one dimension cut off the front, with rank clamped to a minimum of 2).
// Caller owns (delete[]) the returned buffer.
INLINEDEF _CUDA_HD Nd4jLong *sliceOfShapeBuffer(Nd4jLong sliceIdx, Nd4jLong *shapeBuffer) {
int rank = shape::rank(shapeBuffer);
// new rank is one less, but never below 2 (rank-2 buffers stay rank 2)
int newRank = rank - 1;
if(newRank < 2)
newRank = 2;
Nd4jLong *newShapeBuffer = new Nd4jLong[shape::shapeInfoLength(newRank)];
newShapeBuffer[0] = newRank;
Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
Nd4jLong *currStride = shape::stride(shapeBuffer);
//initialize new shape and stride by taking the shape and stride + 1
//and adding to the shape information
//a slice is always just taking the existing shape and cutting the first index off
//of the shape and stride
Nd4jLong *newShape = shape::shapeOf(newShapeBuffer);
Nd4jLong *newStride = shape::stride(newShapeBuffer);
if(shape::isVector(shapeBuffer)) {
Nd4jLong *currShape = shape::shapeOf(shapeBuffer);
//row vector: slice index 0 is a valid index, just copy the whole thing
if(currShape[0] == 1) {
if(sliceIdx == 0) {
memcpy(newShapeBuffer,shapeBuffer,shape::shapeInfoByteLength(shape::rank(shapeBuffer)));
return newShapeBuffer;
}
// NOTE(review): a row vector with sliceIdx != 0 falls through to the
// generic offset computation below with newShape/newStride never
// initialized — confirm callers never request sliceIdx > 0 on row vectors
}
//column vector: this will be a scalar
else {
delete[] newShapeBuffer;
Nd4jLong *scalar = shape::createScalarShapeInfo();
int offset = shape::offset(shapeBuffer);
scalar[shape::shapeInfoLength(2) - 3] = offset + sliceIdx;
return scalar;
}
}
else if(shape::isMatrix(shapeBuffer)) {
// matrix slice: a row of the original, represented as a 1 x cols buffer
newShape[0] = 1;
newShape[1] = currShape[1];
newStride[0] = 1;
newStride[1] = currStride[1];
}
else {
// general case: drop the leading dimension from shape and stride
for(int i = 0; i < newRank; i++) {
newShape[i] = currShape[i + 1];
newStride[i] = currStride[i + 1];
}
}
auto indices = new Nd4jLong[rank];
memset((void *) indices,0,rank * sizeof(Nd4jLong));
indices[0] = sliceIdx;
// NOTE(review): getOffset is passed the ORIGINAL rank while newShape and
// newStride only hold newRank entries — for rank > newRank this reads past
// the arrays; confirm whether this relies on rank == newRank + 1 callers only
Nd4jLong offset = shape::getOffset(0,newShape,newStride,indices,rank);
newShapeBuffer[shape::shapeInfoLength(newRank) - 3] = offset;
if(shape::isMatrix(shapeBuffer)) {
newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = currStride[1];
}
else {
newShapeBuffer[shape::shapeInfoLength(newRank) - 2] = shape::elementWiseStride(shapeBuffer);
}
newShapeBuffer[shape::shapeInfoLength(newRank) - 1] = shape::getOrder(newRank,newShape,newStride,1);
delete[] indices;
return newShapeBuffer;
}
/**
* Returns the length of the
* shape information buffer:
* rank * 2 + 3
* @param rank the rank to get the shape
* info length for
* @return rank * 2 + 4
*/
/**
* Number of Nd4jLong words in a shape-info buffer of the given rank:
* 1 rank word + rank extents + rank strides + 3 trailing metadata words
* (type/offset, ews, order) = 2 * rank + 4.
*/
INLINEDEF _CUDA_HD int shapeInfoLength(int rank) {
    return 2 * rank + 4;
}
// Buffer overload: the rank lives in slot 0.
INLINEDEF _CUDA_HD int shapeInfoLength(Nd4jLong* shape) {
    return shapeInfoLength(static_cast<int>(*shape));
}
// Const-buffer overload: the rank lives in slot 0.
INLINEDEF _CUDA_HD int shapeInfoLength(const Nd4jLong* shape) {
    return shapeInfoLength(static_cast<int>(*shape));
}
// Byte size of a shape-info buffer of the given rank:
// (2 * rank + 4) words of Nd4jLong.
INLINEDEF _CUDA_HD size_t shapeInfoByteLength(int rank) {
    return sizeof(Nd4jLong) * (rank * 2 + 4);
}
// Buffer overload: reads the rank from slot 0 and delegates.
INLINEDEF _CUDA_HD size_t shapeInfoByteLength(const Nd4jLong* shapeInfo) {
    return shapeInfoByteLength(static_cast<int>(shapeInfo[0]));
}
/**
* Returns the rank portion of
* an information buffer
*/
/**
* Returns the rank portion of an information buffer (slot 0).
*/
INLINEDEF _CUDA_HD int rank(const Nd4jLong *buffer) {
    return static_cast<int>(*buffer);
}
// int-buffer overload: rank is stored in slot 0.
INLINEDEF _CUDA_HD int rank(const int *buffer) {
    return *buffer;
}
// unsigned-buffer overload: rank is stored in slot 0.
INLINEDEF _CUDA_HD int rank(const unsigned int *buffer) {
    return static_cast<int>(*buffer);
}
/**
* Converts a raw int buffer of the layout:
* rank
* shape
* stride
* offset
* elementWiseStride
*
* where shape and stride are both straight int pointers
*/
/**
* Unpacks a flat shape-info buffer into a ShapeInformation struct.
* Layout: rank | shape[rank] | stride[rank] | offset | ews | order.
* The struct's shape/stride pointers ALIAS the input buffer (no copy):
* the caller owns the returned struct (delete) but not the pointed-to data.
*
* Fix: the original assigned info->stride twice (once as buffer + 1 + rank,
* then again via a redundant local); the duplicate assignment is removed.
*/
INLINEDEF _CUDA_HD ShapeInformation *infoFromBuffer(Nd4jLong *buffer) {
    traceNew(19);
    auto info = new ShapeInformation;
    auto length = shapeInfoLength(rank(buffer));
    auto rank = buffer[0];
    info->rank = rank;
    // shape starts right after the rank word; strides follow the shape
    info->shape = buffer + 1;
    info->stride = buffer + (1 + rank);
    info->offset = buffer[length - 3];
    info->elementWiseStride = buffer[length - 2];
    info->order = (char) buffer[length - 1];
    return info;
}
/**
* Returns the stride portion of an information
* buffer
*/
/**
* Returns the stride portion of an information buffer
* (strides start right after the extents, at offset 1 + rank).
*/
INLINEDEF _CUDA_HD Nd4jLong *stride(const Nd4jLong *buffer) {
    return const_cast<Nd4jLong*>(buffer + 1 + rank(buffer));
}
// An array is "empty" when the ARRAY_EMPTY bit is set in its extra field.
INLINEDEF _CUDA_HD bool isEmpty(const Nd4jLong *shapeInfo) {
    auto flags = shape::extra(const_cast<Nd4jLong*>(shapeInfo));
    return (flags & ARRAY_EMPTY) == ARRAY_EMPTY;
}
/**
* Compute the length of the given shape
*/
/**
* Compute the length (element count) of the given shape: product of extents.
* Rank 0 is a scalar (length 1) unless the buffer is flagged empty (length 0).
*/
INLINEDEF _CUDA_HD Nd4jLong length(const Nd4jLong *shapeInfo) {
    const int r = shape::rank(shapeInfo);
    switch (r) {
        case 0:
            return isEmpty(shapeInfo) ? 0L : 1L;
        case 1:
            return shapeInfo[1];
        default:
            return shape::prodLong(shape::shapeOf(const_cast<Nd4jLong*>(shapeInfo)), r);
    }
}
// Product of all extents in an initializer-list shape.
INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<int>& shape) {
    Nd4jLong total = 1;
    for (auto dim : shape)
        total *= dim;
    return total;
}
// Product of all extents in an initializer-list shape (Nd4jLong variant).
INLINEDEF _CUDA_HD Nd4jLong length(std::initializer_list<Nd4jLong>& shape) {
    Nd4jLong total = 1;
    for (auto dim : shape)
        total *= dim;
    return total;
}
/***
* Returns the offset
* portion of an information buffer
*/
INLINEDEF _CUDA_HD Nd4jLong offset(Nd4jLong *buffer) {
// offset is stored third-from-last in the shape-info buffer
// NOTE(review): haveSameOffsets() treats this same slot (length-3) as the
// data-type word — confirm which interpretation callers rely on
return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
}
// Mutable reference to the "extra"/flags word of the shape-info buffer
// (used by isEmpty for the ARRAY_EMPTY bit).
INLINEDEF _CUDA_HD Nd4jLong& extra(Nd4jLong *buffer) {
// NOTE(review): this indexes the SAME slot (length-3) as shape::offset()
// above — confirm the two meanings are never needed simultaneously
return buffer[shape::shapeInfoLength(shape::rank(buffer)) - 3];
}
/**
* Returns the ordering
* for this shape information buffer
*/
/**
* Returns the ordering character ('c'/'f'/'a') for this shape info buffer —
* it is the last word of the buffer.
* Fix: resolves the FIXME by replacing the magic expression
* buffer[0] * 2 + 4 with shapeInfoLength(), matching elementWiseStride().
*/
INLINEDEF _CUDA_HD char order(const Nd4jLong *buffer) {
    return static_cast<char>(buffer[shapeInfoLength(static_cast<int>(buffer[0])) - 1]);
}
/**
* Returns the element wise stride for this information
* buffer
*/
/**
* Returns the element-wise stride: the second-to-last word of the buffer.
*/
INLINEDEF _CUDA_HD Nd4jLong elementWiseStride(const Nd4jLong *buffer) {
    const int r = static_cast<int>(buffer[0]);
    return buffer[shapeInfoLength(r) - 2];
}
/**
* Returns the element wise stride for this information
* buffer relative to a dimension and reduction index
*/
// Picks the stride to iterate along for a reduction over the given
// dimensions, dispatching on ordering ('f' vs other) and on whether more
// than one dimension is being reduced. Returns 1 when the relevant reduced
// extent is degenerate (== 1) in the multi-dimension case.
INLINEDEF _CUDA_HD Nd4jLong reductionIndexElementWiseStride(Nd4jLong* buffer, int* dimension, int dimensionLength) {
if(dimensionLength > 1) {
if(shape::order(buffer) == 'f') {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) {
//int tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
//return tadElementWiseStride;
// 'f' order: use the FIRST reduced dimension's stride
auto tadElementWiseStride = shape::stride(buffer)[dimension[0]];
return tadElementWiseStride;
}
return 1;
}
else {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
if(shape::shapeOf(buffer)[dimension[dimensionLength - 1]] != 1) {
// 'c'-like order: use the LAST reduced dimension's stride
auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
return tadElementWiseStride;
}
return 1;
}
}
else {
// single reduced dimension: no degenerate-extent short-circuit here
if(shape::order(buffer) == 'f') {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
auto tadElementWiseStride = shape::stride(buffer)[dimension[0]];
return tadElementWiseStride;
}
else {
/**
* The element wise stride belongs to a reduction index.
* When used out of order, we can get rid of the data
* dependencies and rely on using the max dimension
* specified for stride instead.
* Say we take the sum(0,1) along arr
* we can use arr.stride(1) as a representation
* along which to iterate.
*/
auto tadElementWiseStride = shape::stride(buffer)[dimension[dimensionLength - 1]];
return tadElementWiseStride;
}
}
}
/**
* Returns whether
* the given shape info buffer
* represents a scalar shape
*/
/**
* Returns whether the given shape info buffer represents a scalar:
* rank 0, or rank 1/2 with every extent equal to 1.
*/
INLINEDEF _CUDA_HD int isScalar(Nd4jLong *info) {
    const int r = shape::rank(info);
    if (r > 2)
        return 0;
    if (r == 0)
        return 1;
    auto extents = shape::shapeOf(info);
    if (r == 1)
        return extents[0] == 1;
    // r == 2
    return extents[0] == 1 && extents[1] == 1;
}
/**
* Returns whether
* the given shape information
* represents a scalar
* shape or not
*/
/**
* Returns whether the given shape information represents a scalar shape.
* Fix (consistency): the shape-info-buffer overload above treats rank 0 as a
* scalar, but this overload previously fell through every branch and
* returned 0 for rank 0; it now returns 1, matching its sibling.
*/
INLINEDEF _CUDA_HD int isScalar(volatile ShapeInformation *info) {
    const int rank = info->rank;
    if(rank > 2)
        return 0;
    if(rank == 0)
        return 1;
    if(rank == 1)
        return info->shape[0] == 1;
    if(rank == 2)
        return info->shape[0] == 1 && info->shape[1] == 1;
    return 0;
}
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
/**
* Copies into `ret` every element of `data` whose position does NOT appear
* in `indexes`. Writing stops once dataLength - indexesLength elements have
* been kept, so duplicate entries in `indexes` cannot overflow `ret`.
* @param data the data to copy
* @param indexes positions to omit
* @param dataLength length of data
* @param indexesLength length of indexes
* @param ret caller-provided output of at least dataLength - indexesLength
*/
template <typename T1, typename T2>
INLINEDEF _CUDA_HD void removeIndex(T1* data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength, T1 *ret) {
    const int keepCount = dataLength - indexesLength;
    int written = 0;
    for (int i = 0; i < dataLength && written < keepCount; i++) {
        bool omit = false;
        for (int j = 0; j < indexesLength; j++) {
            if (indexes[j] == i) {
                omit = true;
                break;
            }
        }
        if (!omit)
            ret[written++] = data[i];
    }
}
/**
* Return a copy of this array with the
* given index omitted
*
* @param data the data to copy
* @param indexes the index of the item to remove
* @param dataLength the length of the data array
* @param indexesLength the length of the data array
* @return the new array with the omitted
*
* item
*/
/**
* Allocating variant of removeIndex: returns a new zero-initialized array of
* dataLength - indexesLength elements with the indexed positions omitted.
* Caller owns (delete[]) the result.
* Fix: the original printed a warning for a negative length but then still
* executed `new T1[negative]` (undefined behavior); it now returns nullptr.
*/
template <typename T1, typename T2>
INLINEDEF _CUDA_HD T1* removeIndex(T1 *data, T2 *indexes, Nd4jLong dataLength, Nd4jLong indexesLength) {
    auto lengthOfArr = dataLength - indexesLength;
    if(lengthOfArr < 0) {
        printf("Remove index call created a <= 0 length array. This was likely not intended.");
        return nullptr;
    }
    auto ret = new T1[lengthOfArr];
    memset(ret, 0, sizeof(T1) * lengthOfArr);
    removeIndex<T1, T2>(data, indexes, dataLength, indexesLength, ret);
    return ret;
}
// Returns a heap array of every value in [begin, end) that does NOT appear
// in `indexes`. Caller owns (delete[]) the result.
INLINEDEF _CUDA_HD Nd4jLong* everyIndexBut(Nd4jLong *indexes,int indexesLength,int begin,int end) {
// NOTE(review): the allocation size assumes begin == 0; for begin > 0 this
// over-allocates relative to the number of values actually produced —
// confirm all callers pass begin = 0
int len = end - indexesLength;
traceNew(20);
auto ret = new Nd4jLong[len];
int retIdx = 0;
//not here that we do 0 based indexing for end - this assumes things like:
//0 to 4 are specified
for(int i = begin; i < end ; i++) {
bool found = false;
for(int j = 0; j < indexesLength; j++) {
if(indexes[j] == i) {
found = true;
break;
}
}
if(!found) {
ret[retIdx++] = i;
}
}
return ret;
}
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
#ifdef __CUDACC__
// Device-only helper: each thread advances the base offset by its
// threadIdx.x scaled by the element-wise stride from the shape metadata.
INLINEDEF __device__ int tadOffset(ShapeInformation *xInfo, int offset) {
return offset + threadIdx.x * xInfo->elementWiseStride;
}
#endif
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
/**
* Promotes a 1-d extent to a rank-2 vector shape:
* dimension 0 produces a row (1, n), anything else a column (n, 1).
* Caller owns (delete[]) the returned 2-element array.
*/
INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape, int dimension) {
    traceNew(21);
    Nd4jLong *pair = new Nd4jLong[2];
    if (dimension == 0) {
        pair[0] = 1;
        pair[1] = shape[0];
    } else {
        pair[0] = shape[0];
        pair[1] = 1;
    }
    return pair;
}
/**
* Returns a shape
* forces the given length to be 2.
* @param shape the shape to modify
* @param dimension the dimension (row or column)
* for the shape to be returned as
* @return the new shape
*/
// Convenience overload: defaults to a row vector (dimension 0).
INLINEDEF _CUDA_HD Nd4jLong *ensureVectorShape(Nd4jLong *shape) {
    return ensureVectorShape(shape, 0);
}
/**
* This method does STRICT comparison for two shape buffers
*
* @param shape
* @return
*/
/**
* STRICT comparison of two shape buffers: ranks must match and every word
* of both buffers (shape, stride, and trailing metadata) must be identical.
*/
INLINEDEF _CUDA_HD bool equalsStrict(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    if (shapeA[0] != shapeB[0])
        return false;
    if (shapeA[0] == 0)
        return true; // rank-0 buffers are equal by rank alone
    const int len = shape::shapeInfoLength(shapeA[0]);
    for (int e = 1; e < len; e++) {
        if (shapeA[e] != shapeB[e])
            return false;
    }
    return true;
}
// Full word-by-word comparison of two shape buffers, except the
// type slot (third-from-last), which is allowed to differ.
INLINEDEF _CUDA_HD bool haveSameOffsets(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    if (shapeA[0] != shapeB[0])
        return false;
    if (shapeA[0] == 0)
        return true;
    const int len = shape::shapeInfoLength(shapeA[0]);
    const int typeSlot = len - 3; // type position, neglect it
    for (int e = 1; e < len; e++) {
        if (e == typeSlot)
            continue;
        if (shapeA[e] != shapeB[e])
            return false;
    }
    return true;
}
// Extent of the given dimension; negative dim counts from the end
// (numpy-style).
INLINEDEF _CUDA_HD int sizeAt(const Nd4jLong *shape, const int dim) {
    const int axis = (dim >= 0) ? dim : rank(shape) + dim;
    return shape[1 + axis];
}
/**
* This method does SOFT comparison for two shape buffers, we compare only rank & shapes
*
* @param shape
* @return
*/
/**
* SOFT comparison: only rank and extents are compared;
* strides, ews and order are ignored.
*/
INLINEDEF _CUDA_HD bool equalsSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    if (shapeA[0] != shapeB[0])
        return false;
    if (shapeA[0] == 0)
        return true; // rank-0: nothing further to compare
    for (Nd4jLong e = 1; e <= shapeA[0]; e++) {
        if (shapeA[e] != shapeB[e])
            return false;
    }
    return true;
}
// Soft shape equality plus matching type slot (third-from-last word).
INLINEDEF _CUDA_HD bool equalsTypesAndShapesSoft(const Nd4jLong *shapeA, const Nd4jLong *shapeB) {
    if (!equalsSoft(shapeA, shapeB))
        return false;
    return shapeA[shapeInfoLength(shapeA) - 3] == shapeB[shapeInfoLength(shapeB) - 3];
}
/**
* Generate an int buffer
* up to the given length
* at the specified increment
*
*/
/**
* Generate an int buffer up to the given length at the specified increment.
* Caller owns (delete[]) the result.
*/
template <typename T>
INLINEDEF _CUDA_HD T* range(int from, int to, int increment) {
int diff = nd4j::math::nd4j_abs<int>(from - to);
// NOTE(review): integer division truncates, so when diff is not a multiple
// of increment the final step is dropped — confirm callers expect
// floor(diff/increment) elements
int retLength = diff / increment;
T *ret;
traceNew(22);
// allocate at least one element even when the computed length is zero
if(diff / increment < 1)
ret = new T[1];
else
ret = new T[diff / increment];
if (from < to) {
int count = 0;
for (int i = from; i < to; i += increment) {
if (count >= retLength)
break;
ret[count++] = i;
}
} else if (from > to) {
int count = 0;
// NOTE(review): the descending branch starts at from - 1 while the
// ascending branch starts at from — confirm this asymmetry is intended
for (int i = from - 1; i >= to; i -= increment) {
if (count >= retLength)
break;
ret[count++] = i;
}
}
return ret;
}
/**
* Generate a range
* beginning at from and ending at to
* incrementing by 1
* @param from the start
* @param to the end
* @return the int array starting at from and ending at to
*/
/**
* Generate a range from `from` to `to` with an implicit step of 1.
*/
template <typename T>
INLINEDEF _CUDA_HD T* range(int from, int to) {
    return range<T>(from, to, 1);
}
/**
* Keep the given indexes in the data
* @param data
* @param index
* @param indexLength
* @param dataLength
* @return
*/
/**
* Keep only the data positions listed in `index`, preserving data order.
* Caller owns (delete[]) the returned array of indexLength elements.
*/
INLINEDEF _CUDA_HD Nd4jLong *keep(volatile Nd4jLong *data, int* index, int indexLength, int dataLength) {
    traceNew(23);
    Nd4jLong *kept = new Nd4jLong[indexLength];
    int written = 0;
    for (int i = 0; i < dataLength; i++) {
        bool wanted = false;
        for (int j = 0; j < indexLength; j++) {
            if (index[j] == i) {
                wanted = true;
                break;
            }
        }
        if (wanted)
            kept[written++] = data[i];
    }
    return kept;
}
/**
* Generate a reverse
* copy of the data
*/
/**
* Returns a heap-allocated reversed copy of `data`, or nullptr for an
* empty input. The source is not modified; caller owns (delete[]) the copy.
*/
template <typename T>
INLINEDEF _CUDA_HD T* reverseCopy(T *data, Nd4jLong length) {
    if (length < 1)
        return nullptr;
    traceNew(24);
    T *reversed = new T[length];
    for (Nd4jLong i = 0; i < length; i++)
        reversed[i] = data[length - 1 - i];
    return reversed;
}
// Writes the reverse of `from` into `to` using mirror swaps from both ends
// (the swap form keeps this correct even when `to` aliases `from`).
template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong length) {
    if (length < 1)
        return;
    for (Nd4jLong i = 0; i <= length / 2; i++) {
        T held = from[i];
        to[i] = from[length - i - 1];
        to[length - i - 1] = held;
    }
}
// Indexed variant: to[j] receives from[indexes[length - 1 - j]], written as
// mirror swaps (safe when `to` aliases `from`).
template <typename T>
INLINEDEF _CUDA_HD void reverseCopyTo(T *from, T *to, Nd4jLong *indexes, Nd4jLong length) {
    if (length < 1)
        return;
    for (Nd4jLong i = 0; i <= length / 2; i++) {
        T held = from[indexes[i]];
        to[i] = from[indexes[length - i - 1]];
        to[length - i - 1] = held;
    }
}
/**
*
* @param arr1
* @param arr1Length
* @param arr2
* @param arr2Length
* @return
*/
/**
* Returns a new buffer holding arr1 followed by arr2.
* Caller owns (delete[]) the result.
*/
template <typename T>
INLINEDEF _CUDA_HD T* concat(T* arr1, Nd4jLong arr1Length, T* arr2, Nd4jLong arr2Length) {
    traceNew(25);
    T *joined = new T[arr1Length + arr2Length];
    std::memcpy(joined, arr1, arr1Length * sizeof(T));
    std::memcpy(joined + arr1Length, arr2, arr2Length * sizeof(T));
    return joined;
}
/**
*
* @param numArrays
* @param numTotalElements
* @param arr
* @param lengths
* @return
*/
/**
* Flattens numArrays arrays (with per-array lengths) into one buffer of
* numTotalElements elements. Caller owns (delete[]) the result.
*/
template <typename T>
INLINEDEF _CUDA_HD T *concat(Nd4jLong numArrays, Nd4jLong numTotalElements, T **arr, Nd4jLong *lengths) {
    T* merged = new T[numTotalElements];
    Nd4jLong pos = 0;
    for (Nd4jLong i = 0; i < numArrays; i++) {
        for (Nd4jLong j = 0; j < lengths[i]; j++)
            merged[pos++] = arr[i][j];
    }
    return merged;
}
/**
* Get the length per slice of the
* given shape and the dimension
* @param rank the rank of the shape
* @param shape the shape of to get
* the length per slice for
* @param dimension the dimension to
* get the length per slice for
* @param dimensionLength the length of the dimension array
* @return the length per slice of the given shape
* along the given dimension
*/
/**
* Length per slice of the given shape along the given dimensions:
* the product of the extents that are NOT being reduced. Row vectors reduced
* along their single dimension, and full reductions, return the total length.
*/
INLINEDEF _CUDA_HD Nd4jLong lengthPerSlice(int rank, Nd4jLong *shape, int* dimension, int dimensionLength) {
    if (shape::isVector(shape, rank)) {
        // row vector reduced along its single dimension -> whole length
        if (dimensionLength == 1 && shape[0] == 1)
            return shape::prod(shape, rank);
        // other vector cases fall through to the generic path below
    }
    else if (rank == dimensionLength) {
        // reducing along every dimension -> whole length
        return shape::prod(shape, rank);
    }
    int absDelta = nd4j::math::nd4j_abs<int>(rank - dimensionLength);
    traceNew(27);
    auto remaining = shape::removeIndex<Nd4jLong>(shape, dimension, rank, dimensionLength);
    auto result = prodLong(remaining, absDelta);
    delete[] remaining;
    return result;
}
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
/**
* Offset of tensor `index`: index * tensorLength / lengthPerSlice,
* with a zero guard when the per-slice length is not positive.
*/
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int rank, int index, Nd4jLong *shape, Nd4jLong *tensorShape, int tensorShapeLength, int* dimension, int dimensionLength) {
    auto tensorLen = prodLong(tensorShape, tensorShapeLength);
    auto perSlice = lengthPerSlice(rank, shape, dimension, dimensionLength);
    if (perSlice <= 0)
        return 0;
    return index * tensorLen / perSlice;
}
/**
* calculates the offset for a tensor
* @param index
* @param arr
* @param tensorShape
* @return
*/
// Precomputed-lengths variant: offset = index * tensorLength / lengthPerSlice2.
INLINEDEF _CUDA_HD Nd4jLong sliceOffsetForTensor(int index,int tensorLength,int lengthPerSlice2) {
    return index * tensorLength / lengthPerSlice2;
}
#ifdef __CUDACC__
/**
* Computes the offset for accessing
* a global element given the shape information
* and the offset to be read.
*/
// Device-only: advances the base offset by this thread's index scaled by
// the buffer's element-wise stride.
INLINEDEF _CUDA_D int tadOffset(Nd4jLong *xInfo, int offset) {
return offset + threadIdx.x * elementWiseStride(xInfo);
}
#endif
/**
* Computes the number
* of tensors along
* a given dimension
*/
/**
* Number of tensors along the given dimensions:
* total length divided by the product of the kept extents.
*/
INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(volatile int rank, volatile int length,
volatile Nd4jLong *shape, int *dimension, int dimensionLength) {
    Nd4jLong *keptShape = shape::keep(shape, dimension, dimensionLength, rank);
    Nd4jLong count = length / shape::prodLong(keptShape, dimensionLength);
    delete[] keptShape;
    return count;
}
/**
* Computes the number
* of tensors along
* a given dimension
*/
// Shape-info overload: derives shape and total length from the buffer.
INLINEDEF _CUDA_HD Nd4jLong tensorsAlongDimension(Nd4jLong *shapeInfo, int *dimension, int dimensionLength) {
    Nd4jLong *keptShape = shape::keep(shape::shapeOf(shapeInfo), dimension, dimensionLength, rank(shapeInfo));
    Nd4jLong count = shape::length(shapeInfo) / shape::prodLong(keptShape, dimensionLength);
    delete[] keptShape;
    return count;
}
/**
* Get an offset for retrieval
* from a data buffer
* based on the given
* shape stride and given indices
* @param baseOffset the offset to start from
* @param shape the shape of the array
* @param stride the stride of the array
* @param indices the indices to iterate over
* @return the double at the specified index
*/
// Linear buffer offset for the given multi-index: baseOffset plus the sum of
// indices[i] * stride[i] over non-degenerate dimensions. Out-of-range indexes
// (against a non-unit extent) print a diagnostic and return -1.
INLINEDEF _CUDA_HD Nd4jLong getOffset(Nd4jLong baseOffset, const Nd4jLong *shape, const Nd4jLong *stride, const Nd4jLong *indices, int rank) {
Nd4jLong offset = baseOffset;
for(int i = 0; i < rank; i++) {
if(indices[i] >= shape[i] && shape[i] != 1) {
#ifdef __CUDA_ARCH__
printf("D: Index %i [%lld] must not be >= shape[%lld].\n", i,indices[i],shape[i]);
#else
printf("H: Index %i [%lld] must not be >= shape[%lld].\n", i, (long long) indices[i], (long long) shape[i]);
#endif
#ifdef __CUDA_ARCH__
//if (threadIdx.x == 0 && blockIdx.x == 0)
// printShapeInfoLinear("getOffsetFailed", rank, shape, stride);
#endif
return -1;
}
// unit dimensions contribute nothing and tolerate any index (broadcasting)
if(shape[i] != 1) {
offset += indices[i] * stride[i];
}
}
return offset;
}
/**
* Returns the tensor along dimension
* for the given block index
* @param blockSize
* @param blockIdx
* @param i
* @return
*/
/**
* Returns the i-th tensor-along-dimension handled by the given block:
* tads are striped across blocks with stride blockSize.
*/
INLINEDEF _CUDA_HD int tadForBlockIndex(int blockSize, int blockIdx, int i) {
    return i * blockSize + blockIdx;
}
/**
* Computes the number of tads per block
*
*/
// Ceiling division: how many tads each block must process.
INLINEDEF _CUDA_HD int tadsPerBlock(int blockSize, int tads) {
    return nd4j::math::nd4j_ceil<double, int>(static_cast<double>(tads) / blockSize);
}
/**
* Returns a shape buffer
* for the shape information metadata.
*/
/**
* Serializes a ShapeInformation struct into a freshly allocated flat buffer:
* rank | shape | stride | offset | ews | order. Caller owns (delete[]) it.
*/
INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info) {
    traceNew(29);
    const int r = info->rank;
    auto out = new Nd4jLong[shapeInfoLength(r)];
    out[0] = r;
    int pos = 1;
    for (int i = 0; i < r; i++)
        out[pos++] = info->shape[i];
    for (int i = 0; i < r; i++)
        out[pos++] = info->stride[i];
    out[pos++] = info->offset;
    out[pos++] = info->elementWiseStride;
    out[pos] = info->order;
    return out;
}
/**
 * Same serialization as the allocating overload, but writes into a
 * caller-provided buffer. A rank-0 (scalar) input is encoded as
 * [0, 0, 1, 99] and returned immediately.
 */
INLINEDEF _CUDA_HD Nd4jLong *toShapeBuffer( ShapeInformation *info, Nd4jLong* ret) {
    const int r = info->rank;
    ret[0] = r;
    if (r == 0) {
        ret[1] = 0;
        ret[2] = 1;
        ret[3] = 99;
        return ret;
    }
    int pos = 1;
    for (int d = 0; d < r; d++)
        ret[pos++] = info->shape[d];
    for (int d = 0; d < r; d++)
        ret[pos++] = info->stride[d];
    ret[pos++] = info->offset;
    ret[pos++] = info->elementWiseStride;
    ret[pos++] = info->order;
    return ret;
}
// Prints `length` entries of an Nd4jLong array on one line.
INLINEDEF _CUDA_HD void printIntArray(const Nd4jLong *arr, const int length) {
    for (int e = 0; e < length; e++)
        printf(" %lld ", (long long) arr[e]);
    printf("\n");
}
// Prints `length` entries of an int array on one line.
INLINEDEF _CUDA_HD void printIntArray(const int *arr, const int length) {
    for (int e = 0; e < length; e++)
        printf(" %i ", arr[e]);
    printf("\n");
}
// Dumps rank, shape, stride and order of a shape-info buffer to stdout.
INLINEDEF _CUDA_HD void printShapeInfo(Nd4jLong *shapeInfo) {
    const int r = shape::rank(shapeInfo);
    printf("Rank %d\n", r);
    printf("Shape:\n");
    Nd4jLong *shp = shape::shapeOf(shapeInfo);
    for (int e = 0; e < r; e++)
        printf(" %lld ", (long long) shp[e]);
    printf("\n");
    Nd4jLong *str = shape::stride(shapeInfo);
    printf("Stride:\n");
    for (int e = 0; e < r; e++)
        printf(" %lld ", (long long) str[e]);
    printf("\n");
    printf("Order %c\n", shape::order(shapeInfo));
}
// Prints the full shape-info buffer as "ShapeInfo: [v0, v1, ...]".
INLINEDEF _CUDA_HD void printShapeInfoLinear(Nd4jLong *shapeInfo) {
    const int lim = shape::shapeInfoLength(shape::rank(shapeInfo));
    printf("ShapeInfo: [");
    for (int e = 0; e < lim; e++) {
        if (e > 0)
            printf(", ");
        printf("%lld", (long long) shapeInfo[e]);
    }
    printf("]\n");
#ifndef __CUDA_ARCH__
    fflush(stdout);
#endif
}
// Prints "msg : [shape..., strides...]". Note that every shape entry is
// followed by ", " (the last one doubling as the shape/stride separator).
INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, int rank, Nd4jLong *shape, Nd4jLong *strides) {
    printf("%s : [", msg);
    for (int e = 0; e < rank; e++)
        printf("%lld, ", (long long) shape[e]);
    for (int e = 0; e < rank; e++) {
        printf("%lld", (long long) strides[e]);
        if (e + 1 < rank)
            printf(", ");
    }
    printf("]\n");
#ifndef __CUDA_ARCH__
    fflush(stdout);
#endif
}
/**
 * Prints "msg : [full shape-info buffer]".
 */
INLINEDEF _CUDA_HD void printShapeInfoLinear(const char *msg, Nd4jLong *shapeInfo) {
    int rank = shape::rank(shapeInfo);
    int lim = shape::shapeInfoLength(rank);
    printf("%s : [", msg);
    for (int i = 0; i < lim; i++) {
        printf("%lld", (long long) shapeInfo[i]);
        if (i < lim - 1) {
            printf(", ");
        }
    }
    printf("]\n");
    // guard on __CUDA_ARCH__ (not __CUDACC__) so the host pass of a CUDA
    // build still flushes -- matches the sibling overloads above
#ifndef __CUDA_ARCH__
    fflush(stdout);
#endif
}
/**
 * Prints `length` elements of a typed buffer (cast to float for display)
 * as "message: [a, b, ...]", or "Array: [...]" when message is null.
 */
template <typename T>
INLINEDEF _CUDA_HD void printArray(void *varr, int length, const char * message) {
    auto arr = reinterpret_cast<T*>(varr);
    if (message != nullptr)
        printf("%s: [", message);
    else
        printf("Array: [");
    for (int i = 0; i < length; i ++) {
        printf("%f", (float) arr[i]);
        if (i + 1 < length) printf(", ");
    }
    printf("]\n");
    // __CUDA_ARCH__ (not __CUDACC__): flush on host even when compiled by
    // nvcc -- consistent with the printShapeInfoLinear overloads
#ifndef __CUDA_ARCH__
    fflush(stdout);
#endif
}
// Prints a float array as "Array: [a, b, ...]".
INLINEDEF _CUDA_HD void printArray(float *arr, int length) {
    printf("Array: [");
    for (int e = 0; e < length; e++) {
        if (e > 0) printf(", ");
        printf("%f", arr[e]);
    }
    printf("]\n");
}
/**
* Given an linear index, element wise stride
* and the length of each tad
* map a linear index to a tad
* @param i the index to map
* @param the element wise stride for the tads
* @param numElementsPerTad the number of elements
* per tad
*/
/**
 * Maps a linear element index to the index of the tad containing it.
 */
INLINEDEF _CUDA_HD int tadIndex(int i, int elementWiseStride, int numElementsPerTad) {
    const int tadSpan = numElementsPerTad * elementWiseStride;
    return i / tadSpan;
}
/**
* Map a tad to a
* reduction index.
* @param tadIndexForOriginal the original tad index for the
* split up problem (eg: split is dimension 3 mapping to a 2,3 problem)
* @param tadsForReduced the number of tads for the shrunk down problem (eg: 2,3)
* @param tadsForOriginal the number of tads for the smaller problem (eg: 3)
*/
/**
 * Maps a tad index of the original (larger) problem to its reduction index
 * in the shrunk-down problem.
 */
INLINEDEF _CUDA_HD int reductionIndexForTad(int tadIndexForOriginal, int tadsForReduced,
                                            int tadsForOriginal) {
    if (tadIndexForOriginal == 0)
        return 0;
    const int tadsPerReduced = tadsForOriginal / tadsForReduced;
    return tadIndexForOriginal / tadsPerReduced;
}
/**
 * Reverses the shape and stride sections of a shape-info buffer in place
 * and flips its order flag ('c' <-> 'f'), i.e. a zero-copy transpose.
 *
 * Bug fix: the swap temporaries were `int`, silently truncating 64-bit
 * Nd4jLong shape/stride values; they are now Nd4jLong.
 */
INLINEDEF _CUDA_HD void transposeInplace(Nd4jLong *shapeBuffer) {
    int rank = shape::rank(shapeBuffer);
    Nd4jLong *shape = shape::shapeOf(shapeBuffer);
    Nd4jLong *strides = shape::stride(shapeBuffer);
    // reverse shape and strides with 64-bit temporaries
    for (int e = 0; e < rank / 2; e++) {
        const int mirror = rank - e - 1;
        Nd4jLong tmpShape = shape[e];
        shape[e] = shape[mirror];
        shape[mirror] = tmpShape;
        Nd4jLong tmpStride = strides[e];
        strides[e] = strides[mirror];
        strides[mirror] = tmpStride;
    }
    // flip the order flag: 'c' (99) becomes 'f' (102) and vice versa
    if (shape::order(shapeBuffer) == 'c')
        shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 102;
    else
        shapeBuffer[shape::shapeInfoLength(shapeBuffer) - 1] = 99;
}
/**
* Tad index for linear
* @param linearIndex
* @param tadLength
* @return
*/
/**
 * Position of a linear index within its tad.
 */
INLINEDEF _CUDA_HD int tadIndexForLinear(int linearIndex, int tadLength) {
    const int withinTad = linearIndex % tadLength;
    return withinTad;
}
/**
* Computes the number of tads
* per reduce index for the
* reduction tad.
*/
/**
 * Number of original tads folded into each reduction index.
 */
INLINEDEF _CUDA_HD int tadsPerReduceIndex(int tadsForReduce, int tadsForOriginal) {
    const int perIndex = tadsForOriginal / tadsForReduce;
    return perIndex;
}
/**
* Maps a linear index to a reduction index
* @param i the linear index to map
* @param elementWiseStride the element wise stride
* for the multiple problem
* @param tadNum the number of tads for the shrunken problem
* @param originalTadNum the tad number for the reduced version of the problem
*/
/**
 * Maps a linear element index to its reduction index: first locate the tad
 * the element belongs to, then map that tad onto the reduced problem.
 */
INLINEDEF _CUDA_HD int reductionIndexForLinear(int i, int elementWiseStride, int numElementsPerTad,
                                               int tadNum, int originalTadNum) {
    const int tad = tadIndex(i, elementWiseStride, numElementsPerTad);
    return reductionIndexForTad(tad, tadNum, originalTadNum);
}
// Allocates and returns a rank-1 scalar shape-info buffer
// (shape {1}, stride {1}, offset 0, ews 1, order 99 == 'c').
// Caller owns the returned buffer (delete[]).
INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo() {
traceNew(30);
auto shape = new Nd4jLong[1];
shape[0] = 1;
auto stride = new Nd4jLong[1];
stride[0] = 1;
auto shapeInformation2 = new ShapeInformation();
shapeInformation2->rank = 1;
shapeInformation2->offset = 0;
shapeInformation2->stride = stride;
shapeInformation2->shape = shape;
shapeInformation2->elementWiseStride = 1;
// 99 is the ASCII code of 'c' (c order)
shapeInformation2->order = 99;
// serialize into a flat buffer; the temporaries are copied, so free them
Nd4jLong *ret = shape::toShapeBuffer(shapeInformation2);
delete shapeInformation2;
delete[] shape;
delete[] stride;
return ret;
}
// Fills a caller-provided buffer with the canonical rank-2 scalar shape
// info: rank 2, shape {1,1}, stride {1,1}, offset 0, ews 1, order 99 ('c').
// Returns the same pointer for chaining.
INLINEDEF _CUDA_HD Nd4jLong* createScalarShapeInfo(Nd4jLong *ret) {
ret[0] = 2;  // rank
ret[1] = 1;  // shape[0]
ret[2] = 1;  // shape[1]
ret[3] = 1;  // stride[0]
ret[4] = 1;  // stride[1]
ret[5] = 0;  // offset
ret[6] = 1;  // element-wise stride
ret[7] = 99; // order 'c'
return ret;
}
/**
* Returns the prod of the data
* up to the given length
*/
/**
 * Product of the first `length` entries of `data`, returned as int.
 *
 * The accumulation is done in 64 bits to avoid signed-int overflow
 * (undefined behavior) on intermediate products; the final result is still
 * truncated to int, preserving the original interface.
 */
INLINEDEF _CUDA_HD int prod(Nd4jLong *data, int length) {
    Nd4jLong result = 1;
    for (int i = 0; i < length; i++) {
        result *= data[i];
    }
    return (int) result;
}
/**
* Returns the prod of the data
* up to the given length
*/
/**
 * 64-bit product of the first `length` entries of `data`.
 */
INLINEDEF _CUDA_HD Nd4jLong prodLong(const Nd4jLong *data, int length) {
    Nd4jLong result = 1;
    for (int e = 0; e < length; e++)
        result *= data[e];
    return result;
}
/**
 * Scanning from the rear, returns the stride of the highest dimension NOT
 * present in the (sorted) `dimension` list; falls back to stride[0] when
 * every dimension is covered.
 *
 * The original implementation contained two byte-identical branches for
 * 'f' and 'c' order; since the order did not change the computation, the
 * branches are collapsed into one.
 */
INLINEDEF _CUDA_HD int rearMostLeftOverItem(Nd4jLong *data, Nd4jLong *dimension, int dimensionLength) {
    Nd4jLong *stride = shape::stride(data);
    int rank = shape::rank(data);
    // walk backwards: the first index that does not match the tail of the
    // dimension list is the left-over dimension
    int dimIdx = dimensionLength - 1;
    for (int i = rank - 1; i >= 0; i--) {
        if (dimension[dimIdx--] != i) {
            return stride[i];
        }
    }
    // corner case: every dimension was included -- return the front stride
    return stride[0];
}
#ifdef __CUDACC__
// Device-side cooperative copy of a shape-info buffer: each thread of the
// block copies a blockDim.x-strided subset of the elements.
// No __syncthreads() here -- presumably callers synchronize before reading
// targetBuffer; TODO confirm.
__device__ INLINEDEF void sweepShapeInfoBuffer(Nd4jLong *shapeInfoBuffer, Nd4jLong *targetBuffer) {
// we read first element, to find out length of our shapeInfoBuffer
int rank = shapeInfoBuffer[0];
int len = shape::shapeInfoLength(rank);
for (int i = threadIdx.x; i < len; i += blockDim.x)
targetBuffer[i] = shapeInfoBuffer[i];
}
#endif
// Builds a shape-info buffer from a cnpy NpyArray descriptor by delegating
// to the (rank, shape, fortranOrder) overload below.
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(cnpy::NpyArray arr) {
return shape::shapeBufferOfNpy(arr.shape.size(),(unsigned int*) arr.shape.data(),arr.fortranOrder);
}
// INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpyBuffer(char *buffer) {
// unsigned Nd4jLong *shape;
// unsigned int ndims, wordSize;
// bool fortranOrder;
// cnpy::parseNpyHeaderStr(std::string(buffer),wordSize,shape,ndims,fortranOrder);
// Nd4jLong * ret = shape::shapeBufferOfNpy(ndims,shape,fortranOrder);
// delete[] shape;
// return ret;
// }
/**
 * Builds a FLOAT32 shape-info buffer from an npy shape description.
 *
 * Bug fix: the Fortran-order branch used to cast the `unsigned int*` shape
 * array directly to `Nd4jLong*`, reinterpreting pairs of 32-bit values as
 * single 64-bit ones. Both branches now widen each element explicitly into
 * a temporary Nd4jLong array.
 */
INLINEDEF _CUDA_HD Nd4jLong *shapeBufferOfNpy(int rank, unsigned int* shape, bool fortranOrder) {
    // widen the 32-bit extents into the 64-bit type the builders expect
    Nd4jLong *newShape = new Nd4jLong[rank];
    for (int i = 0; i < rank; i++) {
        newShape[i] = shape[i];
    }
    Nd4jLong *shapeBufferRet = fortranOrder
            ? shape::shapeBufferFortran(rank, nd4j::FLOAT32, newShape)
            : shape::shapeBuffer(rank, nd4j::FLOAT32, newShape);
    delete[] newShape;
    return shapeBufferRet;
}
// Returns true when the strides are strictly descending for 'c' order or
// strictly ascending for 'f' order (the "default" contiguous layouts).
// NOTE(review): the row-vector shortcut reads strides[1] even when
// rank == 1, i.e. one element past the stride section of the buffer --
// verify that the shape-info layout makes this read safe.
INLINEDEF _CUDA_HD bool strideDescendingCAscendingF(const Nd4jLong *shapeBuffer) {
int rank = shape::rank(shapeBuffer);
Nd4jLong *strides = shape::stride(const_cast<Nd4jLong*>(shapeBuffer));
char order = shape::order(shapeBuffer);
// unit-stride row vectors qualify regardless of order
if (shape::isRowVector(shapeBuffer) && strides[0] == 1 && strides[1] == 1)
return true;
if (order == 'c') {
for (int i = 1; i < rank; i++)
if (strides[i-1] <= strides[i])
return false;
return true;
} else if (order == 'f') {
for (int i = 1; i < rank; i++)
if (strides[i-1] >= strides[i])
return false;
return true;
} else {
printf("Unknown order for array!\n");
return false;
}
}
// True when the buffer is c-ordered with a positive element-wise stride,
// i.e. it can be traversed as one linear run.
INLINEDEF _CUDA_HD bool isStrideSimple(const Nd4jLong* shapeInfo) {
    if (order(shapeInfo) != 'c')
        return false;
    return elementWiseStride(shapeInfo) > 0;
}
//////////////////////////////////////////////////////////////////////////
// copy-paste of the Java hasDefaultStridesForShape function
// Returns true when shapeInfo's strides equal the default (contiguous)
// strides implied by its shape and order.
INLINEDEF _CUDA_HD bool areStridesDefault(const Nd4jLong* shapeInfo) {
const int rank = shape::rank(shapeInfo);
// a rank-0 (scalar) buffer trivially has default strides
if(rank == 0)
return true;
// quick reject: default strides are monotone for the given order
if(!strideDescendingCAscendingF(shapeInfo))
return false;
// copy the buffer, recompute its strides from scratch, and compare the
// stride section (buffer elements rank+1 .. 2*rank)
Nd4jLong defaultShapeInfo[MAX_SHAPEINFOLENGTH];
memcpy(defaultShapeInfo, shapeInfo, shape::shapeInfoByteLength(shapeInfo));
shape::updateStrides(defaultShapeInfo, shape::order(shapeInfo));
bool result = true;
for(int i = rank+1; i <= 2*rank; ++i)
if(defaultShapeInfo[i] != shapeInfo[i]) {
result = false;
break;
}
return result;
}
// INLINEDEF _CUDA_H bool reshapeCF(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder, Nd4jLong* target) {
// int oldnd;
// Nd4jLong* olddims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
// Nd4jLong* oldstrides = shape::copyOf(oldRank, shape::stride(oldShape));
// int np, op, last_stride;
// int oi, oj, ok, ni, nj, nk;
// Nd4jLong* newStrides = new Nd4jLong[newRank];
// oldnd = 0;
// /*
// * Remove axes with dimension 1 from the old array. They have no effect
// * but would need special cases since their strides do not matter.
// */
// for (oi = 0; oi < oldRank; oi++) {
// if (shape::shapeOf(oldShape)[oi] != 1) {
// olddims[oldnd] = shape::shapeOf(oldShape)[oi];
// oldstrides[oldnd] = shape::stride(oldShape)[oi];
// oldnd++;
// }
// }
// np = 1;
// for (ni = 0; ni < newRank; ni++) {
// np *= newShapeOf[ni];
// }
// op = 1;
// for (oi = 0; oi < oldnd; oi++) {
// op *= olddims[oi];
// }
// if (np != op) {
// /* different total sizes; no hope */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// if (np == 0) {
// /* the current code does not handle 0-sized arrays, so give up */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// /* oi to oj and ni to nj give the axis ranges currently worked with */
// oi = 0;
// oj = 1;
// ni = 0;
// nj = 1;
// while (ni < newRank && oi < oldnd) {
// np = newShapeOf[ni];
// op = olddims[oi];
// while (np != op) {
// if (np < op) {
// /* Misses trailing 1s, these are handled later */
// np *= newShapeOf[nj++];
// } else {
// op *= olddims[oj++];
// }
// }
// /* Check whether the original axes can be combined */
// for (ok = oi; ok < oj - 1; ok++) {
// if (isFOrder) {
// if (oldstrides[ok + 1] != olddims[ok] * oldstrides[ok]) {
// /* not contiguous enough */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// } else {
// /* C order */
// if (oldstrides[ok] != olddims[ok + 1] * oldstrides[ok + 1]) {
// /* not contiguous enough */
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return false;
// }
// }
// }
// /* Calculate new strides for all axes currently worked with */
// if (isFOrder) {
// newStrides[ni] = oldstrides[oi];
// for (nk = ni + 1; nk < nj; nk++) {
// newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
// }
// } else {
// /* C order */
// newStrides[nj - 1] = oldstrides[oj - 1];
// for (nk = nj - 1; nk > ni; nk--) {
// newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
// }
// }
// ni = nj++;
// oi = oj++;
// }
// if (ni >= 1) {
// last_stride = newStrides[ni - 1];
// } else {
// last_stride = shape::elementWiseStride(oldShape);
// }
// if (isFOrder && ni >= 1) {
// last_stride *= newShapeOf[ni - 1];
// }
// for (nk = ni; nk < newRank; nk++) {
// newStrides[nk] = last_stride;
// }
// target[0] = newRank;
// int cnt = 1;
// for (int e = 0; e < newRank; e++)
// target[cnt++] = newShapeOf[e];
// for (int e = 0; e < newRank; e++)
// target[cnt++] = newStrides[e];
// target[shape::shapeInfoLength(newRank) - 3] = 0;
// target[shape::shapeInfoLength(newRank) - 2] = 0;
// target[shape::shapeInfoLength(newRank) - 1] = isFOrder ? 102 : 99;
// nd4j::ArrayOptions::setDataType(target, nd4j::ArrayOptions::dataType(oldShape));
// delete[] olddims;
// delete[] oldstrides;
// delete[] newStrides;
// return true;
// }
// Builds newShapeInfo describing a copy-free reshape ("view") of the old
// array: the shape comes from newShape, the strides are derived from the
// old strides. Returns false when the old layout is not contiguous enough
// to be reinterpreted without copying the data.
INLINEDEF _CUDA_H bool reshapeCF(const int oldRank, const Nd4jLong* oldShapeInfo, const int newRank, const Nd4jLong* newShape, const bool isFOrder, Nd4jLong* newShapeInfo) {
// order codes: 102 == 'f', 99 == 'c'
const int newOrder = isFOrder ? 102 : 99;
const int oldOrder = oldShapeInfo[2 * oldRank + 3];
newShapeInfo[0] = newRank;
memcpy(newShapeInfo + 1, newShape, newRank * sizeof(Nd4jLong));
// fast path: same order and dense layout (ews == 1) -> default strides
if(newOrder == oldOrder && shape::elementWiseStride(oldShapeInfo) == 1) {
shape::updateStrides(newShapeInfo, newOrder);
newShapeInfo[2 * newRank + 1] = oldShapeInfo[2 * oldRank + 1]; // type
return true;
}
Nd4jLong* newStrides = shape::stride(newShapeInfo);
const Nd4jLong* oldShape = shape::shapeOf(const_cast<Nd4jLong*>(oldShapeInfo));
const Nd4jLong* oldStrides = shape::stride(const_cast<Nd4jLong*>(oldShapeInfo));
// [oldStart,oldStop) and [newStart,newStop) are the axis ranges currently
// being matched (both cover the same number of elements)
int oldStart(0), oldStop(1), newStart(0), newStop(1), newDim, oldDim;
if(isFOrder) {
while (newStart < newRank && oldStart < oldRank) {
newDim = newShape[newStart];
oldDim = oldShape[oldStart];
// grow whichever side covers fewer elements until the counts match
while (newDim != oldDim)
if (newDim < oldDim) newDim *= newShape[newStop++];
else oldDim *= oldShape[oldStop++];
/* Check whether the original axes can be combined */
for (int i = oldStart; i < oldStop - 1; i++) {
if(oldShape[i] == 1) { // take into account strides like {...,1,1,...} correctly
++oldStart;
continue;
}
if(oldStrides[i + 1] != oldShape[i] * oldStrides[i])
return false; // not contiguous enough
}
/* Calculate new strides for all axes currently worked with */
newStrides[newStart] = oldStrides[oldStart];
for (int i = newStart + 1; i < newStop; i++)
newStrides[i] = newStrides[i - 1] * newShape[i - 1];
newStart = newStop++;
oldStart = oldStop++;
}
}
else {
while (newStart < newRank && oldStart < oldRank) {
newDim = newShape[newStart];
oldDim = oldShape[oldStart];
while (newDim != oldDim)
if (newDim < oldDim) newDim *= newShape[newStop++];
else oldDim *= oldShape[oldStop++];
/* Check whether the original axes can be combined */
for (int i = oldStart; i < oldStop - 1; i++)
if (oldShape[i] != 1 && oldStrides[i] != oldShape[i + 1] * oldStrides[i + 1])
return false; /* not contiguous enough */
/* Calculate new strides for all axes currently worked with */
newStrides[newStop - 1] = oldStrides[oldStop - 1];
for (int i = newStop - 1; i > newStart; i--)
newStrides[i - 1] = newStrides[i] * newShape[i];
newStart = newStop++;
oldStart = oldStop++;
}
}
// trailing metadata: order, element-wise stride (only preserved when the
// order is unchanged), and the type word copied from the old buffer
newShapeInfo[2 * newRank + 3] = newOrder; // order
newShapeInfo[2 * newRank + 2] = (newOrder == oldOrder) ? shape::elementWiseStride(oldShapeInfo) : 0; // ews
newShapeInfo[2 * newRank + 1] = oldShapeInfo[2 * oldRank + 1]; // type
return true;
}
/**
 * Returns true when the array described by oldShape can be reshaped to
 * newShapeOf (in the given order) without copying data, i.e. when the
 * retained axes are contiguous enough to be merged/split. Ported from the
 * NumPy stride-recomputation algorithm; the logic mirrors the commented
 * reshapeCF above but only answers the yes/no question.
 *
 * Change vs. original: the unused local `last_stride` was removed; all
 * other statements are unchanged.
 */
INLINEDEF _CUDA_H bool canReshape(const int oldRank, Nd4jLong* oldShape, const int newRank, Nd4jLong* newShapeOf, bool isFOrder) {
    int oldnd;
    Nd4jLong* oldDims = shape::copyOf(oldRank, shape::shapeOf(oldShape));
    Nd4jLong* oldStrides = shape::copyOf(oldRank, shape::stride(oldShape));
    int np, op;
    int oldStart, oldStop, ok, newStart, newStop, nk;
    auto newStrides = new Nd4jLong[newRank];
    oldnd = 0;
    /*
     * Remove axes with dimension 1 from the old array. They have no effect
     * but would need special cases since their strides do not matter.
     */
    for (oldStart = 0; oldStart < oldRank; oldStart++) {
        if (shape::shapeOf(oldShape)[oldStart] != 1) {
            oldDims[oldnd] = shape::shapeOf(oldShape)[oldStart];
            oldStrides[oldnd] = shape::stride(oldShape)[oldStart];
            oldnd++;
        }
    }
    // total element counts on both sides must agree
    np = 1;
    for (newStart = 0; newStart < newRank; newStart++) {
        np *= newShapeOf[newStart];
    }
    op = 1;
    for (oldStart = 0; oldStart < oldnd; oldStart++) {
        op *= oldDims[oldStart];
    }
    if (np != op) {
        /* different total sizes; no hope */
        delete[] oldDims;
        delete[] oldStrides;
        delete[] newStrides;
        return false;
    }
    if (np == 0) {
        /* the current code does not handle 0-sized arrays, so give up */
        delete[] oldDims;
        delete[] oldStrides;
        delete[] newStrides;
        return false;
    }
    /* oldStart to oldStop and newStart to newStop give the axis ranges currently worked with */
    oldStart = 0;
    oldStop = 1;
    newStart = 0;
    newStop = 1;
    while (newStart < newRank && oldStart < oldnd) {
        np = newShapeOf[newStart];
        op = oldDims[oldStart];
        // grow whichever side covers fewer elements until the counts match
        while (np != op) {
            if (np < op) {
                /* Misses trailing 1s, these are handled later */
                np *= newShapeOf[newStop++];
            } else {
                op *= oldDims[oldStop++];
            }
        }
        /* Check whether the original axes can be combined */
        for (ok = oldStart; ok < oldStop - 1; ok++) {
            if (isFOrder) {
                if (oldStrides[ok + 1] != oldDims[ok] * oldStrides[ok]) {
                    /* not contiguous enough */
                    delete[] oldDims;
                    delete[] oldStrides;
                    delete[] newStrides;
                    return false;
                }
            } else {
                /* C order */
                if (oldStrides[ok] != oldDims[ok + 1] * oldStrides[ok + 1]) {
                    /* not contiguous enough */
                    delete[] oldDims;
                    delete[] oldStrides;
                    delete[] newStrides;
                    return false;
                }
            }
        }
        /* Calculate new strides for all axes currently worked with */
        if (isFOrder) {
            newStrides[newStart] = oldStrides[oldStart];
            for (nk = newStart + 1; nk < newStop; nk++) {
                newStrides[nk] = newStrides[nk - 1] * newShapeOf[nk - 1];
            }
        } else {
            /* C order */
            newStrides[newStop - 1] = oldStrides[oldStop - 1];
            for (nk = newStop - 1; nk > newStart; nk--) {
                newStrides[nk - 1] = newStrides[nk] * newShapeOf[nk];
            }
        }
        newStart = newStop++;
        oldStart = oldStop++;
    }
    delete[] oldDims;
    delete[] oldStrides;
    delete[] newStrides;
    return true;
}
// this function checks the consistency of the dimensions with the array rank (negative dimensions, too-large dimensions, too many dimensions)
// it also sorts the input array of dimensions, which is necessary for creating a TAD object
// Normalizes an array of dimension indices against `rank`: throws on empty
// input, converts negatives (dim += rank), sorts and de-duplicates, then
// validates that every dimension lies in [0, rank-1].
INLINEDEF _CUDA_H void checkDimensions(const int rank, std::vector<int>& dimensions) {
    if (dimensions.empty())
        throw std::runtime_error("shape::checkDimensions method: array of dimensions is empty!");
    // negative dimensions count from the end: -dim -> rank - |dim|
    for (size_t k = 0; k < dimensions.size(); ++k)
        if (dimensions[k] < 0)
            dimensions[k] += rank;
    // TAD creation requires sorted, duplicate-free dimensions
    if (dimensions.size() > 1) {
        std::sort(dimensions.begin(), dimensions.end());
        dimensions.erase(std::unique(dimensions.begin(), dimensions.end()), dimensions.end());
    }
    if ((int) dimensions.size() > rank)
        throw std::runtime_error("shape::checkDimensions method: number of input dimensions is too big ( > rank of array)!");
    if (dimensions[0] < 0 || dimensions.back() > (rank-1))
        throw std::runtime_error("shape::checkDimensions method: the negative dimension is still present in input array after transform or the too big dimension is present ( > rank of array) !");
}
// max array is outer for min array, min array is sub-array of max array
// function calculates the coordinates of min array (and saves them into minIdxs) given coordinates of max array (already stored in maxIdxs)
// Given per-dimension coordinates of the max (outer) array in maxIdxs,
// computes the matching coordinates in the min (sub) array and stores them
// in minIdxs. NOTE: maxIdxs is modified in place (wrapped into the min
// extents, broadcast-style).
INLINEDEF _CUDA_HD void maxIndToMinInd(Nd4jLong* maxIdxs, Nd4jLong* minIdxs, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) {
const auto maxRank = shape::rank(maxShapeInfo);
const auto minRank = shape::rank(minShapeInfo);
// if(minRank >= maxRank)
// throw std::runtime_error("shape::maxIndToMinInd method: rank of min array should be smaller then rank of max array!");
const auto diff = maxRank - minRank; // the size of dimsToExclude is equal to diff
if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1}
// the leading `diff` axes of max are excluded; the rest map 1:1
for(int minI = 0, maxI = diff; maxI < maxRank; ++maxI, ++minI) {
if(maxIdxs[maxI] >= minShapeInfo[minI+1])
maxIdxs[maxI] %= minShapeInfo[minI+1];
minIdxs[minI] = maxIdxs[maxI];
}
}
else {
// skip the axes listed in dimsToExclude, map the remaining ones in order
// NOTE(review): minI is incremented BEFORE use here, so minIdxs is written
// starting at index 1 while the branch above writes from index 0 --
// confirm this asymmetry is intended.
for(int dim = 0, minI = 0, maxI = 0; maxI < maxRank; ++maxI) {
if(dim < diff && dimsToExclude[dim] == maxI) {
++dim;
continue;
}
++minI;
if(maxIdxs[maxI] >= minShapeInfo[minI])
maxIdxs[maxI] %= minShapeInfo[minI];
minIdxs[minI] = maxIdxs[maxI];
}
}
}
//////////////////////////////////////////////////////////////////////
// Maps a linear index of the max (outer) array to the linear index of the
// corresponding element of the min (sub) array.
INLINEDEF _CUDA_HD Nd4jLong subArrayIndex(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) {
Nd4jLong maxIdxs[MAX_RANK];
// linear index -> per-dimension coordinates of the max array (c order)
ind2subC(shape::rank(maxShapeInfo), const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<Nd4jLong&>(maxIdx), maxIdxs);
Nd4jLong minIdxs[MAX_RANK];
maxIndToMinInd(maxIdxs, minIdxs, maxShapeInfo, minShapeInfo, dimsToExclude);
// per-dimension coordinates of the min array -> its linear index
return sub2Ind(shape::rank(minShapeInfo), minShapeInfo + 1, minIdxs);
}
//////////////////////////////////////////////////////////////////////
// Maps a linear index of the max (outer) array to the memory OFFSET
// (stride-weighted) of the corresponding element of the min (sub) array.
INLINEDEF _CUDA_HD Nd4jLong subArrayOffset(const Nd4jLong maxIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) {
Nd4jLong maxIdxs[MAX_RANK];
// linear index -> per-dimension coordinates of the max array (c order)
ind2subC(shape::rank(maxShapeInfo), const_cast<Nd4jLong *>(maxShapeInfo)+1, const_cast<Nd4jLong&>(maxIdx), maxIdxs);
Nd4jLong minIdxs[MAX_RANK];
maxIndToMinInd(maxIdxs, minIdxs, maxShapeInfo, minShapeInfo, dimsToExclude);
// minShapeInfo+1 is the shape section, +rank+1 is the stride section
return getOffset(0, minShapeInfo + 1, minShapeInfo + shape::rank(minShapeInfo) + 1, minIdxs, shape::rank(minShapeInfo));
}
//////////////////////////////////////////////////////////////////////
// Computes the memory offsets of ALL elements of the outer (max) array that
// correspond to the single element `minIdx` of the inner (min) array,
// writes them into maxOffsets, and returns their count.
INLINEDEF _CUDA_HD int outerArrayOffsets(Nd4jLong* maxOffsets, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) {
const auto rankMin = shape::rank(minShapeInfo);
const auto rankMax = shape::rank(maxShapeInfo);
// if(rankMin >= rankMax)
// throw std::runtime_error("shape::subArrayIndex method: rank of min array should be smaller then rank of max array!");
// if(rankMax > MAX_RANK/2)
// throw std::runtime_error("shape::subArrayIndex method: rank of max array should be <= MAX_RANK/2 !");
const auto diff = rankMax - rankMin; // the size of dimsToExclude is equal to diff
// one stack buffer split in two halves (hence the rankMax <= MAX_RANK/2
// requirement above): per-dim indices and per-dim increments
Nd4jLong buffer[MAX_RANK];
Nd4jLong* indices = buffer;
Nd4jLong* increment = buffer + MAX_RANK/2;
int N, minI, maxI;
// calculate min per-dim-indices which corresponds to absolute minIdx index
shape::ind2subC(rankMin, minShapeInfo + 1, minIdx, indices);
// transform storage indices to contain per-dim max indices, purpose - memory saving
// fill increment array as well
if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1}
// shared (equal-extent) axes are fixed (increment 0); excluded axes step by 1
for(minI = rankMin - 1, maxI = rankMax-1; maxI >= diff; --maxI, --minI) {
increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
indices[maxI] = indices[minI];
}
for(maxI = 0; maxI < diff; ++maxI) {
increment[maxI] = 1;
indices[maxI] = 0;
}
}
else {
for(N = diff-1, minI = rankMin - 1, maxI = rankMax - 1; maxI >= 0; --maxI) {
if(N >= 0 && dimsToExclude[N] == maxI) {
increment[maxI] = 1;
indices[maxI] = 0;
--N;
}
else {
increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
indices[maxI] = indices[minI--];
}
}
}
maxI = rankMax-1;
N = 0;
int step;
// offset of the first combination
maxOffsets[N++] = shape::getOffset(0, maxShapeInfo + 1, maxShapeInfo + rankMax + 1, indices, rankMax);
// nested loops - producing of absolute indices for max array
// (odometer walk over the non-fixed axes; `step` moves the cursor back to
// the innermost axis after each successful increment)
while(maxI >= 0) {
if(increment[maxI] != 0) {
indices[maxI] += increment[maxI];
if(indices[maxI] >= maxShapeInfo[maxI+1]) {
indices[maxI] %= increment[maxI]; // restore initial value of indices[maxI]
step = -1;
}
else {
maxOffsets[N++] = shape::getOffset(0, maxShapeInfo + 1, maxShapeInfo + rankMax + 1, indices, rankMax);
step = rankMax - 1 - maxI;
}
}
else if(maxI == rankMax - 1)
step = -1;
maxI += step;
}
return N;
}
//////////////////////////////////////////////////////////////////////
// Same odometer walk as outerArrayOffsets above, but produces LINEAR
// indices (via sub2Ind) of the outer-array elements instead of memory
// offsets; writes them to maxIdxs and returns their count.
INLINEDEF _CUDA_HD int outerArrayIndexes(Nd4jLong* maxIdxs, const Nd4jLong minIdx, const Nd4jLong* maxShapeInfo, const Nd4jLong* minShapeInfo, const int* dimsToExclude) {
const auto rankMin = shape::rank(minShapeInfo);
const auto rankMax = shape::rank(maxShapeInfo);
// if(rankMin >= rankMax)
// throw std::runtime_error("shape::subArrayIndex method: rank of min array should be smaller then rank of max array!");
// if(rankMax > MAX_RANK/2)
// throw std::runtime_error("shape::subArrayIndex method: rank of max array should be <= MAX_RANK/2 !");
const auto diff = rankMax - rankMin; // the size of dimsToExclude is equal to diff
// one stack buffer split in two halves: per-dim indices and increments
Nd4jLong buffer[MAX_RANK];
Nd4jLong* indices = buffer;
Nd4jLong* increment = buffer + MAX_RANK/2;
int N, minI, maxI;
// calculate min per-dim-indices which corresponds to absolute minIdx index
shape::ind2subC(rankMin, minShapeInfo + 1, minIdx, indices);
// transform storage indices to contain per-dim max indices, purpose - memory saving
// fill increment array as well
if(dimsToExclude == nullptr) { // means dimsToExclude == {0,1,2,...,diff-1}
for(minI = rankMin - 1, maxI = rankMax-1; maxI >= diff; --maxI, --minI) {
increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
indices[maxI] = indices[minI];
}
for(maxI = 0; maxI < diff; ++maxI) {
increment[maxI] = 1;
indices[maxI] = 0;
}
}
else {
for(N = diff-1, minI = rankMin - 1, maxI = rankMax - 1; maxI >= 0; --maxI) {
if(N >= 0 && dimsToExclude[N] == maxI) {
increment[maxI] = 1;
indices[maxI] = 0;
--N;
}
else {
increment[maxI] = (maxShapeInfo[maxI+1] == minShapeInfo[minI+1]) ? 0 : minShapeInfo[minI+1];
indices[maxI] = indices[minI--];
}
}
}
maxI = rankMax-1;
N = 0;
int step;
// linear index of the first combination
maxIdxs[N++] = sub2Ind(rankMax, maxShapeInfo + 1, indices);
// nested loops - producing of absolute indices for max array
while(maxI >= 0) {
if(increment[maxI] != 0) {
indices[maxI] += increment[maxI];
if(indices[maxI] >= maxShapeInfo[maxI+1]) {
indices[maxI] %= increment[maxI]; // restore initial value of indices[maxI]
step = -1;
}
else {
maxIdxs[N++] = sub2Ind(rankMax, maxShapeInfo + 1, indices);
step = rankMax - 1 - maxI;
}
}
else if(maxI == rankMax - 1)
step = -1;
maxI += step;
}
return N;
}
// Writes a rank-2 scalar shape-info into `buffer`: shape {1,1},
// stride {1,1}, ews 1, the given order, and the given data type.
// NOTE(review): buffer[5] is never written here -- presumably
// nd4j::ArrayOptions::setDataType stores the type word there; confirm.
INLINEDEF _CUDA_HD void shapeOldScalar(nd4j::DataType dataType, Nd4jLong* const buffer, const char order) {
buffer[0] = 2;
buffer[1] = 1;
buffer[2] = 1;
buffer[3] = 1;
buffer[4] = 1;
buffer[6] = 1;
buffer[7] = (int)order;
nd4j::ArrayOptions::setDataType(buffer, dataType);
}
// Element-wise cast-copy of `length` values from `from` into `to`.
template <typename T1, typename T2>
INLINEDEF _CUDA_H void convertT(T1 *from, T2 *to, Nd4jLong length) {
    for (Nd4jLong i = 0; i < length; i++)
        to[i] = static_cast<T2>(from[i]);
}
//////////////////////////////////////////////////////////////////////
/**
 * Calculates the linear offset of every sub-array described by the
 * enclosing array's shape/strides and stores them in subArrOffsets.
 *
 * Bug fixes: the original sequential branch used
 * `subArrOffsets[i++] = subArrOffsets[i-1] + ...` and
 * `currOffset[j--] = idx[j] = 0;` -- both mix a read and a modification of
 * the same variable in one expression (unsequenced, undefined behavior
 * before C++17). The statements are now split so evaluation order is
 * explicit and matches the C++17 semantics the code relied on.
 */
INLINEDEF void calcSubArrOffsets(const Nd4jLong numOfSubArrs, const int rank, const Nd4jLong* shape, const Nd4jLong* strides, Nd4jLong* subArrOffsets) {
    // the first sub-array always starts at offset 0
    subArrOffsets[0] = 0;
    // parallelize only when there is enough work
    if (numOfSubArrs > 1024 /*Environment::getInstance()->elementwiseThreshold()*/) {
#pragma omp parallel // PRAGMA_OMP_PARALLEL_ARGS(private(indexes))
        {
            // per-thread scratch for the per-dimension coordinates
            Nd4jLong* indexes = new Nd4jLong[rank];
#pragma omp for simd schedule(guided) // PRAGMA_OMP_PARALLEL_FOR
            for (Nd4jLong i = 1; i < numOfSubArrs; ++i) {
                shape::ind2subC(rank, shape, i, indexes);
                subArrOffsets[i] = 0;
                for (int j = 0; j < rank; ++j)
                    if (shape[j] != 1)  // unit dimensions contribute nothing
                        subArrOffsets[i] += indexes[j] * strides[j];
            }
            delete[] indexes;
        }
    }
    else {
        const Nd4jLong rankMinusOne = rank - 1;
        Nd4jLong i = 1, j = rankMinusOne;
        Nd4jLong* idx = new Nd4jLong[rank];
        Nd4jLong* currOffset = new Nd4jLong[rank];
        memset(idx, 0, sizeof(Nd4jLong) * rank);
        memset(currOffset, 0, sizeof(Nd4jLong) * rank);
        // nested-loop emulation: walk the index space, accumulating offsets
        while (j >= 0) {
            if (shape[j] == 1) { --j; continue; }  // ignore dimensions equal to unity
            if (j == rankMinusOne) {               // last (innermost) dimension
                for (idx[j] = 1; idx[j] < shape[j]; ++idx[j]) {
                    subArrOffsets[i] = subArrOffsets[i - 1] + strides[j];
                    ++i;
                }
                --j;
            }
            else if (idx[j] < shape[j] - 1) {
                currOffset[j] += strides[j];
                // NOTE(review): only currOffset[j] and currOffset[j-1] are
                // summed, mirroring the original -- confirm for rank > 2
                subArrOffsets[i] = j ? currOffset[j] + currOffset[j - 1] : currOffset[j];
                ++i;
                ++idx[j];
                j = rankMinusOne;
            }
            else {
                // reset this axis and move outward
                idx[j] = 0;
                currOffset[j] = 0;
                --j;
            }
        }
        delete[] idx;
        delete[] currOffset;
    }
}
}
#endif /* SHAPE_H_ */
|
GB_unaryop__ainv_uint16_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint16_uint64
// op(A') function: GB_tran__ainv_uint16_uint64
// C type: uint16_t
// A type: uint64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): for each of the anz entries, Ax [p] (uint64_t) is
// cast to uint16_t and then negated (unsigned negation wraps mod 2^16),
// per the GB_GETA / GB_CASTING / GB_OP macros above.
GrB_Info GB_unop__ainv_uint16_uint64
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,        // number of entries in Ax and Cx
    int nthreads        // number of OpenMP threads to use
)
{
#if GB_DISABLE
    // this operator/type combination was disabled at compile time
    // (see GB_DISABLE above); caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = -((uint16_t) Ax [p])
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint64_t -> uint16_t, and apply
// AINV.  All the work happens in the included template; Rowcounts, Iter,
// A_slice and naslice presumably describe the parallel slicing of A — see
// GB_unaryop_transpose.c for their exact semantics.
GrB_Info GB_tran__ainv_uint16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // numeric phase only (the symbolic phase ran elsewhere)
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB
// A.*B function (eWiseMult): GB_AemultB
// A*D function (colscale): GB_AxD
// D*A function (rowscale): GB_DxB
// C+=B function (dense accum): GB_Cdense_accumB
// C+=b function (dense accum): GB_Cdense_accumb
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum
// C=scalar+B GB_bind1st
// C=scalar+B' GB_bind1st_tran
// C=A+scalar GB_bind2nd
// C=A'+scalar GB_bind2nd_tran
// C type: GB_ctype
// A type: GB_atype
// B,b type: GB_btype
// BinaryOp: GB_binaryop(cij,aij,bij,i,j)
#define GB_ATYPE \
GB_atype
#define GB_BTYPE \
GB_btype
#define GB_CTYPE \
GB_ctype
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
GB_atype_is_btype
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
GB_ctype_is_atype
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
GB_ctype_is_btype
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GB_geta(aij,Ax,pA)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GB_getb(bij,Bx,pB)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GB_ctype t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
GB_copy_a_to_c(cij,Ax,pA)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
GB_copy_b_to_c(cij,Bx,pB)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
GB_binaryop(z, x, y, i, j) ;
// op is second
#define GB_OP_IS_SECOND \
GB_op_is_second
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
GB_op_is_plus_real
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
GB_op_is_minus_real
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
GB_cblas_axpy
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
GB_disable
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
if_is_binop_subset
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense.  Note this variant returns
// void (no GB_DISABLE guard); it is only emitted for the MIN, MAX, PLUS,
// MINUS, RMINUS, TIMES, DIV, and RDIV operators (if_is_binop_subset guard).
void GB_Cdense_ewise3_accum
(
    GrB_Matrix C,           // input/output: accumulated in place
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of OpenMP threads to use
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
endif_is_binop_subset
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; C is overwritten (no
// accumulator) by the included template.
GrB_Info GB_Cdense_ewise3_noaccum
(
    GrB_Matrix C,           // output, overwritten with A+B
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of OpenMP threads to use
)
{
#if GB_DISABLE
    // operator/type combination disabled at compile time
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  The
// kfirst/klast/pstart slice arrays presumably partition B's entries across
// ntasks tasks — see GB_ek_slice.h for the slicing convention.
GrB_Info GB_Cdense_accumB
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // if_C_dense_update / endif_C_dense_update are code-generator
    // conditionals, not C syntax: this file is a template, compiled only
    // after substitution into the Generated/ sources.
    if_C_dense_update
    {
        #include "GB_dense_subassign_23_template.c"
    }
    endif_C_dense_update
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB_Cdense_accumb
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b, of type GB_btype
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // if_C_dense_update is a code-generator conditional (template syntax)
    if_C_dense_update
    {
        // get the scalar b for C += b, of type GB_btype
        GB_btype bwork = (*((GB_btype *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    endif_C_dense_update
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
if_binop_is_semiring_multiplier
// C = A*D: column-scale A by the diagonal matrix D.  Only emitted when the
// binary op is a valid semiring multiplier (if_binop_is_semiring_multiplier).
GrB_Info GB_AxD
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,      // if true, values of A ignored
    const GrB_Matrix D, bool D_is_pattern,      // if true, values of D ignored
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // GB_ctype is a generator placeholder for the C type of this operator
    GB_ctype *GB_RESTRICT Cx = (GB_ctype *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
endif_binop_is_semiring_multiplier
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
if_binop_is_semiring_multiplier
// C = D*B: row-scale B by the diagonal matrix D.  Only emitted when the
// binary op is a valid semiring multiplier (if_binop_is_semiring_multiplier).
GrB_Info GB_DxB
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,      // if true, values of D ignored
    const GrB_Matrix B, bool B_is_pattern,      // if true, values of B ignored
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GB_ctype *GB_RESTRICT Cx = (GB_ctype *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
endif_binop_is_semiring_multiplier
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, numeric phase (GB_PHASE_2_OF_2 is set
// above).  The nine slice pointers are workspace released by GB_FREE_ALL.
GrB_Info GB_AaddB
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,             // optional mask, may be NULL
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // slice workspace; presumably allocated inside GB_add_template.c when
    // needed, and freed by GB_FREE_ALL (defined just above this function)
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, numeric phase.  Mirrors GB_AaddB but
// uses the element-wise-multiply template.
GrB_Info GB_AemultB
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,             // optional mask, may be NULL
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // slice workspace, freed by GB_FREE_ALL (defined above GB_AaddB)
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
if_binop_bind1st_is_enabled
GrB_Info GB_bind1st
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_ctype *Cx = (GB_ctype *) Cx_output ;
GB_atype x = (*((GB_atype *) x_input)) ;
GB_btype *Bx = (GB_btype *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
GB_getb(bij, Bx, p) ;
GB_binaryop(Cx [p], x, bij, 0, 0) ;
}
return (GrB_SUCCESS) ;
#endif
}
endif_binop_bind1st_is_enabled
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
if_binop_bind2nd_is_enabled
GrB_Info GB_bind2nd
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GB_ctype *Cx = (GB_ctype *) Cx_output ;
GB_atype *Ax = (GB_atype *) Ax_input ;
GB_btype y = (*((GB_btype *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GB_geta(aij, Ax, p) ;
GB_binaryop(Cx [p], aij, y, 0, 0) ;
}
return (GrB_SUCCESS) ;
#endif
}
endif_binop_bind2nd_is_enabled
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
if_binop_bind1st_is_enabled
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GB_getb(aij, Ax, pA) ; \
GB_binaryop(Cx [pC], x, aij, 0, 0) ; \
}
// C = op (x, A'): transpose A and apply the binary op with x bound as the
// first argument, via the GB_CAST_OP redefinition just above this function.
GrB_Info GB_bind1st_tran
(
    GrB_Matrix C,
    const GB_void *x_input,     // pointer to the scalar x, of type GB_atype
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        GB_btype
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GB_atype x = (*((const GB_atype *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // restore GB_ATYPE for the rest of the file
    #undef GB_ATYPE
    #define GB_ATYPE \
        GB_atype
}
endif_binop_bind1st_is_enabled
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
if_binop_bind2nd_is_enabled
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GB_geta(aij, Ax, pA) ; \
GB_binaryop(Cx [pC], aij, y, 0, 0) ; \
}
// C = op (A', y): transpose A and apply the binary op with y bound as the
// second argument, via the GB_CAST_OP redefinition just above this function.
GrB_Info GB_bind2nd_tran
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // pointer to the scalar y, of type GB_btype
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    GB_btype y = (*((const GB_btype *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
endif_binop_bind2nd_is_enabled
#endif
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/resize.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
/*
  Return a copy of image whose pixels have been transformed so the result
  reads in the standard top-left orientation; the orientation tag of the
  returned image is reset to TopLeftOrientation.  Returns NULL on failure,
  with the reason recorded in exception.
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *result;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  switch (orientation)
  {
    case TopRightOrientation:
      result=FlopImage(image,exception);
      break;
    case BottomRightOrientation:
      result=RotateImage(image,180.0,exception);
      break;
    case BottomLeftOrientation:
      result=FlipImage(image,exception);
      break;
    case LeftTopOrientation:
      result=TransposeImage(image,exception);
      break;
    case RightTopOrientation:
      result=RotateImage(image,90.0,exception);
      break;
    case RightBottomOrientation:
      result=TransverseImage(image,exception);
      break;
    case LeftBottomOrientation:
      result=RotateImage(image,270.0,exception);
      break;
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
      /* already upright (or unknown): return an unmodified copy */
      result=CloneImage(image,0,0,MagickTrue,exception);
      break;
  }
  if (result != (Image *) NULL)
    result->orientation=TopLeftOrientation;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Clamp the chop rectangle to the image bounds; a negative origin is
    folded into a smaller width/height starting at 0.
  */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /* the result shrinks by exactly the chopped extent */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Extract chop image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  /* pass 1: rows above the chop region keep the columns outside its span */
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          /* q advances only when the source pixel is retained */
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Extract chop image.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
  /* pass 2: rows below the chop region, shifted up by extent.height */
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
%  single image.
%
%  The format of the ConsolidateCMYKImages method is:
%
%      Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ConsolidateCMYKImages() merges each run of four consecutive images in the
  input list (interpreted as the C, M, Y, and K planes, in that order) into
  one CMYK image, and returns the list of consolidated images.  Each plane's
  channel value is QuantumRange minus the plane's pixel intensity.  The
  caller owns the returned list; a partial (or empty) list is returned if
  cloning fails or the input list ends mid-quad.
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    j;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
  {
    register ssize_t
      i;

    assert(images != (Image *) NULL);
    /* the first plane of the quad supplies the geometry and metadata */
    cmyk_image=CloneImage(images,0,0,MagickTrue,exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
      {
        /*
          Bug fix: release the clone before bailing out; the previous code
          leaked cmyk_image on this failure path.
        */
        cmyk_image=DestroyImage(cmyk_image);
        break;
      }
    (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
    for (i=0; i < 4; i++)
    {
      /* plane i (0=C, 1=M, 2=Y, 3=K) comes from the current list image */
      image_view=AcquireVirtualCacheView(images,exception);
      cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
      for (y=0; y < (ssize_t) images->rows; y++)
      {
        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          break;
        for (x=0; x < (ssize_t) images->columns; x++)
        {
          Quantum
            pixel;

          /* CMYK channel value is the inverted plane intensity */
          pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
          switch (i)
          {
            case 0: SetPixelCyan(cmyk_image,pixel,q); break;
            case 1: SetPixelMagenta(cmyk_image,pixel,q); break;
            case 2: SetPixelYellow(cmyk_image,pixel,q); break;
            case 3: SetPixelBlack(cmyk_image,pixel,q); break;
            default: break;
          }
          p+=GetPixelChannels(images);
          q+=GetPixelChannels(cmyk_image);
        }
        if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
          break;
      }
      cmyk_view=DestroyCacheView(cmyk_view);
      image_view=DestroyCacheView(image_view);
      /* advance to the next plane; a short quad still appends the result */
      images=GetNextImageInList(images);
      if (images == (Image *) NULL)
        break;
    }
    AppendImageToList(&cmyk_images,cmyk_image);
  }
  return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The crop geometry is relative to the virtual canvas (image->page);
    fall back to the pixel bounds when no page geometry is set.
  */
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
      crop_image->alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(crop_image,exception);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Translate the page-relative offsets into pixel coordinates.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  /*
    Clamp to the pixel bounds, then to the caller's requested size.
  */
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
  offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
  if ((offset.x > (ssize_t) image->page.width) ||
      (offset.y > (ssize_t) image->page.height))
    {
      /* crop extends past the canvas: shrink the recorded page */
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) crop_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (crop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(crop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(crop_image);
    }
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image, into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
% Image *CropImageToTiles(const Image *image,
% const RectangleInfo *crop_geometry, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double ConstrainPixelOffset(double x)
{
  /*
    Clamp x into a range that converts safely to ssize_t; the 512-pixel
    margin leaves headroom for subsequent rounding and offset arithmetic.
  */
  const double
    limit = (double) (SSIZE_MAX-512);

  if (x < -limit)
    return(-limit);
  if (x > limit)
    return(limit);
  return(x);
}
static inline ssize_t PixelRoundOffset(double x)
{
  /*
    Round x to the nearest integer (ties round up) and constrain the result
    so it always fits in a ssize_t.
  */
  const double
    constrained = ConstrainPixelOffset(x);

  if ((x-floor(x)) < (ceil(x)-x))
    return((ssize_t) floor(constrained));
  return((ssize_t) ceil(constrained));
}
MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  /*
    Crop the image into one or more tiles/regions as directed by the crop
    geometry string; returns an image list (or NULL on failure).
  */
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  /*
    Resolve the geometry string relative to the image; flags record which
    geometry modifiers (@, !, +X, +Y, ...) were present.
  */
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          /* without '!': the x/y offsets shrink the tiled area by |offset| */
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          /* with '!': the offsets grow the tiled area instead */
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      /*
        Fractional tile size in pixels; clamped to at least one pixel.
      */
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) PixelRoundOffset((double) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) PixelRoundOffset((double)
              (offset.y+(geometry.y < -1 ? geometry.y : 0)));
          }
        /* convert bottom edge to a height, then shift into page coordinates */
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          /* convert right edge to a width, then shift into page coordinates */
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      /* drop any per-tile crop warnings; failed tiles were simply skipped */
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          /* '!' flag: reset the virtual canvas to the requested geometry */
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /* geometry covers the whole image: return an unmodified clone */
  return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* another row already failed: skip remaining rows quickly */
    if (status == MagickFalse)
      continue;
    /* read the source row at the geometry offset; open the matching
       destination row for writing */
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) excerpt_image->columns; x++)
    {
      register ssize_t
        i;

      /* copy every channel both images define */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (excerpt_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(excerpt_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(excerpt_image);
    }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  /*
    Extend the image to the geometry's size, filling with the background
    color and compositing the original at the negated geometry offset.
    Returns NULL (with exception set) on failure.
  */
  Image
    *extent_image;

  MagickBooleanType
    status;

  /*
    Allocate extent image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageBackgroundColor(extent_image,exception);
  if (status == MagickFalse)
    {
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  status=CompositeImage(extent_image,image,image->compose,MagickTrue,
    -geometry->x,-geometry->y,exception);
  /*
    Fix: the composite status was previously assigned and then ignored; on
    failure, release the image and return NULL, consistent with the
    background-color error path above.
  */
  if (status == MagickFalse)
    {
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  return(extent_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* source row y writes to destination row (rows-y-1): vertical mirror */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) flip_image->columns; x++)
    {
      register ssize_t
        i;

      /* copy every channel both images define */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flip_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flip_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(flip_image);
    }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlipImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* mirror the virtual canvas offset so the page geometry flips too */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      q starts one pixel past the end of the destination row and is
      decremented before each write, so the row is written in reverse:
      a horizontal mirror.
    */
    q+=GetPixelChannels(flop_image)*flop_image->columns;
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      register ssize_t
        i;

      q-=GetPixelChannels(flop_image);
      /* copy every channel both images define */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* mirror the virtual canvas offset so the page geometry flops too */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CopyImageRegion() copies a columns x rows block of pixels from source at
  (sx,sy) to destination at (dx,dy); helper for RollImage().
*/
static MagickBooleanType CopyImageRegion(Image *destination,
  const Image *source,const size_t columns,const size_t rows,const ssize_t sx,
  const ssize_t sy,const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* empty region: nothing to copy */
  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /* copy every channel both images define */
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait source_traits=GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((source_traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],q);
      }
      p+=GetPixelChannels(source);
      q+=GetPixelChannels(destination);
    }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"

  Image
    *rolled_image;

  MagickStatusType
    status;

  RectangleInfo
    shift;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  rolled_image=CloneImage(image,0,0,MagickTrue,exception);
  if (rolled_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the requested offsets into [0,columns) x [0,rows).
  */
  shift.x=x_offset;
  while (shift.x < 0)
    shift.x+=(ssize_t) image->columns;
  while (shift.x >= (ssize_t) image->columns)
    shift.x-=(ssize_t) image->columns;
  shift.y=y_offset;
  while (shift.y < 0)
    shift.y+=(ssize_t) image->rows;
  while (shift.y >= (ssize_t) image->rows)
    shift.y-=(ssize_t) image->rows;
  /*
    Reassemble the image from its four wrapped quadrants: the block right
    and below the split point lands at the origin, and the remaining three
    blocks wrap around to fill the rest of the canvas.
  */
  status=CopyImageRegion(rolled_image,image,(size_t) shift.x,
    (size_t) shift.y,(ssize_t) image->columns-shift.x,(ssize_t) image->rows-
    shift.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(rolled_image,image,image->columns-shift.x,
    (size_t) shift.y,0,(ssize_t) image->rows-shift.y,shift.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(rolled_image,image,(size_t) shift.x,image->rows-
    shift.y,(ssize_t) image->columns-shift.x,0,0,shift.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(rolled_image,image,image->columns-shift.x,
    image->rows-shift.y,0,0,shift.x,shift.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  rolled_image->type=image->type;
  if (status == MagickFalse)
    rolled_image=DestroyImage(rolled_image);
  return(rolled_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    crop;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Refuse requests that would shave away the entire image.
  */
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Build the interior crop rectangle: shave_info->width columns off each
    side and shave_info->height rows off the top and bottom.
  */
  SetGeometry(image,&crop);
  crop.width-=2*shave_info->width;
  crop.height-=2*shave_info->height;
  crop.x=(ssize_t) shave_info->width+image->page.x;
  crop.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&crop,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Shrink the virtual canvas to match the shaved dimensions.
  */
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"

  CacheView
    *image_view,
    *splice_view;

  Image
    *splice_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    splice_geometry;

  ssize_t
    columns,
    y;

  /*
    Allocate splice image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  splice_geometry=(*geometry);
  splice_image=CloneImage(image,image->columns+splice_geometry.width,
    image->rows+splice_geometry.height,MagickTrue,exception);
  if (splice_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse)
    {
      splice_image=DestroyImage(splice_image);
      return((Image *) NULL);
    }
  if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) &&
      (IsGrayColorspace(splice_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(splice_image,sRGBColorspace,exception);
  if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) &&
      (splice_image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(splice_image,OpaqueAlpha,exception);
  (void) SetImageBackgroundColor(splice_image,exception);
  /*
    Respect image geometry: translate the splice offset by gravity.  The x
    offset shifts by the splice width, the y offset by the splice height.
  */
  switch (image->gravity)
  {
    default:
    case UndefinedGravity:
    case NorthWestGravity:
      break;
    case NorthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      break;
    }
    case NorthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      break;
    }
    case WestGravity:
    {
      /*
        Bug fix: the vertical offset was previously derived from the splice
        width; it must use the height, mirroring EastGravity below.
      */
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case CenterGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case EastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case SouthWestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
  }
  /*
    Splice image: rows above the splice band copy straight across, leaving
    a background-colored gap at columns [x,x+width).
  */
  status=MagickTrue;
  progress=0;
  columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
  for (y=0; y < (ssize_t) splice_geometry.y; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* pixels left of the splice column */
    for (x=0; x < columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    /* skip the spliced band: it keeps the background color */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q+=GetPixelChannels(splice_image);
    /* pixels right of the splice column */
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpliceImageTag,progress,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Rows below the splice band read from the source offset by the splice
    height.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_image->rows,2)
#endif
  for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
       y < (ssize_t) splice_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    /* defensive bounds check on the computed row index */
    if ((y < 0) || (y >= (ssize_t) splice_image->rows))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
      splice_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* pixels left of the splice column */
    for (x=0; x < columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    /* skip the spliced band: it keeps the background color */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q+=GetPixelChannels(splice_image);
    /* pixels right of the splice column */
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpliceImageTag,progress,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  splice_view=DestroyCacheView(splice_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    splice_image=DestroyImage(splice_image);
  return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% This function destroys what it assumes to be a single image list.
% If the input image is part of a larger list, all other images in that list
% will be simply 'lost', not destroyed.
%
% Also if the crop generates a list of images only the first image is resized.
% And finally if the crop succeeds and the resize failed, you will get a
% cropped image, as well as a 'false' or 'failed' report.
%
%  This function should probably be deprecated in favor of direct calls
% to CropImageToTiles() or ResizeImage(), as appropriate.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
  Image
    *resize_image,
    *transform_image;

  RectangleInfo
    geometry;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,exception);
      else
        {
          /* take ownership of the crop result; only the first image in the
             crop list is kept (see the notes in the header comment) */
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  (void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
    exception);
  /* already the requested size: nothing to do */
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,exception);
  /* on resize failure *image remains the (possibly cropped) image even
     though MagickFalse is reported */
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The result has the source's dimensions swapped (rows x columns).
  */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image: each source row becomes a destination column.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* a sibling iteration already failed; skip remaining work */
    if (status == MagickFalse)
      continue;
    /*
      Read source row (rows-y-1) and queue destination column (rows-y-1);
      the column length is transpose_image->rows (== image->columns).
    */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Copy only the channels both source and destination define.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transpose_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transpose_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(transpose_image);
    }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /*
    Swap the page geometry to follow the rotated canvas.
  */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The result has the source's dimensions swapped (rows x columns).
  */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image: each source row is mirrored into a destination column.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* a sibling iteration already failed; skip remaining work */
    if (status == MagickFalse)
      continue;
    /*
      Read source row y and queue destination column (rows-y-1); the column
      length is transverse_image->rows (== image->columns).
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
      0,1,transverse_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Start q one pixel past the end of the column and walk it backwards so
      the row is mirrored as it is copied.
    */
    q+=GetPixelChannels(transverse_image)*image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      q-=GetPixelChannels(transverse_image);
      /*
        Copy only the channels both source and destination define.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transverse_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transverse_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /*
    Swap the page geometry and re-anchor the offsets to match the mirrored,
    rotated canvas (offsets are only adjusted when a page dimension is set).
  */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    bounds;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Compute the smallest rectangle enclosing the non-border pixels.
  */
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /*
        Translate the bounding box into page coordinates and crop to it.
      */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  {
    Image
      *blank_image;

    /*
      The image is entirely background: return a 1x1 transparent stand-in
      with the original page geometry and a (-1,-1) offset.
    */
    blank_image=CloneImage(image,1,1,MagickTrue,exception);
    if (blank_image == (Image *) NULL)
      return((Image *) NULL);
    blank_image->background_color.alpha=(MagickRealType) TransparentAlpha;
    blank_image->alpha_trait=BlendPixelTrait;
    (void) SetImageBackgroundColor(blank_image,exception);
    blank_image->page=image->page;
    blank_image->page.x=(-1);
    blank_image->page.y=(-1);
    return(blank_image);
  }
}
|
GB_unaryop__ainv_int8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int8_int32
// op(A') function: GB_tran__ainv_int8_int32
// C type: int8_t
// A type: int32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__ainv_int8_int32: Cx [k] = -((int8_t) Ax [k]) for all k.
// Typecasts each int32_t entry of Ax to int8_t, then applies the AINV
// (additive inverse) operator.  The loop is embarrassingly parallel and is
// split statically over `nthreads` OpenMP threads.
GrB_Info GB_unop__ainv_int8_int32
(
    int8_t *restrict Cx,        // output array, anz entries
    const int32_t *restrict Ax, // input array, anz entries
    int64_t anz,                // number of entries in Ax and Cx
    int nthreads                // number of threads to use
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time; the caller falls back to
    // the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // typecast first, then negate (same order as the GB_CAST_OP macro)
        int8_t z = (int8_t) Ax [k] ;
        Cx [k] = -z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__ainv_int8_int32: C = op (cast (A')) — transpose A, typecast each
// entry from int32_t to int8_t, and apply the AINV (additive inverse)
// operator.  The actual loops live in the shared template
// "GB_unaryop_transpose.c", specialized here through the GB_* macros
// defined earlier in this file.
GrB_Info GB_tran__ainv_int8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller uses the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
relulayer.h | #ifndef RELULAYER_H
#define RELULAYER_H
#include "vol.h"
#include "layer.h"
#include "utils.h"
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/serialization/map.hpp>
#include <boost/serialization/utility.hpp>
#include <iostream>
#include <iomanip>
#include <algorithm>
#include <string>
#include <random>
#include <cmath>
#include <ctime>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/legacy/compat.hpp>
#ifndef NOT_USE
#define NOT_USE 2<<20
#endif
using namespace std;
// ReluLayer: rectified-linear activation layer.  The output volume has the
// same depth/width/height as the input; forward() clamps negative
// activations to zero, backward() propagates gradients only where the
// output was positive.
template < typename FP >
class ReluLayer : public Layer<FP>{
  private:
  public:
    // Output volume geometry -- always set equal to the input geometry.
    int out_depth;
    int out_sx;
    int out_sy;
    // The fields below are declared for interface parity with other layer
    // types but are never assigned or read by this layer's own code.
    int sx;
    int sy;
    int stride;
    int pad;
    // Input volume geometry, as given to the constructor.
    int in_depth;
    int in_sx;
    int in_sy;
    int winx;
    int winy;
    string layer_type;   // always "relu"
    vector<FP> switchx;  // unused here; see the commented-out init below
    vector<FP> switchy;
    //num_neurons In:{d,x,y} Conf:{l1_decay,l2_decay}
    // Construct a ReLU layer for an in_sx x in_sy x in_depth input volume.
    // Output dimensions are copied verbatim from the input dimensions.
    ReluLayer(int in_depth,int in_sx,int in_sy){
      this->in_depth=in_depth;
      this->in_sx=in_sx;
      this->in_sy=in_sy;
      this->layer_type="relu";
      this->in_act=NULL;
      this->out_act=NULL;
      //cout << "cv 0" << endl;
      //cout << "cv 1" << endl;
      this->out_depth = this->in_depth;
      this->out_sx = this->in_sx;
      this->out_sy = this->in_sy;
      //cout << "cv 2" << endl;
      /*Utils<FP> ut;
      this->switchx = ut.zeros(this->out_sx*this->out_sy*this->out_depth);
      this->switchy = ut.zeros(this->out_sx*this->out_sy*this->out_depth);*/
      //cout << "cv 4" << endl;
    }
    // Delete the cached activation volumes.
    // NOTE(review): in_act is the pointer handed to forward() by the caller
    // (typically the previous layer's out_act); deleting it here assumes
    // this layer owns it -- confirm against the framework's ownership
    // convention, otherwise this risks a double free.
    void dtor(){
      //cout << "clearrr3" << endl;
      if(this->in_act != NULL){delete this->in_act;this->in_act=NULL;}
      //cout << "clearrr5" << endl;
      if(this->out_act != NULL){delete this->out_act;this->out_act=NULL;}
      //cout << "clearrr4" << endl;
    }
    ~ReluLayer(){
      dtor();
    }
    // Serialize trainable weights.  ReLU has no trainable parameters: the
    // local `list` is never populated, so this always returns an empty
    // vector (kept for interface parity with parameterized layers).
    vector<FP> get_all_w(){
      vector<FP> out;
      Vol<FP>* V;
      vector< Vol<FP>* > list;
      for(int z=0;z<list.size();z++){
        V=list[z];
        int size=V->w.size();
        //cout << size << endl;
        for(int q=0;q<size;q++){
          out.push_back(V->w[q]);
        }
      }
      return out;
    }
    // Restore trainable weights.  As with get_all_w(), `list`/`slist` stay
    // empty, so this is effectively a no-op for the ReLU layer.
    void set_all_w(vector<FP> aw){
      Vol<FP>* V;
      vector< Vol<FP>* > list;
      vector<int> slist;
      int as=0;
      for(int i=0,q=0;i<slist.size();i++){
        V = list[i];
        for(int j=0;j<slist[i];j++,q++){
          V->w[j]=aw[q];
        }
      }
    }
    // Forward pass: out = max(0, in), elementwise, on a clone of V.
    // Stores V as in_act (raw pointer, not copied) and replaces any
    // previous out_act (deleting it first).  Returns out_act.
    Vol<FP>* forward(Vol<FP>* V,bool is_training=false){
      //cout << "@1" << endl;
      this->in_act = V;
      //cout << "@2" << endl;
      //cout << "feed b" << endl;
      Vol<FP>* V2 = V->clone();
      int N = V2->w.size();
      //cout << "@3" << endl;
      clock_t begin_time = clock();
      //#pragma omp parallel for
      for(int i=0;i<N;i++){
        if(V2->w[i] < 0) V2->w[i]=0;
      }
      //std::cout << "ReluLayer : " << float( clock () - begin_time ) / CLOCKS_PER_SEC << endl;
      //cout << "feed e" << endl;
      if(this->out_act != NULL){delete this->out_act;this->out_act=NULL;}
      //cout << "feed f" << endl;
      this->out_act = V2;
      //cout << "feed g" << endl;
      //cout << "feed h" << endl;
      return this->out_act;
    }
    // Backward pass: zero in_act's gradients, then pass each output
    // gradient through only where the forward output was positive
    // (dReLU/dx = 1 for x > 0, else 0).  tmpy is unused.
    void backward(int tmpy=0){
      Vol<FP>* V = this->in_act;
      Vol<FP>* V2 = this->out_act;
      int N = V->w.size();
      Utils<FP> ut;
      V->dw = ut.zeros(V->w.size());
      for(int i=0;i<N;i++){
        if(V2->w[i] <= 0) V->dw[i] = 0;
        else V->dw[i] = V2->dw[i];
      }
    }
    // No trainable parameters: always returns an empty list.
    vector< map<string, vector<FP>* > > getParamsAndGrads(){
      vector< map<string, vector<FP>* > > v;
      return v;
    }
    string get_layer_type(){
      return this->layer_type;
    }
    Vol<FP>* get_in_act(){
      return this->in_act;
    }
    Vol<FP>* get_out_act(){
      return this->out_act;
    }
};
/**
* Mat::data Specification
* 2x2 1 channel
* [ R , R ;
* R , R ]
*
* 2x2 2 channel
* [ R , G , R , G ;
* R , G , R , G ]
*
* 2x2 3 channel
* [ R , G , B , R , G , B ;
* R , G , B . R , G , B ]
*/
#endif
|
tinyexr.h | #ifndef TINYEXR_H_
#define TINYEXR_H_
/*
Copyright (c) 2014 - 2020, Syoyo Fujita and many contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Syoyo Fujita nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// TinyEXR contains some OpenEXR code, which is licensed under ------------
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2002, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
// End of OpenEXR license -------------------------------------------------
//
//
// Do this:
// #define TINYEXR_IMPLEMENTATION
// before you include this file in *one* C or C++ file to create the
// implementation.
//
// // i.e. it should look like this:
// #include ...
// #include ...
// #include ...
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//
#include <stddef.h> // for size_t
#include <stdint.h> // guess stdint.h is available(C99)
#ifdef __cplusplus
extern "C" {
#endif
// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif
// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif
#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0) // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0) // No threaded loading.
// http://computation.llnl.gov/projects/floating-point-compression
#endif
#ifndef TINYEXR_USE_OPENMP
#ifdef _OPENMP
#define TINYEXR_USE_OPENMP (1)
#else
#define TINYEXR_USE_OPENMP (0)
#endif
#endif
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-6)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-7)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8)
#define TINYEXR_ERROR_INVALID_HEADER (-9)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-11)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-12)
#define TINYEXR_ERROR_LAYER_NOT_FOUND (-13)
// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }
// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)
#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)
#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension
#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)
#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)
#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)
// Parsed contents of the EXR version/flags word that follows the magic
// number at the start of every EXR file.
typedef struct _EXRVersion {
  int version;  // this must be 2
  // tile format image;
  // not zero for only a single-part "normal" tiled file (according to spec.)
  int tiled;
  int long_name;  // long name attribute
  // deep image(EXR 2.0);
  // for a multi-part file, indicates that at least one part is of type deep* (according to spec.)
  int non_image;
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;
typedef struct _EXRAttribute {
char name[256]; // name and type are up to 255 chars long.
char type[256];
unsigned char *value; // uint8_t*
int size;
int pad0;
} EXRAttribute;
typedef struct _EXRChannelInfo {
char name[256]; // less than 255 bytes long
int pixel_type;
int x_sampling;
int y_sampling;
unsigned char p_linear;
unsigned char pad[3];
} EXRChannelInfo;
// One decoded tile: its position in the tile grid, mip/rip level, actual
// pixel dimensions (edge tiles may be smaller than the nominal tile size),
// and per-channel pixel data.
typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;
  int width;   // actual width in a tile.
  int height;  // actual height in a tile.
  unsigned char **images;  // image[channels][pixels]
} EXRTile;
typedef struct _EXRBox2i {
int min_x;
int min_y;
int max_x;
int max_y;
} EXRBox2i;
// Header of a single EXR part: the required attributes, tiling properties,
// per-channel information, and any custom attributes.
typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  EXRBox2i data_window;
  EXRBox2i display_window;
  float screen_window_center[2];
  float screen_window_width;
  int chunk_count;
  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;
  int long_name;
  // for a single-part file, agree with the version field bit 11
  // for a multi-part file, it is consistent with the type of part
  int non_image;
  int multipart;
  unsigned int header_len;
  // Custom attributes(excludes required attributes(e.g. `channels`,
  // `compression`, etc)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.
  EXRChannelInfo *channels;  // [num_channels]
  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with `requested_pixel_types` when
                     // loading.
  int num_channels;
  int compression_type;  // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)
  // name attribute required for multipart files;
  // must be unique and non empty (according to spec.);
  // use EXRSetNameAttr for setting value;
  // max 255 character allowed - excluding terminating zero
  char name[256];
} EXRHeader;
typedef struct _EXRMultiPartHeader {
int num_headers;
EXRHeader *headers;
} EXRMultiPartHeader;
// A single decoded EXR part: pixel data is held either in `tiles` (tiled
// format) or `images` (scanline format), never both; `next_level` chains
// additional mip/rip resolution levels.
typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  struct _EXRImage* next_level;  // NULL if scanline format or image is the last level.
  int level_x;  // x level index
  int level_y;  // y level index
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.
  int width;
  int height;
  int num_channels;
  // Properties for tile format.
  int num_tiles;
} EXRImage;
typedef struct _EXRMultiPartImage {
int num_images;
EXRImage *images;
} EXRMultiPartImage;
typedef struct _DeepImage {
const char **channel_names;
float ***image; // image[channels][scanlines][samples]
int **offset_table; // offset_table[scanline][offsets]
int num_channels;
int width;
int height;
int pad0;
} DeepImage;
// @deprecated { For backward compatibility. Not recommended to use. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
const char *filename, const char **err);
// Loads single-frame OpenEXR image by specifying layer name. Assume EXR image
// contains A(single channel alpha) or RGB(A) channels. Application must free
// image data as returned by `out_rgba` Result image format is: float x RGBA x
// width x height. Returns negative value and may set error string in `err` when
// there's an error When the specified layer name is not found in the EXR file,
// the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`.
extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
const char *filename, const char *layer_name,
const char **err);
//
// Get layer infos from EXR file.
//
// @param[out] layer_names List of layer names. Application must free memory
// after using this.
// @param[out] num_layers The number of layers
// @param[out] err Error string(will be filled when the function returns error
// code). Free it using FreeEXRErrorMessage after using this value.
//
// @return TINYEXR_SUCCESS upon success.
//
extern int EXRLayers(const char *filename, const char **layer_names[],
int *num_layers, const char **err);
// @deprecated { to be removed. }
// Simple wrapper API for ParseEXRHeaderFromFile.
// checking given file is a EXR file(by just look up header)
// @return TINYEXR_SUCCESS for EXR image, TINYEXR_ERROR_INVALID_HEADER for
// others
extern int IsEXR(const char *filename);
// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an
// error
extern int SaveEXR(const float *data, const int width, const int height,
const int components, const int save_as_fp16,
const char *filename, const char **err);
// Returns the number of resolution levels of the image (including the base)
extern int EXRNumLevels(const EXRImage* exr_image);
// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);
// Set name attribute of EXRHeader struct (it makes a copy)
extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name);
// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);
// Frees internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);
// Frees internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);
// Frees error message
extern void FreeEXRErrorMessage(const char *msg);
// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename);
// Parse EXR version header from memory-mapped EXR data.
extern int ParseEXRVersionFromMemory(EXRVersion *version,
const unsigned char *memory, size_t size);
// Parse single-part OpenEXR header from a file and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version,
const char *filename, const char **err);
// Parse single-part OpenEXR header from a memory and initialize `EXRHeader`.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRHeaderFromMemory(EXRHeader *header,
const EXRVersion *version,
const unsigned char *memory, size_t size,
const char **err);
// Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*`
// array.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const char *filename,
const char **err);
// Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*`
// array
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers,
int *num_headers,
const EXRVersion *version,
const unsigned char *memory,
size_t size, const char **err);
// Loads single-part OpenEXR image from a file.
// Application must setup `ParseEXRHeaderFromFile` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header,
const char *filename, const char **err);
// Loads single-part OpenEXR image from a memory.
// Application must setup `EXRHeader` with
// `ParseEXRHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header,
const unsigned char *memory,
const size_t size, const char **err);
// Loads multi-part OpenEXR image from a file.
// Application must setup `ParseEXRMultipartHeaderFromFile` before calling this
// function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromFile(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const char *filename,
const char **err);
// Loads multi-part OpenEXR image from a memory.
// Application must setup `EXRHeader*` array with
// `ParseEXRMultipartHeaderFromMemory` before calling this function.
// Application can free EXRImage using `FreeEXRImage`
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRMultipartImageFromMemory(EXRImage *images,
const EXRHeader **headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err);
// Saves multi-channel, single-frame OpenEXR image to a file.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRImageToFile(const EXRImage *image,
const EXRHeader *exr_header, const char *filename,
const char **err);
// Saves multi-channel, single-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// Returns the number of bytes written on success.
// Returns zero and sets the error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRImageToMemory(const EXRImage *image,
const EXRHeader *exr_header,
unsigned char **memory, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int SaveEXRMultipartImageToFile(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const char *filename, const char **err);
// Saves multi-channel, multi-frame OpenEXR image to a memory.
// Image is compressed using EXRImage.compression value.
// File global attributes (eg. display_window) must be set in the first header.
// Returns the number of bytes written on success.
// Returns zero and sets the error string in `err` when there's an
// error.
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images,
const EXRHeader **exr_headers,
unsigned int num_parts,
unsigned char **memory, const char **err);
// Loads single-frame OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadDeepEXR(DeepImage *out_image, const char *filename,
const char **err);
// NOT YET IMPLEMENTED:
// Saves single-frame OpenEXR deep image.
// Returns negative value and may set error string in `err` when there's an
// error
// extern int SaveDeepEXR(const DeepImage *in_image, const char *filename,
// const char **err);
// NOT YET IMPLEMENTED:
// Loads multi-part OpenEXR deep image.
// Application must free memory of variables in DeepImage(image, offset_table)
// extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const
// char *filename,
// const char **err);
// For emscripten.
// Loads single-frame OpenEXR image from memory. Assume EXR image contains
// RGB(A) channels.
// Returns negative value and may set error string in `err` when there's an
// error
// When there was an error message, Application must free `err` with
// FreeEXRErrorMessage()
extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
const unsigned char *memory, size_t size,
const char **err);
#ifdef __cplusplus
}
#endif
#endif // TINYEXR_H_
#ifdef TINYEXR_IMPLEMENTATION
#ifndef TINYEXR_IMPLEMENTATION_DEFINED
#define TINYEXR_IMPLEMENTATION_DEFINED
#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h> // for UTF-8
#endif
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
// #include <iostream> // debug
#include <limits>
#include <string>
#include <vector>
#include <set>
// https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support
#if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900)
#define TINYEXR_HAS_CXX11 (1)
// C++11
#include <cstdint>
#if TINYEXR_USE_THREAD
#include <atomic>
#include <thread>
#endif
#endif // __cplusplus > 199711L
#if TINYEXR_USE_OPENMP
#include <omp.h>
#endif
#if TINYEXR_USE_MINIZ
#else
// Issue #46. Please include your own zlib-compatible API header before
// including `tinyexr.h`
//#include "zlib.h"
#endif
#if TINYEXR_USE_ZFP
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Weverything"
#endif
#include "zfp.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
namespace tinyexr {
#if __cplusplus > 199711L
// C++11
typedef uint64_t tinyexr_uint64;
typedef int64_t tinyexr_int64;
#else
// Although `long long` is not a standard type pre C++11, assume it is defined
// as a compiler's extension.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
#if TINYEXR_USE_MINIZ
namespace miniz {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"
#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif
#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#if __has_warning("-Wtautological-constant-compare")
#pragma clang diagnostic ignored "-Wtautological-constant-compare"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
reading/writing/appending, PNG writing
See "unlicense" statement at the end of this file.
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
http://www.ietf.org/rfc/rfc1951.txt
Most API's defined in miniz.c are optional. For example, to disable the
archive related functions just define
MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
(see the list below for more macros).
* Change History
10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major
release with Zip64 support (almost there!):
- Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug
(thanks kahmyong.moon@hp.com) which could cause locate files to not find
files. This bug
would only have occurred in earlier versions if you explicitly used this
flag, OR if you used mz_zip_extract_archive_file_to_heap() or
mz_zip_add_mem_to_archive_file_in_place()
(which used this flag). If you can't switch to v1.15 but want to fix
this bug, just remove the uses of this flag from both helper funcs (and of
course don't use the flag).
- Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when
pUser_read_buf is not NULL and compressed size is > uncompressed size
- Fixing mz_zip_reader_extract_*() funcs so they don't try to extract
compressed data from directory entries, to account for weird zipfiles which
contain zero-size compressed data on dir entries.
Hopefully this fix won't cause any issues on weird zip archives,
because it assumes the low 16-bits of zip external attributes are DOS
attributes (which I believe they always are in practice).
- Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the
internal attributes, just the filename and external attributes
- mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed
- Added cmake support for Linux builds which builds all the examples,
tested with clang v3.3 and gcc v4.6.
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti
- Merged MZ_FORCEINLINE fix from hdeanclark
- Fix <time.h> include before config #ifdef, thanks emil.brink
- Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping
(super useful for OpenGL apps), and explicit control over the compression
level (so you can
set it to 1 for real-time compression).
- Merged in some compiler fixes from paulharris's github repro.
- Retested this build under Windows (VS 2010, including static analysis),
tcc 0.9.26, gcc v4.6 and clang v3.3.
- Added example6.c, which dumps an image of the mandelbrot set to a PNG
file.
- Modified example2 to help test the
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more.
- In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix
possible src file fclose() leak if alignment bytes+local header file write
failed
- In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader():
Was pushing the wrong central dir header offset, appears harmless in this
release, but it became a problem in the zip64 branch
5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE,
#include <time.h> (thanks fermtect).
5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix
mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit.
- Temporarily/locally slammed in "typedef unsigned long mz_ulong" and
re-ran a randomized regression test on ~500k files.
- Eliminated a bunch of warnings when compiling with GCC 32-bit/64.
- Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze
(static analysis) option and fixed all warnings (except for the silly
"Use of the comma-operator in a tested expression.." analysis warning,
which I purposely use to work around a MSVC compiler warning).
- Created 32-bit and 64-bit Codeblocks projects/workspace. Built and
tested Linux executables. The codeblocks workspace is compatible with
Linux+Win32/x64.
- Added miniz_tester solution/project, which is a useful little app
derived from LZHAM's tester app that I use as part of the regression test.
- Ran miniz.c and tinfl.c through another series of regression testing on
~500,000 files and archives.
- Modified example5.c so it purposely disables a bunch of high-level
functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the
MINIZ_NO_STDIO bug report.)
- Fix ftell() usage in examples so they exit with an error on files which
are too large (a limitation of the examples, not miniz itself).
4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple
minor level_and_flags issues in the archive API's.
level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce
Dawson <bruced@valvesoftware.com> for the feedback/bug report.
5/28/11 v1.11 - Added statement from unlicense.org
5/27/11 v1.10 - Substantial compressor optimizations:
- Level 1 is now ~4x faster than before. The L1 compressor's throughput
now varies between 70-110MB/sec. on a
- Core i7 (actual throughput varies depending on the type of data, and x64
vs. x86).
- Improved baseline L2-L9 compression perf. Also, greatly improved
compression perf. issues on some file types.
- Refactored the compression code for better readability and
maintainability.
- Added level 10 compression level (L10 has slightly better ratio than
level 9, but could have a potentially large
drop in throughput on some files).
5/15/11 v1.09 - Initial stable release.
* Low-level Deflate/Inflate implementation notes:
Compression: Use the "tdefl" API's. The compressor supports raw, static,
and dynamic blocks, lazy or
greedy parsing, match length filtering, RLE-only, and Huffman-only streams.
It performs and compresses
approximately as well as zlib.
Decompression: Use the "tinfl" API's. The entire decompressor is
implemented as a single function
coroutine: see tinfl_decompress(). It supports decompression into a 32KB
(or larger power of 2) wrapping buffer, or into a memory
block large enough to hold the entire file.
The low-level tdefl/tinfl API's do not make any use of dynamic memory
allocation.
* zlib-style API notes:
miniz.c implements a fairly large subset of zlib. There's enough
functionality present for it to be a drop-in
zlib replacement in many apps:
The z_stream struct, optional memory allocation callbacks
deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
inflateInit/inflateInit2/inflate/inflateEnd
compress, compress2, compressBound, uncompress
CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly
routines.
Supports raw deflate streams or standard zlib streams with adler-32
checking.
Limitations:
The callback API's are not implemented yet. No support for gzip headers or
zlib static dictionaries.
I've tried to closely emulate zlib's various flavors of stream flushing
and return status codes, but
there are no guarantees that miniz.c pulls this off perfectly.
* PNG writing: See the tdefl_write_image_to_png_file_in_memory() function,
originally written by
Alex Evans. Supports 1-4 bytes/pixel images.
* ZIP archive API notes:
The ZIP archive API's were designed with simplicity and efficiency in
mind, with just enough abstraction to
get the job done with minimal fuss. There are simple API's to retrieve file
information, read files from
existing archives, create new archives, append new files to existing
archives, or clone archive data from
one archive to another. It supports archives located in memory or the heap,
on disk (using stdio.h),
or you can specify custom file read/write callbacks.
- Archive reading: Just call this function to read a single file from a
disk archive:
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const
char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
For more complex cases, use the "mz_zip_reader" functions. Upon opening an
archive, the entire central
directory is located and read as-is into memory, and subsequent file access
only occurs when reading individual files.
- Archives file scanning: The simple way is to use this function to scan a
loaded archive for a specific file:
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
The locate operation can optionally check file comments too, which (as one
example) can be used to identify
multiple versions of the same file in an archive. This function uses a
simple linear search through the central
directory, so it's not very fast.
Alternately, you can iterate through all the files in an archive (using
mz_zip_reader_get_num_files()) and
retrieve detailed info on each file by calling mz_zip_reader_file_stat().
- Archive creation: Use the "mz_zip_writer" functions. The ZIP writer
immediately writes compressed file data
to disk and builds an exact image of the central directory in memory. The
central directory image is written
all at once at the end of the archive file when the archive is finalized.
The archive writer can optionally align each file's local header and file
data to any power of 2 alignment,
which can be useful when the archive will be read from optical media. Also,
the writer supports placing
arbitrary data blobs at the very beginning of ZIP archives. Archives
written using either feature are still
readable by any ZIP tool.
- Archive appending: The simple way to add a single file to an archive is
to call this function:
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename,
const char *pArchive_name,
const void *pBuf, size_t buf_size, const void *pComment, mz_uint16
comment_size, mz_uint level_and_flags);
The archive will be created if it doesn't already exist, otherwise it'll be
appended to.
Note the appending is done in-place and is not an atomic operation, so if
something goes wrong
during the operation it's possible the archive could be left without a
central directory (although the local
file headers and file data will be fine, so the archive will be
recoverable).
For more complex archive modification scenarios:
1. The safest way is to use a mz_zip_reader to read the existing archive,
cloning only those bits you want to
preserve into a new archive using the
mz_zip_writer_add_from_zip_reader() function (which compiles the
compressed file data as-is). When you're done, delete the old archive and
rename the newly written archive, and
you're done. This is safe but requires a bunch of temporary disk space or
heap memory.
2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using
mz_zip_writer_init_from_reader(),
append new files as needed, then finalize the archive which will write an
updated central directory to the
original archive. (This is basically what
mz_zip_add_mem_to_archive_file_in_place() does.) There's a
possibility that the archive's central directory could be lost with this
method if anything goes wrong, though.
- ZIP archive support limitations:
No zip64 or spanning support. Extraction functions can only handle
unencrypted, stored or deflated files.
Requires streams capable of seeking.
* This is a header file library, like stb_image.c. To get only a header file,
either cut and paste the
below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then
include miniz.c from it.
* Important: For best perf. be sure to customize the below macros for your
target platform:
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_LITTLE_ENDIAN 1
#define MINIZ_HAS_64BIT_REGISTERS 1
* On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before
including miniz.c to ensure miniz
uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be
able to process large files
(i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED
//#include <stdlib.h>
// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.
// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO
// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME
// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS
// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing-related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS
// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC
#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
// TODO: Work around the "error: include file 'sys\utime.h'" error when
// compiling with tcc on Linux
#define MINIZ_NO_TIME
#endif
#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
//#include <time.h>
#endif
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#if MINIZ_X86_OR_X64_CPU
// Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient
// integer loads and stores from unaligned addresses.
//#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \
0 // disable to suppress compiler warnings
#endif
#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \
defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \
defined(__x86_64__)
// Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are
// reasonably fast (and don't involve compiler generated calls to helper
// functions).
#define MINIZ_HAS_64BIT_REGISTERS 1
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API Definitions.
// For more compatibility with zlib, miniz.c uses unsigned long for some
// parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits!
typedef unsigned long mz_ulong;
// mz_free() internally uses the MZ_FREE() macro (which by default calls free()
// unless you've modified the MZ_MALLOC macro) to release a block allocated from
// the heap.
void mz_free(void *p);
#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
// Compression strategies.
enum {
MZ_DEFAULT_STRATEGY = 0,
MZ_FILTERED = 1,
MZ_HUFFMAN_ONLY = 2,
MZ_RLE = 3,
MZ_FIXED = 4
};
// Method
#define MZ_DEFLATED 8
#ifndef MINIZ_NO_ZLIB_APIS
// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purpsosely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void(*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
size_t size);
#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0
// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
MZ_NO_FLUSH = 0,
MZ_PARTIAL_FLUSH = 1,
MZ_SYNC_FLUSH = 2,
MZ_FULL_FLUSH = 3,
MZ_FINISH = 4,
MZ_BLOCK = 5
};
// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
MZ_OK = 0,
MZ_STREAM_END = 1,
MZ_NEED_DICT = 2,
MZ_ERRNO = -1,
MZ_STREAM_ERROR = -2,
MZ_DATA_ERROR = -3,
MZ_MEM_ERROR = -4,
MZ_BUF_ERROR = -5,
MZ_VERSION_ERROR = -6,
MZ_PARAM_ERROR = -10000
};
// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum {
MZ_NO_COMPRESSION = 0,
MZ_BEST_SPEED = 1,
MZ_BEST_COMPRESSION = 9,
MZ_UBER_COMPRESSION = 10,
MZ_DEFAULT_LEVEL = 6,
MZ_DEFAULT_COMPRESSION = -1
};
// Window bits
#define MZ_DEFAULT_WINDOW_BITS 15
struct mz_internal_state;
// Compression/decompression stream struct.
typedef struct mz_stream_s {
const unsigned char *next_in; // pointer to next byte to read
unsigned int avail_in; // number of bytes available at next_in
mz_ulong total_in; // total number of bytes consumed so far
unsigned char *next_out; // pointer to next byte to write
unsigned int avail_out; // number of bytes that can be written to next_out
mz_ulong total_out; // total number of bytes produced so far
char *msg; // error msg (unused)
struct mz_internal_state *state; // internal state, allocated by zalloc/zfree
mz_alloc_func
zalloc; // optional heap allocation function (defaults to malloc)
mz_free_func zfree; // optional heap free function (defaults to free)
void *opaque; // heap alloc function user pointer
int data_type; // data_type (unused)
mz_ulong adler; // adler32 of the source or uncompressed data
mz_ulong reserved; // not used
} mz_stream;
typedef mz_stream *mz_streamp;
// Returns the version string of miniz.c.
const char *mz_version(void);
// mz_deflateInit() initializes a compressor with default options:
// Parameters:
// pStream must point to an initialized mz_stream struct.
// level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION].
// level 1 enables a specially optimized compression function that's been
// optimized purely for performance, not ratio.
// (This special func. is currently only enabled when
// MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.)
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if the input parameters are bogus.
// MZ_MEM_ERROR on out of memory.
int mz_deflateInit(mz_streamp pStream, int level);
// mz_deflateInit2() is like mz_deflate(), except with more control:
// Additional parameters:
// method must be MZ_DEFLATED
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
// zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
// header or footer)
// mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
int mem_level, int strategy);
// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);
// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
// MZ_FINISH.
// Return values:
// MZ_OK on success (when flushing, or if more input is needed but not
// available, and/or there's more output to be written but the output buffer
// is full).
// MZ_STREAM_END if all input has been consumed and all output bytes have been
// written. Don't call mz_deflate() on the stream anymore.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input and/or
// output buffers are empty. (Fill up the input buffer or free up some output
// space and try again.)
int mz_deflate(mz_streamp pStream, int flush);
// mz_deflateEnd() deinitializes a compressor:
// Return values:
// MZ_OK on success.
// MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);
// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len, int level);
// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);
// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);
// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);
// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
// pStream is the stream to read from and write to. You must initialize/update
// the next_in, avail_in, next_out, and avail_out members.
// flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
// On the first call, if flush is MZ_FINISH it's assumed the input and output
// buffers are both sized large enough to decompress the entire stream in a
// single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside
// what's already in the input buffer, and that the output buffer is large
// enough to hold the rest of the decompressed data.
// Return values:
// MZ_OK on success. Either more input is needed but not available, and/or
// there's more output to be written but the output buffer is full.
// MZ_STREAM_END if all needed input has been consumed and all output bytes
// have been written. For zlib streams, the adler-32 of the decompressed data
// has also been verified.
// MZ_STREAM_ERROR if the stream is bogus.
// MZ_DATA_ERROR if the deflate stream is invalid.
// MZ_PARAM_ERROR if one of the parameters is invalid.
// MZ_BUF_ERROR if no forward progress is possible because the input buffer is
// empty but the inflater needs more input to continue, or if the output
// buffer is not large enough. Call mz_inflate() again
// with more input data, or with more room in the output buffer (except when
// using single call decompression, described above).
int mz_inflate(mz_streamp pStream, int flush);
// Deinitializes a decompressor.
int mz_inflateEnd(mz_streamp pStream);
// Single-call decompression.
// Returns MZ_OK on success, or one of the error codes from mz_inflate() on
// failure.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
const unsigned char *pSource, mz_ulong source_len);
// Returns a string description of the specified error code, or NULL if the
// error code is invalid.
const char *mz_error(int err);
// Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used
// as a drop-in replacement for the subset of zlib that miniz.c supports.
// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you
// use zlib in the same project.
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Types and macros
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;
#define MZ_FALSE (0)
#define MZ_TRUE (1)
// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif
// ------------------- ZIP archive reading/writing
#ifndef MINIZ_NO_ARCHIVE_APIS
enum {
MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};
// Detailed information about a single file entry inside a ZIP archive,
// as filled in by mz_zip_reader_file_stat().
typedef struct {
  mz_uint32 m_file_index;        // index of this entry within the archive
  mz_uint32 m_central_dir_ofs;   // byte offset of this entry's central directory record
  mz_uint16 m_version_made_by;   // "version made by" central directory field
  mz_uint16 m_version_needed;    // minimum version needed to extract
  mz_uint16 m_bit_flag;          // general-purpose bit flags
  mz_uint16 m_method;            // compression method identifier
#ifndef MINIZ_NO_TIME
  time_t m_time;                 // file time (field absent when MINIZ_NO_TIME is defined)
#endif
  mz_uint32 m_crc32;             // CRC-32 of the file's uncompressed data
  mz_uint64 m_comp_size;         // compressed size, in bytes
  mz_uint64 m_uncomp_size;       // uncompressed size, in bytes
  mz_uint16 m_internal_attr;     // internal file attributes
  mz_uint32 m_external_attr;     // external (OS-specific) file attributes
  mz_uint64 m_local_header_ofs;  // byte offset of this entry's local file header
  mz_uint32 m_comment_size;      // length of the entry's comment, in bytes
  char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];     // entry filename
  char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];  // entry comment
} mz_zip_archive_file_stat;
typedef size_t(*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
void *pBuf, size_t n);
typedef size_t(*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
const void *pBuf, size_t n);
struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;
typedef enum {
MZ_ZIP_MODE_INVALID = 0,
MZ_ZIP_MODE_READING = 1,
MZ_ZIP_MODE_WRITING = 2,
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;
// ZIP archive read/write state. The heap callbacks (m_pAlloc/m_pFree/
// m_pRealloc) and the archive I/O callbacks (m_pRead/m_pWrite) are installed
// by the mz_zip_reader_init*/mz_zip_writer_init* functions.
typedef struct mz_zip_archive_tag {
  mz_uint64 m_archive_size;                // total archive size, in bytes
  mz_uint64 m_central_directory_file_ofs;  // offset of the central directory
  mz_uint m_total_files;                   // number of file entries in the archive
  mz_zip_mode m_zip_mode;                  // current mode (see mz_zip_mode)
  mz_uint m_file_offset_alignment;         // presumably alignment applied to added file data -- TODO confirm
  mz_alloc_func m_pAlloc;                  // heap allocation callback
  mz_free_func m_pFree;                    // heap free callback
  mz_realloc_func m_pRealloc;              // heap realloc callback
  void *m_pAlloc_opaque;                   // user pointer passed to the heap callbacks
  mz_file_read_func m_pRead;               // archive read callback
  mz_file_write_func m_pWrite;             // archive write callback
  void *m_pIO_opaque;                      // user pointer passed to the I/O callbacks
  mz_zip_internal_state *m_pState;         // opaque internal state
} mz_zip_archive;
typedef enum {
MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;
// ZIP archive reading
// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
size_t size, mz_uint32 flags);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint32 flags);
#endif
// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);
// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
mz_zip_archive_file_stat *pStat);
// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
mz_uint file_index);
// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
char *pFilename, mz_uint filename_buf_size);
// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
const char *pComment, mz_uint flags);
// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
mz_uint file_index, void *pBuf,
size_t buf_size, mz_uint flags,
void *pUser_read_buf,
size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
void *pBuf, size_t buf_size,
mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
const char *pFilename, void *pBuf,
size_t buf_size, mz_uint flags);
// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
const char *pFilename, size_t *pSize,
mz_uint flags);
// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
mz_uint file_index,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
const char *pFilename,
mz_file_write_func pCallback,
void *pOpaque, mz_uint flags);
#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
const char *pArchive_filename,
const char *pDst_filename,
mz_uint flags);
#endif
// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
// ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
size_t size_to_reserve_at_beginning,
size_t initial_allocation_size);
#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
mz_uint64 size_to_reserve_at_beginning);
#endif
// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
const char *pFilename);
// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash and an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
const void *pBuf, size_t buf_size,
mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment,
mz_uint16 comment_size,
mz_uint level_and_flags, mz_uint64 uncomp_size,
mz_uint32 uncomp_crc32);
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
const char *pSrc_filename, const void *pComment,
mz_uint16 comment_size, mz_uint level_and_flags);
#endif
// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
mz_zip_archive *pSource_zip,
mz_uint file_index);
// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
size_t *pSize);
// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
// Misc. high-level helper functions:
// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
const char *pZip_filename, const char *pArchive_name, const void *pBuf,
size_t buf_size, const void *pComment, mz_uint16 comment_size,
mz_uint level_and_flags);
// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint zip_flags);
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
// ------------------- Low-level Decompression API Definitions
// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the
// decompressed bytes.
enum {
TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
TINFL_FLAG_HAS_MORE_INPUT = 2,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
TINFL_FLAG_COMPUTE_ADLER32 = 8
};
// High level decompression functions:
// tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data
// to decompress.
// On return:
// Function returns a pointer to the decompressed data, or NULL on failure.
// *pOut_len will be set to the decompressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must call mz_free() on the returned block when it's no longer
// needed.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tinfl_decompress_mem_to_mem() decompresses a block in memory to another block
// in memory.
// Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes
// written on success.
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// tinfl_decompress_mem_to_callback() decompresses a block in memory to an
// internal 32KB buffer, and a user provided callback function will be called to
// flush the buffer.
// Returns 1 on success or 0 on failure.
typedef int(*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
tinfl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;
// Max size of LZ dictionary.
#define TINFL_LZ_DICT_SIZE 32768
// Return status codes for tinfl_decompress(). Negative values are errors;
// non-negative values indicate progress states.
typedef enum {
  TINFL_STATUS_BAD_PARAM = -3,         // invalid parameters were passed in
  TINFL_STATUS_ADLER32_MISMATCH = -2,  // stream's adler-32 did not match the computed value
  TINFL_STATUS_FAILED = -1,            // decompression failed
  TINFL_STATUS_DONE = 0,               // decompression finished successfully
  TINFL_STATUS_NEEDS_MORE_INPUT = 1,   // call again with more input data
  TINFL_STATUS_HAS_MORE_OUTPUT = 2     // call again with more room in the output buffer
} tinfl_status;
// Initializes the decompressor to its initial state.
#define tinfl_init(r) \
do { \
(r)->m_state = 0; \
} \
MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32
// Main low-level decompressor coroutine function. This is the only function
// actually needed for decompression. All the other functions are just
// high-level helpers for improved usability.
// This is a universal API, i.e. it can be used as a building block to build any
// desired higher level decompression API. In the limit case, it can be called
// once per every byte input or output.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
const mz_uint8 *pIn_buf_next,
size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
const mz_uint32 decomp_flags);
// Internal/private bits follow.
enum {
TINFL_MAX_HUFF_TABLES = 3,
TINFL_MAX_HUFF_SYMBOLS_0 = 288,
TINFL_MAX_HUFF_SYMBOLS_1 = 32,
TINFL_MAX_HUFF_SYMBOLS_2 = 19,
TINFL_FAST_LOOKUP_BITS = 10,
TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};
typedef struct {
mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;
#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif
#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif
// Internal/private decompressor state -- treat as opaque. Initialize with
// tinfl_init() (which only clears m_state) and drive it via tinfl_decompress().
struct tinfl_decompressor_tag {
  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
      m_check_adler32, m_dist, m_counter, m_num_extra,
      m_table_sizes[TINFL_MAX_HUFF_TABLES];
  tinfl_bit_buf_t m_bit_buf;  // bit buffer, 32 or 64 bits wide (see TINFL_USE_64BIT_BITBUF)
  size_t m_dist_from_out_buf_start;
  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];  // Huffman decode tables
  mz_uint8 m_raw_header[4],
      m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};
// ------------------- Low-level Compression API Definitions
// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0
// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
TDEFL_HUFFMAN_ONLY = 0,
TDEFL_DEFAULT_MAX_PROBES = 128,
TDEFL_MAX_PROBES_MASK = 0xFFF
};
// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even
// when not writing zlib headers).
// TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more
// efficient lazy parsing.
// TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's
// initialization time to the minimum, but the output may vary from run to run
// given the same input (depending on the contents of memory).
// TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1)
// TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled.
// TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables.
// TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks.
// The low 12 bits are reserved to control the max # of hash probes per
// dictionary lookup (see TDEFL_MAX_PROBES_MASK).
enum {
TDEFL_WRITE_ZLIB_HEADER = 0x01000,
TDEFL_COMPUTE_ADLER32 = 0x02000,
TDEFL_GREEDY_PARSING_FLAG = 0x04000,
TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
TDEFL_RLE_MATCHES = 0x10000,
TDEFL_FILTER_MATCHES = 0x20000,
TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};
// High level compression functions:
// tdefl_compress_mem_to_heap() compresses a block in memory to a heap block
// allocated via malloc().
// On entry:
// pSrc_buf, src_buf_len: Pointer and size of source block to compress.
// flags: The max match finder probes (default is 128) logically OR'd against
// the above flags. Higher probes are slower but improve compression.
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pOut_len will be set to the compressed data's size, which could be larger
// than src_buf_len on uncompressible data.
// The caller must free() the returned block when it's no longer needed.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
size_t *pOut_len, int flags);
// tdefl_compress_mem_to_mem() compresses a block in memory to another block in
// memory.
// Returns 0 on failure.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
const void *pSrc_buf, size_t src_buf_len,
int flags);
// Compresses an image to a compressed PNG file in memory.
// On entry:
// pImage, w, h, and num_chans describe the image to compress. num_chans may be
// 1, 2, 3, or 4.
// The image pitch in bytes per scanline will be w*num_chans. The leftmost
// pixel on the top scanline is stored first in memory.
// level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL
// If flip is true, the image will be flipped on the Y axis (useful for OpenGL
// apps).
// On return:
// Function returns a pointer to the compressed data, or NULL on failure.
// *pLen_out will be set to the size of the PNG image file.
// The caller must mz_free() the returned heap block (which will typically be
// larger than *pLen_out) when it's no longer needed.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
int h, int num_chans,
size_t *pLen_out,
mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
int num_chans, size_t *pLen_out);
// Output stream interface. The compressor uses this interface to write
// compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time.
typedef mz_bool(*tdefl_put_buf_func_ptr)(const void *pBuf, int len,
void *pUser);
// tdefl_compress_mem_to_output() compresses a block to an output stream. The
// above helpers use this function internally.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
enum {
TDEFL_MAX_HUFF_TABLES = 3,
TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
TDEFL_LZ_DICT_SIZE = 32768,
TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
TDEFL_MIN_MATCH_LEN = 3,
TDEFL_MAX_MATCH_LEN = 258
};
// TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed
// output block (using static/fixed Huffman codes).
#if TDEFL_LESS_MEMORY
enum {
TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 12,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum {
TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
TDEFL_MAX_HUFF_SYMBOLS = 288,
TDEFL_LZ_HASH_BITS = 15,
TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif
// The low-level tdefl functions below may be used directly if the above helper
// functions aren't flexible enough. The low-level functions don't make any heap
// allocations, unlike the above helper functions.
typedef enum {
TDEFL_STATUS_BAD_PARAM = -2,
TDEFL_STATUS_PUT_BUF_FAILED = -1,
TDEFL_STATUS_OKAY = 0,
TDEFL_STATUS_DONE = 1
} tdefl_status;
// Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums
typedef enum {
TDEFL_NO_FLUSH = 0,
TDEFL_SYNC_FLUSH = 2,
TDEFL_FULL_FLUSH = 3,
TDEFL_FINISH = 4
} tdefl_flush;
// tdefl's full compression state. This struct is large (hundreds of KB,
// dominated by the dictionary, hash, and output arrays below) -- allocate it
// on the heap rather than the stack.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;  // optional output callback (may be NULL)
  void *m_pPut_buf_user;                   // user pointer passed to the callback
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  // State preserved between tdefl_compress() calls so compression can resume.
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;  // result of the previous tdefl_compress() call
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];  // LZ dictionary window
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;
// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output data will be supplied to this
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
tdefl_put_buf_func_ptr pPut_buf_func,
void *pPut_buf_user, int flags);
// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
size_t *pIn_buf_size, void *pOut_buf,
size_t *pOut_buf_size, tdefl_flush flush);
// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
size_t in_buf_size, tdefl_flush flush);
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS
#ifdef __cplusplus
}
#endif
#endif // MINIZ_HEADER_INCLUDED
// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)
#ifndef MINIZ_HEADER_FILE_ONLY
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
//#include <assert.h>
//#include <string.h>
#define MZ_ASSERT(x) assert(x)
#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif
#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) \
((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \
((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif
#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif
#ifdef __cplusplus
extern "C" {
#endif
// ------------------- zlib-style API's
// Computes/updates an Adler-32 checksum over buf_len bytes at ptr.
// Passing a NULL ptr returns the initial checksum value (MZ_ADLER32_INIT).
// The two running sums are reduced modulo 65521 once per block; 5552 is the
// largest block length for which the 32-bit accumulators cannot overflow.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 lo = (mz_uint32)(adler & 0xffff);  // s1: running byte sum
  mz_uint32 hi = (mz_uint32)(adler >> 16);     // s2: running sum of s1 values
  size_t chunk = buf_len % 5552;               // first (possibly short) block
  if (!ptr) return MZ_ADLER32_INIT;
  while (buf_len) {
    mz_uint32 k = 0;
    // Process eight bytes per iteration while enough remain in this block.
    while (k + 7 < chunk) {
      lo += ptr[0]; hi += lo;
      lo += ptr[1]; hi += lo;
      lo += ptr[2]; hi += lo;
      lo += ptr[3]; hi += lo;
      lo += ptr[4]; hi += lo;
      lo += ptr[5]; hi += lo;
      lo += ptr[6]; hi += lo;
      lo += ptr[7]; hi += lo;
      ptr += 8;
      k += 8;
    }
    // Mop up the remaining bytes of the block one at a time.
    while (k < chunk) {
      lo += *ptr++;
      hi += lo;
      ++k;
    }
    lo %= 65521U;
    hi %= 65521U;
    buf_len -= chunk;
    chunk = 5552;  // all subsequent blocks are full-sized
  }
  return (hi << 16) + lo;
}
// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
// Updates a CRC-32 over buf_len bytes at ptr, four bits at a time using a
// 16-entry table. Passing a NULL ptr returns the initial value (MZ_CRC32_INIT).
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  static const mz_uint32 s_crc32[16] = {
    0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4,
    0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
    0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c };
  if (!ptr) return MZ_CRC32_INIT;
  mz_uint32 c = ~(mz_uint32)crc;  // the CRC is maintained in inverted form
  for (size_t i = 0; i < buf_len; ++i) {
    mz_uint8 byte = ptr[i];
    c = (c >> 4) ^ s_crc32[(c ^ byte) & 0xF];         // low nibble
    c = (c >> 4) ^ s_crc32[(c ^ (byte >> 4)) & 0xF];  // high nibble
  }
  return ~c;
}
void mz_free(void *p) { MZ_FREE(p); }
#ifndef MINIZ_NO_ZLIB_APIS
// Default zalloc callback, installed when the caller leaves pStream->zalloc
// NULL: forwards to MZ_MALLOC. Guards against items * size overflowing
// size_t, which would otherwise silently produce a too-small allocation.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque, (void)items, (void)size;
  if (items && (size > ((size_t)-1) / items)) return NULL;  // overflow -> fail
  return MZ_MALLOC(items * size);
}
// Default zfree callback, installed when the caller leaves pStream->zfree
// NULL: simply forwards the pointer to MZ_FREE.
static void def_free_func(void *opaque, void *address) {
  (void)opaque;
  (void)address;
  MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
// (void)opaque, (void)address, (void)items, (void)size;
// return MZ_REALLOC(address, items * size);
//}
const char *mz_version(void) { return MZ_VERSION; }
// Simple deflate initializer (zlib deflateInit() equivalent): zlib header +
// adler-32, default window bits, default strategy. The mem_level argument (9)
// is only range-checked by mz_deflateInit2() below.
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
                         MZ_DEFAULT_STRATEGY);
}
// Full-control deflate initializer (zlib deflateInit2() equivalent).
// method must be MZ_DEFLATED; window_bits must be MZ_DEFAULT_WINDOW_BITS
// (zlib wrapper) or its negation (raw deflate); mem_level is only
// range-checked here (valid values are 1-9).
// Returns MZ_OK, or MZ_STREAM_ERROR / MZ_PARAM_ERROR / MZ_MEM_ERROR.
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
  if (!pStream) return MZ_STREAM_ERROR;
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;
  // Reset all user-visible stream bookkeeping.
  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  // Fall back to the malloc/free based default allocators if none were given.
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pComp;
  // If the compressor rejects the derived flags, undo the allocation.
  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    mz_deflateEnd(pStream);
    return MZ_PARAM_ERROR;
  }
  return MZ_OK;
}
// Resets a deflate stream so it can compress a new data set, keeping the
// allocator callbacks and the compression flags chosen at init time.
// Returns MZ_OK, or MZ_STREAM_ERROR if the stream was never initialized.
int mz_deflateReset(mz_streamp pStream) {
  tdefl_compressor *pComp;
  if (!pStream || !pStream->state || !pStream->zalloc || !pStream->zfree)
    return MZ_STREAM_ERROR;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pComp = (tdefl_compressor *)pStream->state;
  tdefl_init(pComp, NULL, NULL, pComp->m_flags);
  return MZ_OK;
}
// Compresses pending input from pStream->next_in into pStream->next_out,
// mirroring zlib's deflate(). Returns MZ_OK while more work remains,
// MZ_STREAM_END once the stream is finished (flush == MZ_FINISH), or
// MZ_STREAM_ERROR / MZ_BUF_ERROR on failure or lack of progress.
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;
  // Validate the stream, flush range, and output pointer up front.
  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out) return MZ_BUF_ERROR;
  // Partial flush is treated the same as a sync flush here.
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  // Once the compressor has reported DONE, only a MZ_FINISH call is valid.
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
  // Remember the starting totals so we can detect forward progress below.
  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes, pStream->next_out,
                                 &out_bytes, (tdefl_flush)flush);
    // Advance the input cursor by however much tdefl actually consumed.
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
    // Advance the output cursor by however much tdefl produced.
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    }
    else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    }
    else if (!pStream->avail_out)
      break;  // output buffer is full -- caller must drain it and call again
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR; // Can't make forward progress without some input.
    }
  }
  return mz_status;
}
// Releases the compressor state attached to the stream. Safe to call when
// the state pointer is already NULL.
int mz_deflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state != NULL) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// Returns a worst-case bound on the deflated size of source_len bytes.
// Deliberately over-conservative: computing a tight bound is tricky given
// the way tdefl blocks its output.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  mz_ulong expansion_bound = 128 + (source_len * 110) / 100;
  mz_ulong block_overhead_bound =
      128 + source_len + ((source_len / (31 * 1024)) + 1) * 5;
  (void)pStream;
  return MZ_MAX(expansion_bound, block_overhead_bound);
}
// One-shot memory-to-memory compression built on the streaming deflate API.
// On success, *pDest_len is updated to the compressed size.
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level) {
  int ret;
  mz_stream strm;
  memset(&strm, 0, sizeof(strm));
  // Reject lengths that don't fit in 32 bits (mz_ulong may be a 64-bit long).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  strm.next_in = pSource;
  strm.avail_in = (mz_uint32)source_len;
  strm.next_out = pDest;
  strm.avail_out = (mz_uint32)*pDest_len;
  ret = mz_deflateInit(&strm, level);
  if (ret != MZ_OK) return ret;
  ret = mz_deflate(&strm, MZ_FINISH);
  if (ret != MZ_STREAM_END) {
    mz_deflateEnd(&strm);
    return (ret == MZ_OK) ? MZ_BUF_ERROR : ret;
  }
  *pDest_len = strm.total_out;
  return mz_deflateEnd(&strm);
}
// One-shot compression at the default compression level.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}
// Returns a (conservative) upper bound on the compressed size of
// source_len bytes; the stream argument to mz_deflateBound is unused.
mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}
// Internal per-stream decompression state, hung off mz_stream.state by the
// mz_inflate* wrappers.
typedef struct {
  tinfl_decompressor m_decomp; // low-level tinfl coroutine state
  // m_dict_ofs: next write offset into m_dict; m_dict_avail: decompressed
  // bytes buffered in m_dict not yet copied to the caller; m_first_call:
  // set until the first mz_inflate() call; m_has_flushed: MZ_FINISH seen.
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits; // positive => expect zlib header, negative => raw deflate
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; // LZ dictionary / staging output window
  tinfl_status m_last_status; // last status returned by tinfl_decompress()
} inflate_state;
// Initializes a stream for decompression. window_bits must be
// +/-MZ_DEFAULT_WINDOW_BITS; a negative value selects a raw deflate stream
// (no zlib header/adler-32 trailer).
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pState;
  if (!pStream) return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;
  // Reset the public stream fields.
  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  // Install the default allocator callbacks if the caller supplied none.
  if (!pStream->zalloc) pStream->zalloc = def_alloc_func;
  if (!pStream->zfree) pStream->zfree = def_free_func;
  pState = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                            sizeof(inflate_state));
  if (!pState) return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pState;
  tinfl_init(&pState->m_decomp);
  pState->m_window_bits = window_bits;
  pState->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pState->m_dict_ofs = 0;
  pState->m_dict_avail = 0;
  pState->m_first_call = 1;
  pState->m_has_flushed = 0;
  return MZ_OK;
}
// Convenience wrapper: initializes inflate expecting a standard zlib header.
int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
// Streaming decompression (zlib-style inflate()). Output is normally staged
// through the internal TINFL_LZ_DICT_SIZE-byte dictionary and copied out in
// chunks; the one exception is MZ_FINISH on the very first call, where the
// caller's buffers must be large enough for the whole stream and tinfl writes
// directly into next_out. Returns MZ_OK, MZ_STREAM_END, MZ_DATA_ERROR on
// corruption, MZ_STREAM_ERROR on misuse, or MZ_BUF_ERROR when more
// input/output space is needed to make progress.
int mz_inflate(mz_streamp pStream, int flush) {
  inflate_state *pState;
  mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
  size_t in_bytes, out_bytes, orig_avail_in;
  tinfl_status status;
  if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR;
  if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH;
  if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
    return MZ_STREAM_ERROR;
  pState = (inflate_state *)pStream->state;
  if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
  orig_avail_in = pStream->avail_in;
  first_call = pState->m_first_call;
  pState->m_first_call = 0;
  if (pState->m_last_status < 0) return MZ_DATA_ERROR;
  // Once MZ_FINISH has been used, every subsequent call must also use it.
  if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR;
  pState->m_has_flushed |= (flush == MZ_FINISH);
  if ((flush == MZ_FINISH) && (first_call)) {
    // MZ_FINISH on the first call implies that the input and output buffers are
    // large enough to hold the entire compressed/decompressed file.
    decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes,
                              pStream->next_out, pStream->next_out, &out_bytes,
                              decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (status < 0)
      return MZ_DATA_ERROR;
    else if (status != TINFL_STATUS_DONE) {
      // Stream didn't finish in one shot: the buffers weren't big enough.
      pState->m_last_status = TINFL_STATUS_FAILED;
      return MZ_BUF_ERROR;
    }
    return MZ_STREAM_END;
  }
  // flush != MZ_FINISH then we must assume there's more input.
  if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
  // First drain any decompressed bytes still buffered in the dictionary.
  if (pState->m_dict_avail) {
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    return ((pState->m_last_status == TINFL_STATUS_DONE) &&
            (!pState->m_dict_avail))
               ? MZ_STREAM_END
               : MZ_OK;
  }
  // Decompress into the dictionary, then copy out as much as fits.
  for (;;) {
    in_bytes = pStream->avail_in;
    out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
    status = tinfl_decompress(
        &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict,
        pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
    pState->m_last_status = status;
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tinfl_get_adler32(&pState->m_decomp);
    pState->m_dict_avail = (mz_uint)out_bytes;
    n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
    memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
    pStream->next_out += n;
    pStream->avail_out -= n;
    pStream->total_out += n;
    pState->m_dict_avail -= n;
    pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
    if (status < 0)
      return MZ_DATA_ERROR; // Stream is corrupted (there could be some
    // uncompressed data left in the output dictionary -
    // oh well).
    else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
      return MZ_BUF_ERROR; // Signal caller that we can't make forward progress
    // without supplying more input or by setting flush
    // to MZ_FINISH.
    else if (flush == MZ_FINISH) {
      // The output buffer MUST be large to hold the remaining uncompressed data
      // when flush==MZ_FINISH.
      if (status == TINFL_STATUS_DONE)
        return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
      // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's
      // at least 1 more byte on the way. If there's no more room left in the
      // output buffer then something is wrong.
      else if (!pStream->avail_out)
        return MZ_BUF_ERROR;
    }
    else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) ||
             (!pStream->avail_out) || (pState->m_dict_avail))
      break;
  }
  return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail))
             ? MZ_STREAM_END
             : MZ_OK;
}
// Releases the decompressor state attached to the stream. Safe to call when
// the state pointer is already NULL.
int mz_inflateEnd(mz_streamp pStream) {
  if (!pStream) return MZ_STREAM_ERROR;
  if (pStream->state != NULL) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}
// One-shot memory-to-memory decompression of a zlib stream. On success,
// *pDest_len receives the decompressed size.
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len,
                  const unsigned char *pSource, mz_ulong source_len) {
  int ret;
  mz_stream strm;
  memset(&strm, 0, sizeof(strm));
  // Reject lengths that don't fit in 32 bits (mz_ulong may be a 64-bit long).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR;
  strm.next_in = pSource;
  strm.avail_in = (mz_uint32)source_len;
  strm.next_out = pDest;
  strm.avail_out = (mz_uint32)*pDest_len;
  ret = mz_inflateInit(&strm);
  if (ret != MZ_OK) return ret;
  ret = mz_inflate(&strm, MZ_FINISH);
  if (ret != MZ_STREAM_END) {
    mz_inflateEnd(&strm);
    // BUF_ERROR with all input consumed means the stream was truncated.
    return ((ret == MZ_BUF_ERROR) && (!strm.avail_in)) ? MZ_DATA_ERROR : ret;
  }
  *pDest_len = strm.total_out;
  return mz_inflateEnd(&strm);
}
// Translates a miniz/zlib status code into a short human-readable string.
// Returns NULL when the code is unknown.
const char *mz_error(int err) {
  static struct {
    int m_err;
    const char *m_pDesc;
  } s_error_descs[] = { {MZ_OK, ""},
                        {MZ_STREAM_END, "stream end"},
                        {MZ_NEED_DICT, "need dictionary"},
                        {MZ_ERRNO, "file error"},
                        {MZ_STREAM_ERROR, "stream error"},
                        {MZ_DATA_ERROR, "data error"},
                        {MZ_MEM_ERROR, "out of memory"},
                        {MZ_BUF_ERROR, "buf error"},
                        {MZ_VERSION_ERROR, "version error"},
                        {MZ_PARAM_ERROR, "parameter error"} };
  const size_t num_descs = sizeof(s_error_descs) / sizeof(s_error_descs[0]);
  size_t idx;
  for (idx = 0; idx < num_descs; ++idx) {
    if (s_error_descs[idx].m_err == err) return s_error_descs[idx].m_pDesc;
  }
  return NULL;
}
#endif // MINIZ_NO_ZLIB_APIS
// ------------------- Low-level Decompression (completely independent from all
// compression API's)
// Thin wrappers so the memory primitives can be swapped out in one place.
#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)
// The TINFL_CR_* macros implement a resumable coroutine via a switch on
// r->m_state: TINFL_CR_RETURN records a resume point and jumps to
// common_exit, and re-entering tinfl_decompress() switches straight back to
// the matching case label with all saved state restored.
#define TINFL_CR_BEGIN \
  switch (r->m_state) { \
    case 0:
#define TINFL_CR_RETURN(state_index, result) \
  do { \
    status = result; \
    r->m_state = state_index; \
    goto common_exit; \
    case state_index:; \
  } \
  MZ_MACRO_END
// Permanently parks the coroutine: every subsequent resume immediately
// returns the same result (used for fatal errors and final DONE).
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
  do { \
    for (;;) { \
      TINFL_CR_RETURN(state_index, result); \
    } \
  } \
  MZ_MACRO_END
#define TINFL_CR_FINISH }
// TODO: If the caller has indicated that there's no more input, and we attempt
// to read beyond the input buf, then something is wrong with the input because
// the inflator never
// reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of
// the stream with 0's in this scenario.
// Fetches the next input byte into c, suspending the coroutine when the
// input is exhausted but more is expected; pads with 0 otherwise (see TODO).
#define TINFL_GET_BYTE(state_index, c) \
  do { \
    if (pIn_buf_cur >= pIn_buf_end) { \
      for (;;) { \
        if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \
          TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \
          if (pIn_buf_cur < pIn_buf_end) { \
            c = *pIn_buf_cur++; \
            break; \
          } \
        } else { \
          c = 0; \
          break; \
        } \
      } \
    } else \
      c = *pIn_buf_cur++; \
  } \
  MZ_MACRO_END
// Refills bit_buf one byte at a time until at least n bits are available.
#define TINFL_NEED_BITS(state_index, n) \
  do { \
    mz_uint c; \
    TINFL_GET_BYTE(state_index, c); \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
    num_bits += 8; \
  } while (num_bits < (mz_uint)(n))
// Discards the low n bits of the bit buffer.
#define TINFL_SKIP_BITS(state_index, n) \
  do { \
    if (num_bits < (mz_uint)(n)) { \
      TINFL_NEED_BITS(state_index, n); \
    } \
    bit_buf >>= (n); \
    num_bits -= (n); \
  } \
  MZ_MACRO_END
// Extracts the low n bits of the bit buffer into b, then discards them.
#define TINFL_GET_BITS(state_index, b, n) \
  do { \
    if (num_bits < (mz_uint)(n)) { \
      TINFL_NEED_BITS(state_index, n); \
    } \
    b = bit_buf & ((1 << (n)) - 1); \
    bit_buf >>= (n); \
    num_bits -= (n); \
  } \
  MZ_MACRO_END
// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2.
// It reads just enough bytes from the input stream that are needed to decode
// the next Huffman code (and absolutely no more). It works by trying to fully
// decode a
// Huffman code by using whatever bits are currently present in the bit buffer.
// If this fails, it reads another byte, and tries again until it succeeds or
// until the
// bit buffer contains >=15 bits (deflate's max. Huffman code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
  do { \
    temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
    if (temp >= 0) { \
      code_len = temp >> 9; \
      if ((code_len) && (num_bits >= code_len)) break; \
    } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \
      code_len = TINFL_FAST_LOOKUP_BITS; \
      do { \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
      } while ((temp < 0) && (num_bits >= (code_len + 1))); \
      if (temp >= 0) break; \
    } \
    TINFL_GET_BYTE(state_index, c); \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
    num_bits += 8; \
  } while (num_bits < 15);
// TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex
// than you would initially expect because the zlib API expects the decompressor
// to never read
// beyond the final byte of the deflate stream. (In other words, when this macro
// wants to read another byte from the input, it REALLY needs another byte in
// order to fully
// decode the next Huffman code.) Handling this properly is particularly
// important on raw deflate (non-zlib) streams, which aren't followed by a byte
// aligned adler-32.
// The slow path is only executed at the very end of the input buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
  do { \
    int temp; \
    mz_uint code_len, c; \
    if (num_bits < 15) { \
      if ((pIn_buf_end - pIn_buf_cur) < 2) { \
        TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
      } else { \
        bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \
                   (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
        pIn_buf_cur += 2; \
        num_bits += 16; \
      } \
    } \
    if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
        0) \
      code_len = temp >> 9, temp &= 511; \
    else { \
      code_len = TINFL_FAST_LOOKUP_BITS; \
      do { \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
      } while (temp < 0); \
    } \
    sym = temp; \
    bit_buf >>= code_len; \
    num_bits -= code_len; \
  } \
  MZ_MACRO_END
// The core streaming DEFLATE (RFC 1951) decompressor, written as a resumable
// coroutine (see the TINFL_CR_* macros). On entry/exit *pIn_buf_size and
// *pOut_buf_size hold the available/consumed buffer sizes. pOut_buf_start is
// the base of the (power-of-2 sized, wrapping) output window unless
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF is set. Returns a tinfl_status;
// negative values are fatal and latched for all subsequent calls.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
                              const mz_uint8 *pIn_buf_next,
                              size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
                              mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
                              const mz_uint32 decomp_flags) {
  // Base values and extra-bit counts for the length/distance codes, and the
  // order in which code-length code sizes are transmitted (RFC 1951 3.2.5/7).
  static const int s_length_base[31] = {
    3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
    35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 };
  static const int s_length_extra[31] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
                                          1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
                                          4, 4, 5, 5, 5, 5, 0, 0, 0 };
  static const int s_dist_base[32] = {
    1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
    49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
    2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 };
  static const int s_dist_extra[32] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
                                        4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
                                        9, 9, 10, 10, 11, 11, 12, 12, 13, 13 };
  static const mz_uint8 s_length_dezigzag[19] = {
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
  static const int s_min_table_sizes[3] = { 257, 1, 4 };
  tinfl_status status = TINFL_STATUS_FAILED;
  mz_uint32 num_bits, dist, counter, num_extra;
  tinfl_bit_buf_t bit_buf;
  const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end =
                                                  pIn_buf_next + *pIn_buf_size;
  mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end =
                                              pOut_buf_next + *pOut_buf_size;
  size_t out_buf_size_mask =
             (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
                 ? (size_t)-1
                 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
         dist_from_out_buf_start;
  // Ensure the output buffer's size is a power of 2, unless the output buffer
  // is large enough to hold the entire output file (in which case it doesn't
  // matter).
  if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
      (pOut_buf_next < pOut_buf_start)) {
    *pIn_buf_size = *pOut_buf_size = 0;
    return TINFL_STATUS_BAD_PARAM;
  }
  // Restore the coroutine state saved from the previous call.
  num_bits = r->m_num_bits;
  bit_buf = r->m_bit_buf;
  dist = r->m_dist;
  counter = r->m_counter;
  num_extra = r->m_num_extra;
  dist_from_out_buf_start = r->m_dist_from_out_buf_start;
  TINFL_CR_BEGIN
  bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
  r->m_z_adler32 = r->m_check_adler32 = 1;
  // Optionally parse and validate the 2-byte zlib (RFC 1950) header.
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    TINFL_GET_BYTE(1, r->m_zhdr0);
    TINFL_GET_BYTE(2, r->m_zhdr1);
    counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
               (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
    if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
      counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
                  ((out_buf_size_mask + 1) <
                   (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
    if (counter) {
      TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
    }
  }
  // Main loop: one iteration per deflate block, until the final-block bit.
  do {
    TINFL_GET_BITS(3, r->m_final, 3);
    r->m_type = r->m_final >> 1;
    if (r->m_type == 0) {
      // Stored (uncompressed) block: byte-align, read LEN/NLEN, then copy.
      TINFL_SKIP_BITS(5, num_bits & 7);
      for (counter = 0; counter < 4; ++counter) {
        if (num_bits)
          TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
        else
          TINFL_GET_BYTE(7, r->m_raw_header[counter]);
      }
      if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
          (mz_uint)(0xFFFF ^
                    (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
        TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
      }
      // First drain any bytes still sitting in the bit buffer...
      while ((counter) && (num_bits)) {
        TINFL_GET_BITS(51, dist, 8);
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        *pOut_buf_cur++ = (mz_uint8)dist;
        counter--;
      }
      // ...then bulk-copy the rest directly from the input buffer.
      while (counter) {
        size_t n;
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        while (pIn_buf_cur >= pIn_buf_end) {
          if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
            TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
          }
          else {
            TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
          }
        }
        n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
                          (size_t)(pIn_buf_end - pIn_buf_cur)),
                   counter);
        TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
        pIn_buf_cur += n;
        pOut_buf_cur += n;
        counter -= (mz_uint)n;
      }
    }
    else if (r->m_type == 3) {
      // Block type 3 is reserved => corrupt stream.
      TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
    }
    else {
      if (r->m_type == 1) {
        // Static Huffman block: fixed code sizes from RFC 1951 3.2.6.
        mz_uint8 *p = r->m_tables[0].m_code_size;
        mz_uint i;
        r->m_table_sizes[0] = 288;
        r->m_table_sizes[1] = 32;
        TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
        for (i = 0; i <= 143; ++i) *p++ = 8;
        for (; i <= 255; ++i) *p++ = 9;
        for (; i <= 279; ++i) *p++ = 7;
        for (; i <= 287; ++i) *p++ = 8;
      }
      else {
        // Dynamic Huffman block: read the code-length table sizes and the
        // (zigzag-ordered) code-length code sizes.
        for (counter = 0; counter < 3; counter++) {
          TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
          r->m_table_sizes[counter] += s_min_table_sizes[counter];
        }
        MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
        for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
          mz_uint s;
          TINFL_GET_BITS(14, s, 3);
          r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
        }
        r->m_table_sizes[2] = 19;
      }
      // Build each Huffman decoding table (fast lookup array + binary tree),
      // canonical-code style, from its code sizes. For dynamic blocks this
      // runs for tables 2 (code-length), then 1 (distance), then 0 (lit/len).
      for (; (int)r->m_type >= 0; r->m_type--) {
        int tree_next, tree_cur;
        tinfl_huff_table *pTable;
        mz_uint i, j, used_syms, total, sym_index, next_code[17],
            total_syms[16];
        pTable = &r->m_tables[r->m_type];
        MZ_CLEAR_OBJ(total_syms);
        MZ_CLEAR_OBJ(pTable->m_look_up);
        MZ_CLEAR_OBJ(pTable->m_tree);
        for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
          total_syms[pTable->m_code_size[i]]++;
        used_syms = 0, total = 0;
        next_code[0] = next_code[1] = 0;
        for (i = 1; i <= 15; ++i) {
          used_syms += total_syms[i];
          next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
        }
        // An over/under-subscribed code (except the 1-symbol case) is invalid.
        if ((65536 != total) && (used_syms > 1)) {
          TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
        }
        for (tree_next = -1, sym_index = 0;
             sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
          mz_uint rev_code = 0, l, cur_code,
                  code_size = pTable->m_code_size[sym_index];
          if (!code_size) continue;
          // Deflate codes are bit-reversed relative to lookup order.
          cur_code = next_code[code_size]++;
          for (l = code_size; l > 0; l--, cur_code >>= 1)
            rev_code = (rev_code << 1) | (cur_code & 1);
          if (code_size <= TINFL_FAST_LOOKUP_BITS) {
            mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
            while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
              pTable->m_look_up[rev_code] = k;
              rev_code += (1 << code_size);
            }
            continue;
          }
          // Longer codes go into the binary tree hanging off the lookup slot.
          if (0 ==
              (tree_cur = pTable->m_look_up[rev_code &
                                            (TINFL_FAST_LOOKUP_SIZE - 1)])) {
            pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
                (mz_int16)tree_next;
            tree_cur = tree_next;
            tree_next -= 2;
          }
          rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
          for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
            tree_cur -= ((rev_code >>= 1) & 1);
            if (!pTable->m_tree[-tree_cur - 1]) {
              pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
              tree_cur = tree_next;
              tree_next -= 2;
            }
            else
              tree_cur = pTable->m_tree[-tree_cur - 1];
          }
          tree_cur -= ((rev_code >>= 1) & 1);
          pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
        }
        if (r->m_type == 2) {
          // Using the code-length table, decode the run-length-encoded
          // lit/len and distance code sizes (symbols 16/17/18 are repeats).
          for (counter = 0;
               counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
            mz_uint s;
            TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
            if (dist < 16) {
              r->m_len_codes[counter++] = (mz_uint8)dist;
              continue;
            }
            if ((dist == 16) && (!counter)) {
              TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
            }
            num_extra = "\02\03\07"[dist - 16];
            TINFL_GET_BITS(18, s, num_extra);
            s += "\03\03\013"[dist - 16];
            TINFL_MEMSET(r->m_len_codes + counter,
                         (dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
            counter += s;
          }
          if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
            TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
          }
          TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
                       r->m_table_sizes[0]);
          TINFL_MEMCPY(r->m_tables[1].m_code_size,
                       r->m_len_codes + r->m_table_sizes[0],
                       r->m_table_sizes[1]);
        }
      }
      // Decode literals and length/distance pairs until the end-of-block
      // symbol (256). The inner loop has a careful slow path (near buffer
      // ends) and a fast path that decodes two symbols per iteration.
      for (;;) {
        mz_uint8 *pSrc;
        for (;;) {
          if (((pIn_buf_end - pIn_buf_cur) < 4) ||
              ((pOut_buf_end - pOut_buf_cur) < 2)) {
            // Slow path: decode one symbol with full bounds checking.
            TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
            if (counter >= 256) break;
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ = (mz_uint8)counter;
          }
          else {
            int sym2;
            mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
            if (num_bits < 30) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 4;
              num_bits += 32;
            }
#else
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 =
                     r->m_tables[0]
                         .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
                0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            counter = sym2;
            bit_buf >>= code_len;
            num_bits -= code_len;
            if (counter & 256) break;
#if !TINFL_USE_64BIT_BITBUF
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            // Speculatively decode a second symbol; if it turns out to be a
            // length code it is handled on the next outer iteration.
            if ((sym2 =
                     r->m_tables[0]
                         .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
                0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            bit_buf >>= code_len;
            num_bits -= code_len;
            pOut_buf_cur[0] = (mz_uint8)counter;
            if (sym2 & 256) {
              pOut_buf_cur++;
              counter = sym2;
              break;
            }
            pOut_buf_cur[1] = (mz_uint8)sym2;
            pOut_buf_cur += 2;
          }
        }
        if ((counter &= 511) == 256) break;
        // counter is now a length code (257..285): compute the match length.
        num_extra = s_length_extra[counter - 257];
        counter = s_length_base[counter - 257];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(25, extra_bits, num_extra);
          counter += extra_bits;
        }
        // Decode the match distance.
        TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
        num_extra = s_dist_extra[dist];
        dist = s_dist_base[dist];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(27, extra_bits, num_extra);
          dist += extra_bits;
        }
        dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
        if ((dist > dist_from_out_buf_start) &&
            (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
          TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
        }
        pSrc = pOut_buf_start +
               ((dist_from_out_buf_start - dist) & out_buf_size_mask);
        if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
          // Match would run past the output buffer: copy byte-by-byte,
          // suspending whenever the output fills up.
          while (counter--) {
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ =
                pOut_buf_start[(dist_from_out_buf_start++ - dist) &
                               out_buf_size_mask];
          }
          continue;
        }
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
        else if ((counter >= 9) && (counter <= dist)) {
          // Non-overlapping match: copy 8 bytes at a time.
          const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
          do {
            ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
            ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
            pOut_buf_cur += 8;
          } while ((pSrc += 8) < pSrc_end);
          if ((counter &= 7) < 3) {
            if (counter) {
              pOut_buf_cur[0] = pSrc[0];
              if (counter > 1) pOut_buf_cur[1] = pSrc[1];
              pOut_buf_cur += counter;
            }
            continue;
          }
        }
#endif
        // General in-bounds match copy, 3 bytes per iteration.
        do {
          pOut_buf_cur[0] = pSrc[0];
          pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur[2] = pSrc[2];
          pOut_buf_cur += 3;
          pSrc += 3;
        } while ((int)(counter -= 3) > 2);
        if ((int)counter > 0) {
          pOut_buf_cur[0] = pSrc[0];
          if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur += counter;
        }
      }
    }
  } while (!(r->m_final & 1));
  // Read the byte-aligned adler-32 trailer of a zlib stream.
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    TINFL_SKIP_BITS(32, num_bits & 7);
    for (counter = 0; counter < 4; ++counter) {
      mz_uint s;
      if (num_bits)
        TINFL_GET_BITS(41, s, 8);
      else
        TINFL_GET_BYTE(42, s);
      r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
    }
  }
  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
  TINFL_CR_FINISH
// Save coroutine state and report consumed/produced byte counts.
common_exit :
  r->m_num_bits = num_bits;
  r->m_bit_buf = bit_buf;
  r->m_dist = dist;
  r->m_counter = counter;
  r->m_num_extra = num_extra;
  r->m_dist_from_out_buf_start = dist_from_out_buf_start;
  *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
  *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
  // Incrementally update the adler-32 of the bytes produced this call, and
  // on DONE compare it against the stream's stored checksum.
  if ((decomp_flags &
       (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
      (status >= 0)) {
    const mz_uint8 *ptr = pOut_buf_next;
    size_t buf_len = *pOut_buf_size;
    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
                 s2 = r->m_check_adler32 >> 16;
    size_t block_len = buf_len % 5552;
    while (buf_len) {
      for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
        s1 += ptr[0], s2 += s1;
        s1 += ptr[1], s2 += s1;
        s1 += ptr[2], s2 += s1;
        s1 += ptr[3], s2 += s1;
        s1 += ptr[4], s2 += s1;
        s1 += ptr[5], s2 += s1;
        s1 += ptr[6], s2 += s1;
        s1 += ptr[7], s2 += s1;
      }
      for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
      s1 %= 65521U, s2 %= 65521U;
      buf_len -= block_len;
      block_len = 5552;
    }
    r->m_check_adler32 = (s2 << 16) + s1;
    if ((status == TINFL_STATUS_DONE) &&
        (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
        (r->m_check_adler32 != r->m_z_adler32))
      status = TINFL_STATUS_ADLER32_MISMATCH;
  }
  return status;
}
// Higher level helper functions.
// Decompresses an entire deflate stream from memory into a heap buffer that
// is grown (doubled, minimum 128 bytes) as needed. Returns the buffer — the
// caller owns it and must free it with MZ_FREE — and stores the decompressed
// size in *pOut_len. Returns NULL (with *pOut_len == 0) on any failure.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pOut = NULL;
  size_t src_ofs = 0, out_capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_size = src_buf_len - src_ofs;
    size_t out_size = out_capacity - *pOut_len;
    size_t grown_capacity;
    void *pGrown;
    // The whole input is present, and the output buffer is linear.
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + src_ofs, &in_size,
        (mz_uint8 *)pOut, pOut ? (mz_uint8 *)pOut + *pOut_len : NULL,
        &out_size,
        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      MZ_FREE(pOut);
      *pOut_len = 0;
      return NULL;
    }
    src_ofs += in_size;
    *pOut_len += out_size;
    if (status == TINFL_STATUS_DONE) break;
    // Needs more room: double the output buffer and resume.
    grown_capacity = out_capacity * 2;
    if (grown_capacity < 128) grown_capacity = 128;
    pGrown = MZ_REALLOC(pOut, grown_capacity);
    if (!pGrown) {
      MZ_FREE(pOut);
      *pOut_len = 0;
      return NULL;
    }
    pOut = pGrown;
    out_capacity = grown_capacity;
  }
  return pOut;
}
// Decompresses a whole deflate stream from one memory buffer into another.
// Returns the number of bytes written on success, or
// TINFL_DECOMPRESS_MEM_TO_MEM_FAILED.
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags) {
  tinfl_decompressor decomp;
  tinfl_status status;
  // The entire input is present, and the output buffer is linear.
  const mz_uint32 eff_flags = (mz_uint32)(
      (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
      TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
  tinfl_init(&decomp);
  status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
                            (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf,
                            &out_buf_len, eff_flags);
  if (status != TINFL_STATUS_DONE) return TINFL_DECOMPRESS_MEM_TO_MEM_FAILED;
  return out_buf_len;
}
// Decompresses from memory, delivering output through pPut_buf_func in
// dictionary-sized chunks. Returns 1 on success, 0 on failure or if the
// callback refuses data (TINFL_STATUS_FAILED if the dictionary allocation
// fails). On return, *pIn_buf_size holds the number of input bytes consumed.
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tinfl_decompressor decomp;
  size_t in_ofs = 0, dict_ofs = 0;
  int result = 0;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  if (!pDict) return TINFL_STATUS_FAILED;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_size = *pIn_buf_size - in_ofs;
    size_t out_size = TINFL_LZ_DICT_SIZE - dict_ofs;
    tinfl_status status =
        tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_ofs, &in_size,
                         pDict, pDict + dict_ofs, &out_size,
                         (flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
                                    TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_ofs += in_size;
    // Hand the newly produced bytes to the caller; stop if it refuses them.
    if ((out_size) &&
        (!(*pPut_buf_func)(pDict + dict_ofs, (int)out_size, pPut_buf_user)))
      break;
    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      result = (status == TINFL_STATUS_DONE);
      break;
    }
    dict_ofs = (dict_ofs + out_size) & (TINFL_LZ_DICT_SIZE - 1);
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_ofs;
  return result;
}
// ------------------- Low-level Compression (independent from all decompression
// API's)
// Purposely making these tables static for faster init and thread safety.
// Maps (match length - 3), i.e. 0..255, to its deflate length symbol
// (257..285, RFC 1951 section 3.2.5).
static const mz_uint16 s_tdefl_len_sym[256] = {
  257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268,
  268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272,
  272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274,
  274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276,
  276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
  277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
  278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279,
  279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280,
  280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281,
  281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
  281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282,
  282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
  282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283,
  283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
  283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284,
  284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
  284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
  285 };
// Number of extra bits for each (match length - 3), i.e. 0..255, paralleling
// s_tdefl_len_sym (RFC 1951 section 3.2.5).
static const mz_uint8 s_tdefl_len_extra[256] = {
  0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
  2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0 };
// Distance symbol for small match distances, indexed by (match_dist - 1) when
// it is < 512; larger distances use s_tdefl_large_dist_sym instead.
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
  0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8,
  8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
  10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11,
  11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
  12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
  12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
  13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14,
  14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
  14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
  14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
  14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
  15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
  15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
  15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
  16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
  16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
  17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
  17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
  17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
  17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
  17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
  17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17 };
// Number of extra distance bits for small match distances, indexed by
// (match_dist - 1) when it is < 512; companion to s_tdefl_small_dist_sym.
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
  0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
  3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
  5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
  6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
  7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 };
// Distance symbol for large match distances, indexed by (match_dist - 1) >> 8
// when the distance is >= 512 (first two entries are unused in that range).
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
  0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24,
  24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26,
  26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
  27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
  28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
  28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
  29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 };
// Number of extra distance bits for large match distances, indexed by
// (match_dist - 1) >> 8; companion to s_tdefl_large_dist_sym.
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
  0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11,
  11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
  12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
  12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
  13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
  13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
  13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13 };
// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
// Symbol/frequency pair used while constructing Huffman code lengths.
// m_key initially holds the symbol's frequency (the radix-sort key); later
// stages overwrite it with the computed code length. m_sym_index remembers
// the symbol's original index so results can be scattered back.
typedef struct {
  mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
// Sorts the tdefl_sym_freq[] array by its 16-bit m_key field using two
// byte-wide counting-sort passes, ping-ponging between pSyms0 and pSyms1.
// The high-byte pass is skipped when every key fits in 8 bits. Returns a
// pointer to whichever buffer ended up holding the sorted data.
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
                                             tdefl_sym_freq *pSyms0,
                                             tdefl_sym_freq *pSyms1) {
  mz_uint32 passes_left = 2, shift, pass_idx, k, hist[256 * 2];
  tdefl_sym_freq *pSrc = pSyms0, *pDst = pSyms1;
  MZ_CLEAR_OBJ(hist);
  // Histogram both key bytes in a single scan.
  for (k = 0; k < num_syms; k++) {
    mz_uint key = pSyms0[k].m_key;
    hist[key & 0xFF]++;
    hist[256 + ((key >> 8) & 0xFF)]++;
  }
  // If all high bytes are zero, one pass suffices.
  while ((passes_left > 1) && (num_syms == hist[(passes_left - 1) * 256]))
    passes_left--;
  for (shift = 0, pass_idx = 0; pass_idx < passes_left;
       pass_idx++, shift += 8) {
    const mz_uint32 *pCounts = &hist[pass_idx << 8];
    mz_uint offsets[256], ofs = 0;
    tdefl_sym_freq *pTmp;
    // Prefix-sum the counts into starting offsets for a stable scatter.
    for (k = 0; k < 256; k++) {
      offsets[k] = ofs;
      ofs += pCounts[k];
    }
    for (k = 0; k < num_syms; k++)
      pDst[offsets[(pSrc[k].m_key >> shift) & 0xFF]++] = pSrc[k];
    pTmp = pSrc;
    pSrc = pDst;
    pDst = pTmp;
  }
  return pSrc;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
// In-place minimum-redundancy (Huffman) code-length computation.
// Precondition: A[] is sorted by ascending frequency in m_key.
// Postcondition: each A[i].m_key holds that symbol's optimal (unbounded)
// code length. Works entirely within A[] by reusing m_key to store,
// successively, frequencies, parent indices, then depths.
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
  int root, leaf, next, avbl, used, dpth;
  if (n == 0)
    return;
  else if (n == 1) {
    A[0].m_key = 1;
    return;
  }
  // Phase 1: build the Huffman tree in place; internal nodes store the index
  // of their parent in m_key once merged.
  A[0].m_key += A[1].m_key;
  root = 0;
  leaf = 2;
  for (next = 1; next < n - 1; next++) {
    if (leaf >= n || A[root].m_key < A[leaf].m_key) {
      A[next].m_key = A[root].m_key;
      A[root++].m_key = (mz_uint16)next;
    }
    else
      A[next].m_key = A[leaf++].m_key;
    if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
      A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
      A[root++].m_key = (mz_uint16)next;
    }
    else
      A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
  }
  // Phase 2: convert the parent links into node depths, root downward.
  A[n - 2].m_key = 0;
  for (next = n - 3; next >= 0; next--)
    A[next].m_key = A[A[next].m_key].m_key + 1;
  // Phase 3: assign code lengths to the leaves, one depth level at a time.
  avbl = 1;
  used = dpth = 0;
  root = n - 2;
  next = n - 1;
  while (avbl > 0) {
    while (root >= 0 && (int)A[root].m_key == dpth) {
      used++;
      root--;
    }
    while (avbl > used) {
      A[next--].m_key = (mz_uint16)(dpth);
      avbl--;
    }
    avbl = 2 * used;
    dpth++;
    used = 0;
  }
}
// Upper bound on any Huffman code length this implementation can represent;
// tables are clamped down to the deflate limits (15/7) at build time.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };
// Clamps a canonical Huffman code-length histogram so no code exceeds
// max_code_size: over-long codes are folded into the max bucket, then codes
// are rebalanced until the Kraft sum is exactly 1 << max_code_size.
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  int bit_len;
  mz_uint32 kraft_total = 0;
  if (code_list_len <= 1) return;
  // Fold every over-long code length into the max_code_size bucket.
  for (bit_len = max_code_size + 1;
       bit_len <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; bit_len++)
    pNum_codes[max_code_size] += pNum_codes[bit_len];
  // Kraft sum, scaled so a complete tree totals exactly 1 << max_code_size.
  for (bit_len = max_code_size; bit_len > 0; bit_len--)
    kraft_total +=
        (((mz_uint32)pNum_codes[bit_len]) << (max_code_size - bit_len));
  // While over-subscribed: retire one max-length code and push the deepest
  // available shorter code down one level (adding two children), which
  // lowers the scaled sum by exactly one per iteration.
  while (kraft_total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (bit_len = max_code_size - 1; bit_len > 0; bit_len--) {
      if (pNum_codes[bit_len]) {
        pNum_codes[bit_len]--;
        pNum_codes[bit_len + 1] += 2;
        break;
      }
    }
    kraft_total--;
  }
}
// Builds the canonical Huffman codes for compressor table table_num.
// If static_table is set, the code sizes already stored in
// d->m_huff_code_sizes[table_num] are used verbatim; otherwise code sizes are
// derived from the symbol frequencies in d->m_huff_count[table_num] and
// limited to code_size_limit bits. In both cases the bit-reversed canonical
// codes are written into d->m_huff_codes[table_num].
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    // Histogram the fixed code sizes directly.
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  }
  else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    // Collect only the symbols that actually occurred.
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    // Sort by frequency, compute optimal lengths, then clamp to the limit.
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    // Scatter the (possibly clamped) code sizes back to the symbols,
    // assigning longer codes to less frequent symbols.
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  // Generate canonical codes from the size histogram (RFC 1951 3.2.2).
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
    code = next_code[code_size]++;
    // Deflate emits codes MSB-first, so store them bit-reversed.
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}
// Appends the low 'l' bits of 'b' to the compressor's bit buffer, flushing
// full bytes to d->m_pOutput_buf as they accumulate (writes are silently
// dropped when the output buffer is full; callers detect that afterwards).
#define TDEFL_PUT_BITS(b, l)                                \
  do {                                                      \
    mz_uint bits = b;                                       \
    mz_uint len = l;                                        \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                  \
    d->m_bit_buffer |= (bits << d->m_bits_in);              \
    d->m_bits_in += len;                                    \
    while (d->m_bits_in >= 8) {                             \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)          \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer);  \
      d->m_bit_buffer >>= 8;                                \
      d->m_bits_in -= 8;                                    \
    }                                                       \
  }                                                         \
  MZ_MACRO_END
// Flushes a pending run of repeated (nonzero) code sizes into
// packed_code_sizes[]: short runs (< 3) are emitted literally, longer runs
// use code-length symbol 16 ("repeat previous", RFC 1951) plus a 2-bit count.
// Updates d->m_huff_count[2] so the code-length table sees the symbols used.
#define TDEFL_RLE_PREV_CODE_SIZE()                                          \
  {                                                                         \
    if (rle_repeat_count) {                                                 \
      if (rle_repeat_count < 3) {                                           \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(                   \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);         \
        while (rle_repeat_count--)                                          \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size;      \
      } else {                                                              \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1);   \
        packed_code_sizes[num_packed_code_sizes++] = 16;                    \
        packed_code_sizes[num_packed_code_sizes++] =                        \
            (mz_uint8)(rle_repeat_count - 3);                               \
      }                                                                     \
      rle_repeat_count = 0;                                                 \
    }                                                                       \
  }
// Flushes a pending run of zero code sizes into packed_code_sizes[]:
// runs < 3 are emitted as literal zeros, runs of 3..10 use symbol 17
// (3-bit count), longer runs use symbol 18 (7-bit count), per RFC 1951.
// Updates d->m_huff_count[2] so the code-length table sees the symbols used.
#define TDEFL_RLE_ZERO_CODE_SIZE()                                          \
  {                                                                         \
    if (rle_z_count) {                                                      \
      if (rle_z_count < 3) {                                                \
        d->m_huff_count[2][0] =                                             \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);               \
        while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
      } else if (rle_z_count <= 10) {                                       \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1);   \
        packed_code_sizes[num_packed_code_sizes++] = 17;                    \
        packed_code_sizes[num_packed_code_sizes++] =                        \
            (mz_uint8)(rle_z_count - 3);                                    \
      } else {                                                              \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1);   \
        packed_code_sizes[num_packed_code_sizes++] = 18;                    \
        packed_code_sizes[num_packed_code_sizes++] =                        \
            (mz_uint8)(rle_z_count - 11);                                   \
      }                                                                     \
      rle_z_count = 0;                                                      \
    }                                                                       \
  }
// Order in which code-length-code sizes appear in a dynamic block header
// (RFC 1951 section 3.2.7), so trailing zero entries can be omitted.
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = {
  16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
// Emits the header of a dynamic-Huffman deflate block (RFC 1951 3.2.7):
// optimizes the literal/length and distance tables from the accumulated
// frequencies, run-length-encodes their code sizes, builds a third Huffman
// table over those RLE symbols, and writes everything to the output stream.
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;
  // The end-of-block symbol (256) must always be present.
  d->m_huff_count[0][256] = 1;
  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
  // Trim trailing unused codes from both tables (min 257 lit, 1 dist codes).
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;
  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  // Run-length-encode the concatenated code sizes; 138 is the longest zero
  // run symbol 18 can express, 6 the longest repeat for symbol 16.
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      if (++rle_z_count == 138) {
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    }
    else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      }
      else if (++rle_repeat_count == 6) {
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  // Flush whichever run is still pending.
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  }
  else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
  // Block type 10 = dynamic Huffman, then HLIT, HDIST, HCLEN fields.
  TDEFL_PUT_BITS(2, 2);
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
  // Emit the RLE'd code sizes; symbols 16/17/18 carry 2/3/7 extra bits.
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}
// Emits the header of a static (fixed-Huffman) deflate block and loads the
// RFC 1951 section 3.2.6 fixed code sizes into the compressor's tables.
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];
  // Fixed literal/length code sizes.
  memset(p, 8, 144);       // symbols   0..143 -> 8 bits
  memset(p + 144, 9, 112); // symbols 144..255 -> 9 bits
  memset(p + 256, 7, 24);  // symbols 256..279 -> 7 bits
  memset(p + 280, 8, 8);   // symbols 280..287 -> 8 bits
  // All 32 fixed distance codes are 5 bits.
  memset(d->m_huff_code_sizes[1], 5, 32);
  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
  TDEFL_PUT_BITS(1, 2); // block type 01 = static Huffman
}
// mz_bitmasks[n] has the low n bits set; used to mask off extra-bit fields.
static const mz_uint mz_bitmasks[17] = {
  0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
  0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF };
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
MINIZ_HAS_64BIT_REGISTERS
// Fast path (64-bit little-endian, unaligned loads allowed): encodes the
// buffered LZ codes with the current Huffman tables. Bits are accumulated in
// a 64-bit register and written out 8 bytes at a time via an unaligned store.
// Returns MZ_FALSE if the output buffer would overflow.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;
// Unchecked append into the 64-bit bit buffer; the caller flushes 8 bytes per
// loop iteration, so there is always room for one code's worth of bits.
#define TDEFL_PUT_BITS_FAST(b, l)                    \
  {                                                  \
    bit_buffer |= (((mz_uint64)(b)) << bits_in);     \
    bits_in += (l);                                  \
  }
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    // flags carries 8 literal/match indicator bits plus a 0x100 sentinel.
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match: 1 length byte + 2 distance bytes in the LZ code buffer.
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);
      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    }
    else {
      // Literal; opportunistically emit up to two more literals per
      // iteration while the following flag bits are also 0.
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);
      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);
        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }
    if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
    // Flush whole bytes with a single unaligned 64-bit store.
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }
#undef TDEFL_PUT_BITS_FAST
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;
  // Push any remaining partial bits through the checked slow-path macro.
  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }
  // End-of-block symbol.
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
// Portable fallback: encodes the buffered LZ codes with the current Huffman
// tables using only byte-wise reads and the checked TDEFL_PUT_BITS macro.
// Returns MZ_FALSE if the output buffer overflowed.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    // flags carries 8 literal/match indicator bits plus a 0x100 sentinel.
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match: 1 length byte + 2 distance bytes (little-endian) follow.
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);
      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      }
      else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    }
    else {
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }
  // End-of-block symbol.
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
// MINIZ_HAS_64BIT_REGISTERS
// Writes one deflate block: emits either a static or dynamic block header,
// then the buffered LZ codes. Returns MZ_FALSE on output-buffer overflow.
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  void (*start_block)(tdefl_compressor *) =
      static_block ? tdefl_start_static_block : tdefl_start_dynamic_block;
  start_block(d);
  return tdefl_compress_lz_codes(d);
}
// Flushes the accumulated LZ codes as one deflate block. Tries a compressed
// (static or dynamic) block first; if that expands the data or overflows, it
// rewinds and emits a stored (raw) block instead. Also writes the zlib header
// on the first block and the Adler-32 trailer / sync padding on flush.
// Returns the number of output bytes still pending delivery to the caller,
// or a negative status on a user put-callback failure.
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n, use_raw_block =
             ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
             (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
  // Write straight into the caller's buffer when it is big enough; otherwise
  // stage into the internal buffer and copy out at the end.
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;
  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;
  // Finalize the partially-filled flag byte of the LZ code buffer.
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }
  // BFINAL bit.
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
  // Remember the output position so we can rewind if compression expands.
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;
  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                    (d->m_total_lz_bytes < 48));
  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    // Stored block: type 00, align to byte, then LEN and ~LEN (one's
    // complement produced by the XOR in the loop below).
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }
  if (flush) {
    if (flush == TDEFL_FINISH) {
      // Byte-align, then append the big-endian Adler-32 zlib trailer.
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    }
    else {
      // Full flush: emit an empty stored block so the decoder can sync.
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }
  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
  // Reset per-block state for the next block.
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;
  // Deliver the produced bytes: via callback, or copy from the internal
  // staging buffer (tracking any remainder), or just advance the offset.
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    }
    else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    }
    else {
      d->m_out_buf_ofs += n;
    }
  }
  return d->m_output_flush_remaining;
}
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
// Fast-path (unaligned-load) match finder: walks the hash chain in d->m_next
// looking for the longest match at lookahead_pos, comparing 16 bits at a time.
// On entry *pMatch_len holds the best length found so far; on exit
// *pMatch_dist/*pMatch_len are updated if a longer match was found.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  // c01: the two bytes a candidate must match at position match_len-1 for it
  // to possibly beat the current best; s01: the first two source bytes.
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
// Advance one link along the hash chain; bail out of the whole search when
// the chain ends or the candidate is farther away than max_dist.
#define TDEFL_PROBE                                                          \
  next_probe_pos = d->m_next[probe_pos];                                     \
  if ((!next_probe_pos) ||                                                   \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))     \
    return;                                                                  \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                      \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
    p = s;
    probe_len = 32;
    // Compare up to 32*8 = 256 bytes, two bytes per comparison.
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      // Ran the full comparison without a mismatch: maximal match.
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    }
    else if ((probe_len = ((mz_uint)(p - s) * 2) +
                          (mz_uint)(*(const mz_uint8 *)p ==
                                    *(const mz_uint8 *)q)) > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      // New best: refresh the quick-reject word for the longer length.
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
// Portable match finder: same contract as the fast-path version above, but
// compares one byte at a time and never performs unaligned loads.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  // c0/c1: bytes a candidate must match at match_len and match_len-1 to
  // possibly beat the current best.
  mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
// Advance one link along the hash chain; bail out of the whole search when
// the chain ends or the candidate is farther away than max_dist.
#define TDEFL_PROBE                                                       \
  next_probe_pos = d->m_next[probe_pos];                                  \
  if ((!next_probe_pos) ||                                                \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))  \
    return;                                                               \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                   \
  if ((d->m_dict[probe_pos + match_len] == c0) &&                         \
      (d->m_dict[probe_pos + match_len - 1] == c1))                       \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++) break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
      // New best: refresh the quick-reject bytes for the longer length.
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Faster, minimally featured LZRW1-style match+parse loop with better
  // register utilization. Intended for applications where raw throughput is
  // valued more highly than ratio.
  // Cache the hot compressor fields in locals; they are written back to d
  // whenever a block is flushed and again before returning.
  mz_uint lookahead_pos = d->m_lookahead_pos,
          lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
          total_lz_bytes = d->m_total_lz_bytes,
          num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    // Refill the dictionary/lookahead from the source buffer, mirroring the
    // first TDEFL_MAX_MATCH_LEN-1 bytes past the end so matches can read
    // beyond the ring-buffer wrap point without masking.
    mz_uint dst_pos =
        (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
        d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;
    while (num_bytes_to_process) {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
               MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }
    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
      break;
    while (lookahead_size >= 4) {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      // Hash the next 3 bytes (trigram) into the level-1 hash table; the
      // table holds the most recent position for each hash.
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash =
          (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
          TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;
      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
           dict_size) &&
          ((*(const mz_uint32 *)(d->m_dict +
                                 (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
            0xFFFFFF) == first_trigram)) {
        // Candidate confirmed on the first 3 bytes; extend the match 16 bits
        // at a time, up to 32*8 = 256 bytes.
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        mz_uint32 probe_len = 32;
        do {
        } while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (--probe_len > 0));
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
                        (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
            ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
             (cur_match_dist >= 8U * 1024U))) {
          // Too short (or a minimal match too far away to be worth coding):
          // emit a literal instead.
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        }
        else {
          // Record the match as [len-3][dist lo][dist hi] plus a 1 flag bit.
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
                    (cur_match_dist >= 1) &&
                    (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
          cur_match_dist--;
          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
          d->m_huff_count[0][s_tdefl_len_sym[cur_match_len -
                                             TDEFL_MIN_MATCH_LEN]]++;
        }
      }
      else {
        // No hash hit: emit a literal.
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;
      // Flush when the LZ code buffer is nearly full, syncing locals with d.
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
    // Fewer than 4 bytes left in the lookahead: emit the tail as literals.
    while (lookahead_size) {
      mz_uint8 lit = d->m_dict[cur_pos];
      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      d->m_huff_count[0][lit]++;
      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }
  // Write the cached state back to the compressor.
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
// Appends one literal byte to the LZ code buffer: shifts a 0 bit into the
// current flag byte (0 = literal) and bumps the literal's Huffman frequency.
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_total_lz_bytes += 1;
  d->m_pLZ_code_buf[0] = lit;
  d->m_pLZ_code_buf += 1;
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  d->m_num_flags_left--;
  if (d->m_num_flags_left == 0) {
    // Flag byte full: reserve the next code-buffer byte as the new flag byte.
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf;
    d->m_pLZ_code_buf += 1;
  }
  d->m_huff_count[0][lit]++;
}
// Appends a match code (3 bytes: len - MIN_MATCH, dist low byte, dist high
// byte) to the LZ code buffer, shifts a 1 bit into the current flag byte
// (1 = match), and updates the length/distance Huffman frequency counts.
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  match_dist -= 1;  // distances are recorded 0-based in the code buffer
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  // Shift a 1 bit in from the top to mark this code as a match.
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  // Distance symbol comes from the small table (< 512) or the large table.
  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
  if (match_len >= TDEFL_MIN_MATCH_LEN)  // always true per the assert above
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
// Core "normal" (non-fast-path) deflate loop: feeds input bytes into the
// dictionary/hash chains, then runs a simple greedy/lazy (one-byte deferral)
// parser, emitting literals and matches via tdefl_record_*. Returns MZ_FALSE
// only when tdefl_flush_block() reports a hard failure; MZ_TRUE otherwise
// (including the "output full, come back later" case).
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                        TDEFL_LZ_DICT_SIZE_MASK,
              ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      // Seed the rolling hash with the two bytes before the insert position.
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                      << TDEFL_LZ_HASH_SHIFT) ^
                     d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
          src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        // Mirror the first MAX_MATCH_LEN-1 bytes past the end of the dict so
        // match comparisons never need to wrap around.
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    }
    else {
      // Too little buffered data for the incremental rolling hash: insert
      // bytes one at a time, hashing only once 3 bytes are available.
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                          TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                           << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                          (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
                           << TDEFL_LZ_HASH_SHIFT) ^
                          c) &
                         (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size =
        MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    // Without a flush, wait until a full lookahead is buffered.
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len =
        d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      // RLE mode: only consider runs of the previous byte (distance 1).
      // Raw-block mode skips match finding entirely.
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c) break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    }
    else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
                       d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    // Drop matches likely to cost more than they save: minimum-length matches
    // at long distances, degenerate self matches, or short matches when the
    // filter flag is set.
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
         (cur_match_dist >= 8U * 1024U)) ||
        (cur_pos == cur_match_dist) ||
        ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      // Lazy matching: compare this position's match with the deferred one.
      if (cur_match_len > d->m_saved_match_len) {
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          // Long enough to take immediately; no point deferring again.
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        }
        else {
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      }
      else {
        // The deferred match was at least as good; emit it.
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    }
    else if (!cur_match_dist)
      tdefl_record_literal(d,
                           d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    }
    else {
      // Defer this match one byte in case the next position matches better.
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size =
        MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}
// Copies as much pending compressed output as fits into the caller's output
// buffer and updates the in/out size bookkeeping. Returns TDEFL_STATUS_DONE
// once the stream is finished and fully drained, else TDEFL_STATUS_OKAY.
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size)
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  if (d->m_pOut_buf_size) {
    size_t avail = *d->m_pOut_buf_size - d->m_out_buf_ofs;
    size_t to_copy = MZ_MIN(avail, d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, to_copy);
    d->m_output_flush_ofs += (mz_uint)to_copy;
    d->m_output_flush_remaining -= (mz_uint)to_copy;
    d->m_out_buf_ofs += to_copy;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  if (d->m_finished && !d->m_output_flush_remaining) return TDEFL_STATUS_DONE;
  return TDEFL_STATUS_OKAY;
}
// Low-level streaming compression entry point. On entry *pIn_buf_size and
// *pOut_buf_size hold the available input/output byte counts; on return they
// are updated to the number of bytes actually consumed/produced. Callers
// using the put-buffer callback installed via tdefl_init() must pass NULL
// output parameters. Returns TDEFL_STATUS_OKAY, TDEFL_STATUS_DONE, or
// TDEFL_STATUS_BAD_PARAM on misuse.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  // Reject: mixing callback output with buffer output (must use exactly one),
  // calls after a prior error, new data after TDEFL_FINISH, and non-empty
  // buffer sizes paired with NULL buffer pointers.
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  // Drain output still pending from a previous call before compressing more.
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  // Fast path: level-1 greedy parsing with no filter/RLE/raw-block flags.
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
  }
  else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
  }
  // Keep the adler-32 of the raw input up to date (needed for zlib streams).
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  // All input consumed and a flush was requested: emit the flush/final block.
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      // A full flush resets the dictionary so following data is independent.
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}
// Convenience wrapper: compresses a whole in-memory buffer, streaming output
// through the put-buffer callback supplied at tdefl_init() time. Only valid
// when a callback was installed.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  MZ_ASSERT(d->m_pPut_buf_func);
  tdefl_status status =
      tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
  return status;
}
// Initializes (or re-initializes) a compressor; must be called before the
// first tdefl_compress()/tdefl_compress_buffer(). pPut_buf_func may be NULL
// when using the pull-style output-buffer API of tdefl_compress(). `flags`
// is a bitwise OR of TDEFL_* flags, with the max probe count in the low 12
// bits.
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  // Skipping the hash clear lets output depend on prior state (faster reuse).
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  // Byte 0 of the LZ code buffer is reserved as the first flag byte.
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1;  // adler-32 initial value
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}
// Returns the status produced by the most recent tdefl_compress() call.
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  tdefl_status last_status = d->m_prev_return_status;
  return last_status;
}
// Returns the running adler-32 checksum of the source data seen so far.
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) {
  return d->m_adler32;
}
// Compresses pBuf (buf_len bytes) in one shot, streaming the compressed data
// to pPut_buf_func. Returns MZ_FALSE on bad arguments, allocation failure,
// or a compression error.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded = MZ_FALSE;
  if ((buf_len && !pBuf) || !pPut_buf_func) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  if (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
      TDEFL_STATUS_OKAY) {
    succeeded = (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
                 TDEFL_STATUS_DONE);
  }
  MZ_FREE(pComp);
  return succeeded;
}
// Growable (or fixed-size) in-memory output sink used by the mem-to-heap and
// mem-to-mem helpers below via tdefl_output_buffer_putter().
typedef struct {
  size_t m_size, m_capacity;  // bytes written so far / bytes allocated
  mz_uint8 *m_pBuf;           // output storage (heap-owned when expandable)
  mz_bool m_expandable;       // if false, writes past m_capacity fail
} tdefl_output_buffer;
// tdefl put-buffer callback: appends `len` bytes to a tdefl_output_buffer,
// doubling its capacity (starting at 128 bytes) while it is expandable.
// Returns MZ_FALSE when the buffer is fixed-size and full, or on realloc
// failure.
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *pOut = (tdefl_output_buffer *)pUser;
  size_t needed = pOut->m_size + len;
  if (needed > pOut->m_capacity) {
    size_t grown = pOut->m_capacity;
    mz_uint8 *pGrown_buf;
    if (!pOut->m_expandable) return MZ_FALSE;
    do {
      grown = MZ_MAX(128U, grown << 1U);
    } while (needed > grown);
    pGrown_buf = (mz_uint8 *)MZ_REALLOC(pOut->m_pBuf, grown);
    if (!pGrown_buf) return MZ_FALSE;
    pOut->m_pBuf = pGrown_buf;
    pOut->m_capacity = grown;
  }
  memcpy((mz_uint8 *)pOut->m_pBuf + pOut->m_size, pBuf, len);
  pOut->m_size = needed;
  return MZ_TRUE;
}
// Compresses pSrc_buf into a freshly heap-allocated buffer. On success,
// returns the buffer (caller frees it with MZ_FREE()) and stores the
// compressed size in *pOut_len; returns NULL on failure.
// Fixes vs. original: (1) returned MZ_FALSE (an int constant) from a
// pointer-returning function — now returns NULL explicitly; (2) leaked the
// partially-grown output buffer when compression failed mid-stream — now
// freed before returning NULL.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len) return NULL;
  *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) {
    // The putter may already have allocated; don't leak it on failure.
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}
// Compresses pSrc_buf directly into a caller-provided buffer. Returns the
// number of compressed bytes written, or 0 on failure (including output
// overflow — the output buffer is configured as non-expandable).
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf) return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (tdefl_compress_mem_to_output(pSrc_buf, src_buf_len,
                                   tdefl_output_buffer_putter, &out_buf,
                                   flags))
    return out_buf.m_size;
  return 0;
}
#ifndef MINIZ_NO_ZLIB_APIS
// Dictionary probe counts indexed by compression level 0..10.
static const mz_uint s_tdefl_num_probes[11] = { 0, 1, 6, 32, 16, 32,
                                                128, 256, 512, 768, 1500 };
// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput to fall off a cliff
// on some files).
// Translates zlib-style (level, window_bits, strategy) parameters into a
// tdefl `flags` word suitable for tdefl_init(). A non-positive window_bits
// suppresses the zlib header (raw deflate stream).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
  if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (!level)
    comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
  else if (strategy == MZ_FILTERED)
    comp_flags |= TDEFL_FILTER_MATCHES;
  else if (strategy == MZ_HUFFMAN_ONLY)
    comp_flags &= ~TDEFL_MAX_PROBES_MASK;  // probes = 0: no match search
  else if (strategy == MZ_FIXED)
    comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
  else if (strategy == MZ_RLE)
    comp_flags |= TDEFL_RLE_MATCHES;
  return comp_flags;
}
#endif // MINIZ_NO_ZLIB_APIS
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files
// generated by this function pass pngcheck.
// Builds a complete PNG file in memory: writes a 41-byte placeholder header,
// zlib-compresses the raw image rows (each prefixed with a filter-type-0
// byte) as the IDAT payload, then patches in the real PNG/IHDR/IDAT headers
// and CRCs and appends the IEND chunk. Returns a heap buffer (free with
// MZ_FREE()) and stores its total size in *pLen_out, or NULL on failure.
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w,
                                                 int h, int num_chans,
                                                 size_t *pLen_out,
                                                 mz_uint level, mz_bool flip) {
  // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was
  // defined.
  static const mz_uint s_tdefl_png_num_probes[11] = {
    0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 };
  tdefl_compressor *pComp =
      (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  tdefl_output_buffer out_buf;
  int i, bpl = w * num_chans, y, z;  // bpl = bytes per row (without filter byte)
  mz_uint32 c;
  *pLen_out = 0;
  if (!pComp) return NULL;
  MZ_CLEAR_OBJ(out_buf);
  out_buf.m_expandable = MZ_TRUE;
  out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
  if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) {
    MZ_FREE(pComp);
    return NULL;
  }
  // write dummy header
  for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf);
  // compress image data (z is now 0 and is reused as the row filter byte)
  tdefl_init(
      pComp, tdefl_output_buffer_putter, &out_buf,
      s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
  for (y = 0; y < h; ++y) {
    tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
    tdefl_compress_buffer(pComp,
                          (mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl,
                          bpl, TDEFL_NO_FLUSH);
  }
  if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) !=
      TDEFL_STATUS_DONE) {
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // write real header
  *pLen_out = out_buf.m_size - 41;  // IDAT payload (zlib stream) size
  {
    // IHDR color-type byte per channel count (1=gray, 2=gray+alpha, ...).
    static const mz_uint8 chans[] = { 0x00, 0x00, 0x04, 0x02, 0x06 };
    mz_uint8 pnghdr[41] = { 0x89,
                            0x50,
                            0x4e,
                            0x47,
                            0x0d,
                            0x0a,
                            0x1a,
                            0x0a,
                            0x00,
                            0x00,
                            0x00,
                            0x0d,
                            0x49,
                            0x48,
                            0x44,
                            0x52,
                            0,
                            0,
                            (mz_uint8)(w >> 8),
                            (mz_uint8)w,
                            0,
                            0,
                            (mz_uint8)(h >> 8),
                            (mz_uint8)h,
                            8,
                            chans[num_chans],
                            0,
                            0,
                            0,
                            0,
                            0,
                            0,
                            0,
                            (mz_uint8)(*pLen_out >> 24),
                            (mz_uint8)(*pLen_out >> 16),
                            (mz_uint8)(*pLen_out >> 8),
                            (mz_uint8)*pLen_out,
                            0x49,
                            0x44,
                            0x41,
                            0x54 };
    // IHDR CRC covers the 17 bytes from the chunk type onward (offset 12),
    // stored big-endian at offset 29.
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
    for (i = 0; i < 4; ++i, c <<= 8)
      ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
    memcpy(out_buf.m_pBuf, pnghdr, 41);
  }
  // write footer (IDAT CRC-32, followed by IEND chunk)
  if (!tdefl_output_buffer_putter(
          "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) {
    *pLen_out = 0;
    MZ_FREE(pComp);
    MZ_FREE(out_buf.m_pBuf);
    return NULL;
  }
  // IDAT CRC covers the chunk type plus payload (starts 4 bytes before data).
  c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4,
                          *pLen_out + 4);
  for (i = 0; i < 4; ++i, c <<= 8)
    (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
  // compute final size of file, grab compressed data buffer and return
  *pLen_out += 57;
  MZ_FREE(pComp);
  return out_buf.m_pBuf;
}
// Back-compat wrapper: writes a non-flipped PNG at compression level 6.
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h,
                                              int num_chans, size_t *pLen_out) {
  // Level 6 matches TDEFL_DEFAULT_MAX_PROBES / MZ_DEFAULT_LEVEL without
  // depending on the zlib-style APIs being compiled in.
  const mz_uint default_level = 6;
  return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans,
                                                    pLen_out, default_level,
                                                    MZ_FALSE);
}
// ------------------- .ZIP archive reading
// Fix: a stray `#error "No arvhive APIs"` sat directly under this #ifndef,
// aborting compilation exactly when the archive APIs are enabled (the
// default configuration). Removed.
#ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef MINIZ_NO_STDIO
// No stdio: MZ_FILE is an opaque pointer and file I/O paths are disabled.
#define MZ_FILE void *
#else
#include <stdio.h>
#include <sys/stat.h>
// Per-platform 64-bit-capable stdio wrappers (open/seek/tell/stat/etc.).
#if defined(_MSC_VER) || defined(__MINGW64__)
// fopen_s wrapper returning the FILE* directly (NULL on failure).
static FILE *mz_fopen(const char *pFilename, const char *pMode) {
  FILE *pFile = NULL;
  fopen_s(&pFile, pFilename, pMode);
  return pFile;
}
// freopen_s wrapper returning the FILE* directly (NULL on failure).
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) {
  FILE *pFile = NULL;
  if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL;
  return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
// 32-bit MinGW: 64-bit offsets via ftello64/fseeko64.
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
// TinyCC: plain long-based ftell/fseek (no 64-bit offset support).
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE
// glibc with explicit LFS64 interfaces (fopen64/stat64/...).
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#else
// Generic POSIX fallback: ftello/fseeko (64-bit when off_t is 64-bit).
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif // #ifdef _MSC_VER
#endif // #ifdef MINIZ_NO_STDIO
// ASCII-only lowercase; used for case-insensitive filename comparison.
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))
// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
// All offsets below are byte offsets from the start of their record; records
// are parsed field-by-field (MZ_READ_LE16/32) rather than via structs.
enum {
  // ZIP archive identifiers and record sizes
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,  // "PK\x05\x06"
  MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,         // "PK\x01\x02"
  MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,           // "PK\x03\x04"
  MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
  // Central directory header record offsets
  MZ_ZIP_CDH_SIG_OFS = 0,
  MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
  MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
  MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
  MZ_ZIP_CDH_METHOD_OFS = 10,
  MZ_ZIP_CDH_FILE_TIME_OFS = 12,
  MZ_ZIP_CDH_FILE_DATE_OFS = 14,
  MZ_ZIP_CDH_CRC32_OFS = 16,
  MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
  MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
  MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
  MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
  MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
  MZ_ZIP_CDH_DISK_START_OFS = 34,
  MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
  MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
  MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
  // Local directory header offsets
  MZ_ZIP_LDH_SIG_OFS = 0,
  MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
  MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
  MZ_ZIP_LDH_METHOD_OFS = 8,
  MZ_ZIP_LDH_FILE_TIME_OFS = 10,
  MZ_ZIP_LDH_FILE_DATE_OFS = 12,
  MZ_ZIP_LDH_CRC32_OFS = 14,
  MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
  MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
  MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
  MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
  // End of central directory offsets
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};
// Minimal growable element array used for the central directory bytes and
// its index tables; managed via the mz_zip_array_* helpers below.
typedef struct {
  void *m_p;                  // element storage
  size_t m_size, m_capacity;  // current element count / allocated capacity
  mz_uint m_element_size;     // size of one element in bytes
} mz_zip_array;
// Internal per-archive state shared by the ZIP reader and writer.
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;          // raw central directory bytes
  mz_zip_array m_central_dir_offsets;  // per-record byte offset into m_central_dir
  mz_zip_array m_sorted_central_dir_offsets;  // record indices sorted by filename
  MZ_FILE *m_pFile;   // backing file (file-based archives)
  void *m_pMem;       // backing memory (in-memory archives)
  size_t m_mem_size;
  size_t m_mem_capacity;
};
// Sets an array's element size; must be done before any resize/push.
#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
  (array_ptr)->m_element_size = element_size
// Typed element accessor (no bounds checking).
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
  ((element_type *)((array_ptr)->m_p))[index]
// Releases the array's storage through the archive's free callback and
// resets the array to a zeroed (empty) state.
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
                                              mz_zip_array *pArray) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  memset(pArray, 0, sizeof(*pArray));
}
// Grows pArray's backing store to hold at least min_new_capacity elements.
// With `growing` set the capacity doubles geometrically (amortized O(1)
// appends); otherwise it is sized exactly. Returns MZ_FALSE on realloc
// failure, leaving the array untouched.
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
                                            mz_zip_array *pArray,
                                            size_t min_new_capacity,
                                            mz_uint growing) {
  void *pGrown;
  size_t new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
  new_capacity = min_new_capacity;
  if (growing) {
    new_capacity = MZ_MAX(1, pArray->m_capacity);
    while (new_capacity < min_new_capacity) new_capacity *= 2;
  }
  pGrown = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
                            pArray->m_element_size, new_capacity);
  if (pGrown == NULL) return MZ_FALSE;
  pArray->m_p = pGrown;
  pArray->m_capacity = new_capacity;
  return MZ_TRUE;
}
// Ensures capacity for new_capacity elements without changing m_size.
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
                                                   mz_zip_array *pArray,
                                                   size_t new_capacity,
                                                   mz_uint growing) {
  if (new_capacity <= pArray->m_capacity) return MZ_TRUE;
  return mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing);
}
// Sets the array's logical size, growing the backing store if needed.
// Newly exposed elements are NOT initialized.
static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip,
                                                  mz_zip_array *pArray,
                                                  size_t new_size,
                                                  mz_uint growing) {
  if ((new_size > pArray->m_capacity) &&
      (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)))
    return MZ_FALSE;
  pArray->m_size = new_size;
  return MZ_TRUE;
}
// Reserves room for n additional elements past the current size.
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
                                                       mz_zip_array *pArray,
                                                       size_t n) {
  size_t needed = pArray->m_size + n;
  return mz_zip_array_reserve(pZip, pArray, needed, MZ_TRUE);
}
// Appends n elements (copied from pElements) to the end of the array,
// growing it geometrically as needed.
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
                                                     mz_zip_array *pArray,
                                                     const void *pElements,
                                                     size_t n) {
  size_t old_size = pArray->m_size;
  if (!mz_zip_array_resize(pZip, pArray, old_size + n, MZ_TRUE))
    return MZ_FALSE;
  memcpy((mz_uint8 *)pArray->m_p + old_size * pArray->m_element_size,
         pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
// Converts an MS-DOS packed time/date pair (as stored in ZIP headers) to a
// time_t, interpreted as local time (tm_isdst = -1 lets mktime resolve DST).
// DOS timestamps have 2-second granularity.
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm tm = { 0 };
  tm.tm_isdst = -1;
  tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;  // years since 1900
  tm.tm_mon = ((dos_date >> 5) & 15) - 1;              // tm months are 0-based
  tm.tm_mday = dos_date & 31;
  tm.tm_hour = (dos_time >> 11) & 31;
  tm.tm_min = (dos_time >> 5) & 63;
  tm.tm_sec = (dos_time << 1) & 62;  // stored as seconds / 2
  return mktime(&tm);
}
// Converts a time_t to MS-DOS packed time/date fields (local time). On MSVC
// a failing localtime_s yields 0/0 outputs. NOTE(review): the POSIX branch
// assumes localtime() returns non-NULL; it can return NULL for out-of-range
// times — confirm inputs are sane before relying on this.
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
                                    mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  struct tm tm_struct;
  struct tm *tm = &tm_struct;
  errno_t err = localtime_s(tm, &time);
  if (err) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  struct tm *tm = localtime(&time);
#endif
  // DOS time: hours in bits 11-15, minutes 5-10, seconds/2 in 0-4.
  *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
                           ((tm->tm_sec) >> 1));
  // DOS date: years-since-1980 in bits 9-15, month (1-based) 5-8, day 0-4.
  *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
                           ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif
#ifndef MINIZ_NO_STDIO
// Stats pFilename and converts its mtime to DOS time/date. With
// MINIZ_NO_TIME defined both outputs are simply 0. Returns MZ_FALSE if the
// stat call fails.
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
                                             mz_uint16 *pDOS_time,
                                             mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
  // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}
#ifndef MINIZ_NO_TIME
// Sets a file's access/modification times via utime(); MZ_TRUE on success.
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
                                     time_t modified_time) {
  struct utimbuf t;
  t.actime = access_time;
  t.modtime = modified_time;
  return !utime(pFilename, &t);
}
#endif // #ifndef MINIZ_NO_TIME
#endif // #ifndef MINIZ_NO_STDIO
// Allocates and zeroes the reader's internal state, installs the default
// heap allocator callbacks for any the caller left NULL, and switches the
// archive into reading mode. Fails if pZip is NULL, already has state, or
// is not in the invalid (uninitialized) mode.
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
                                           mz_uint32 flags) {
  (void)flags;
  if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  // Central dir is kept as raw bytes; the two offset arrays index into it.
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}
// Case-insensitive "less-than" comparison of the filenames of two central
// directory records, identified by indices into the offsets array. Used as
// the ordering predicate for the heapsort below; ties on a common prefix
// are broken by filename length.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
                            const mz_zip_array *pCentral_dir_offsets,
                            mz_uint l_index, mz_uint r_index) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
          r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  // The filename immediately follows the fixed-size central dir header.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
    pL++;
    pR++;
  }
  return (pL == pE) ? (l_len < r_len) : (l < r);
}
// Swaps two mz_uint32 lvalues (note: arguments are evaluated multiple times).
#define MZ_SWAP_UINT32(a, b) \
  do { \
    mz_uint32 t = a; \
    a = b; \
    b = t; \
  } \
  MZ_MACRO_END
// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
// In-place heapsort of the sorted-offsets index array, ordered by lowercased
// filename via mz_zip_reader_filename_less(). Chosen over qsort() because it
// never allocates.
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
    mz_zip_archive *pZip) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  int start = (size - 2) >> 1, end;
  // Phase 1 (heapify): sift down every internal node, last parent first.
  while (start >= 0) {
    int child, root = start;
    for (;;) {
      if ((child = (root << 1) + 1) >= size) break;
      // Advance to the larger of the two children.
      child +=
          (((child + 1) < size) &&
           (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                        pIndices[child], pIndices[child + 1])));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    start--;
  }
  // Phase 2 (extract): move the current max to the end, then re-sift root.
  end = size - 1;
  while (end > 0) {
    int child, root = 0;
    MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
    for (;;) {
      if ((child = (root << 1) + 1) >= end) break;
      child +=
          (((child + 1) < end) &&
           mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[child], pIndices[child + 1]));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    end--;
  }
}
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
mz_uint32 flags) {
mz_uint cdir_size, num_this_disk, cdir_disk_index;
mz_uint64 cdir_ofs;
mz_int64 cur_file_ofs;
const mz_uint8 *p;
mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
mz_bool sort_central_dir =
((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
// Basic sanity checks - reject files which are too small, and check the first
// 4 bytes of the file to make sure a local header is there.
if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
// Find the end of central directory record by scanning the file from the end
// towards the beginning.
cur_file_ofs =
MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
for (;;) {
int i,
n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
return MZ_FALSE;
for (i = n - 4; i >= 0; --i)
if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
if (i >= 0) {
cur_file_ofs += i;
break;
}
if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
(0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
return MZ_FALSE;
cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
}
// Read and verify the end of central directory record.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
((pZip->m_total_files =
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
return MZ_FALSE;
num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
if (((num_this_disk | cdir_disk_index) != 0) &&
((num_this_disk != 1) || (cdir_disk_index != 1)))
return MZ_FALSE;
if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
return MZ_FALSE;
cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE;
pZip->m_central_directory_file_ofs = cdir_ofs;
if (pZip->m_total_files) {
mz_uint i, n;
// Read the entire central directory into a heap block, and allocate another
// heap block to hold the unsorted central dir file record offsets, and
// another to hold the sorted indices.
if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
MZ_FALSE)) ||
(!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
pZip->m_total_files, MZ_FALSE)))
return MZ_FALSE;
if (sort_central_dir) {
if (!mz_zip_array_resize(pZip,
&pZip->m_pState->m_sorted_central_dir_offsets,
pZip->m_total_files, MZ_FALSE))
return MZ_FALSE;
}
if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
pZip->m_pState->m_central_dir.m_p,
cdir_size) != cdir_size)
return MZ_FALSE;
// Now create an index into the central directory file records, do some
// basic sanity checking on each record, and check for zip64 entries (which
// are not yet supported).
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
mz_uint total_header_size, comp_size, decomp_size, disk_index;
if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
(MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
return MZ_FALSE;
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
i) =
(mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
if (sort_central_dir)
MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
mz_uint32, i) = i;
comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
(decomp_size != comp_size)) ||
(decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
(comp_size == 0xFFFFFFFF))
return MZ_FALSE;
disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
return MZ_FALSE;
if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
n)
return MZ_FALSE;
n -= total_header_size;
p += total_header_size;
}
}
if (sort_central_dir)
mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
return MZ_TRUE;
}
// Initializes a zip reader whose archive data is supplied through the
// caller-provided pZip->m_pRead callback. 'size' is the total archive size in
// bytes. Returns MZ_FALSE on bad parameters or an unparsable central
// directory (in which case the reader is torn down).
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  if ((pZip == NULL) || (pZip->m_pRead == NULL)) return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
// Read callback for archives that live in memory: copies up to 'n' bytes
// starting at 'file_ofs' out of the archive's memory block into pBuf.
// Returns the number of bytes copied (0 if the offset is at/past the end).
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  size_t to_copy = 0;
  if (file_ofs < pZip->m_archive_size)
    to_copy = (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
  memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, to_copy);
  return to_copy;
}
// Initializes a zip reader over an archive held entirely in memory at pMem
// ('size' bytes). The memory is NOT copied; the caller must keep it alive
// for the lifetime of the reader. Returns MZ_FALSE on failure.
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags) {
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  // Wire the archive to read straight out of the caller's memory block.
  pZip->m_archive_size = size;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  pZip->m_pState->m_mem_size = size;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
#ifndef MINIZ_NO_STDIO
// Read callback for stdio-backed archives: seeks (only when necessary) to
// 'file_ofs' and reads up to 'n' bytes into pBuf. Returns the number of
// bytes read, or 0 on a bad offset or seek failure.
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if ((mz_int64)file_ofs < 0) return 0;
  // Skip the seek when the stream is already positioned at file_ofs.
  if ((cur_ofs != (mz_int64)file_ofs) &&
      (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))
    return 0;
  return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}
// Initializes a zip reader over the file at pFilename (opened "rb"). The
// archive size is discovered by seeking to the end. On any failure the file
// handle is closed and MZ_FALSE is returned; on a central-directory parse
// failure mz_zip_reader_end() performs the cleanup (it owns the FILE*).
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags) {
  mz_uint64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (pFile == NULL) return MZ_FALSE;
  if (MZ_FSEEK64(pFile, 0, SEEK_END) != 0) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  file_size = MZ_FTELL64(pFile);
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_pState->m_pFile = pFile;
  pZip->m_archive_size = file_size;
  if (mz_zip_reader_read_central_dir(pZip, flags)) return MZ_TRUE;
  mz_zip_reader_end(pZip);
  return MZ_FALSE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Returns the total number of files in the archive, or 0 if pZip is NULL.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  if (pZip == NULL) return 0;
  return pZip->m_total_files;
}
// Returns a pointer to the central directory header record for file_index,
// or NULL if the archive/state/index is invalid or the archive is not in
// reading mode.
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
    mz_zip_archive *pZip, mz_uint file_index) {
  if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return NULL;
  // m_central_dir_offsets maps a file index to the byte offset of that
  // file's record within the in-memory central directory blob.
  return &MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           file_index));
}
// Returns MZ_TRUE if the file's general-purpose bit flag has the encryption
// bit (bit 0) set; MZ_FALSE otherwise or on a bad index.
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  const mz_uint8 *pCdh = mz_zip_reader_get_cdh(pZip, file_index);
  if (pCdh == NULL) return MZ_FALSE;
  return (MZ_READ_LE16(pCdh + MZ_ZIP_CDH_BIT_FLAG_OFS) & 1);
}
// Returns MZ_TRUE if the entry looks like a directory: either its stored
// filename ends in '/' or the DOS directory attribute bit (0x10) is set in
// the low 16 bits of the external attributes. (The internal attribute is
// deliberately NOT checked - see the original miniz bugfix note.)
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  mz_uint name_len, attrs;
  const mz_uint8 *pCdh = mz_zip_reader_get_cdh(pZip, file_index);
  if (pCdh == NULL) return MZ_FALSE;
  // A trailing '/' in the stored filename marks a directory entry.
  name_len = MZ_READ_LE16(pCdh + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if ((name_len) &&
      (pCdh[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + name_len - 1] == '/'))
    return MZ_TRUE;
  // Also honor the DOS directory flag in the low 16 bits of the external
  // attributes, ignoring the creating OS id in the version-made-by field.
  // FIXME: Remove this check? Is it necessary - we already check the filename.
  attrs = MZ_READ_LE32(pCdh + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  return (attrs & 0x10) ? MZ_TRUE : MZ_FALSE;
}
// Fills *pStat with the metadata of file_index, decoded from its central
// directory record (version fields, method, CRC, sizes, attributes, local
// header offset, filename and comment). Returns MZ_FALSE on a bad
// index/state or NULL pStat.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!p) || (!pStat)) return MZ_FALSE;
  // Unpack the central directory record.
  pStat->m_file_index = file_index;
  pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
  pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
  pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
  pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
  // Convert the record's packed DOS time/date fields to a time_t.
  pStat->m_time =
      mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
                           MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
  pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
  pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
  pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  // Copy as much of the filename and comment as possible (both truncated to
  // their respective MZ_ZIP_MAX_* limits and always NUL-terminated).
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
  memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
  pStat->m_filename[n] = '\0';
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
  pStat->m_comment_size = n;
  // The comment bytes follow the fixed header, the filename, and the extra
  // field within the central directory record.
  memcpy(pStat->m_comment,
         p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
             MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
             MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
         n);
  pStat->m_comment[n] = '\0';
  return MZ_TRUE;
}
// Copies the entry's filename into pFilename, truncating it to fit and
// always NUL-terminating when filename_buf_size > 0. Returns the copied (or,
// with no buffer, the full) name length plus one for the terminator; 0 on a
// bad index (pFilename is set to "" if a buffer was supplied).
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size) {
  mz_uint name_len;
  const mz_uint8 *pCdh = mz_zip_reader_get_cdh(pZip, file_index);
  if (pCdh == NULL) {
    if (filename_buf_size) pFilename[0] = '\0';
    return 0;
  }
  name_len = MZ_READ_LE16(pCdh + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    if (name_len > filename_buf_size - 1) name_len = filename_buf_size - 1;
    memcpy(pFilename, pCdh + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, name_len);
    pFilename[name_len] = '\0';
  }
  return name_len + 1;
}
// Compares exactly 'len' bytes of pA and pB; case-insensitively unless
// MZ_ZIP_FLAG_CASE_SENSITIVE is set in flags. Returns MZ_TRUE on a match.
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
                                                         const char *pB,
                                                         mz_uint len,
                                                         mz_uint flags) {
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return memcmp(pA, pB, len) == 0;
  while (len--) {
    if (MZ_TOLOWER(*pA++) != MZ_TOLOWER(*pB++)) return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Case-insensitively compares the stored filename of central-directory
// record l_index against pR (length r_len). Returns <0/0/>0 like strcmp.
// Used as the ordering for the sorted filename index.
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
    const mz_zip_array *pCentral_dir_array,
    const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
    mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  // The filename bytes immediately follow the fixed-size header record.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
    pL++;
    pR++;
  }
  // Equal prefix: the shorter name sorts first. NOTE(review): l_len - r_len
  // is unsigned arithmetic cast to int; both lengths are <= 0xFFFF (16-bit
  // fields), so the wrapped value converts to the intended signed result.
  return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}
// Fast path of mz_zip_reader_locate_file(): binary search over the
// filename-sorted index built at init time (case-insensitive, full path
// match only). Returns the file index or -1 if not found.
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int l = 0, h = size - 1;
  // Classic inclusive-bounds binary search over the sorted index array.
  while (l <= h) {
    int m = (l + h) >> 1, file_index = pIndices[m],
        comp =
            mz_zip_reader_filename_compare(pCentral_dir, pCentral_dir_offsets,
                                           file_index, pFilename, filename_len);
    if (!comp)
      return file_index;
    else if (comp < 0)
      l = m + 1;
    else
      h = m - 1;
  }
  return -1;
}
// Locates a file by name (and optionally by comment) in the archive.
// flags may contain MZ_ZIP_FLAG_IGNORE_PATH (compare only the final path
// component) and/or MZ_ZIP_FLAG_CASE_SENSITIVE. Returns the file index, or
// -1 if not found / bad parameters.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // Fast path: the sorted index can be used only for the default search
  // (full path, case-insensitive, no comment filter).
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  // Zip name/comment fields are 16-bit; longer queries can never match.
  name_len = strlen(pName);
  if (name_len > 0xFFFF) return -1;
  comment_len = pComment ? strlen(pComment) : 0;
  if (comment_len > 0xFFFF) return -1;
  // Slow path: linear scan of every central directory record.
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                             file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len) continue;
    if (comment_len) {
      // The comment follows the filename and extra field in the record.
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      // Strip everything up to and including the last path separator
      // ('/', '\\', or a DOS drive ':').
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}
// Extracts file_index into the caller-supplied pBuf (buf_size bytes) without
// allocating an output buffer. If pUser_read_buf is given it is used as the
// I/O staging buffer; archives already in memory are inflated directly with
// no staging; otherwise a temporary read buffer is allocated and freed here.
// With MZ_ZIP_FLAG_COMPRESSED_DATA the raw compressed bytes are returned
// instead. On success the decompressed data's CRC32 is verified against the
// central directory record. Only stored and deflate entries are supported.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining,
      out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  // Stack buffer for the local dir header, aligned via the mz_uint32 array.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;
  if ((buf_size) && (!pBuf)) return MZ_FALSE;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size) return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
                                                      : file_stat.m_uncomp_size;
  if (buf_size < needed_size) return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // The compressed data starts after the local header plus its variable
  // length filename and extra field (which may differ from the central dir's).
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                      (size_t)needed_size) != needed_size)
      return MZ_FALSE;
    return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
           (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                     (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
  }
  // Decompress the file either directly from memory or from a file input
  // buffer.
  tinfl_init(&inflator);
  if (pZip->m_pState->m_pMem) {
    // Read directly from the archive in memory.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  }
  else if (pUser_read_buf) {
    // Use a user provided read buffer.
    if (!user_read_buf_size) return MZ_FALSE;
    pRead_buf = (mz_uint8 *)pUser_read_buf;
    read_buf_size = user_read_buf_size;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  else {
    // Temporarily allocate a read buffer.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
    if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
        (read_buf_size > 0x7FFFFFFF))
#else
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
      return MZ_FALSE;
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  // Inflate loop: refill the staging buffer (file-backed archives only),
  // then feed tinfl, writing straight into the caller's output buffer.
  do {
    size_t in_buf_size,
        out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
    if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
      read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
      if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                        (size_t)read_buf_avail) != read_buf_avail) {
        status = TINFL_STATUS_FAILED;
        break;
      }
      cur_file_ofs += read_buf_avail;
      comp_remaining -= read_buf_avail;
      read_buf_ofs = 0;
    }
    in_buf_size = (size_t)read_buf_avail;
    status = tinfl_decompress(
        &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
        (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
            (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
    read_buf_avail -= in_buf_size;
    read_buf_ofs += in_buf_size;
    out_buf_ofs += out_buf_size;
  } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
  if (status == TINFL_STATUS_DONE) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                  (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // Only free the staging buffer if we allocated it ourselves.
  if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  return status == TINFL_STATUS_DONE;
}
// Name-based wrapper: resolves pFilename to an index, then defers to the
// index-based no-alloc extractor.
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  int idx = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  return (idx < 0) ? MZ_FALSE
                   : mz_zip_reader_extract_to_mem_no_alloc(
                         pZip, idx, pBuf, buf_size, flags, pUser_read_buf,
                         user_read_buf_size);
}
// Convenience wrapper: extract by index without a user-supplied read buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags, NULL, 0);
}
// Convenience wrapper: extract by name without a user-supplied read buffer.
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
                                                    buf_size, flags, NULL, 0);
}
// Extracts file_index into a freshly allocated heap buffer and returns it
// (caller frees via pZip->m_pFree). On success *pSize receives the buffer
// size; on failure returns NULL with *pSize = 0. With
// MZ_ZIP_FLAG_COMPRESSED_DATA the raw compressed bytes are returned.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;
  if (pSize) *pSize = 0;
  if (!p) return NULL;
  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
  // Refuse sizes that can't be represented in a 32-bit size_t. (The MSVC
  // variant's leading "0," comma operator just silences a constant-
  // conditional warning.)
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
    return NULL;
  if (NULL ==
      (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
    return NULL;
  if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
                                    flags)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    return NULL;
  }
  if (pSize) *pSize = (size_t)alloc_size;
  return pBuf;
}
// Locates pFilename in the archive and extracts it to a heap buffer (caller
// frees via pZip->m_pFree). On success *pSize receives the buffer size; on
// failure returns NULL with *pSize = 0.
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize) *pSize = 0;
    // Bugfix: this function returns a pointer, so fail with NULL rather than
    // the integer constant MZ_FALSE (which only worked by accident as 0).
    return NULL;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
// Streams file_index's data to pCallback(pOpaque, ofs, buf, n) in chunks,
// so arbitrarily large entries can be extracted with bounded memory: only a
// read staging buffer (file-backed archives) and a TINFL_LZ_DICT_SIZE
// sliding-window output buffer are used. With MZ_ZIP_FLAG_COMPRESSED_DATA
// the raw compressed bytes are forwarded instead. The decompressed stream's
// CRC32 is verified incrementally. Returns MZ_TRUE on success.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags) {
  int status = TINFL_STATUS_DONE;
  mz_uint file_crc32 = MZ_CRC32_INIT;
  mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
            out_buf_ofs = 0, cur_file_ofs;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf = NULL;
  void *pWrite_buf = NULL;
  // Stack buffer for the local dir header, aligned via the mz_uint32 array.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size) return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // The compressed data starts after the local header plus its variable
  // length filename and extra field.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  // Decompress the file either directly from memory or from a file input
  // buffer.
  if (pZip->m_pState->m_pMem) {
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  }
  else {
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pState->m_pMem) {
      // In-memory archive: hand the whole span to the callback in one shot.
#ifdef _MSC_VER
      if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#else
      if (((sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#endif
        return MZ_FALSE;
      if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                    (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
        status = TINFL_STATUS_FAILED;
      else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
        file_crc32 =
            (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
                                (size_t)file_stat.m_comp_size);
      cur_file_ofs += file_stat.m_comp_size;
      out_buf_ofs += file_stat.m_comp_size;
      comp_remaining = 0;
    }
    else {
      // File-backed archive: stream through the staging buffer.
      while (comp_remaining) {
        read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                          (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
          file_crc32 = (mz_uint32)mz_crc32(
              file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
        if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                      (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        cur_file_ofs += read_buf_avail;
        out_buf_ofs += read_buf_avail;
        comp_remaining -= read_buf_avail;
      }
    }
  }
  else {
    tinfl_decompressor inflator;
    tinfl_init(&inflator);
    if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                             TINFL_LZ_DICT_SIZE)))
      status = TINFL_STATUS_FAILED;
    else {
      // Inflate into a circular TINFL_LZ_DICT_SIZE window, flushing each
      // produced span to the callback before wrapping around.
      do {
        mz_uint8 *pWrite_buf_cur =
            (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        size_t in_buf_size,
            out_buf_size =
                TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
          read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
          if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                            (size_t)read_buf_avail) != read_buf_avail) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          cur_file_ofs += read_buf_avail;
          comp_remaining -= read_buf_avail;
          read_buf_ofs = 0;
        }
        in_buf_size = (size_t)read_buf_avail;
        status = tinfl_decompress(
            &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
            (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
            comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;
        if (out_buf_size) {
          if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
              out_buf_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          file_crc32 =
              (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
          // Guard against output exceeding the size claimed in the headers.
          if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
        }
      } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
               (status == TINFL_STATUS_HAS_MORE_OUTPUT));
    }
  }
  if ((status == TINFL_STATUS_DONE) &&
      (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (file_crc32 != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // The staging buffer is only owned by us for file-backed archives.
  if (!pZip->m_pState->m_pMem) pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  if (pWrite_buf) pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
  return status == TINFL_STATUS_DONE;
}
// Name-based wrapper around mz_zip_reader_extract_to_callback().
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  int idx = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  return (idx < 0) ? MZ_FALSE
                   : mz_zip_reader_extract_to_callback(pZip, idx, pCallback,
                                                       pOpaque, flags);
}
#ifndef MINIZ_NO_STDIO
// mz_file_write_func adapter that appends to a stdio stream. The offset is
// ignored; the stream's own position is used.
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  MZ_FILE *pFile = (MZ_FILE *)pOpaque;
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, pFile);
}
// Extracts file_index to a new file at pDst_filename (opened "wb"). On
// success the destination's timestamps are set from the archive entry
// (unless MINIZ_NO_TIME). Returns MZ_FALSE on any extraction or close error.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_bool status;
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  // Stat first so the entry's timestamps are available after extraction.
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pDst_filename, "wb"))) return MZ_FALSE;
  status = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  // A failed close means buffered data may not have reached disk.
  if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  if (status)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return status;
}
#endif // #ifndef MINIZ_NO_STDIO
// Tears down a reader: frees the central directory arrays, closes any stdio
// file handle, releases the internal state, and marks the archive invalid.
// Returns MZ_FALSE if the archive isn't a valid, open reader.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // Detach the state first so the archive is never left half-freed.
  pState = pZip->m_pState;
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif  // #ifndef MINIZ_NO_STDIO
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Name-based wrapper around mz_zip_reader_extract_to_file().
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  int idx = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  return (idx < 0) ? MZ_FALSE
                   : mz_zip_reader_extract_to_file(pZip, idx, pDst_filename,
                                                   flags);
}
#endif
// ------------------- .ZIP archive writing
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
// Stores v at p in little-endian byte order (2 bytes).
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)(v & 0xFF);
  p[1] = (mz_uint8)((v >> 8) & 0xFF);
}
// Stores v at p in little-endian byte order (4 bytes).
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  int i;
  for (i = 0; i < 4; ++i) p[i] = (mz_uint8)(v >> (i * 8));
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
// Initializes a zip writer whose output goes through the caller-provided
// pZip->m_pWrite callback. 'existing_size' is the number of bytes already
// present at the start of the output (new data is appended after it).
// Default allocators are installed for any callbacks left NULL. Returns
// MZ_FALSE on bad parameters or allocation failure.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (pZip->m_file_offset_alignment) {
    // Ensure user specified file offset alignment is a power of 2.
    if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
      return MZ_FALSE;
  }
  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  // Configure the growable arrays that will accumulate the central
  // directory records, their offsets, and the (unused-while-writing)
  // sorted index.
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}
// Write callback for heap-backed archives: grows the memory block (doubling
// the capacity, starting at 64 bytes) as needed and copies 'n' bytes to
// 'file_ofs'. Returns n on success, 0 on failure (n == 0, the new size does
// not fit a 32-bit size_t, or realloc failed).
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
  // (The MSVC variant's leading "0," comma operator just silences a
  // constant-conditional warning.)
#ifdef _MSC_VER
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size) new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}
// Initializes a writer that builds the archive in a growable heap block.
// 'size_to_reserve_at_beginning' bytes are left untouched at the front of
// the archive; 'initial_allocation_size' pre-sizes the heap block (it is
// raised to at least the reserved size). Returns MZ_FALSE on failure.
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
  if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
                                             size_to_reserve_at_beginning))) {
    if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_allocation_size;
  }
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Write callback for stdio-backed archives: seeks (only when necessary) to
// 'file_ofs' and writes 'n' bytes from pBuf. Returns the number of bytes
// written, or 0 on a bad offset or seek failure.
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if ((mz_int64)file_ofs < 0) return 0;
  // Skip the seek when the stream is already positioned at file_ofs.
  if ((cur_ofs != (mz_int64)file_ofs) &&
      (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}
// Initializes a writer that produces the archive at pFilename (opened "wb").
// If 'size_to_reserve_at_beginning' is nonzero, that many zero bytes are
// written to the front of the file so the caller can later overwrite them
// (e.g. with a custom header). Returns MZ_FALSE on failure (the writer is
// torn down via mz_zip_writer_end, which closes the file).
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning)) return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    // Reserve the requested space by writing zeros in 4 KB chunks.
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Converts an archive opened for reading into one opened for appending.
// New files are written starting at the current central directory location
// (the old central directory is discarded and rewritten on finalize).
// pFilename is required only for stdio-backed archives, which are reopened
// in "r+b" mode. Returns MZ_FALSE if the archive is full, not readable, or
// cannot be made writable.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the support max
  // size
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  pState = pZip->m_pState;
  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    // Bugfix: cast to void to mark pFilename intentionally unused; the
    // previous bare 'pFilename;' statement had no effect and triggered
    // warnings on most compilers.
    (void)pFilename;
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
    if (!pFilename) return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL ==
        (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif  // #ifdef MINIZ_NO_STDIO
  }
  else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip) return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;
  // Start writing new files at the archive's current central directory
  // location.
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;
  return MZ_TRUE;
}
// Convenience wrapper around mz_zip_writer_add_mem_ex: adds a memory buffer
// to the archive with no file comment and no precomputed size/CRC32
// (the data is hashed and, if requested, compressed internally).
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
                                  level_and_flags, 0, 0);
}
// Bookkeeping shared with the tdefl output callback while a single file is
// being added to the archive.
typedef struct {
  mz_zip_archive *m_pZip;            // archive being written
  mz_uint64 m_cur_archive_file_ofs;  // next write offset within the archive
  mz_uint64 m_comp_size;             // compressed bytes emitted so far
} mz_zip_writer_add_state;
// tdefl output callback: appends len compressed bytes to the archive and
// advances the running offset/size counters. Returns MZ_FALSE if the
// underlying write comes up short (aborts compression).
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
                                                  void *pUser) {
  mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
  mz_zip_archive *pArchive = pState->m_pZip;
  int written = (int)pArchive->m_pWrite(pArchive->m_pIO_opaque,
                                        pState->m_cur_archive_file_ofs, pBuf,
                                        len);
  if (written != len) return MZ_FALSE;
  pState->m_cur_archive_file_ofs += len;
  pState->m_comp_size += len;
  return MZ_TRUE;
}
// Serializes a ZIP local directory header (little-endian) into pDst, which
// must hold MZ_ZIP_LOCAL_DIR_HEADER_SIZE bytes. The filename/extra data
// themselves are written by the caller, immediately after this header.
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  // Version 2.0 is required for deflate; stored entries only need 1.0 (0).
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}
// Serializes a ZIP central directory header (little-endian) into pDst, which
// must hold MZ_ZIP_CENTRAL_DIR_HEADER_SIZE bytes. Filename/extra/comment data
// are appended by the caller after this fixed-size record.
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  // Version 2.0 is required for deflate; stored entries only need 1.0 (0).
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}
// Appends one central directory record (fixed header + filename + extra data
// + comment) to the in-memory central directory, and records the record's
// starting offset in m_central_dir_offsets so entries can be found by index.
// On any push failure the central dir is rolled back to its original size.
//
// Fix: the offsets push previously read "¢ral_dir_ofs" — a text-encoding
// corruption of "&central_dir_ofs" (the "&cent" prefix was mangled into the
// HTML cent-sign entity), which does not compile. Restored the address-of
// expression, matching upstream miniz.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  // No zip64 support yet
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;
  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Basic ZIP archive filename validity checks: valid names may not start with
// a forward slash (absolute path), may not contain ':' (drive letter), and
// may not use DOS-style backslashes.
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  const char *p = pArchive_name;
  if (p[0] == '/') return MZ_FALSE;
  for (; *p; ++p) {
    if ((*p == '\\') || (*p == ':')) return MZ_FALSE;
  }
  return MZ_TRUE;
}
// Returns how many zero bytes must be inserted so the next local file header
// starts on an m_file_offset_alignment boundary. The alignment is assumed to
// be a power of two (0 disables alignment entirely).
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
    mz_zip_archive *pZip) {
  mz_uint32 mask, ofs_within_block;
  if (!pZip->m_file_offset_alignment) return 0;
  mask = (mz_uint32)(pZip->m_file_offset_alignment - 1);
  ofs_within_block = (mz_uint32)(pZip->m_archive_size & mask);
  return (pZip->m_file_offset_alignment - ofs_within_block) & mask;
}
// Writes n zero bytes to the archive starting at cur_file_ofs, using a small
// stack buffer so arbitrarily large runs don't need a heap allocation.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char zeros[4096];
  // Only the portion of the buffer we will actually use needs clearing.
  memset(zeros, 0, MZ_MIN(sizeof(zeros), n));
  while (n) {
    mz_uint32 chunk = MZ_MIN(sizeof(zeros), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, zeros, chunk) != chunk)
      return MZ_FALSE;
    cur_file_ofs += chunk;
    n -= chunk;
  }
  return MZ_TRUE;
}
// Adds a memory buffer to the archive as a single file entry. If
// MZ_ZIP_FLAG_COMPRESSED_DATA is set, pBuf already holds deflate data and
// uncomp_size/uncomp_crc32 describe the original; otherwise the CRC/size are
// computed here and the data is deflated unless level is 0. Writes the
// (initially zeroed) local header, the filename, then the data, and finally
// rewrites the real local header and appends a central directory record.
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;
  // Negative levels mean "use the default compression level".
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  // Pre-compressed input is stored verbatim, as is anything at level 0.
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // uncomp_size may only be supplied together with pre-compressed data.
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  // Stamp the entry with the current wall-clock time in DOS format.
  {
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif  // #ifndef MINIZ_NO_TIME
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF) return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size)) return MZ_FALSE;
  }
  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
  if ((!mz_zip_array_ensure_room(
          pZip, &pState->m_central_dir,
          MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return MZ_FALSE;
  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return MZ_FALSE;
  }
  // Reserve space for alignment padding plus the local header; the real
  // header is written in place at the end, once sizes/CRC are known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    // Raw input: compute the CRC ourselves; tiny payloads aren't worth
    // compressing, so force stored mode for <= 3 bytes.
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    if (uncomp_size <= 3) {
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }
  if (store_data_uncompressed) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;
    // Caller-supplied data is already deflate; record that in the header.
    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED;
  }
  else if (buf_size) {
    // Deflate the buffer straight into the archive via the put-buf callback.
    mz_zip_writer_add_state state;
    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;
    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
         TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    method = MZ_DEFLATED;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Now that sizes/CRC are final, write the real local header in place.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to the archive, streaming it through a
// fixed-size I/O buffer so arbitrarily large files can be added without
// loading them into memory. Entries <= 3 bytes are always stored; otherwise
// the data is deflated at the requested level. Pre-compressed input
// (MZ_ZIP_FLAG_COMPRESSED_DATA) is not supported by this entry point.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
            comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  MZ_FILE *pSrc_file = NULL;
  // Negative levels mean "use the default compression level".
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF) return MZ_FALSE;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;
  // Stamp the entry with the source file's modification time.
  if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
    return MZ_FALSE;
  pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
  if (!pSrc_file) return MZ_FALSE;
  // Determine the uncompressed size by seeking to the end.
  MZ_FSEEK64(pSrc_file, 0, SEEK_END);
  uncomp_size = MZ_FTELL64(pSrc_file);
  MZ_FSEEK64(pSrc_file, 0, SEEK_SET);
  if (uncomp_size > 0xFFFFFFFF) {
    // No zip64 support yet
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  // Tiny payloads aren't worth compressing.
  if (uncomp_size <= 3) level = 0;
  // Reserve space for alignment padding plus the local header; the real
  // header is written in place once sizes/CRC are known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);
  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;
  if (uncomp_size) {
    mz_uint64 uncomp_remaining = uncomp_size;
    void *pRead_buf =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
    if (!pRead_buf) {
      MZ_FCLOSE(pSrc_file);
      return MZ_FALSE;
    }
    if (!level) {
      // Stored mode: copy the file through in chunks, CRCing as we go.
      while (uncomp_remaining) {
        mz_uint n =
            (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
        if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
            (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf,
                            n) != n)) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          MZ_FCLOSE(pSrc_file);
          return MZ_FALSE;
        }
        uncomp_crc32 =
            (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
        uncomp_remaining -= n;
        cur_archive_file_ofs += n;
      }
      comp_size = uncomp_size;
    }
    else {
      // Deflate mode: stream chunks through tdefl; compressed output is
      // written to the archive by mz_zip_writer_add_put_buf_callback.
      mz_bool result = MZ_FALSE;
      mz_zip_writer_add_state state;
      tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
          pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
      if (!pComp) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      state.m_pZip = pZip;
      state.m_cur_archive_file_ofs = cur_archive_file_ofs;
      state.m_comp_size = 0;
      if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                     tdefl_create_comp_flags_from_zip_params(
                         level, -15, MZ_DEFAULT_STRATEGY)) !=
          TDEFL_STATUS_OKAY) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      for (;;) {
        size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining,
                                               (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
        tdefl_status status;
        if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
          break;
        uncomp_crc32 = (mz_uint32)mz_crc32(
            uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
        uncomp_remaining -= in_buf_size;
        // Flush only on the final chunk.
        status = tdefl_compress_buffer(
            pComp, pRead_buf, in_buf_size,
            uncomp_remaining ? TDEFL_NO_FLUSH : TDEFL_FINISH);
        if (status == TDEFL_STATUS_DONE) {
          result = MZ_TRUE;
          break;
        }
        else if (status != TDEFL_STATUS_OKAY)
          break;
      }
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      if (!result) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }
      comp_size = state.m_comp_size;
      cur_archive_file_ofs = state.m_cur_archive_file_ofs;
      method = MZ_DEFLATED;
    }
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  }
  MZ_FCLOSE(pSrc_file);
  pSrc_file = NULL;
  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;
  // Now that sizes/CRC are final, write the real local header in place.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;
  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;
  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;
  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO
// Copies a single file entry from a reader archive into this writer archive
// without recompressing: the source's local header, raw (possibly compressed)
// data, and optional data descriptor are copied byte-for-byte, and the
// source's central directory record is duplicated with only the local header
// offset patched to the entry's new location.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index) {
  mz_uint n, bit_flags, num_alignment_padding_bytes;
  mz_uint64 comp_bytes_remaining, local_dir_header_ofs;
  mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
  // mz_uint32-backed buffer keeps the local header storage aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
  size_t orig_central_dir_size;
  mz_zip_internal_state *pState;
  void *pBuf;
  const mz_uint8 *pSrc_central_header;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  if (NULL ==
      (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
    return MZ_FALSE;
  pState = pZip->m_pState;
  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);
  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
       0xFFFFFFFF))
    return MZ_FALSE;
  cur_src_file_ofs =
      MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  cur_dst_file_ofs = pZip->m_archive_size;
  // Read and sanity-check the source entry's local header.
  if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                           pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  // Pad the destination so the copied local header lands aligned.
  if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
                                 num_alignment_padding_bytes))
    return MZ_FALSE;
  cur_dst_file_ofs += num_alignment_padding_bytes;
  local_dir_header_ofs = cur_dst_file_ofs;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
                     MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;
  // Total payload to copy: filename + extra field + the compressed data.
  n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  comp_bytes_remaining =
      n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  // The buffer must hold at least 4 mz_uint32s so it can also receive a
  // possible trailing data descriptor below.
  if (NULL == (pBuf = pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1,
                   (size_t)MZ_MAX(sizeof(mz_uint32) * 4,
                                  MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                                         comp_bytes_remaining)))))
    return MZ_FALSE;
  while (comp_bytes_remaining) {
    n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining);
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_dst_file_ofs += n;
    comp_bytes_remaining -= n;
  }
  bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
  if (bit_flags & 8) {
    // Copy data descriptor
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf,
                             sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    // The descriptor is 3 dwords, plus one more if it carries the optional
    // 0x08074b50 signature.
    n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;
    cur_dst_file_ofs += n;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
  // no zip64 support yet
  if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE;
  // Duplicate the source central directory record, patching the local header
  // offset to the entry's position in the destination archive.
  orig_central_dir_size = pState->m_central_dir.m_size;
  memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
                local_dir_header_ofs);
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
                              MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
    return MZ_FALSE;
  // Copy the variable-length tail: filename + extra field + comment.
  n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  if (!mz_zip_array_push_back(
          pZip, &pState->m_central_dir,
          pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE;
  n = (mz_uint32)orig_central_dir_size;
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }
  pZip->m_total_files++;
  pZip->m_archive_size = cur_dst_file_ofs;
  return MZ_TRUE;
}
// Completes the archive: writes the accumulated central directory followed by
// the end-of-central-directory record, flushes stdio output if applicable,
// and moves the writer into the finalized state. After this only
// mz_zip_writer_end (or finalize_heap_archive's buffer handoff) is valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_uint64 central_dir_ofs, central_dir_size;
  mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;
  pState = pZip->m_pState;
  // no zip64 support yet
  if ((pZip->m_total_files > 0xFFFF) ||
      ((pZip->m_archive_size + pState->m_central_dir.m_size +
        MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  central_dir_ofs = 0;
  central_dir_size = 0;
  if (pZip->m_total_files) {
    // Write central directory
    central_dir_ofs = pZip->m_archive_size;
    central_dir_size = pState->m_central_dir.m_size;
    pZip->m_central_directory_file_ofs = central_dir_ofs;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
                       pState->m_central_dir.m_p,
                       (size_t)central_dir_size) != central_dir_size)
      return MZ_FALSE;
    pZip->m_archive_size += central_dir_size;
  }
  // Write end of central directory record
  MZ_CLEAR_OBJ(hdr);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
                MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
                pZip->m_total_files);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
                     sizeof(hdr)) != sizeof(hdr))
    return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
  // Surface buffered-write failures now rather than at fclose time.
  if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE;
#endif  // #ifndef MINIZ_NO_STDIO
  pZip->m_archive_size += sizeof(hdr);
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
  return MZ_TRUE;
}
// Finalizes a heap-backed archive and transfers ownership of the underlying
// memory block to the caller via *pBuf/*pSize. The archive state forgets the
// buffer, so the caller is responsible for freeing it.
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize) {
  if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE;
  // Only valid for archives created with mz_zip_writer_init_heap.
  if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE;
  if (!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE;
  *pBuf = pZip->m_pState->m_pMem;
  *pSize = pZip->m_pState->m_mem_size;
  // Detach the buffer so mz_zip_writer_end won't free it.
  pZip->m_pState->m_pMem = NULL;
  pZip->m_pState->m_mem_size = 0;
  pZip->m_pState->m_mem_capacity = 0;
  return MZ_TRUE;
}
// Tears down a writer-mode archive: frees the central directory arrays,
// closes any stdio stream, releases the heap output buffer (if still owned),
// and frees the internal state. Valid for both in-progress and finalized
// writers; returns MZ_FALSE only for invalid/mis-moded archives.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree))
    return MZ_FALSE;
  if ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) &&
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))
    return MZ_FALSE;
  pState = pZip->m_pState;
  pZip->m_pState = NULL;
  mz_zip_array_clear(pZip, &pState->m_central_dir);
  mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
  mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
  if (pState->m_pFile) {
    MZ_FCLOSE(pState->m_pFile);
    pState->m_pFile = NULL;
  }
#endif  // #ifndef MINIZ_NO_STDIO
  // The heap buffer is only ours to free if it wasn't detached by
  // mz_zip_writer_finalize_heap_archive.
  if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
    pState->m_pMem = NULL;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}
#ifndef MINIZ_NO_STDIO
// High-level one-shot helper: adds a memory buffer as a file inside the ZIP
// archive at pZip_filename, creating the archive if it doesn't exist or
// appending in place if it does. On failure with a freshly created archive,
// the (useless) file is deleted.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags) {
  mz_bool status, created_new_archive = MZ_FALSE;
  mz_zip_archive zip_archive;
  struct MZ_FILE_STAT_STRUCT file_stat;
  MZ_CLEAR_OBJ(zip_archive);
  // Negative levels mean "use the default compression level".
  if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL;
  if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) ||
      ((comment_size) && (!pComment)) ||
      ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE;
  // stat() failure means the archive doesn't exist yet.
  if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) {
    // Create a new archive.
    if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0))
      return MZ_FALSE;
    created_new_archive = MZ_TRUE;
  }
  else {
    // Append to an existing archive.
    if (!mz_zip_reader_init_file(
            &zip_archive, pZip_filename,
            level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
      return MZ_FALSE;
    if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) {
      mz_zip_reader_end(&zip_archive);
      return MZ_FALSE;
    }
  }
  status =
      mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size,
                               pComment, comment_size, level_and_flags, 0, 0);
  // Always finalize, even if adding failed for some reason, so we have a valid
  // central directory. (This may not always succeed, but we can try.)
  if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE;
  if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE;
  if ((!status) && (created_new_archive)) {
    // It's a new archive and something went wrong, so just delete it.
    int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
    (void)ignoredStatus;
  }
  return status;
}
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
const char *pArchive_name,
size_t *pSize, mz_uint flags) {
int file_index;
mz_zip_archive zip_archive;
void *p = NULL;
if (pSize) *pSize = 0;
if ((!pZip_filename) || (!pArchive_name)) return NULL;
MZ_CLEAR_OBJ(zip_archive);
if (!mz_zip_reader_init_file(
&zip_archive, pZip_filename,
flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY))
return NULL;
if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL,
flags)) >= 0)
p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
mz_zip_reader_end(&zip_archive);
return p;
}
#endif // #ifndef MINIZ_NO_STDIO
#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
#endif // #ifndef MINIZ_NO_ARCHIVE_APIS
#ifdef __cplusplus
}
#endif
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif // MINIZ_HEADER_FILE_ONLY
/*
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------
#ifdef __clang__
#pragma clang diagnostic pop
#endif
} // namespace miniz
#else
// Reuse MINIZ_LITTLE_ENDIAN macro
#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
defined(__i386) || defined(__i486__) || defined(__i486) || \
defined(i386) || defined(__ia64__) || defined(__x86_64__)
// MINIZ_X86_OR_X64_CPU is only used to help set the below macros.
#define MINIZ_X86_OR_X64_CPU 1
#endif
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1
#endif
#endif
#endif // TINYEXR_USE_MINIZ
// static bool IsBigEndian(void) {
// union {
// unsigned int i;
// char c[4];
// } bint = {0x01020304};
//
// return bint.c[0] == 1;
//}
// Stores a heap-allocated copy of msg in *err (if err is non-NULL). The
// string is allocated with strdup/_strdup, so the caller owns it and must
// release it with free() (tinyexr exposes FreeEXRErrorMessage for this).
static void SetErrorMessage(const std::string &msg, const char **err) {
  if (!err) return;
#ifdef _WIN32
  *err = _strdup(msg.c_str());
#else
  *err = strdup(msg.c_str());
#endif
}
static const int kEXRVersionSize = 8;
// Copies a 16-bit value byte-by-byte; safe when either pointer targets
// unaligned storage.
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
  memcpy(dst_val, src_val, sizeof(unsigned short));
}
// Converts a 16-bit value between file byte order (little-endian) and host
// byte order: a no-op on little-endian hosts, a byte swap otherwise.
static void swap2(unsigned short *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *p = (unsigned char *)val;
  unsigned char t = p[0];
  p[0] = p[1];
  p[1] = t;
#endif
}
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
// Byte-wise copy of a 32-bit int (safe for unaligned src/dst).
static void cpy4(int *dst_val, const int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; i++) {
    d[i] = s[i];
  }
}
// Byte-wise copy of a 32-bit unsigned int (safe for unaligned src/dst).
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; i++) {
    d[i] = s[i];
  }
}
// Byte-wise copy of a 32-bit float (safe for unaligned src/dst).
static void cpy4(float *dst_val, const float *src_val) {
  unsigned char *d = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *s = reinterpret_cast<const unsigned char *>(src_val);
  for (int i = 0; i < 4; i++) {
    d[i] = s[i];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
// In-place byteswap of a 32-bit unsigned value; no-op on little-endian
// builds.
static void swap4(unsigned int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  unsigned char t0 = b[0];
  unsigned char t1 = b[1];
  b[0] = b[3];
  b[1] = b[2];
  b[2] = t1;
  b[3] = t0;
#endif
}
// In-place byteswap of a 32-bit signed value; no-op on little-endian
// builds.
static void swap4(int *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  unsigned char t0 = b[0];
  unsigned char t1 = b[1];
  b[0] = b[3];
  b[1] = b[2];
  b[2] = t1;
  b[3] = t0;
#endif
}
// In-place byteswap of the 4 bytes backing a float; no-op on
// little-endian builds. Bytes are swapped through char pointers, so no
// float arithmetic touches the (possibly non-numeric) intermediate
// pattern.
static void swap4(float *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  unsigned char t0 = b[0];
  unsigned char t1 = b[1];
  b[0] = b[3];
  b[1] = b[2];
  b[2] = t1;
  b[3] = t0;
#endif
}
#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
dst[4] = src[4];
dst[5] = src[5];
dst[6] = src[6];
dst[7] = src[7];
}
#endif
// In-place byteswap of a 64-bit value; no-op on little-endian builds.
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef MINIZ_LITTLE_ENDIAN
  (void)val;
#else
  unsigned char *b = reinterpret_cast<unsigned char *>(val);
  for (int i = 0; i < 4; i++) {
    unsigned char t = b[i];
    b[i] = b[7 - i];
    b[7 - i] = t;
  }
#endif
}
// https://gist.github.com/rygorous/2156668
// Reuse MINIZ_LITTLE_ENDIAN flag from miniz.
// Bit-level view of an IEEE-754 single: access the same 32 bits as a raw
// integer (u), as a float (f), or as sign/exponent/mantissa bit-fields
// (s). Bit-field order is flipped for big-endian targets so the fields
// line up with the integer layout.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif
// Bit-level view of an IEEE-754 half: the 16 bits as a raw integer (u) or
// as sign/exponent/mantissa bit-fields (s). Field order mirrors FP32.
union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// Converts an IEEE half (FP16) to single precision (FP32) using Fabian
// Giesen's integer-only method (gist linked above FP32). Handles
// Inf/NaN (extra exponent adjust) and denormals (renormalize via the
// `magic` constant); the sign bit is carried over at the end.
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = { 113 << 23 };
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;
  o.u = (h.u & 0x7fffU) << 13U;  // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;  // exponent adjust (rebias half->single)
  // handle exponent special cases
  if (exp_ == shifted_exp)  // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)  // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }
  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}
// Converts single precision to half precision with full special-case
// handling: signed zero/denormal, Inf/NaN (NaN mantissa forced to qNaN),
// overflow to infinity, denormal underflow, and round-to-nearest on the
// dropped mantissa bits.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = { 0 };
  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  }
  else  // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    }
    else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;  // Round, might overflow to inf, this is OK
    }
  }
  o.s.Sign = f.s.Sign;
  return o;
}
// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
// Reads a NUL-terminated string of at most `len` bytes starting at `ptr`.
// On success stores the string (without the terminator) in *s and returns
// a pointer one past the '\0'. When no terminator is found within `len`
// bytes, clears *s and returns NULL.
// (Fix: dropped the redundant local `p`, which never moved and always
// equaled `ptr`; corrected "untile" comment typo.)
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NUL('\0') or the end of the buffer.
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }
  if (size_t(q - ptr) >= len) {
    // No terminating NUL inside the buffer.
    (*s) = std::string();
    return NULL;
  }
  (*s) = std::string(ptr, q);
  return q + 1;  // skip '\0'
}
// Parses one EXR attribute from `marker` (at most `size` bytes). Layout:
// name\0 type\0 uint32-length payload. Outputs the name, type and raw
// payload bytes, and sets *marker_size to the total bytes consumed.
// Returns false on truncated or malformed input.
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);
  marker += name_len + 1;
  size -= name_len + 1;
  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    // Type string not terminated within the buffer.
    return false;
  }
  *type = std::string(marker, type_len);
  marker += type_len + 1;
  size -= type_len + 1;
  if (size < sizeof(uint32_t)) {
    return false;
  }
  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  // Length is stored little-endian; swap4 is a no-op on LE hosts.
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.
      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);
      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);
      // Represent the empty string as a single NUL byte.
      data->resize(1);
      (*data)[0] = '\0';
      return true;
    }
    else {
      // Zero-length payload is rejected for every other attribute type.
      return false;
    }
  }
  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);
  if (size < data_len) {
    // Payload would run past the end of the buffer.
    return false;
  }
  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));
  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}
// Serializes one EXR attribute (name\0 type\0 little-endian length, then
// the raw payload) and appends it to `out`.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  // attribute name, including the trailing '\0'
  out->insert(out->end(), name, name + strlen(name) + 1);
  // attribute type, including the trailing '\0'
  out->insert(out->end(), type, type + strlen(type) + 1);
  // 4-byte payload length, byte-swapped on big-endian hosts
  int data_len = len;
  tinyexr::swap4(&data_len);
  const unsigned char *len_bytes =
      reinterpret_cast<unsigned char *>(&data_len);
  out->insert(out->end(), len_bytes, len_bytes + sizeof(int));
  // raw payload
  out->insert(out->end(), data, data + len);
}
// One entry of an EXR channel list, as read from / written to the
// "channels" attribute (name + 16 bytes of per-channel info).
typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;    // pixel type id as stored in the file
  int x_sampling;    // horizontal subsampling factor (EXR "xSampling")
  int y_sampling;    // vertical subsampling factor (EXR "ySampling")
  unsigned char p_linear;  // pLinear hint byte carried through from the file
  unsigned char pad[3];    // explicit padding to keep ints aligned
} ChannelInfo;
// Integer bounding box matching the EXR box2i attribute layout
// (min/max corners, stored as four ints).
typedef struct {
  int min_x;
  int min_y;
  int max_x;
  int max_y;
} Box2iInfo;
// Parsed contents of one EXR (part) header. Fields mirror the standard
// EXR header attributes; clear() resets everything for reuse.
struct HeaderInfo {
  std::vector<tinyexr::ChannelInfo> channels;  // "channels" attribute
  std::vector<EXRAttribute> attributes;        // remaining/custom attributes
  Box2iInfo data_window;
  int line_order;
  Box2iInfo display_window;
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;
  int chunk_count;
  // Tiled format
  int tiled;  // Non-zero if the part is tiled.
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;
  unsigned int header_len;  // total byte length of the header in the file
  int compression_type;
  // required for multi-part or non-image files
  std::string name;
  // required for multi-part or non-image files
  std::string type;
  // Resets every field to its zero/empty default.
  void clear() {
    channels.clear();
    attributes.clear();
    data_window.min_x = 0;
    data_window.min_y = 0;
    data_window.max_x = 0;
    data_window.max_y = 0;
    line_order = 0;
    display_window.min_x = 0;
    display_window.min_y = 0;
    display_window.max_x = 0;
    display_window.max_y = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;
    chunk_count = 0;
    // Tiled format
    tiled = 0;
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;
    header_len = 0;
    compression_type = 0;
    name.clear();
    type.clear();
  }
};
// Parses the payload of a "channels" attribute (`data`) into `channels`.
// The payload is a sequence of records (name\0 + 16 info bytes)
// terminated by an empty name. Returns false when a record would read
// past the end of `data`.
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));
  for (;;) {
    if ((*p) == 0) {
      // Empty name: end of the channel list.
      break;
    }
    ChannelInfo info;
    // Bytes remaining from the cursor to the end of the payload.
    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }
    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }
    // 16 bytes follow the name: pixel_type, p_linear + 3 reserved bytes,
    // x_sampling, y_sampling.
    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    // NOTE(review): `>=` also rejects a record ending exactly at the buffer
    // end -- conservative but safe; confirm before relaxing to `>`.
    if (data_end >= (data.data() + data.size())) {
      return false;
    }
    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;  // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));  // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));  // int
    p += 4;
    // Values are stored little-endian; swap on big-endian hosts.
    tinyexr::swap4(&info.pixel_type);
    tinyexr::swap4(&info.x_sampling);
    tinyexr::swap4(&info.y_sampling);
    channels.push_back(info);
  }
  return true;
}
// Serializes the channel list in OpenEXR layout: per channel,
// name\0 + pixel_type(int) + p_linear(uchar) + 3 reserved zero bytes +
// x_sampling(int) + y_sampling(int); the list ends with a single '\0'.
// (Fix: hoisted the repeated strlen(name.c_str()) calls into one
// name.size() per channel -- equivalent since channel names contain no
// embedded NULs, they come from NUL-terminated file strings.)
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;
  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += channels[c].name.size() + 1;  // +1 for \0
    sz += 16;                           // 4 * int
  }
  // resize() value-initializes, so the reserved bytes below stay zero.
  data.resize(sz + 1);
  unsigned char *p = &data.at(0);
  for (size_t c = 0; c < channels.size(); c++) {
    const size_t name_len = channels[c].name.size();
    memcpy(p, channels[c].name.c_str(), name_len);
    p += name_len;
    (*p) = '\0';
    p++;
    int pixel_type = channels[c].pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    // Stored little-endian; swap4 is a no-op on LE hosts.
    tinyexr::swap4(&pixel_type);
    tinyexr::swap4(&x_sampling);
    tinyexr::swap4(&y_sampling);
    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);
    (*p) = channels[c].p_linear;
    p += 4;  // p_linear + 3 reserved bytes (already zeroed by resize)
    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);
    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }
  (*p) = '\0';  // empty name terminates the channel list
}
// Compresses one chunk of pixel data with OpenEXR's ZIP scheme:
// 1) de-interleave bytes into two halves, 2) delta-encode, 3) deflate
// (miniz or zlib). `compressedSize` receives the output byte count; if
// deflate does not shrink the data, the input is stored raw (Issue 40).
// NOTE(review): assumes `dst` can hold at least compressBound(src_size)
// bytes -- confirm at call sites.
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  //
  // Reorder the pixel data.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);
  {
    // Even-indexed bytes go to the first half, odd-indexed to the second.
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;
      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }
  //
  // Predictor: replace each byte with a biased delta from its predecessor,
  // which makes typical image data much more compressible.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //
  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;
  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);
  compressedSize = outSize;
#endif
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Inverse of CompressZip: inflates `src`, then undoes the delta predictor
// and the two-way byte interleave into `dst`. `*uncompressed_size` is
// in/out (updated by the inflate call). When src_size equals the expected
// uncompressed size the chunk was stored raw and is copied through.
// Returns false when inflation fails.
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);
#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //
  // Predictor: each stored byte is a biased delta; integrate to recover
  // the original values.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data: re-interleave the two halves byte by byte.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);
    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
  return true;
}
// RLE code from OpenEXR --------------------------------------
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
// aggregate initializer (also supported by GNU
// C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
// 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
// deprecated. Instead, use the ISO C and C++
// conformant name: _strdup.
#endif
// Run-length coding parameters: a compressible run must repeat at least
// MIN_RUN_LENGTH times; one token covers at most MAX_RUN_LENGTH+1 bytes.
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;
//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
// Output tokens: {n-1, byte} for a run of n identical bytes, or
// {-n, b0..b(n-1)} for a literal span of n bytes.
//
static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *scan = in;       // first byte of the current token
  const char *probe = in + 1;  // one past the bytes examined so far
  signed char *dst = out;
  while (scan < inEnd) {
    // Extend the run of bytes equal to *scan as far as allowed.
    while (probe < inEnd && *scan == *probe &&
           probe - scan - 1 < MAX_RUN_LENGTH) {
      ++probe;
    }
    if (probe - scan >= MIN_RUN_LENGTH) {
      // Long enough to pay off: emit {count-1, value}.
      *dst++ = static_cast<char>(probe - scan) - 1;
      *dst++ = *(reinterpret_cast<const signed char *>(scan));
      scan = probe;
    }
    else {
      // Literal span: extend until a run of >= 3 begins or the limit hits.
      while (probe < inEnd &&
             ((probe + 1 >= inEnd || *probe != *(probe + 1)) ||
              (probe + 2 >= inEnd || *(probe + 1) != *(probe + 2))) &&
             probe - scan < MAX_RUN_LENGTH) {
        ++probe;
      }
      *dst++ = static_cast<char>(scan - probe);  // negative length marker
      while (scan < probe) {
        *dst++ = *(reinterpret_cast<const signed char *>(scan++));
      }
    }
    ++probe;
  }
  return static_cast<int>(dst - out);
}
//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// uncompressed data would be more than maxLength bytes.
//
static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *dst = out;
  while (inLength > 0) {
    const signed char code = *in++;
    if (code < 0) {
      // Literal span: -code raw bytes follow.
      const int n = -static_cast<int>(code);
      inLength -= n + 1;
      // Fixes #116: Add bounds check to in buffer.
      maxLength -= n;
      if (maxLength < 0 || inLength < 0) return 0;
      memcpy(dst, in, n);
      dst += n;
      in += n;
    }
    else {
      // Run: code+1 copies of the next input byte.
      const int n = static_cast<int>(code);
      inLength -= 2;
      maxLength -= n + 1;
      if (maxLength < 0) return 0;
      memset(dst, *reinterpret_cast<const char *>(in), n + 1);
      dst += n + 1;
      in++;
    }
  }
  return static_cast<int>(dst - out);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
// End of RLE code from OpenEXR -----------------------------------
// Compresses one chunk of pixel data with OpenEXR's RLE scheme:
// de-interleave bytes, delta-encode, then run-length encode via
// rleCompress. `compressedSize` receives the output byte count; if RLE
// does not shrink the data, the input is stored raw (Issue 40).
// NOTE(review): dst must hold up to (src_size * 3) / 2 bytes (worst-case
// RLE expansion) -- confirm at call sites.
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //
  //
  // Reorder the pixel data.
  //
  const char *srcPtr = reinterpret_cast<const char *>(src);
  {
    // Even-indexed bytes go to the first half, odd-indexed to the second.
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;
    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;
      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }
  //
  // Predictor: biased byte deltas compress better than raw values.
  //
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];
    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // outSize will be (srcSiz * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);
  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}
// Inverse of CompressRle: run-length decodes `src`, then undoes the delta
// predictor and the two-way byte interleave into `dst`. When src_size
// equals the expected uncompressed size the chunk was stored raw and is
// copied through. Returns false on malformed/short input.
static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }
  std::vector<unsigned char> tmpBuf(uncompressed_size);
  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  if (ret != static_cast<int>(uncompressed_size)) {
    // Decoded length mismatch: corrupt stream.
    return false;
  }
  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //
  // Predictor: integrate the biased byte deltas back to raw values.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;
    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }
  // Reorder the pixel data: re-interleave the two halves byte by byte.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;
    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;
      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
  return true;
}
#if TINYEXR_USE_PIZ
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif
//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//
// Per-channel view used by the PIZ codec (mirrors OpenEXR's ChannelData):
// a [start, end) range of 16-bit values in the shared work buffer plus
// the channel's dimensions.
struct PIZChannelData {
  unsigned short *start;  // first value of this channel in the work buffer
  unsigned short *end;    // cursor/end pointer -- advanced by the codec below
  int nx;  // width in samples
  int ny;  // height in samples
  int ys;  // y sampling rate
  int size;  // values per sample (presumably pixel size in shorts -- confirm in codec)
};
//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------
//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//
// Forward 14-bit Haar step: l receives the (floor) average of a and b,
// h their signed difference. Valid only for values below 1 << 14.
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const short as = static_cast<short>(a);
  const short bs = static_cast<short>(b);
  l = static_cast<unsigned short>(static_cast<short>((as + bs) >> 1));
  h = static_cast<unsigned short>(static_cast<short>(as - bs));
}
// Inverse 14-bit Haar step: reconstructs the original pair (a, b) from
// the average l and difference h produced by wenc14.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int ls = static_cast<short>(l);
  const int hi = static_cast<short>(h);
  const int ai = ls + (hi & 1) + (hi >> 1);
  a = static_cast<unsigned short>(static_cast<short>(ai));
  b = static_cast<unsigned short>(static_cast<short>(ai - hi));
}
//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
// Parameters for the modulo-arithmetic (full 16-bit) wavelet basis below.
const int NBITS = 16;  // value width in bits
const int A_OFFSET = 1 << (NBITS - 1);  // bias added to `a` before averaging
const int M_OFFSET = 1 << (NBITS - 1);  // bias re-added when the difference wraps
const int MOD_MASK = (1 << NBITS) - 1;  // arithmetic is modulo 2^NBITS
// Forward 16-bit Haar step with modulo arithmetic: works for the full
// 16-bit range, at the cost of slightly worse Huffman compressibility.
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const int ao = (a + A_OFFSET) & MOD_MASK;
  int m = (ao + b) >> 1;
  const int d = ao - b;
  if (d < 0) {
    m = (m + M_OFFSET) & MOD_MASK;
  }
  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d & MOD_MASK);
}
// Inverse 16-bit Haar step with modulo arithmetic: reconstructs (a, b)
// from the pair produced by wenc16.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int m = l;
  const int d = h;
  const int bb = (m - (d >> 1)) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>((d + bb - A_OFFSET) & MOD_MASK);
}
//
// 2D Wavelet encoding:
//
// In-place 2-D Haar wavelet transform of one channel plane, as used by
// the PIZ encoder. Uses the exact 14-bit basis (wenc14) when every value
// fits in 14 bits (mx < 1<<14), otherwise the modulo-16-bit basis
// (wenc16). Processes square 2x2 blocks per level, with special 1-D
// passes for a leftover odd row/column.
static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 << level
  int p2 = 2;  // == 1 << (level+1)
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet encoding
        //
        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        }
        else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }
      //
      // Encode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Encode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p = p2;
    p2 <<= 1;
  }
}
//
// 2D Wavelet decoding:
//
// Inverse of wav2Encode: in-place 2-D Haar wavelet reconstruction of one
// channel plane. First searches for the coarsest level, then walks the
// levels from coarse to fine, applying wdec14/wdec16 to match the basis
// the encoder chose (mx < 1<<14 selects the 14-bit basis).
static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;
  //
  // Search max level
  //
  while (p <= n) p <<= 1;
  p >>= 1;
  p2 = p;
  p >>= 1;
  //
  // Hierarchical loop on smaller dimension n
  //
  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;
    //
    // Y loop
    //
    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      //
      // X loop
      //
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;
        //
        // 2D wavelet decoding
        //
        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        }
        else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }
      //
      // Decode (1D) odd column (still in Y loop)
      //
      if (nx & p) {
        unsigned short *p10 = px + oy1;
        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);
        *px = i00;
      }
    }
    //
    // Decode (1D) odd line (must loop in X)
    //
    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);
      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);
        *px = i00;
      }
    }
    //
    // Next level
    //
    p2 = p;
    p >>= 1;
  }
}
//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------
// Adds some modification for tinyexr.
// Huffman coding parameters (from OpenEXR's ImfHuf).
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)
const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;  // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;  // mask for the table-lookup index
// One slot of the Huffman decoding table. For short codes (fitting in
// HUF_DECBITS bits) len/lit hold the code length and decoded literal
// directly; for long codes the slot instead points at a candidate list
// `p` whose size is stored in `lit` (per the column legend below).
struct HufDec {  // short code long code
  //-------------------------------
  unsigned int len : 8;   // code length 0
  unsigned int lit : 24;  // lit p size
  unsigned int *p;        // 0 lits
};
inline long long hufLength(long long code) { return code & 63; }
inline long long hufCode(long long code) { return code >> 6; }
// Appends the low nBits of `bits` to the bit buffer `c` (`lc` = bits
// currently buffered) and flushes complete bytes to `out`, most
// significant byte first.
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c = (c << nBits) | bits;
  lc += nBits;
  while (lc >= 8) {
    lc -= 8;
    *out++ = static_cast<char>(c >> lc);
  }
}
// Refills the bit buffer `c` from `in` until it holds at least nBits,
// then consumes and returns the top nBits.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    c = (c << 8) | static_cast<unsigned char>(*in++);
    lc += 8;
  }
  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}
//
// ENCODING TABLE BUILDING & (UN)PACKING
//
//
// Build a "canonical" Huffman code table:
// - for each (uncompressed) symbol, hcode contains the length
// of the corresponding code (in the compressed data)
// - canonical codes are computed and stored in hcode
// - the rules for constructing canonical codes are as follows:
// * shorter codes (if filled with zeroes to the right)
// have a numerically higher value than longer codes
// * for codes with the same length, numerical values
// increase with numerical symbol values
// - because the canonical code table can be constructed from
// symbol lengths alone, the code table can be transmitted
// without sending the actual code values
// - see http://www.compressconsult.com/huffman/
//
// Converts a table of code lengths into a canonical Huffman code table,
// in place: on entry hcode[i] is the bit length for symbol i (0 = symbol
// unused); on exit hcode[i] is (canonical_code << 6) | length. See the
// explanatory comment above for the canonical-code rules.
static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59];
  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //
  for (int i = 0; i <= 58; ++i) n[i] = 0;
  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;
  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //
  long long c = 0;
  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }
  //
  // hcode[i] contains the length, l, of the
  // code for symbol i. Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //
  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);
    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}
//
// Compute Huffman codes (based on frq input) and store them in frq:
// - code structure is : [63:lsb - 6:msb] | [5-0: bit length];
// - max code length is 58 bits;
// - codes outside the range [im-iM] have a null length (unused values);
// - original frequencies are destroyed;
// - encoding tables are used by hufEncode() and hufBuildDecTable();
//
// Comparator for std::make_heap/pop_heap over frequency pointers: the
// "greater" ordering turns the standard max-heap into a min-heap, so the
// least frequent symbol sits on top.
struct FHeapCompare {
  bool operator()(long long *lhs, long long *rhs) { return *lhs > *rhs; }
};
// Builds the Huffman encoding table in place: `frq` holds symbol
// frequencies on entry and packed (code << 6) | length entries on exit.
// *im / *iM receive the minimum and maximum symbol indices actually used
// (iM includes an appended run-length pseudo-symbol). Code lengths are
// computed via a pointer heap without materializing the tree; see the
// long comments below.
static void hufBuildEncTable(
    long long *frq,  // io: input frequencies [HUF_ENCSIZE], output table
    int *im,         // o: min frq index
    int *iM)         // o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded. (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  // to non-zero entries in frq:
  //
  // frq[im] != 0, and frq[i] == 0 for all i < im
  // frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  // entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  // for all array entries.
  //
  std::vector<int> hlink(HUF_ENCSIZE);
  std::vector<long long *> fHeap(HUF_ENCSIZE);
  *im = 0;
  while (!frq[*im]) (*im)++;
  int nf = 0;
  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;
    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }
  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly. Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //
  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;
  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i. Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  // Make a heap that contains all symbols with a non-zero frequency,
  // with the least frequent symbol on top.
  //
  // Repeat until only one symbol is left on the heap:
  //
  // Take the two least frequent symbols off the top of the heap.
  // Create a new node that has first two nodes as children, and
  // whose frequency is the sum of the frequencies of the first
  // two nodes. Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree. For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly. When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //
  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
  std::vector<long long> scode(HUF_ENCSIZE);
  memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE);
  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //
    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;
    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //
    //
    // Add a bit to all codes in the first list.
    //
    for (int j = m;; j = hlink[j]) {
      scode[j]++;
      assert(scode[j] <= 58);
      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //
        hlink[j] = mm;
        break;
      }
    }
    //
    // Add a bit to all codes in the second list
    //
    for (int j = mm;; j = hlink[j]) {
      scode[j]++;
      assert(scode[j] <= 58);
      if (hlink[j] == j) break;
    }
  }
  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs. Copy the
  // code table from scode into frq.
  //
  hufCanonicalCodeTable(scode.data());
  memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE);
}
//
// Pack an encoding table:
//	- only code lengths, not actual codes, are stored
//	- runs of zeroes are compressed as follows:
//
//	  unpacked		packed
//	  --------------------------------
//	  1 zero		0	(6 bits)
//	  2 zeroes		59
//	  3 zeroes		60
//	  4 zeroes		61
//	  5 zeroes		62
//	  n zeroes (6 or more)	63 n-6	(6 + 8 bits)
//
const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
// Shortest run that uses the long (6 + 8 bit) encoding: 6 zeroes.
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
// Longest run expressible in the 8-bit run-length field: 261 zeroes.
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;
// Pack the code lengths of hcode[im..iM] into the byte stream at
// *pcode, compressing runs of zero-length codes as described above.
// On return *pcode points one past the last byte written.
static void hufPackEncTable(
    const long long *hcode,  // i : encoding table [HUF_ENCSIZE]
    int im,                  // i : min hcode index
    int iM,                  // i : max hcode index
    char **pcode)            // o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;  // pending output bits
  int lc = 0;       // number of valid bits in c
  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);
    if (l == 0) {
      // Count a run of zero-length codes so it can be stored compactly.
      int zerun = 1;
      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }
      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          // Long run: marker code followed by an 8-bit run length.
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        }
        else {
          // Short run of 2..5 zeroes encoded in a single 6-bit code.
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
    }
    outputBits(6, l, c, lc, p);  // plain 6-bit code length
  }
  // Flush any bits still buffered in c, left-aligned in the final byte.
  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));
  *pcode = p;
}
//
// Unpack an encoding table packed by hufPackEncTable():
// reads 6-bit code lengths (expanding zero runs) into hcode[im..iM],
// then converts the lengths into canonical (code, length) pairs.
// Returns false if the packed data is truncated or a zero run would
// write past iM.
//
static bool hufUnpackEncTable(
    const char **pcode,  // io: ptr to packed table (updated)
    int ni,              // i : input size (in bytes)
    int im,              // i : min hcode index
    int iM,              // i : max hcode index
    long long *hcode)    // o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);
  const char *p = *pcode;
  long long c = 0;  // bit buffer
  int lc = 0;       // number of valid bits in c
  for (; im <= iM; im++) {
    // Bounds check before pulling more bits from the packed stream.
    if (p - *pcode >= ni) {
      return false;
    }
    long long l = hcode[im] = getBits(6, c, lc, p);  // code length
    if (l == (long long)LONG_ZEROCODE_RUN) {
      // Long zero run: the next 8 bits hold (run length - 6).
      // NOTE(review): this check uses `> ni` while the one above uses
      // `>= ni`; the asymmetry looks unintentional -- confirm.
      if (p - *pcode > ni) {
        return false;
      }
      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;
      if (im + zerun > iM + 1) {
        return false;  // run would write past iM
      }
      while (zerun--) hcode[im++] = 0;
      im--;
    }
    else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short zero run of 2..5 entries encoded directly in the code.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;
      if (im + zerun > iM + 1) {
        return false;
      }
      while (zerun--) hcode[im++] = 0;
      im--;
    }
  }
  *pcode = const_cast<char *>(p);
  // Convert code lengths into canonical (code, length) pairs.
  hufCanonicalCodeTable(hcode);
  return true;
}
//
// DECODING TABLE BUILDING
//
//
// Reset every entry of a newly allocated decoding table to its empty
// state. Done field by field (rather than memset) so the pointer
// member is set with an explicit NULL.
//
static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
                                              // decoding table [HUF_DECSIZE]
{
  HufDec *const tableEnd = hdecod + HUF_DECSIZE;
  for (HufDec *entry = hdecod; entry != tableEnd; ++entry) {
    entry->len = 0;
    entry->lit = 0;
    entry->p = NULL;
  }
}
//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   unfrequent;
// - decoding tables are used by hufDecode();
// Returns false if the encoding table is inconsistent (a code wider
// than its stated length, or two codes mapping to the same slot).
//
static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  // o: (allocated by caller)
//  decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //
  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);
    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //
      // invalidTableEntry();
      return false;
    }
    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry, indexed by the code's
      // top HUF_DECBITS bits.
      //
      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));
      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //
        // invalidTableEntry();
        return false;
      }
      // Grow the secondary symbol array by one and append `im`.
      pl->lit++;
      if (pl->p) {
        unsigned int *p = pl->p;
        pl->p = new unsigned int[pl->lit];
        for (int i = 0; i < int(pl->lit) - 1; ++i) pl->p[i] = p[i];
        delete[] p;
      }
      else {
        pl->p = new unsigned int[1];
      }
      pl->p[pl->lit - 1] = im;
    }
    else if (l) {
      //
      // Short code: init all primary entries whose top l bits
      // equal the code (2^(HUF_DECBITS - l) consecutive slots).
      //
      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));
      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //
          // invalidTableEntry();
          return false;
        }
        pl->len = l;
        pl->lit = im;
      }
    }
  }
  return true;
}
//
// Release the secondary (long-code) arrays owned by a decoding table
// built by hufBuildDecTable(). Safe to call on a cleared table.
//
static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int k = 0; k < HUF_DECSIZE; k++) {
    HufDec &entry = hdecod[k];
    delete[] entry.p;  // deleting NULL is a no-op
    entry.p = NULL;
  }
}
//
// ENCODING
//
// Append one Huffman code to the output bit stream; the code's bit
// pattern and length are extracted from `code` via hufCode()/hufLength().
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of (runCount + 1) instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //
  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  }
  else {
    // Explicit form: note `>= 0`, so runCount + 1 symbols are written.
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}
//
// Encode (compress) ni values based on the Huffman encoding table hcode.
// Consecutive equal values are run-length encoded via the rlc symbol.
// Returns the output size in bits; `out` must be large enough for the
// worst case.
//
static int hufEncode            // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : input buffer size (in bytes)
     int rlc,                   // i : rl code
     char *out)                 // o: compressed output buffer
{
  // Guard against an empty input: the code below unconditionally reads
  // in[0], which would be an out-of-bounds access for ni == 0.
  if (ni <= 0) return 0;
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)
  int s = in[0];    // current symbol
  int cs = 0;       // number of repeats of s seen so far (capped at 255)
  //
  // Loop on input values
  //
  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //
    if (s == in[i] && cs < 255) {
      cs++;
    }
    else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }
    s = in[i];
  }
  //
  // Send remaining code
  //
  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
  // Flush bits still buffered in c, left-aligned in the final byte.
  if (lc) *out = (c << (8 - lc)) & 0xff;
  return (out - outStart) * 8 + lc;
}
//
// DECODING
//
//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
// getChar: shift the next input byte into the low end of the bit
// buffer c and account for the 8 new valid bits in lc.
#define getChar(c, lc, in) \
  { \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8; \
  }
#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
{ \
if (po == rlc) { \
if (lc < 8) getChar(c, lc, in); \
\
lc -= 8; \
\
unsigned char cs = (c >> lc); \
\
if (out + cs > oe) return false; \
\
/* TinyEXR issue 78 */ \
unsigned short s = out[-1]; \
\
while (cs-- > 0) *out++ = s; \
} else if (out < oe) { \
*out++ = po; \
} else { \
return false; \
} \
}
#else
//
// Emit the decoded symbol `po` into the output buffer. If po is the
// run-length code rlc, the next 8 input bits give a repeat count and
// the previously emitted symbol (out[-1]) is replicated that many
// times. Returns false on truncated input, output overflow, or a run
// with no preceding symbol.
//
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
                    const char *in_end, unsigned short *&out,
                    const unsigned short *ob, const unsigned short *oe) {
  (void)ob;
  if (po == rlc) {
    if (lc < 8) {
      /* TinyEXR issue 78 */
      // NOTE(review): this rejects the read when in == in_end - 1 even
      // though getChar consumes exactly one byte; it looks one byte
      // more conservative than strictly necessary -- confirm intent.
      if ((in + 1) >= in_end) {
        return false;
      }
      getChar(c, lc, in);
    }
    lc -= 8;
    unsigned char cs = (c >> lc);  // repeat count
    if (out + cs > oe) return false;  // run would overflow the output
    // Bounds check for safety
    // Issue 100.
    if ((out - 1) < ob) return false;  // no previous symbol to repeat
    unsigned short s = out[-1];
    while (cs-- > 0) *out++ = s;
  }
  else if (out < oe) {
    *out++ = po;  // ordinary literal symbol
  }
  else {
    return false;  // output buffer exhausted
  }
  return true;
}
#endif
//
// Decode (uncompress) ni bits based on encoding & decoding tables:
// short codes are resolved with one primary-table lookup; long codes
// fall back to a linear search of the slot's secondary symbol list.
// Returns false on malformed input or if the output count != no.
//
static bool hufDecode(const long long *hcode,  // i : encoding table
                      const HufDec *hdecod,    // i : decoding table
                      const char *in,          // i : compressed input buffer
                      int ni,                  // i : input size (in bits)
                      int rlc,                 // i : run-length code
                      int no,  // i : expected output size (in bytes)
                      unsigned short *out)  // o: uncompressed output buffer
{
  long long c = 0;  // bit buffer
  int lc = 0;       // number of valid bits in c
  unsigned short *outb = out;          // begin
  unsigned short *oe = out + no;       // end
  const char *ie = in + (ni + 7) / 8;  // input byte size
  //
  // Loop on input bytes
  //
  while (in < ie) {
    getChar(c, lc, in);
    //
    // Access decoding table
    //
    while (lc >= HUF_DECBITS) {
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];
      if (pl.len) {
        //
        // Get short code
        //
        lc -= pl.len;
        if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
          return false;
        }
      }
      else {
        if (!pl.p) {
          return false;  // invalid code: empty table slot
        }
        //
        // Search long code
        //
        int j;
        for (j = 0; j < int(pl.lit); j++) {
          int l = hufLength(hcode[pl.p[j]]);
          while (lc < l && in < ie)  // get more bits
            getChar(c, lc, in);
          if (lc >= l) {
            // Compare the candidate's code against the top l bits.
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //
              lc -= l;
              if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
                return false;
              }
              break;
            }
          }
        }
        if (j == pl.lit) {
          return false;  // invalid code: no long-code candidate matched
        }
      }
    }
  }
  //
  // Get remaining (short) codes
  //
  // ni is a bit count; drop the padding bits of the final byte.
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;
  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];
    if (pl.len) {
      lc -= pl.len;
      if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
        return false;
      }
    }
    else {
      return false;  // wrong (long) code in the tail
    }
  }
  // Exactly `no` output values must have been produced.
  if (out - outb != no) {
    return false;
  }
  return true;
}
//
// Histogram the input: freq[v] = number of occurrences of value v in
// data[0..n-1]. All HUF_ENCSIZE counters are reset first.
//
static void countFrequencies(std::vector<long long> &freq,
                             const unsigned short data[/*n*/], int n) {
  std::fill_n(freq.begin(), HUF_ENCSIZE, 0ll);
  const unsigned short *const dataEnd = data + n;
  for (const unsigned short *d = data; d != dataEnd; ++d) ++freq[*d];
}
//
// Serialize a 32-bit value into buf in little-endian byte order.
//
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;
  for (int k = 0; k < 4; k++) {
    b[k] = (unsigned char)(i >> (8 * k));
  }
}
//
// Deserialize a 32-bit little-endian value from buf.
//
// Each byte is widened to unsigned int *before* shifting: the original
// code left-shifted the int-promoted byte by 24, which is undefined
// behavior in C/C++ when the top byte is >= 0x80 (shift into the sign
// bit of a signed int).
//
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;
  return (unsigned int)b[0] | ((unsigned int)b[1] << 8) |
         ((unsigned int)b[2] << 16) | ((unsigned int)b[3] << 24);
}
//
// EXTERNAL INTERFACE
//
//
// Huffman-compress nRaw 16-bit values from raw[] into compressed[].
// Output layout: 20-byte header (im, iM, table length, bit count,
// reserved), then the packed code table, then the encoded bit stream.
// Returns the total number of bytes written (0 for empty input).
//
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;
  std::vector<long long> freq(HUF_ENCSIZE);
  countFrequencies(freq, raw, nRaw);
  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq.data(), &im, &iM);
  // The packed code table starts right after the 20-byte header.
  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq.data(), im, iM, &tableEnd);
  int tableLength = tableEnd - tableStart;
  char *dataStart = tableEnd;
  // iM is the run-length pseudo-symbol appended by hufBuildEncTable.
  int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
  int data_length = (nBits + 7) / 8;
  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0);  // room for future extensions
  return dataStart + data_length - compressed;
}
//
// Huffman-decompress nCompressed bytes into `raw` (pre-sized by the
// caller). Returns false when the input is empty or the header / code
// table is malformed.
//
static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    // Empty input can never satisfy a decode request. (The original
    // returned false on both branches of a raw->size() test; collapsed.)
    return false;
  }
  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);
  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;
  // A 32-bit bit count that wrapped negative means a corrupt header.
  if (nBits < 0) return false;
  const char *ptr = compressed + 20;
  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be run-able on this platform. Otherwise, fall back
  // to the original decoder
  //
  // if (FastHufDecoder::enabled() && nBits > 128)
  //{
  //  FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
  //  fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
  //}
  // else
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);
    hufClearDecTable(&hdec.at(0));
    // Reject a corrupt packed code table (this result was previously
    // ignored and decoding proceeded on a partially filled table).
    if (!hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                           &freq.at(0))) {
      return false;
    }
    if (nBits > 8 * (nCompressed - (ptr - compressed))) {
      return false;
    }
    // Reject an inconsistent code table (previously ignored). Secondary
    // entries allocated before the failure point must be released.
    if (!hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0))) {
      hufFreeDecTable(&hdec.at(0));
      return false;
    }
    // NOTE(review): hufDecode's result is deliberately NOT propagated:
    // DecompressPiz sizes `raw` in bytes rather than in decoded values,
    // so hufDecode's exact-count check fails even for valid images.
    hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(),
              raw->data());
    hufFreeDecTable(&hdec.at(0));
  }
  return true;
}
//
// Functions to compress the range of values in the pixel data
//
const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);

//
// Build an occupancy bitmap for the values in data[]: bit v is set iff
// value v occurs. Bit 0 (value zero) is always cleared, since zeroes
// are assumed present and never stored explicitly. Also reports the
// first and last non-zero bitmap bytes through minNonZero/maxNonZero
// (minNonZero > maxNonZero when the bitmap is empty).
//
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int k = 0; k < BITMAP_SIZE; ++k) {
    bitmap[k] = 0;
  }
  for (int k = 0; k < nData; ++k) {
    const unsigned short v = data[k];
    bitmap[v >> 3] = (unsigned char)(bitmap[v >> 3] | (1 << (v & 7)));
  }
  // Zero is never stored explicitly; the data always contains it.
  bitmap[0] = (unsigned char)(bitmap[0] & ~1);
  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;
  for (int k = 0; k < BITMAP_SIZE; ++k) {
    if (bitmap[k] != 0) {
      if (k < minNonZero) minNonZero = (unsigned short)k;
      if (k > maxNonZero) maxNonZero = (unsigned short)k;
    }
  }
}
//
// Build the forward lookup table: every value whose bit is set in the
// bitmap (plus the implicit value 0) is mapped to a compact index
// 0, 1, 2, ...; all other values map to 0. Returns the largest index
// assigned, i.e. the number of used values minus one.
//
static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int nextIndex = 0;
  for (int v = 0; v < USHORT_RANGE; ++v) {
    const bool present = (v == 0) || ((bitmap[v >> 3] & (1 << (v & 7))) != 0);
    lut[v] = present ? (unsigned short)(nextIndex++) : 0;
  }
  return (unsigned short)(nextIndex - 1);
}
//
// Build the reverse lookup table: lut[k] is the k-th value whose bit is
// set in the bitmap (value 0 is always included first). Unused slots at
// the end are zero-filled. Returns the largest meaningful index, i.e.
// the number of used values minus one.
//
static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int used = 0;
  for (int v = 0; v < USHORT_RANGE; ++v) {
    if ((v == 0) || (bitmap[v >> 3] & (1 << (v & 7)))) {
      lut[used++] = (unsigned short)v;
    }
  }
  const int maxIndex = used - 1;
  while (used < USHORT_RANGE) {
    lut[used++] = 0;
  }
  return (unsigned short)maxIndex;
}
//
// Remap every element of data[] in place through the lookup table.
//
static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  for (int k = 0; k < nData; ++k) {
    data[k] = lut[data[k]];
  }
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif // __clang__
#ifdef _MSC_VER
#pragma warning(pop)
#endif
//
// PIZ-compress `inSize` bytes of scanline-ordered pixel data from inPtr
// into outPtr. Pipeline: de-interleave into planar channels -> value
// bitmap -> forward LUT -> wavelet encode per channel -> Huffman encode.
// Falls back to a raw copy when compression would grow the data
// (Issue 40). Returns true on success (little-endian hosts only).
//
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif
  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));
  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  // Partition tmpBuffer into one planar region per channel.
  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];
    cd.start = tmpBufferEnd;
    cd.end = cd.start;
    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;
    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    // Channel element size measured in 16-bit units.
    cd.size = static_cast<int>(pixelSize / sizeof(short));
    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }
  // De-interleave the scanline-ordered input into the planar regions.
  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      //   continue;
      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  // Range-compress: map the used values onto a dense index range.
  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
                 bitmap.data(), minNonZero, maxNonZero);
  std::vector<unsigned short> lut(USHORT_RANGE);
  unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));
  //
  // Store range compression info in _outBuffer
  //
  char *buf = reinterpret_cast<char *>(outPtr);
  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  if (minNonZero <= maxNonZero) {
    // Only the non-zero span of the bitmap is stored.
    memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
           maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }
  //
  // Apply wavelet encoding
  //
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Apply Huffman encoding; append the result to _outBuffer
  //
  // length header(4byte), then huff data. Initialize length header with zero,
  // then later fill it by `length`.
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int));
  buf += sizeof(int);
  int length =
      hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  memcpy(lengthPtr, &length, sizeof(int));
  (*outSize) = static_cast<unsigned int>(
      (reinterpret_cast<unsigned char *>(buf) - outPtr) +
      static_cast<unsigned int>(length));
  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if ((*outSize) >= inSize) {
    (*outSize) = static_cast<unsigned int>(inSize);
    memcpy(outPtr, inPtr, inSize);
  }
  return true;
}
//
// Decompress a PIZ-compressed block (inPtr, inLen) into outPtr, which
// must hold tmpBufSize bytes. Pipeline: restore value bitmap -> reverse
// LUT -> Huffman decode -> wavelet decode -> LUT expansion -> interleave
// back into scanline order. Returns false on malformed input
// (little-endian hosts only).
//
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
                          size_t tmpBufSize, size_t inLen, int num_channels,
                          const EXRChannelInfo *channels, int data_width,
                          int num_lines) {
  if (inLen == tmpBufSize) {
    // Data is not compressed(Issue 40).
    memcpy(outPtr, inPtr, inLen);
    return true;
  }
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;
#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif
  memset(bitmap.data(), 0, BITMAP_SIZE);
  const unsigned char *ptr = inPtr;
  // Use cpy2/cpy4 instead of direct loads to avoid unaligned access.
  tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
  tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;
  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }
  if (minNonZero <= maxNonZero) {
    // Only the non-zero span of the bitmap was stored.
    memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
           maxNonZero - minNonZero + 1);
    ptr += maxNonZero - minNonZero + 1;
  }
  std::vector<unsigned short> lut(USHORT_RANGE);
  memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());
  //
  // Huffman decoding
  //
  int length;
  tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);
  if (size_t((ptr - inPtr) + length) > inLen) {
    return false;
  }
  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  // Bug fix: bail out on corrupt Huffman data. The original ignored
  // this return value and wavelet-decoded garbage, returning true.
  if (!hufUncompress(reinterpret_cast<const char *>(ptr), length,
                     &tmpBuffer)) {
    return false;
  }
  //
  // Wavelet decoding
  //
  std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);
  // Partition tmpBuffer into one planar region per channel.
  for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
    const EXRChannelInfo &chan = channels[i];
    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }
    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = data_width;
    channelData[i].ny = num_lines;
    // channelData[i].ys = 1;
    channelData[i].size = static_cast<int>(pixelSize / sizeof(short));
    tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
  }
  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];
    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }
  //
  // Expand the pixel data to their original range
  //
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));
  // Interleave the planar channel data back into scanline order.
  for (int y = 0; y < num_lines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];
      // if (modp (y, cd.ys) != 0)
      //   continue;
      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }
  return true;
}
#endif // TINYEXR_USE_PIZ
#if TINYEXR_USE_ZFP
//
// Parameters controlling ZFP (de)compression. Exactly one of rate /
// precision / tolerance is meaningful, selected by `type`
// (TINYEXR_ZFP_COMPRESSIONTYPE_*). Defaults to fixed-rate mode at
// 2.0 bits per value.
//
struct ZFPCompressionParam {
  double rate;
  unsigned int precision;
  unsigned int __pad0;
  double tolerance;
  int type;  // TINYEXR_ZFP_COMPRESSIONTYPE_*
  unsigned int __pad1;

  ZFPCompressionParam()
      : rate(2.0),
        precision(0),
        tolerance(0.0),
        type(TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {}
};
//
// Locate the ZFP compression parameters among the EXR attributes and
// fill `param`. The `zfpCompressionType` attribute selects which of
// rate / precision / tolerance applies; the corresponding value
// attribute is then required. Returns false (optionally appending a
// message to *err) when an attribute is missing or malformed.
//
static bool FindZFPCompressionParam(ZFPCompressionParam *param,
                                    const EXRAttribute *attributes,
                                    int num_attributes, std::string *err) {
  bool foundType = false;
  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) {
      if (attributes[i].size == 1) {
        param->type = static_cast<int>(attributes[i].value[0]);
        foundType = true;
        break;
      }
      else {
        if (err) {
          (*err) +=
              "zfpCompressionType attribute must be uchar(1 byte) type.\n";
        }
        return false;
      }
    }
  }
  if (!foundType) {
    if (err) {
      (*err) += "`zfpCompressionType` attribute not found.\n";
    }
    return false;
  }
  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
    if (err) {
      (*err) += "`zfpCompressionRate` attribute not found.\n";
    }
  }
  else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // Bug fix: this branch used to overwrite param->rate; precision
        // mode must populate param->precision.
        param->precision =
            *(reinterpret_cast<unsigned int *>(attributes[i].value));
        return true;
      }
    }
    if (err) {
      (*err) += "`zfpCompressionPrecision` attribute not found.\n";
    }
  }
  else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
    if (err) {
      (*err) += "`zfpCompressionTolerance` attribute not found.\n";
    }
  }
  else {
    if (err) {
      (*err) += "Unknown value specified for `zfpCompressionType`.\n";
    }
  }
  return false;
}
// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
size_t num_channels, const unsigned char *src,
unsigned long src_size,
const ZFPCompressionParam ¶m) {
size_t uncompressed_size =
size_t(dst_width) * size_t(dst_num_lines) * num_channels;
if (uncompressed_size == src_size) {
// Data is not compressed(Issue 40).
memcpy(dst, src, src_size);
}
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((dst_width % 4) == 0);
assert((dst_num_lines % 4) == 0);
if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) {
return false;
}
field =
zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
zfp_type_float, static_cast<unsigned int>(dst_width),
static_cast<unsigned int>(dst_num_lines) *
static_cast<unsigned int>(num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2,
/* write random access */ 0);
}
else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
}
else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
}
else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
std::vector<unsigned char> buf(buf_size);
memcpy(&buf.at(0), src, src_size);
bitstream *stream = stream_open(&buf.at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_stream_rewind(zfp);
size_t image_size = size_t(dst_width) * size_t(dst_num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// decompress 4x4 pixel block.
for (size_t y = 0; y < size_t(dst_num_lines); y += 4) {
for (size_t x = 0; x < size_t(dst_width); x += 4) {
float fblock[16];
zfp_decode_block_float_2(zfp, fblock);
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] =
fblock[j * 4 + i];
}
}
}
}
}
zfp_field_free(field);
zfp_stream_close(zfp);
stream_close(stream);
return true;
}
// Assume pixel format is FLOAT for all channels.
static bool CompressZfp(std::vector<unsigned char> *outBuf,
unsigned int *outSize, const float *inPtr, int width,
int num_lines, int num_channels,
const ZFPCompressionParam ¶m) {
zfp_stream *zfp = NULL;
zfp_field *field = NULL;
assert((width % 4) == 0);
assert((num_lines % 4) == 0);
if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) {
return false;
}
// create input array.
field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
zfp_type_float, static_cast<unsigned int>(width),
static_cast<unsigned int>(num_lines * num_channels));
zfp = zfp_stream_open(NULL);
if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
}
else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
zfp_stream_set_precision(zfp, param.precision);
}
else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
zfp_stream_set_accuracy(zfp, param.tolerance);
}
else {
assert(0);
}
size_t buf_size = zfp_stream_maximum_size(zfp, field);
outBuf->resize(buf_size);
bitstream *stream = stream_open(&outBuf->at(0), buf_size);
zfp_stream_set_bit_stream(zfp, stream);
zfp_field_free(field);
size_t image_size = size_t(width) * size_t(num_lines);
for (size_t c = 0; c < size_t(num_channels); c++) {
// compress 4x4 pixel block.
for (size_t y = 0; y < size_t(num_lines); y += 4) {
for (size_t x = 0; x < size_t(width); x += 4) {
float fblock[16];
for (size_t j = 0; j < 4; j++) {
for (size_t i = 0; i < 4; i++) {
fblock[j * 4 + i] =
inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))];
}
}
zfp_encode_block_float_2(zfp, fblock);
}
}
}
zfp_stream_flush(zfp);
(*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp));
zfp_stream_close(zfp);
return true;
}
#endif
//
// -----------------------------------------------------------------
//
// heuristics
#define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192)
// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images,
const int *requested_pixel_types,
const unsigned char *data_ptr, size_t data_len,
int compression_type, int line_order, int width,
int height, int x_stride, int y, int line_no,
int num_lines, size_t pixel_data_size,
size_t num_attributes,
const EXRAttribute *attributes, size_t num_channels,
const EXRChannelInfo *channels,
const std::vector<size_t> &channel_offset_list) {
if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ
#if TINYEXR_USE_PIZ
if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
// Invalid input #90
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(
static_cast<size_t>(width * num_lines) * pixel_data_size));
size_t tmpBufLen = outBuf.size();
bool ret = tinyexr::DecompressPiz(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
data_len, static_cast<int>(num_channels), channels, width, num_lines);
if (!ret) {
return false;
}
// For PIZ_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
FP16 hf;
// hf.u = line_ptr[u];
// use `cpy` to avoid unaligned memory access when compiler's
// optimization is on.
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
}
else { // HALF -> FLOAT
FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
offset = static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
}
else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
}
else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(&outBuf.at(
v * pixel_data_size * static_cast<size_t>(x_stride) +
channel_offset_list[c] * static_cast<size_t>(x_stride)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
image += static_cast<size_t>(
(height - 1 - (line_no + static_cast<int>(v)))) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
}
else {
assert(0);
}
}
#else
assert(0 && "PIZ is enabled in this build");
return false;
#endif
}
else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS ||
compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
assert(dstLen > 0);
if (!tinyexr::DecompressZip(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For ZIP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
}
else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
size_t offset = 0;
if (line_order == 0) {
offset = (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
offset = (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
image += offset;
*image = f32.f;
}
}
}
}
else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
}
else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
}
else {
assert(0);
return false;
}
}
}
else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = static_cast<unsigned long>(outBuf.size());
if (dstLen == 0) {
return false;
}
if (!tinyexr::DecompressRle(
reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr,
static_cast<unsigned long>(data_len))) {
return false;
}
// For RLE_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
&outBuf.at(v * static_cast<size_t>(pixel_data_size) *
static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *image =
reinterpret_cast<unsigned short **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = hf.u;
}
else { // HALF -> FLOAT
tinyexr::FP32 f32 = half_to_float(hf);
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = f32.f;
}
}
}
}
else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const unsigned int *line_ptr = reinterpret_cast<unsigned int *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
unsigned int val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(&val);
unsigned int *image =
reinterpret_cast<unsigned int **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
}
else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
// val = line_ptr[u];
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
}
else {
assert(0);
return false;
}
}
}
else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
tinyexr::ZFPCompressionParam zfp_compression_param;
std::string e;
if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes,
int(num_attributes), &e)) {
// This code path should not be reachable.
assert(0);
return false;
}
// Allocate original data size.
std::vector<unsigned char> outBuf(static_cast<size_t>(width) *
static_cast<size_t>(num_lines) *
pixel_data_size);
unsigned long dstLen = outBuf.size();
assert(dstLen > 0);
tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width,
num_lines, num_channels, data_ptr,
static_cast<unsigned long>(data_len),
zfp_compression_param);
// For ZFP_COMPRESSION:
// pixel sample data for channel 0 for scanline 0
// pixel sample data for channel 1 for scanline 0
// pixel sample data for channel ... for scanline 0
// pixel sample data for channel n for scanline 0
// pixel sample data for channel 0 for scanline 1
// pixel sample data for channel 1 for scanline 1
// pixel sample data for channel ... for scanline 1
// pixel sample data for channel n for scanline 1
// ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT);
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT);
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
const float *line_ptr = reinterpret_cast<float *>(
&outBuf.at(v * pixel_data_size * static_cast<size_t>(width) +
channel_offset_list[c] * static_cast<size_t>(width)));
for (size_t u = 0; u < static_cast<size_t>(width); u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
float *image = reinterpret_cast<float **>(out_images)[c];
if (line_order == 0) {
image += (static_cast<size_t>(line_no) + v) *
static_cast<size_t>(x_stride) +
u;
}
else {
image += (static_cast<size_t>(height) - 1U -
(static_cast<size_t>(line_no) + v)) *
static_cast<size_t>(x_stride) +
u;
}
*image = val;
}
}
}
else {
assert(0);
return false;
}
}
#else
(void)attributes;
(void)num_attributes;
(void)num_channels;
assert(0);
return false;
#endif
}
else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
for (size_t c = 0; c < num_channels; c++) {
for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) {
if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
const unsigned short *line_ptr =
reinterpret_cast<const unsigned short *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
unsigned short *outLine =
reinterpret_cast<unsigned short *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
}
else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
outLine[u] = hf.u;
}
}
else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
}
else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
tinyexr::FP16 hf;
// address may not be aliged. use byte-wise copy for safety.#76
// hf.u = line_ptr[u];
tinyexr::cpy2(&(hf.u), line_ptr + u);
tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u));
tinyexr::FP32 f32 = half_to_float(hf);
outLine[u] = f32.f;
}
}
else {
assert(0);
return false;
}
}
else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
const float *line_ptr = reinterpret_cast<const float *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
float *outLine = reinterpret_cast<float *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
}
else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
if (reinterpret_cast<const unsigned char *>(line_ptr + width) >
(data_ptr + data_len)) {
// Insufficient data size
return false;
}
for (int u = 0; u < width; u++) {
float val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>(
data_ptr + v * pixel_data_size * size_t(width) +
channel_offset_list[c] * static_cast<size_t>(width));
unsigned int *outLine =
reinterpret_cast<unsigned int *>(out_images[c]);
if (line_order == 0) {
outLine += (size_t(y) + v) * size_t(x_stride);
}
else {
outLine +=
(size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride);
}
for (int u = 0; u < width; u++) {
if (reinterpret_cast<const unsigned char *>(line_ptr + u) >=
(data_ptr + data_len)) {
// Corrupsed data?
return false;
}
unsigned int val;
tinyexr::cpy4(&val, line_ptr + u);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));
outLine[u] = val;
}
}
}
}
}
return true;
}
static bool DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  // data_width/data_height are the dimensions of the current (sub)level.
  // Reject tiles whose origin lies outside of that level.
  const int origin_x = tile_size_x * tile_offset_x;
  const int origin_y = tile_size_y * tile_offset_y;
  if (origin_x > data_width || origin_y > data_height) {
    return false;
  }
  // Edge tiles are clipped to the level boundary; interior tiles use the
  // full tile dimensions.
  (*width) = (origin_x + tile_size_x >= data_width) ? (data_width - origin_x)
                                                    : tile_size_x;
  (*height) = (origin_y + tile_size_y >= data_height)
                  ? (data_height - origin_y)
                  : tile_size_y;
  // Decode the tile as a self-contained image: stride is the nominal tile
  // width, scanline bookkeeping (y / line_no) restarts at 0 per tile.
  return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                         compression_type, line_order, (*width), tile_size_y,
                         /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                         (*height), pixel_data_size, num_attributes, attributes,
                         num_channels, channels, channel_offset_list);
}
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  // Walks the channel list in file order, recording each channel's byte
  // offset within one interleaved pixel and accumulating the total
  // per-pixel size. Returns false on an unrecognized pixel type.
  channel_offset_list->resize(static_cast<size_t>(num_channels));
  (*pixel_data_size) = 0;
  (*channel_offset) = 0;
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    (*channel_offset_list)[c] = (*channel_offset);
    size_t component_size = 0;
    switch (channels[c].pixel_type) {
      case TINYEXR_PIXELTYPE_HALF:
        component_size = sizeof(unsigned short);
        break;
      case TINYEXR_PIXELTYPE_FLOAT:
        component_size = sizeof(float);
        break;
      case TINYEXR_PIXELTYPE_UINT:
        component_size = sizeof(unsigned int);
        break;
      default:
        // Unknown pixel type: the layout cannot be computed.
        return false;
    }
    (*pixel_data_size) += static_cast<int>(component_size);
    (*channel_offset) += component_size;
  }
  return true;
}
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  // Allocates one raw buffer per channel, each sized for
  // data_width * data_height pixels. HALF channels are stored as FLOAT when
  // the caller requests promotion; otherwise storage matches the channel's
  // pixel type. The caller owns the returned array and every entry (free()).
  // Returns NULL when the channel-pointer array itself cannot be allocated.
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));
  if (images == NULL) {
    return NULL;  // out of memory
  }
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    size_t data_len =
        static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // Alloc internal image for half type.
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        images[c] =
            reinterpret_cast<unsigned char *>(static_cast<unsigned short *>(
                malloc(sizeof(unsigned short) * data_len)));
      }
      else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        // HALF -> FLOAT promotion happens at decode time; allocate floats.
        images[c] = reinterpret_cast<unsigned char *>(
            static_cast<float *>(malloc(sizeof(float) * data_len)));
      }
      else {
        // Unknown requested type. The original code left images[c]
        // uninitialized here in NDEBUG builds; store NULL instead so a
        // caller never receives a garbage pointer.
        assert(0);
        images[c] = NULL;
      }
    }
    else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<float *>(malloc(sizeof(float) * data_len)));
    }
    else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      images[c] = reinterpret_cast<unsigned char *>(
          static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len)));
    }
    else {
      // Unknown channel pixel type: same uninitialized-pointer hazard as
      // above; NULL keeps the failure detectable.
      assert(0);
      images[c] = NULL;
    }
  }
  return images;
}
#ifdef _WIN32
// Converts a UTF-8 std::string to a UTF-16 std::wstring via the Win32 API.
static inline std::wstring UTF8ToWchar(const std::string &str) {
  // First call queries the required UTF-16 length; second call converts.
  const int wide_len =
      MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0);
  std::wstring result(static_cast<size_t>(wide_len), L'\0');
  MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &result[0],
                      (int)result.size());
  return result;
}
#endif
// Parses one EXR header (attribute list) from `buf` into `info`.
// For multipart files, an empty header (a single '\0' byte) terminates the
// header list and is reported through `*empty_header`. Returns a TINYEXR_*
// status code; human-readable messages are appended to `err` on failure.
static int ParseEXRHeader(HeaderInfo *info, bool *empty_header,
                          const EXRVersion *version, std::string *err,
                          const unsigned char *buf, size_t size) {
  const char *marker = reinterpret_cast<const char *>(&buf[0]);
  if (empty_header) {
    (*empty_header) = false;
  }
  if (version->multipart) {
    if (size > 0 && marker[0] == '\0') {
      // End of header list.
      if (empty_header) {
        (*empty_header) = true;
      }
      return TINYEXR_SUCCESS;
    }
  }
  // According to the spec, the header of every OpenEXR file must contain at
  // least the following attributes:
  //
  // channels chlist
  // compression compression
  // dataWindow box2i
  // displayWindow box2i
  // lineOrder lineOrder
  // pixelAspectRatio float
  // screenWindowCenter v2f
  // screenWindowWidth float
  bool has_channels = false;
  bool has_compression = false;
  bool has_data_window = false;
  bool has_display_window = false;
  bool has_line_order = false;
  bool has_pixel_aspect_ratio = false;
  bool has_screen_window_center = false;
  bool has_screen_window_width = false;
  bool has_name = false;
  bool has_type = false;
  // Reset outputs so a partially-parsed header never exposes stale state.
  info->name.clear();
  info->type.clear();
  info->data_window.min_x = 0;
  info->data_window.min_y = 0;
  info->data_window.max_x = 0;
  info->data_window.max_y = 0;
  info->line_order = 0;  // @fixme
  info->display_window.min_x = 0;
  info->display_window.min_y = 0;
  info->display_window.max_x = 0;
  info->display_window.max_y = 0;
  info->screen_window_center[0] = 0.0f;
  info->screen_window_center[1] = 0.0f;
  info->screen_window_width = -1.0f;
  info->pixel_aspect_ratio = -1.0f;
  info->tiled = 0;
  info->tile_size_x = -1;
  info->tile_size_y = -1;
  info->tile_level_mode = -1;
  info->tile_rounding_mode = -1;
  info->attributes.clear();
  // Read attributes
  size_t orig_size = size;
  for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) {
    if (0 == size) {
      if (err) {
        (*err) += "Insufficient data size for attributes.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    else if (marker[0] == '\0') {
      // A '\0' attribute name terminates the attribute list.
      size--;
      break;
    }
    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      if (err) {
        (*err) += "Failed to read attribute.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;
    // For a multipart file, the version field 9th bit is 0.
    if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) {
      unsigned int x_size, y_size;
      unsigned char tile_mode;
      // `tiles` payload: two uint32 tile dimensions plus one mode byte.
      // Validate the size explicitly: the original code only asserted,
      // so a malformed file caused an out-of-bounds read in NDEBUG builds.
      if (data.size() != 9) {
        if (err) {
          (*err) += "Invalid `tiles` attribute size.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      memcpy(&x_size, &data.at(0), sizeof(int));
      memcpy(&y_size, &data.at(4), sizeof(int));
      tile_mode = data[8];
      tinyexr::swap4(&x_size);
      tinyexr::swap4(&y_size);
      if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) ||
          y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) {
        if (err) {
          (*err) = "Tile sizes were invalid.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }
      info->tile_size_x = static_cast<int>(x_size);
      info->tile_size_y = static_cast<int>(y_size);
      // mode = levelMode + roundingMode * 16
      info->tile_level_mode = tile_mode & 0x3;
      info->tile_rounding_mode = (tile_mode >> 4) & 0x1;
      info->tiled = 1;
    }
    else if (attr_name.compare("compression") == 0) {
      // One byte holding the compression enum. Guard against an empty
      // payload before reading data[0] (out-of-bounds in the original).
      if (data.empty()) {
        if (err) {
          (*err) += "Invalid `compression` attribute.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      bool ok = false;
      if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) {
        ok = true;
      }
      if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
        ok = true;
#else
        if (err) {
          (*err) = "PIZ compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }
      if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
        ok = true;
#else
        if (err) {
          (*err) = "ZFP compression is not supported.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
#endif
      }
      if (!ok) {
        if (err) {
          (*err) = "Unknown compression type.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }
      info->compression_type = static_cast<int>(data[0]);
      has_compression = true;
    }
    else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      if (!ReadChannelInfo(info->channels, data)) {
        if (err) {
          (*err) += "Failed to parse channel info.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      if (info->channels.size() < 1) {
        if (err) {
          (*err) += "# of channels is zero.\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      has_channels = true;
    }
    else if (attr_name.compare("dataWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->data_window.min_x, &data.at(0), sizeof(int));
        memcpy(&info->data_window.min_y, &data.at(4), sizeof(int));
        memcpy(&info->data_window.max_x, &data.at(8), sizeof(int));
        memcpy(&info->data_window.max_y, &data.at(12), sizeof(int));
        tinyexr::swap4(&info->data_window.min_x);
        tinyexr::swap4(&info->data_window.min_y);
        tinyexr::swap4(&info->data_window.max_x);
        tinyexr::swap4(&info->data_window.max_y);
        has_data_window = true;
      }
    }
    else if (attr_name.compare("displayWindow") == 0) {
      if (data.size() >= 16) {
        memcpy(&info->display_window.min_x, &data.at(0), sizeof(int));
        memcpy(&info->display_window.min_y, &data.at(4), sizeof(int));
        memcpy(&info->display_window.max_x, &data.at(8), sizeof(int));
        memcpy(&info->display_window.max_y, &data.at(12), sizeof(int));
        tinyexr::swap4(&info->display_window.min_x);
        tinyexr::swap4(&info->display_window.min_y);
        tinyexr::swap4(&info->display_window.max_x);
        tinyexr::swap4(&info->display_window.max_y);
        has_display_window = true;
      }
    }
    else if (attr_name.compare("lineOrder") == 0) {
      if (data.size() >= 1) {
        info->line_order = static_cast<int>(data[0]);
        has_line_order = true;
      }
    }
    else if (attr_name.compare("pixelAspectRatio") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float));
        tinyexr::swap4(&info->pixel_aspect_ratio);
        has_pixel_aspect_ratio = true;
      }
    }
    else if (attr_name.compare("screenWindowCenter") == 0) {
      if (data.size() >= 8) {
        memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float));
        memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float));
        tinyexr::swap4(&info->screen_window_center[0]);
        tinyexr::swap4(&info->screen_window_center[1]);
        has_screen_window_center = true;
      }
    }
    else if (attr_name.compare("screenWindowWidth") == 0) {
      if (data.size() >= sizeof(float)) {
        memcpy(&info->screen_window_width, &data.at(0), sizeof(float));
        tinyexr::swap4(&info->screen_window_width);
        has_screen_window_width = true;
      }
    }
    else if (attr_name.compare("chunkCount") == 0) {
      if (data.size() >= sizeof(int)) {
        memcpy(&info->chunk_count, &data.at(0), sizeof(int));
        tinyexr::swap4(&info->chunk_count);
      }
    }
    else if (attr_name.compare("name") == 0) {
      if (!data.empty() && data[0]) {
        // Ensure NUL termination before treating the payload as a C string.
        data.push_back(0);
        size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
        info->name.resize(len);
        info->name.assign(reinterpret_cast<const char*>(&data[0]), len);
        has_name = true;
      }
    }
    else if (attr_name.compare("type") == 0) {
      if (!data.empty() && data[0]) {
        data.push_back(0);
        size_t len = strlen(reinterpret_cast<const char*>(&data[0]));
        info->type.resize(len);
        info->type.assign(reinterpret_cast<const char*>(&data[0]), len);
        has_type = true;
      }
    }
    else {
      // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES)
      if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
        EXRAttribute attrib;
#ifdef _MSC_VER
        strncpy_s(attrib.name, attr_name.c_str(), 255);
        strncpy_s(attrib.type, attr_type.c_str(), 255);
#else
        strncpy(attrib.name, attr_name.c_str(), 255);
        strncpy(attrib.type, attr_type.c_str(), 255);
#endif
        attrib.name[255] = '\0';
        attrib.type[255] = '\0';
        attrib.size = static_cast<int>(data.size());
        if (data.empty()) {
          // Zero-length payloads are legal; the original code called
          // data.at(0) here, which throws std::out_of_range.
          attrib.value = NULL;
        }
        else {
          attrib.value = static_cast<unsigned char *>(malloc(data.size()));
          if (attrib.value) {
            memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0),
                   data.size());
          }
        }
        info->attributes.push_back(attrib);
      }
    }
  }
  // Check if required attributes exist
  {
    std::stringstream ss_err;
    if (!has_compression) {
      ss_err << "\"compression\" attribute not found in the header."
             << std::endl;
    }
    if (!has_channels) {
      ss_err << "\"channels\" attribute not found in the header." << std::endl;
    }
    if (!has_line_order) {
      ss_err << "\"lineOrder\" attribute not found in the header." << std::endl;
    }
    if (!has_display_window) {
      ss_err << "\"displayWindow\" attribute not found in the header."
             << std::endl;
    }
    if (!has_data_window) {
      ss_err << "\"dataWindow\" attribute not found in the header or invalid."
             << std::endl;
    }
    if (!has_pixel_aspect_ratio) {
      ss_err << "\"pixelAspectRatio\" attribute not found in the header."
             << std::endl;
    }
    if (!has_screen_window_width) {
      ss_err << "\"screenWindowWidth\" attribute not found in the header."
             << std::endl;
    }
    if (!has_screen_window_center) {
      ss_err << "\"screenWindowCenter\" attribute not found in the header."
             << std::endl;
    }
    if (version->multipart || version->non_image) {
      // Multipart/deep files additionally require part name and type.
      if (!has_name) {
        ss_err << "\"name\" attribute not found in the header."
               << std::endl;
      }
      if (!has_type) {
        ss_err << "\"type\" attribute not found in the header."
               << std::endl;
      }
    }
    if (!(ss_err.str().empty())) {
      if (err) {
        (*err) += ss_err.str();
      }
      return TINYEXR_ERROR_INVALID_HEADER;
    }
  }
  info->header_len = static_cast<unsigned int>(orig_size - size);
  return TINYEXR_SUCCESS;
}
// C++ HeaderInfo to C EXRHeader conversion.
// Copies parsed header fields into the C ABI struct and malloc()s the
// per-channel and custom-attribute arrays (released by FreeEXRHeader).
// Custom attribute values are shared by pointer, not deep-copied.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
  exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  exr_header->screen_window_center[0] = info.screen_window_center[0];
  exr_header->screen_window_center[1] = info.screen_window_center[1];
  exr_header->screen_window_width = info.screen_window_width;
  exr_header->chunk_count = info.chunk_count;
  exr_header->display_window.min_x = info.display_window.min_x;
  exr_header->display_window.min_y = info.display_window.min_y;
  exr_header->display_window.max_x = info.display_window.max_x;
  exr_header->display_window.max_y = info.display_window.max_y;
  exr_header->data_window.min_x = info.data_window.min_x;
  exr_header->data_window.min_y = info.data_window.min_y;
  exr_header->data_window.max_x = info.data_window.max_x;
  exr_header->data_window.max_y = info.data_window.max_y;
  exr_header->line_order = info.line_order;
  exr_header->compression_type = info.compression_type;
  exr_header->tiled = info.tiled;
  exr_header->tile_size_x = info.tile_size_x;
  exr_header->tile_size_y = info.tile_size_y;
  exr_header->tile_level_mode = info.tile_level_mode;
  exr_header->tile_rounding_mode = info.tile_rounding_mode;
  EXRSetNameAttr(exr_header, info.name.c_str());
  // Sanity-check the `type` attribute against the tiled flag.
  if (!info.type.empty()) {
    if (info.type == "scanlineimage") {
      assert(!exr_header->tiled);
    }
    else if (info.type == "tiledimage") {
      assert(exr_header->tiled);
    }
    else if (info.type == "deeptile") {
      exr_header->non_image = 1;
      assert(exr_header->tiled);
    }
    else if (info.type == "deepscanline") {
      exr_header->non_image = 1;
      assert(!exr_header->tiled);
    }
    else {
      assert(false);
    }
  }
  exr_header->num_channels = static_cast<int>(info.channels.size());
  exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
    strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
    strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
    // manually add '\0' for safety.
    exr_header->channels[c].name[255] = '\0';
    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }
  exr_header->pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }
  // Initially fill with values of `pixel_types`
  exr_header->requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }
  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());
  if (exr_header->num_custom_attributes > 0) {
    // TODO(syoyo): Report warning when # of attributes exceeds
    // `TINYEXR_MAX_CUSTOM_ATTRIBUTES`
    if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) {
      exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES;
    }
    exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc(
        sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes)));
    // Bound the copy by the (possibly clamped) count. The original looped
    // over info.attributes.size(), which overruns the allocation whenever
    // the attribute count exceeds TINYEXR_MAX_CUSTOM_ATTRIBUTES.
    // NOTE(review): attributes beyond the cap leak their `value` buffers;
    // that pre-existing behavior is unchanged here.
    for (size_t i = 0;
         i < static_cast<size_t>(exr_header->num_custom_attributes); i++) {
      memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name,
             256);
      memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type,
             256);
      exr_header->custom_attributes[i].size = info.attributes[i].size;
      // Just copy pointer
      exr_header->custom_attributes[i].value = info.attributes[i].value;
    }
  }
  else {
    exr_header->custom_attributes = NULL;
  }
  exr_header->header_len = info.header_len;
}
// Chunk-offset table read from an EXR file, organized by tile level.
struct OffsetData {
  OffsetData() : num_x_levels(0), num_y_levels(0) {}
  // offsets[level_index][tile_y][tile_x] -> absolute byte offset of a chunk.
  std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets;
  // Level counts along each axis (ripmaps have independent x/y counts).
  int num_x_levels;
  int num_y_levels;
};
// Flattens a 2-D tile level coordinate (lx, ly) into the linear index used
// by OffsetData::offsets for the given level mode.
int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) {
  if (tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
    return 0;
  }
  if (tile_level_mode == TINYEXR_TILE_MIPMAP_LEVELS) {
    // Mipmap levels are indexed by a single coordinate (lx == ly is
    // presumed by callers — TODO confirm).
    return lx;
  }
  if (tile_level_mode == TINYEXR_TILE_RIPMAP_LEVELS) {
    // Ripmaps form a 2-D grid of levels, stored row-major.
    return lx + ly * num_x_levels;
  }
  assert(false);
  return 0;
}
// Computes one dimension of a mip/rip level: each level halves the
// top-level size, rounded down or up per `tile_rounding_mode`, and is
// never smaller than 1 pixel.
static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) {
  assert(level >= 0);
  const int divisor = (int)(1u << (unsigned)level);
  int level_size = toplevel_size / divisor;
  // ROUND_UP: bump the size when the division truncated a remainder.
  if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP &&
      level_size * divisor < toplevel_size) {
    level_size += 1;
  }
  return std::max(level_size, 1);
}
// Decodes every tile of one resolution level into exr_image->tiles.
// `offset_data` supplies the per-tile chunk offsets, `channel_offset_list`
// and `pixel_data_size` describe the per-pixel channel layout, and
// `head`/`size` delimit the whole in-memory EXR data.
// Returns TINYEXR_SUCCESS or TINYEXR_ERROR_INVALID_DATA (details in *err).
static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header,
const OffsetData& offset_data,
const std::vector<size_t>& channel_offset_list,
int pixel_data_size,
const unsigned char* head, const size_t size,
std::string* err) {
int num_channels = exr_header->num_channels;
int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels);
int num_y_tiles = (int)offset_data.offsets[level_index].size();
assert(num_y_tiles);
int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
assert(num_x_tiles);
int num_tiles = num_x_tiles * num_y_tiles;
int err_code = TINYEXR_SUCCESS;
// Bit flags OR-ed into `error_flag` by the (possibly concurrent) per-tile
// decoders below; any nonzero value maps to TINYEXR_ERROR_INVALID_DATA.
enum {
EF_SUCCESS = 0,
EF_INVALID_DATA = 1,
EF_INSUFFICIENT_DATA = 2,
EF_FAILED_TO_DECODE = 4
};
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<unsigned> error_flag(EF_SUCCESS);
#else
unsigned error_flag(EF_SUCCESS);
#endif
// Although the spec says : "...the data window is subdivided into an array of smaller rectangles...",
// the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window.
#if 0
if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) &&
exr_image->level_x == 0 && exr_image->level_y == 0) {
if (err) {
(*err) += "Failed to decode tile data.\n";
}
err_code = TINYEXR_ERROR_INVALID_DATA;
}
#endif
exr_image->tiles = static_cast<EXRTile*>(
calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
// The tile loop below is assembled by the preprocessor in one of two
// shapes: a pool of C++11 threads pulling tile indices from an atomic
// counter, or a plain (optionally OpenMP-parallel) for loop. The loop
// body between the two #if blocks is shared by both shapes.
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> tile_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_tiles)) {
num_threads = int(num_tiles);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]()
{
int tile_idx = 0;
while ((tile_idx = tile_count++) < num_tiles) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels,
exr_header->requested_pixel_types, exr_header->tile_size_x,
exr_header->tile_size_y);
int x_tile = tile_idx % num_x_tiles;
int y_tile = tile_idx / num_x_tiles;
// Chunk layout:
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile];
if (offset + sizeof(int) * 5 > size) {
// Insufficient data size.
error_flag |= EF_INSUFFICIENT_DATA;
continue;
}
size_t data_size =
size_t(size - (offset + sizeof(int) * 5));
const unsigned char* data_ptr =
reinterpret_cast<const unsigned char*>(head + offset);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(&tile_coordinates[0]);
tinyexr::swap4(&tile_coordinates[1]);
tinyexr::swap4(&tile_coordinates[2]);
tinyexr::swap4(&tile_coordinates[3]);
// The level stored inside the chunk must match the level being decoded.
if (tile_coordinates[2] != exr_image->level_x) {
// Invalid data.
error_flag |= EF_INVALID_DATA;
continue;
}
if (tile_coordinates[3] != exr_image->level_y) {
// Invalid data.
error_flag |= EF_INVALID_DATA;
continue;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(&data_len);
if (data_len < 2 || size_t(data_len) > data_size) {
// Insufficient data size.
error_flag |= EF_INSUFFICIENT_DATA;
continue;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
bool ret = tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order,
exr_image->width, exr_image->height,
tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x,
exr_header->tile_size_y, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list);
if (!ret) {
// Failed to decode tile data.
error_flag |= EF_FAILED_TO_DECODE;
}
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
} // num_thread loop
for (auto& t : workers) {
t.join();
}
#else
} // parallel for
#endif
// Even in the event of an error, the reserved memory may be freed.
exr_image->num_channels = num_channels;
exr_image->num_tiles = static_cast<int>(num_tiles);
if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA;
if (err) {
if (error_flag & EF_INSUFFICIENT_DATA) {
(*err) += "Insufficient data length.\n";
}
if (error_flag & EF_FAILED_TO_DECODE) {
(*err) += "Failed to decode tile data.\n";
}
}
return err_code;
}
// Decodes all pixel chunks of a single EXR part into `exr_image` using the
// offset table in `offset_data`. For tiled images it walks the mip/rip
// level chain and delegates to DecodeTiledLevel(); for scanline images it
// decodes blocks of `num_scanline_blocks` rows, in parallel when C++11
// threads or OpenMP are available. `head`/`size` delimit the whole
// in-memory EXR data. Returns a TINYEXR_* status code; details go to *err.
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const OffsetData& offset_data,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
// Rows per chunk depend on the compression scheme: ZIP = 16, PIZ = 32,
// ZFP = 16, otherwise (NONE/RLE/ZIPS) a single scanline.
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
}
else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
#if TINYEXR_USE_ZFP
// ZFP parameters live in custom attributes; reject the file if absent.
tinyexr::ZFPCompressionParam zfp_compression_param;
if (!FindZFPCompressionParam(&zfp_compression_param,
exr_header->custom_attributes,
int(exr_header->num_custom_attributes), err)) {
return TINYEXR_ERROR_INVALID_HEADER;
}
#endif
}
if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
exr_header->data_window.max_y < exr_header->data_window.min_y) {
if (err) {
(*err) += "Invalid data window.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width =
exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
int data_height =
exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
// Do not allow too large data_width and data_height. header invalid?
{
if ((data_width > TINYEXR_DIMENSION_THRESHOLD) || (data_height > TINYEXR_DIMENSION_THRESHOLD)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
if (exr_header->tiled) {
if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) || (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) {
if (err) {
std::stringstream ss;
ss << "tile with or tile height too large. tile width: " << exr_header->tile_size_x
<< ", "
<< "tile height = " << exr_header->tile_size_y << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::atomic<bool> invalid_data(false);
#else
bool invalid_data(false);
#endif
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
// ONE_LEVEL / MIPMAP: a linear chain of levels (level_x == level_y).
if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) {
EXRImage* level_image = NULL;
for (int level = 0; level < offset_data.num_x_levels; ++level) {
if (!level_image) {
level_image = exr_image;
}
else {
level_image->next_level = new EXRImage;
InitEXRImage(level_image->next_level);
level_image = level_image->next_level;
}
level_image->width =
LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode);
level_image->height =
LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode);
level_image->level_x = level;
level_image->level_y = level;
int ret = DecodeTiledLevel(level_image, exr_header,
offset_data,
channel_offset_list,
pixel_data_size,
head, size,
err);
if (ret != TINYEXR_SUCCESS) return ret;
}
}
else {
// RIPMAP: the full cartesian product of x and y levels.
EXRImage* level_image = NULL;
for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y)
for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) {
if (!level_image) {
level_image = exr_image;
}
else {
level_image->next_level = new EXRImage;
InitEXRImage(level_image->next_level);
level_image = level_image->next_level;
}
level_image->width =
LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode);
level_image->height =
LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode);
level_image->level_x = level_x;
level_image->level_y = level_y;
int ret = DecodeTiledLevel(level_image, exr_header,
offset_data,
channel_offset_list,
pixel_data_size,
head, size,
err);
if (ret != TINYEXR_SUCCESS) return ret;
}
}
}
else { // scanline format
// Don't allow too large image(256GB * pixel_data_size or more). Workaround
// for #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown =
sizeof(void *) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
// The scanline-block loop is assembled by the preprocessor in one of two
// shapes: worker threads pulling block indices from an atomic counter, or
// a plain (optionally OpenMP-parallel) for loop over `num_blocks`.
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
std::vector<std::thread> workers;
std::atomic<int> y_count(0);
int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
if (num_threads > int(num_blocks)) {
num_threads = int(num_blocks);
}
for (int t = 0; t < num_threads; t++) {
workers.emplace_back(std::thread([&]() {
int y = 0;
while ((y = y_count++) < int(num_blocks)) {
#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
#endif
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
}
else {
// Chunk layout:
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size =
size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(&line_no);
tinyexr::swap4(&data_len);
if (size_t(data_len) > data_size) {
invalid_data = true;
}
else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
// Too large value. Assume this is invalid
// 2**20 = 1048576 = heuristic value.
invalid_data = true;
}
else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example
// `data_len < 4`
invalid_data = true;
}
else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window.max_y + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
}
else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
// overflow check
tinyexr_int64 lno =
static_cast<tinyexr_int64>(line_no) -
static_cast<tinyexr_int64>(exr_header->data_window.min_y);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
}
else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
}
else {
line_no -= exr_header->data_window.min_y;
}
if (line_no < 0) {
invalid_data = true;
}
else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(
exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
}
}));
}
for (auto &t : workers) {
t.join();
}
#else
} // omp parallel
#endif
}
if (invalid_data) {
if (err) {
// NOTE(review): `ss` below is declared but never used.
std::stringstream ss;
(*err) += "Invalid data found when decoding pixels.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
// Rebuilds a scanline offset table by walking the chunks sequentially,
// used when the table stored in the file is incomplete (zero entries).
// Each chunk is: 4 bytes y coordinate, 4 bytes data length, then
// `data_len` payload bytes. Returns false on truncated/implausible data.
static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);
  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }
    int y;
    unsigned int data_len;
    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));
    // Byte-swap BEFORE validating: on big-endian hosts the raw value read
    // from the little-endian file would otherwise be range-checked in the
    // wrong byte order, letting a bogus length through (or rejecting a
    // valid one).
    tinyexr::swap4(&y);
    tinyexr::swap4(&data_len);
    if (data_len >= size) {
      return false;
    }
    (*offsets)[i] = offset;
    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }
  return true;
}
// For x > 0, returns floor(log2(x)) by counting how many times x can be
// halved before reaching 1.
static int FloorLog2(unsigned x) {
  int result = 0;
  for (; x > 1; x >>= 1u) {
    ++result;
  }
  return result;
}
// For x > 0, returns ceil(log2(x)): floor(log2(x)) plus one when any bit
// was shifted out (i.e. x was not an exact power of two).
static int CeilLog2(unsigned x) {
  int result = 0;
  int saw_remainder = 0;
  for (; x > 1; x >>= 1u) {
    if (x & 1u) {
      saw_remainder = 1;
    }
    ++result;
  }
  return result + saw_remainder;
}
// log2 of x rounded according to the tile rounding mode (down -> floor,
// otherwise ceil).
static int RoundLog2(int x, int tile_rounding_mode) {
  if (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN) {
    return FloorLog2(static_cast<unsigned>(x));
  }
  return CeilLog2(static_cast<unsigned>(x));
}
// Number of levels along x implied by the header's tile level mode:
// 1 for ONE_LEVEL, log2 of the larger data-window dimension (+1) for
// MIPMAP, log2 of the width (+1) for RIPMAP.
static int CalculateNumXLevels(const EXRHeader* exr_header) {
  const int w = exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int h = exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      return 1;
    case TINYEXR_TILE_MIPMAP_LEVELS:
      return RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
    case TINYEXR_TILE_RIPMAP_LEVELS:
      return RoundLog2(w, exr_header->tile_rounding_mode) + 1;
    default:
      assert(false);
  }
  return 0;
}
// Number of levels along y implied by the header's tile level mode:
// 1 for ONE_LEVEL, log2 of the larger data-window dimension (+1) for
// MIPMAP, log2 of the height (+1) for RIPMAP.
static int CalculateNumYLevels(const EXRHeader* exr_header) {
  const int w = exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int h = exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      return 1;
    case TINYEXR_TILE_MIPMAP_LEVELS:
      return RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1;
    case TINYEXR_TILE_RIPMAP_LEVELS:
      return RoundLog2(h, exr_header->tile_rounding_mode) + 1;
    default:
      assert(false);
  }
  return 0;
}
static void CalculateNumTiles(std::vector<int>& numTiles,
int toplevel_size,
int size,
int tile_rounding_mode) {
for (unsigned i = 0; i < numTiles.size(); i++) {
int l = LevelSize(toplevel_size, i, tile_rounding_mode);
assert(l <= std::numeric_limits<int>::max() - size + 1);
numTiles[i] = (l + size - 1) / size;
}
}
// Computes the per-level tile counts along x and y for the header's
// data window, tile size and level mode.
static void PrecalculateTileInfo(std::vector<int>& num_x_tiles,
                                 std::vector<int>& num_y_tiles,
                                 const EXRHeader* exr_header) {
  const int data_w =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
  const int data_h =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
  num_x_tiles.resize(CalculateNumXLevels(exr_header));
  num_y_tiles.resize(CalculateNumYLevels(exr_header));
  CalculateNumTiles(num_x_tiles, data_w, exr_header->tile_size_x,
                    exr_header->tile_rounding_mode);
  CalculateNumTiles(num_y_tiles, data_h, exr_header->tile_size_y,
                    exr_header->tile_rounding_mode);
}
// Sets up a 1x1-level offset table holding `num_blocks` entries, as used
// by scanline images and images with an explicit chunkCount attribute.
static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) {
  offset_data.num_x_levels = 1;
  offset_data.num_y_levels = 1;
  offset_data.offsets.assign(
      1, std::vector<std::vector<tinyexr::tinyexr_uint64> >(
             1, std::vector<tinyexr::tinyexr_uint64>(num_blocks)));
}
// Shapes `offset_data.offsets` to match the tile layout of the image and
// returns the total number of tile blocks across all levels.
static int InitTileOffsets(OffsetData& offset_data,
                           const EXRHeader* exr_header,
                           const std::vector<int>& num_x_tiles,
                           const std::vector<int>& num_y_tiles) {
  int total_blocks = 0;
  offset_data.num_x_levels = static_cast<int>(num_x_tiles.size());
  offset_data.num_y_levels = static_cast<int>(num_y_tiles.size());
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
    case TINYEXR_TILE_MIPMAP_LEVELS:
      // One entry per mip level; x and y level counts must agree.
      assert(offset_data.num_x_levels == offset_data.num_y_levels);
      offset_data.offsets.resize(offset_data.num_x_levels);
      for (size_t l = 0; l < offset_data.offsets.size(); ++l) {
        offset_data.offsets[l].resize(num_y_tiles[l]);
        for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
          offset_data.offsets[l][dy].resize(num_x_tiles[l]);
          total_blocks += num_x_tiles[l];
        }
      }
      break;
    case TINYEXR_TILE_RIPMAP_LEVELS:
      // Full cartesian product of x and y levels, row-major over ly.
      offset_data.offsets.resize(
          static_cast<size_t>(offset_data.num_x_levels) *
          static_cast<size_t>(offset_data.num_y_levels));
      for (int ly = 0; ly < offset_data.num_y_levels; ++ly) {
        for (int lx = 0; lx < offset_data.num_x_levels; ++lx) {
          const int l = ly * offset_data.num_x_levels + lx;
          offset_data.offsets[l].resize(num_y_tiles[ly]);
          for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
            offset_data.offsets[l][dy].resize(num_x_tiles[lx]);
            total_blocks += num_x_tiles[lx];
          }
        }
      }
      break;
    default:
      assert(false);
  }
  return total_blocks;
}
static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) {
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx)
if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0)
return true;
return false;
}
// Checks that tile indices (dx, dy) at level (lx, ly) address an entry that
// actually exists in `offset_data` for the header's tile level mode.
static bool isValidTile(const EXRHeader* exr_header,
                        const OffsetData& offset_data,
                        int dx, int dy, int lx, int ly) {
  if (lx < 0 || ly < 0 || dx < 0 || dy < 0) {
    return false;
  }
  const int num_x_levels = offset_data.num_x_levels;
  const int num_y_levels = offset_data.num_y_levels;
  const size_t sx = static_cast<size_t>(dx);
  const size_t sy = static_cast<size_t>(dy);
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      // Only level (0, 0) exists.
      return lx == 0 && ly == 0 &&
             offset_data.offsets.size() > 0 &&
             offset_data.offsets[0].size() > sy &&
             offset_data.offsets[0][sy].size() > sx;
    case TINYEXR_TILE_MIPMAP_LEVELS:
      // Levels are indexed linearly by lx (lx == ly for mipmaps).
      return lx < num_x_levels &&
             ly < num_y_levels &&
             offset_data.offsets.size() > static_cast<size_t>(lx) &&
             offset_data.offsets[lx].size() > sy &&
             offset_data.offsets[lx][sy].size() > sx;
    case TINYEXR_TILE_RIPMAP_LEVELS: {
      // Levels form a row-major (lx, ly) grid.
      const size_t idx = static_cast<size_t>(lx) +
                         static_cast<size_t>(ly) *
                             static_cast<size_t>(num_x_levels);
      return lx < num_x_levels &&
             ly < num_y_levels &&
             offset_data.offsets.size() > idx &&
             offset_data.offsets[idx].size() > sy &&
             offset_data.offsets[idx][sy].size() > sx;
    }
    default:
      break;
  }
  return false;
}
// Rebuilds the tile offset table by walking the chunks sequentially, used
// when the table stored in the file contains invalid (zero) entries.
// Unlike the previous version, every read and skip is now bounds-checked
// against `head + size` (the `size` parameter was accepted but never used,
// so corrupt/truncated files could cause out-of-bounds reads); on bad input
// the function simply stops, leaving the remaining offsets unset.
static void ReconstructTileOffsets(OffsetData& offset_data,
                                   const EXRHeader* exr_header,
                                   const unsigned char* head,
                                   const unsigned char* marker,
                                   const size_t size,
                                   bool isMultiPartFile,
                                   bool isDeep) {
  int numXLevels = offset_data.num_x_levels;
  const unsigned char* end = head + size;
  // Fixed-size chunk prologue: [part number,] tileX, tileY, levelX, levelY,
  // then either two int64 byte counts (deep) or one int byte count (flat).
  const size_t prologue_size =
      (isMultiPartFile ? sizeof(int) : 0) + 4 * sizeof(int) +
      (isDeep ? 2 * sizeof(tinyexr::tinyexr_int64) : sizeof(int));
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 tileOffset = marker - head;
        if (marker > end ||
            static_cast<size_t>(end - marker) < prologue_size) {
          return;  // truncated input
        }
        if (isMultiPartFile) {
          // int partNumber; (unused here)
          marker += sizeof(int);
        }
        int tileX;
        memcpy(&tileX, marker, sizeof(int));
        tinyexr::swap4(&tileX);
        marker += sizeof(int);
        int tileY;
        memcpy(&tileY, marker, sizeof(int));
        tinyexr::swap4(&tileY);
        marker += sizeof(int);
        int levelX;
        memcpy(&levelX, marker, sizeof(int));
        tinyexr::swap4(&levelX);
        marker += sizeof(int);
        int levelY;
        memcpy(&levelY, marker, sizeof(int));
        tinyexr::swap4(&levelY);
        marker += sizeof(int);
        if (isDeep) {
          tinyexr::tinyexr_int64 packed_offset_table_size;
          memcpy(&packed_offset_table_size, marker,
                 sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(
              &packed_offset_table_size));
          marker += sizeof(tinyexr::tinyexr_int64);
          tinyexr::tinyexr_int64 packed_sample_size;
          memcpy(&packed_sample_size, marker,
                 sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(
              &packed_sample_size));
          marker += sizeof(tinyexr::tinyexr_int64);
          // Negative or oversized counts indicate corrupt data; checking
          // each against `size` first also rules out overflow in the sum.
          if (packed_offset_table_size < 0 || packed_sample_size < 0 ||
              static_cast<tinyexr::tinyexr_uint64>(packed_offset_table_size) > size ||
              static_cast<tinyexr::tinyexr_uint64>(packed_sample_size) > size) {
            return;
          }
          // next Int64 is unpacked sample size - skip that too
          tinyexr::tinyexr_uint64 skip_size =
              static_cast<tinyexr::tinyexr_uint64>(packed_offset_table_size) +
              static_cast<tinyexr::tinyexr_uint64>(packed_sample_size) + 8;
          if (skip_size > static_cast<size_t>(end - marker)) {
            return;
          }
          marker += skip_size;
        }
        else {
          int dataSize;
          memcpy(&dataSize, marker, sizeof(int));
          tinyexr::swap4(&dataSize);
          marker += sizeof(int);
          if (dataSize < 0 ||
              static_cast<size_t>(dataSize) >
                  static_cast<size_t>(end - marker)) {
            return;
          }
          marker += dataSize;
        }
        if (!isValidTile(exr_header, offset_data,
                         tileX, tileY, levelX, levelY))
          return;
        int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
        offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
      }
    }
  }
}
// Reads the raw offset table (one little-endian uint64 per chunk) that
// follows the header, advancing `marker` past it. Every offset is
// validated to lie inside the file data; on failure a TINYEXR_* error is
// returned and *err describes the problem.
static int ReadOffsets(OffsetData& offset_data,
                       const unsigned char* head,
                       const unsigned char*& marker,
                       const size_t size,
                       const char** err) {
  for (size_t l = 0; l < offset_data.offsets.size(); ++l) {
    for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (size_t dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
          tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        tinyexr::tinyexr_uint64 offset;
        memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
        tinyexr::swap8(&offset);
        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        offset_data.offsets[l][dy][dx] = offset;
        marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
      }
    }
  }
  return TINYEXR_SUCCESS;
}
// Top-level decode of a (single-part) EXR image: validates the header's
// data window, reads (and, when needed, reconstructs) the chunk offset
// table, then hands off to DecodeChunk(). `head` points at the start of
// the EXR data, `marker` at the offset table, `size` is the total byte
// count. Frees any partially-built image on failure.
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
const unsigned char *head,
const unsigned char *marker, const size_t size,
const char **err) {
if (exr_image == NULL || exr_header == NULL || head == NULL ||
marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// Scanlines per chunk: ZIP = 16, PIZ = 32, ZFP = 16, otherwise 1.
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
}
else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
}
else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
exr_header->data_window.max_x - exr_header->data_window.min_x ==
std::numeric_limits<int>::max()) {
// Issue 63
tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_width =
exr_header->data_window.max_x - exr_header->data_window.min_x + 1;
if (exr_header->data_window.max_y < exr_header->data_window.min_y ||
exr_header->data_window.max_y - exr_header->data_window.min_y ==
std::numeric_limits<int>::max()) {
tinyexr::SetErrorMessage("Invalid data height value", err);
return TINYEXR_ERROR_INVALID_DATA;
}
int data_height =
exr_header->data_window.max_y - exr_header->data_window.min_y + 1;
// Do not allow too large data_width and data_height. header invalid?
{
if (data_width > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("data width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (data_height > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("data height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
if (exr_header->tiled) {
if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("tile width too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) {
tinyexr::SetErrorMessage("tile height too large.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Read offset tables.
OffsetData offset_data;
size_t num_blocks = 0;
// For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header.
// If chunk_count > 0 then chunk_count must be equal to the calculated tile count.
if (exr_header->tiled) {
{
std::vector<int> num_x_tiles, num_y_tiles;
PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header);
num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles);
if (exr_header->chunk_count > 0) {
if (exr_header->chunk_count != num_blocks) {
tinyexr::SetErrorMessage("Invalid offset table size.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
int ret = ReadOffsets(offset_data, head, marker, size, err);
if (ret != TINYEXR_SUCCESS) return ret;
// Writers may emit zero placeholders; rebuild the table from the chunks.
if (IsAnyOffsetsAreInvalid(offset_data)) {
ReconstructTileOffsets(offset_data, exr_header,
head, marker, size,
exr_header->multipart, exr_header->non_image);
}
}
else if (exr_header->chunk_count > 0) {
// Use `chunkCount` attribute.
num_blocks = static_cast<size_t>(exr_header->chunk_count);
InitSingleResolutionOffsets(offset_data, num_blocks);
}
else {
// Scanline image without chunkCount: blocks = ceil(height / rows-per-block).
num_blocks = static_cast<size_t>(data_height) /
static_cast<size_t>(num_scanline_blocks);
if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
static_cast<size_t>(data_height)) {
num_blocks++;
}
InitSingleResolutionOffsets(offset_data, num_blocks);
}
if (!exr_header->tiled) {
std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
for (size_t y = 0; y < num_blocks; y++) {
tinyexr::tinyexr_uint64 offset;
// Issue #81
if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
offsets[y] = offset;
}
// If line offsets are invalid, we try to reconstruct it.
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
for (size_t y = 0; y < num_blocks; y++) {
if (offsets[y] <= 0) {
// TODO(syoyo) Report as warning?
// if (err) {
// stringstream ss;
// ss << "Incomplete lineOffsets." << std::endl;
// (*err) += ss.str();
//}
bool ret =
ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
if (ret) {
// OK
break;
}
else {
tinyexr::SetErrorMessage(
"Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
}
}
{
std::string e;
int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
#if 1
FreeEXRImage(exr_image);
#else
// release memory(if exists)
if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
if (exr_image->images[c]) {
free(exr_image->images[c]);
exr_image->images[c] = NULL;
}
}
free(exr_image->images);
exr_image->images = NULL;
}
#endif
}
return ret;
}
}
// Collects the unique layer prefixes (text before the final '.') of all
// channel names into `layer_names`, e.g. "diffuse.R" contributes the
// layer "diffuse". Channels without a layer prefix contribute nothing.
static void GetLayers(const EXRHeader &exr_header,
                      std::vector<std::string> &layer_names) {
  layer_names.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    const std::string full_name(exr_header.channels[c].name);
    const size_t pos = full_name.find_last_of('.');
    // A '.' at position 0 or at the very end does not denote a layer.
    if (pos == std::string::npos || pos == 0 || pos + 1 >= full_name.size()) {
      continue;
    }
    const std::string layer = full_name.substr(0, pos);
    if (std::find(layer_names.begin(), layer_names.end(), layer) ==
        layer_names.end()) {
      layer_names.push_back(layer);
    }
  }
}
// A channel entry within a layer: `index` is the channel's position in
// EXRHeader::channels, `name` is the channel name with the layer prefix
// stripped (e.g. "R" for "diffuse.R").
struct LayerChannel {
explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
size_t index;
std::string name;
};
// Collects the channels belonging to `layer_name` into `channels`.
// With an empty layer name every channel is kept and any layer prefix is
// stripped; otherwise only channels containing "<layer_name>." are kept,
// with the prefix stripped when it starts the channel name.
static void ChannelsInLayer(const EXRHeader &exr_header,
                            const std::string layer_name,
                            std::vector<LayerChannel> &channels) {
  channels.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string name(exr_header.channels[c].name);
    if (layer_name.empty()) {
      const size_t dot = name.find_last_of('.');
      if (dot != std::string::npos && dot < name.size()) {
        name = name.substr(dot + 1);
      }
    } else {
      const size_t hit = name.find(layer_name + '.');
      if (hit == std::string::npos) {
        continue;  // channel does not belong to the requested layer
      }
      if (hit == 0) {
        name = name.substr(layer_name.size() + 1);
      }
    }
    channels.push_back(LayerChannel(size_t(c), name));
  }
}
} // namespace tinyexr
// Public API: lists the layer names present in an EXR file. On success,
// *layer_names receives a malloc()ed array of strdup()ed strings (caller
// frees each string and then the array) and *num_layers the count.
// Multipart and deep files are rejected.
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
const char **err) {
EXRVersion exr_version;
EXRHeader exr_header;
InitEXRHeader(&exr_header);
{
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
tinyexr::SetErrorMessage("Invalid EXR header.", err);
return ret;
}
if (exr_version.multipart || exr_version.non_image) {
tinyexr::SetErrorMessage(
"Loading multipart or DeepImage is not supported in LoadEXR() API",
err);
return TINYEXR_ERROR_INVALID_DATA; // @fixme.
}
}
int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
if (ret != TINYEXR_SUCCESS) {
FreeEXRHeader(&exr_header);
return ret;
}
std::vector<std::string> layer_vec;
tinyexr::GetLayers(exr_header, layer_vec);
(*num_layers) = int(layer_vec.size());
// NOTE(review): malloc result is not checked; a zero-layer file performs
// a zero-byte allocation here.
(*layer_names) = static_cast<const char **>(
malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size())));
for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) {
#ifdef _MSC_VER
// MSVC deprecates POSIX strdup; use the conforming _strdup instead.
(*layer_names)[c] = _strdup(layer_vec[c].c_str());
#else
(*layer_names)[c] = strdup(layer_vec[c].c_str());
#endif
}
FreeEXRHeader(&exr_header);
return TINYEXR_SUCCESS;
}
// Public API: loads an EXR image from `filename` into a malloc()ed RGBA
// float buffer. Thin wrapper over LoadEXRWithLayer() with no layer
// selected (default/unprefixed channels).
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
const char **err) {
return LoadEXRWithLayer(out_rgba, width, height, filename,
/* layername */ NULL, err);
}
// Loads a named layer of an EXR image from `filename` into a tightly packed
// RGBA float buffer. *out_rgba is malloc()'ed here (4 * width * height
// floats); the caller owns and frees it. `layername` == NULL selects the
// channels without a layer prefix. HALF channels are requested as FLOAT, so
// decoded pixel data is always float. A single-channel layer is replicated
// into all four RGBA slots; otherwise R, G and B are required and a missing
// A channel is filled with 1.0. Multipart / deep images are rejected.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in *err).
int LoadEXRWithLayer(float **out_rgba, int *width, int *height,
                     const char *filename, const char *layername,
                     const char **err) {
  if (out_rgba == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);
  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      std::stringstream ss;
      ss << "Failed to open EXR file or read version info from EXR file. code("
         << ret << ")";
      tinyexr::SetErrorMessage(ss.str(), err);
      return ret;
    }
    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }
  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }
  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }
  // TODO: Probably limit loading to layers (channels) selected by layer index
  {
    int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }
  // RGBA
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  std::vector<std::string> layer_names;
  tinyexr::GetLayers(exr_header, layer_names);
  std::vector<tinyexr::LayerChannel> channels;
  // Collect only the channels belonging to the requested layer (stripped of
  // the "layer." prefix), so "R"/"G"/"B"/"A" below match within that layer.
  tinyexr::ChannelsInLayer(
      exr_header, layername == NULL ? "" : std::string(layername), channels);
  if (channels.size() < 1) {
    tinyexr::SetErrorMessage("Layer Not Found", err);
    FreeEXRHeader(&exr_header);
    FreeEXRImage(&exr_image);
    return TINYEXR_ERROR_LAYER_NOT_FOUND;
  }
  // Map at most the first four channels to RGBA indices.
  size_t ch_count = channels.size() < 4 ? channels.size() : 4;
  for (size_t c = 0; c < ch_count; c++) {
    const tinyexr::LayerChannel &ch = channels[c];
    if (ch.name == "R") {
      idxR = int(ch.index);
    }
    else if (ch.name == "G") {
      idxG = int(ch.index);
    }
    else if (ch.name == "B") {
      idxB = int(ch.index);
    }
    else if (ch.name == "A") {
      idxA = int(ch.index);
    }
  }
  if (channels.size() == 1) {
    int chIdx = int(channels.front().index);
    // Grayscale channel only.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Tiled image: copy each tile into its place in the full-resolution
      // output, clipping tiles that extend past the image border.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii = exr_image.tiles[it].offset_x *
                               static_cast<int>(exr_header.tile_size_x) +
                           i;
            const int jj = exr_image.tiles[it].offset_y *
                               static_cast<int>(exr_header.tile_size_y) +
                           j;
            const int idx = ii + jj * static_cast<int>(exr_image.width);
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            // Replicate the single channel into all four RGBA slots.
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[chIdx][srcIdx];
          }
        }
      }
    }
    else {
      // Scanline image: channel planes are contiguous width*height arrays.
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val =
            reinterpret_cast<float **>(exr_image.images)[chIdx][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  }
  else {
    // Assume RGB(A)
    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Tiled RGB(A): same tile placement as the grayscale path above.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            }
            else {
              // No alpha channel in this layer: default to fully opaque.
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
        }
      }
    }
    else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        }
        else {
          // No alpha channel in this layer: default to fully opaque.
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }
  (*width) = exr_image.width;
  (*height) = exr_image.height;
  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);
  return TINYEXR_SUCCESS;
}
int IsEXR(const char *filename) {
EXRVersion exr_version;
int ret = ParseEXRVersionFromFile(&exr_version, filename);
if (ret != TINYEXR_SUCCESS) {
return ret;
}
return TINYEXR_SUCCESS;
}
// Parses the EXR header attributes (everything after the 8-byte
// magic + version prefix) from `memory` into `exr_header`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in *err).
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  if (memory == NULL || exr_header == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory` or `exr_header` argument is null in "
        "ParseEXRHeaderFromMemory()",
        err);
    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Skip the magic number + version header; the attribute list starts there.
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;
  tinyexr::HeaderInfo info;
  info.clear();
  std::string err_str;
  int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);
  if (ret != TINYEXR_SUCCESS) {
    if (err && !err_str.empty()) {
      tinyexr::SetErrorMessage(err_str, err);
    }
  }
  // NOTE(review): on parse failure the code still converts the (possibly
  // partial) header info below and returns the error code — presumably so
  // callers can inspect partial data; confirm before relying on exr_header
  // contents when ret != TINYEXR_SUCCESS.
  ConvertHeader(exr_header, info);
  // Mirror the version flags into the header for caller convenience.
  exr_header->multipart = version->multipart ? 1 : 0;
  exr_header->non_image = version->non_image ? 1 : 0;
  return ret;
}
// Loads an EXR image from a memory buffer into a tightly packed RGBA float
// buffer. *out_rgba is malloc()'ed here (4 * width * height floats); the
// caller owns and frees it. HALF channels are requested as FLOAT, so the
// decoded data is always float. A single-channel image is replicated into
// all four RGBA slots; otherwise R, G and B are required and a missing A
// channel is filled with 1.0.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in *err).
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    std::stringstream ss;
    ss << "Failed to parse EXR version. code(" << ret << ")";
    tinyexr::SetErrorMessage(ss.str(), err);
    return ret;
  }
  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    // Fix: the header may have been partially converted; free it instead of
    // leaking channel/attribute allocations.
    FreeEXRHeader(&exr_header);
    return ret;
  }
  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }
  InitEXRImage(&exr_image);
  ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    // Fix: free the successfully parsed header on image-decode failure.
    FreeEXRHeader(&exr_header);
    return ret;
  }
  // RGBA
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    }
    else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    }
    else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    }
    else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }
  // TODO(syoyo): Refactor removing same code as used in LoadEXR().
  if (exr_header.num_channels == 1) {
    // Grayscale channel only.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Tiled image: copy each tile into its place in the full-resolution
      // output, clipping tiles that extend past the image border.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++) {
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            // Replicate the single channel into all four RGBA slots.
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[0][srcIdx];
            (*out_rgba)[4 * idx + 3] =
                reinterpret_cast<float **>(src)[0][srcIdx];
          }
        }
      }
    }
    else {
      // Scanline image: channel planes are contiguous width*height arrays.
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
        (*out_rgba)[4 * i + 0] = val;
        (*out_rgba)[4 * i + 1] = val;
        (*out_rgba)[4 * i + 2] = val;
        (*out_rgba)[4 * i + 3] = val;
      }
    }
  }
  else {
    // TODO(syoyo): Support non RGBA image.
    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      // Fix: was "@todo { free exr_image }" — free both header and image.
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      FreeEXRHeader(&exr_header);
      FreeEXRImage(&exr_image);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++)
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;
            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            }
            else {
              // No alpha channel: default to fully opaque.
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
      }
    }
    else {
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        }
        else {
          // No alpha channel: default to fully opaque.
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }
  (*width) = exr_image.width;
  (*height) = exr_image.height;
  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);
  return TINYEXR_SUCCESS;
}
// Reads the whole file `filename` into memory and decodes it via
// LoadEXRImageFromMemory(). `exr_header` must already be parsed/initialized.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in *err).
int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header,
                         const char *filename, const char **err) {
  if (exr_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  // Use the wide-char open so UTF-8 filenames work on Windows.
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    // TODO(syoyo): return wfopen_s erro code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  // Compute size. ftell() returns -1 on error, which the `< 16` check below
  // also catches (16 bytes is the minimum for magic + version + anything).
  fseek(fp, 0, SEEK_END);
  long fsize = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  if (fsize < 16) {
    fclose(fp);  // fix: the stream was leaked on this early return
    tinyexr::SetErrorMessage("File size too short " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(fsize);
  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    (void)ret;
  }
  return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize,
                                err);
}
// Decodes an EXR image from an in-memory buffer. `exr_header` must have been
// filled by a ParseEXRHeader* call (header_len != 0), since the chunk data
// offset is computed from it.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message in *err).
int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header,
                           const unsigned char *memory, const size_t size,
                           const char **err) {
  // Need an output image, input bytes, and at least enough data for the
  // magic number + version header.
  const bool bad_args = (exr_image == NULL) || (memory == NULL) ||
                        (size < tinyexr::kEXRVersionSize);
  if (bad_args) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (exr_header->header_len == 0) {
    tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  const unsigned char *head = memory;
  // Chunk data starts after the 8-byte magic/version prefix plus the header.
  const unsigned char *chunk_start = memory + exr_header->header_len + 8;
  return tinyexr::DecodeEXRImage(exr_image, exr_header, head, chunk_start,
                                 size, err);
}
namespace tinyexr
{
// out_data must be allocated initially with the block-header size
// of the current image(-part) type
// Serializes one block of pixel data (a scanline chunk or one tile) into
// `out_data`, converting each channel from its stored pixel type to the
// requested one, then compressing with `compression_type`.
// `out_data` must already contain the block header; the encoded payload is
// appended after it. Returns false only when the compression type is
// unsupported (asserts in debug builds).
// out_data must be allocated initially with the block-header size
// of the current image(-part) type
static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data,
                            const unsigned char* const* images,
                            const int* requested_pixel_types,
                            int compression_type,
                            int line_order,
                            int width,      // for tiled : tile.width
                            int height,     // for tiled : header.tile_size_y
                            int x_stride,   // for tiled : header.tile_size_x
                            int line_no,    // for tiled : 0
                            int num_lines,  // for tiled : tile.height
                            size_t pixel_data_size,
                            const std::vector<ChannelInfo>& channels,
                            const std::vector<size_t>& channel_offset_list,
                            const void* compression_param = 0)  // zfp compression param
{
  // Staging buffer holding the uncompressed, channel-interleaved-by-scanline
  // representation EXR expects (per scanline: channel 0 row, channel 1 row, ...).
  size_t buf_size = static_cast<size_t>(width) *
                    static_cast<size_t>(num_lines) *
                    static_cast<size_t>(pixel_data_size);
  //int last2bit = (buf_size & 3);
  // buf_size must be multiple of four
  //if(last2bit) buf_size += 4 - last2bit;
  std::vector<unsigned char> buf(buf_size);
  size_t start_y = static_cast<size_t>(line_no);
  // Convert each channel row-by-row. The swap2/swap4 calls put the values in
  // the EXR on-disk byte order (NOTE(review): presumably no-ops on
  // little-endian hosts — confirm against tinyexr's swap helpers).
  for (size_t c = 0; c < channels.size(); c++) {
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        // HALF in memory -> FLOAT on disk.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] *
              static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP16 h16;
            h16.u = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::FP32 f32 = half_to_float(h16);
            tinyexr::swap4(&f32.f);
            // line_ptr[x] = f32.f;
            // cpy4 avoids unaligned/aliasing stores through line_ptr.
            tinyexr::cpy4(line_ptr + x, &(f32.f));
          }
        }
      }
      else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        // HALF -> HALF: byte-swap only.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y *
                                          width) +
                      channel_offset_list[c] *
                      static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            unsigned short val = reinterpret_cast<const unsigned short * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::swap2(&val);
            // line_ptr[x] = val;
            tinyexr::cpy2(line_ptr + x, &val);
          }
        }
      }
      else {
        // HALF -> UINT is not supported.
        assert(0);
      }
    }
    else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        // FLOAT in memory -> HALF on disk.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
              &buf.at(static_cast<size_t>(pixel_data_size * y *
                                          width) +
                      channel_offset_list[c] *
                      static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            tinyexr::FP32 f32;
            f32.f = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::FP16 h16;
            h16 = float_to_half_full(f32);
            tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));
            // line_ptr[x] = h16.u;
            tinyexr::cpy2(line_ptr + x, &(h16.u));
          }
        }
      }
      else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        // FLOAT -> FLOAT: byte-swap only.
        for (int y = 0; y < num_lines; y++) {
          // Assume increasing Y
          float *line_ptr = reinterpret_cast<float *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * width) +
              channel_offset_list[c] *
              static_cast<size_t>(width)));
          for (int x = 0; x < width; x++) {
            float val = reinterpret_cast<const float * const *>(
                images)[c][(y + start_y) * x_stride + x];
            tinyexr::swap4(&val);
            // line_ptr[x] = val;
            tinyexr::cpy4(line_ptr + x, &val);
          }
        }
      }
      else {
        // FLOAT -> UINT is not supported.
        assert(0);
      }
    }
    else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      // UINT -> UINT: byte-swap only (no conversion choices).
      for (int y = 0; y < num_lines; y++) {
        // Assume increasing Y
        unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
            static_cast<size_t>(pixel_data_size * y * width) +
            channel_offset_list[c] * static_cast<size_t>(width)));
        for (int x = 0; x < width; x++) {
          unsigned int val = reinterpret_cast<const unsigned int * const *>(
              images)[c][(y + start_y) * x_stride + x];
          tinyexr::swap4(&val);
          // line_ptr[x] = val;
          tinyexr::cpy4(line_ptr + x, &val);
        }
      }
    }
  }
  // Compress the staging buffer and append the payload to out_data.
  if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(uncompressed)
    out_data.insert(out_data.end(), buf.begin(), buf.end());
  }
  else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
           (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
    std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
        static_cast<unsigned long>(buf.size())));
#else
    std::vector<unsigned char> block(
        compressBound(static_cast<uLong>(buf.size())));
#endif
    tinyexr::tinyexr_uint64 outSize = block.size();
    tinyexr::CompressZip(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  }
  else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
    // (buf.size() * 3) / 2 would be enough.
    std::vector<unsigned char> block((buf.size() * 3) / 2);
    tinyexr::tinyexr_uint64 outSize = block.size();
    tinyexr::CompressRle(&block.at(0), outSize,
                         reinterpret_cast<const unsigned char *>(&buf.at(0)),
                         static_cast<unsigned long>(buf.size()));
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
  }
  else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
    unsigned int bufLen =
        8192 + static_cast<unsigned int>(
                   2 * static_cast<unsigned int>(
                           buf.size()));  // @fixme { compute good bound. }
    std::vector<unsigned char> block(bufLen);
    unsigned int outSize = static_cast<unsigned int>(block.size());
    CompressPiz(&block.at(0), &outSize,
                reinterpret_cast<const unsigned char *>(&buf.at(0)),
                buf.size(), channels, width, num_lines);
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = outSize;
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
    assert(0);
#endif
  }
  else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
    const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param);
    std::vector<unsigned char> block;
    unsigned int outSize;
    tinyexr::CompressZfp(
        &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
        width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param);
    // 4 byte: scan line
    // 4 byte: data size
    // ~     : pixel data(compressed)
    unsigned int data_len = outSize;
    out_data.insert(out_data.end(), block.begin(), block.begin() + data_len);
#else
    assert(0);
#endif
  }
  else {
    // Unknown compression type.
    assert(0);
    return false;
  }
  return true;
}
// Encodes all tiles of one mip/rip level into data_list[start_index ..
// start_index + num_tiles - 1]. Each output entry is a 5-int tile header
// (tileX, tileY, levelX, levelY, payload size) followed by the compressed
// pixel data. Tiles are processed in parallel when C++11 threads or OpenMP
// are available. Returns TINYEXR_SUCCESS or TINYEXR_ERROR_INVALID_DATA.
static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header,
                            const std::vector<tinyexr::ChannelInfo>& channels,
                            std::vector<std::vector<unsigned char> >& data_list,
                            size_t start_index, // for data_list
                            int num_x_tiles, int num_y_tiles,
                            const std::vector<size_t>& channel_offset_list,
                            int pixel_data_size,
                            const void* compression_param, // must be set if zfp compression is enabled
                            std::string* err) {
  int num_tiles = num_x_tiles * num_y_tiles;
  assert(num_tiles == level_image->num_tiles);
  // A full-resolution (level 0,0) image smaller than a single tile in either
  // dimension is rejected as invalid.
  if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) &&
      level_image->level_x == 0 && level_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to encode tile data.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  // Shared failure flag; atomic when worker threads are used.
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  std::atomic<bool> invalid_data(false);
#else
  bool invalid_data(false);
#endif
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  // Thread pool: each worker pulls the next tile index from `tile_count`.
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);
  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }
  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]() {
      int i = 0;
      while ((i = tile_count++) < num_tiles) {
#else
// Use signed int since some OpenMP compiler doesn't allow unsigned type for
// `parallel for`
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < num_tiles; i++) {
#endif
    size_t tile_idx = static_cast<size_t>(i);
    size_t data_idx = tile_idx + start_index;
    // Tiles are laid out row-major across the level.
    int x_tile = i % num_x_tiles;
    int y_tile = i / num_x_tiles;
    EXRTile& tile = level_image->tiles[tile_idx];
    const unsigned char* const* images =
      static_cast<const unsigned char* const*>(tile.images);
    // Reserve room for the 5-int tile header; payload is appended after it.
    data_list[data_idx].resize(5 * sizeof(int));
    size_t data_header_size = data_list[data_idx].size();
    bool ret = EncodePixelData(data_list[data_idx],
                               images,
                               exr_header->requested_pixel_types,
                               exr_header->compression_type,
                               0, // increasing y
                               tile.width,
                               exr_header->tile_size_y,
                               exr_header->tile_size_x,
                               0,
                               tile.height,
                               pixel_data_size,
                               channels,
                               channel_offset_list,
                               compression_param);
    if (!ret) {
      invalid_data = true;
      continue;
    }
    assert(data_list[data_idx].size() > data_header_size);
    int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size);
    //tileX, tileY, levelX, levelY // pixel_data_size(int)
    memcpy(&data_list[data_idx][0], &x_tile, sizeof(int));
    memcpy(&data_list[data_idx][4], &y_tile, sizeof(int));
    memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int));
    memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int));
    memcpy(&data_list[data_idx][16], &data_len, sizeof(int));
    // Put header fields into the EXR on-disk byte order.
    swap4(reinterpret_cast<int*>(&data_list[data_idx][0]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][4]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][8]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][12]));
    swap4(reinterpret_cast<int*>(&data_list[data_idx][16]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
      }
    }));
  }
  for (auto &t : workers) {
    t.join();
  }
#else
  }  // omp parallel
#endif
  if (invalid_data) {
    if (err) {
      (*err) += "Failed to encode tile data.\n";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  return TINYEXR_SUCCESS;
}
// Number of scanlines stored per chunk for the given compression scheme:
// ZIP and ZFP pack 16 scanlines per chunk, PIZ packs 32, and everything
// else (NONE, RLE, ZIPS, unknown) stores one scanline per chunk.
static int NumScanlines(int compression_type) {
  switch (compression_type) {
    case TINYEXR_COMPRESSIONTYPE_ZIP:
    case TINYEXR_COMPRESSIONTYPE_ZFP:
      return 16;
    case TINYEXR_COMPRESSIONTYPE_PIZ:
      return 32;
    default:
      return 1;
  }
}
// Encodes every block (all tiles of all levels, or all scanline chunks) of
// one image part into `data_list`, and records each block's absolute file
// offset (byte-swapped to on-disk order) into `offset_data`. `chunk_offset`
// is the file position where this part's first block will be written;
// `total_size` receives the position just past the last block. When
// `is_multipart`, each block is preceded by a 4-byte part number on disk,
// accounted for via `doffset`. Returns TINYEXR_SUCCESS or an error code.
static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header,
                       const std::vector<ChannelInfo>& channels,
                       int num_blocks,
                       tinyexr_uint64 chunk_offset, // starting offset of current chunk
                       bool is_multipart,
                       OffsetData& offset_data, // output block offsets, must be initialized
                       std::vector<std::vector<unsigned char> >& data_list, // output
                       tinyexr_uint64& total_size, // output: ending offset of current chunk
                       std::string* err) {
  int num_scanlines = NumScanlines(exr_header->compression_type);
  data_list.resize(num_blocks);
  // Per-channel byte offset of each channel's row within one scanline's
  // interleaved data, plus the total bytes per pixel across channels.
  std::vector<size_t> channel_offset_list(
      static_cast<size_t>(exr_header->num_channels));
  int pixel_data_size = 0;
  {
    size_t channel_offset = 0;
    for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
      channel_offset_list[c] = channel_offset;
      if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        pixel_data_size += sizeof(unsigned short);
        channel_offset += sizeof(unsigned short);
      }
      else if (exr_header->requested_pixel_types[c] ==
               TINYEXR_PIXELTYPE_FLOAT) {
        pixel_data_size += sizeof(float);
        channel_offset += sizeof(float);
      }
      else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
        pixel_data_size += sizeof(unsigned int);
        channel_offset += sizeof(unsigned int);
      }
      else {
        // Unknown requested pixel type.
        assert(0);
      }
    }
  }
  const void* compression_param = 0;
#if TINYEXR_USE_ZFP
  tinyexr::ZFPCompressionParam zfp_compression_param;
  // Use ZFP compression parameter from custom attributes(if such a parameter
  // exists)
  {
    std::string e;
    bool ret = tinyexr::FindZFPCompressionParam(
        &zfp_compression_param, exr_header->custom_attributes,
        exr_header->num_custom_attributes, &e);
    if (!ret) {
      // Use predefined compression parameter.
      zfp_compression_param.type = 0;
      zfp_compression_param.rate = 2;
    }
    compression_param = &zfp_compression_param;
  }
#endif
  tinyexr_uint64 offset = chunk_offset;
  // In multipart files every chunk is preceded by a 4-byte part number.
  tinyexr_uint64 doffset = is_multipart ? 4u : 0u;
  if (exr_image->tiles) {
    // Walk the linked list of level images (mip or rip chain).
    const EXRImage* level_image = exr_image;
    size_t block_idx = 0;
    tinyexr::tinyexr_uint64 block_data_size = 0;
    int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
      offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels);
    for (int level_index = 0; level_index < num_levels; ++level_index) {
      if (!level_image) {
        if (err) {
          (*err) += "Invalid number of tiled levels for EncodeChunk\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      // The level chain must be ordered to match the offset table layout.
      int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y,
                                              exr_header->tile_level_mode, offset_data.num_x_levels);
      if (level_index_from_image != level_index) {
        if (err) {
          (*err) += "Incorrect level ordering in tiled image\n";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
      int num_y_tiles = (int)offset_data.offsets[level_index].size();
      assert(num_y_tiles);
      int num_x_tiles = (int)offset_data.offsets[level_index][0].size();
      assert(num_x_tiles);
      std::string e;
      int ret = EncodeTiledLevel(level_image,
                                 exr_header,
                                 channels,
                                 data_list,
                                 block_idx,
                                 num_x_tiles,
                                 num_y_tiles,
                                 channel_offset_list,
                                 pixel_data_size,
                                 compression_param,
                                 &e);
      if (ret != TINYEXR_SUCCESS) {
        if (!e.empty() && err) {
          (*err) += e;
        }
        return ret;
      }
      // Record each tile's file offset (stored byte-swapped for disk).
      for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j)
        for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) {
          offset_data.offsets[level_index][j][i] = offset;
          swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i]));
          offset += data_list[block_idx].size() + doffset;
          block_data_size += data_list[block_idx].size();
          ++block_idx;
        }
      level_image = level_image->next_level;
    }
    assert(block_idx == num_blocks);
    total_size = offset;
  }
  else { // scanlines
    std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
    // Thread pool: each worker pulls the next block index from `block_count`.
    std::atomic<bool> invalid_data(false);
    std::vector<std::thread> workers;
    std::atomic<int> block_count(0);
    int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks);
    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int i = 0;
        while ((i = block_count++) < num_blocks) {
#else
    bool invalid_data(false);
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < num_blocks; i++) {
#endif
      // Each block covers `num_scanlines` rows, clipped at the image bottom.
      int start_y = num_scanlines * i;
      int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height);
      int num_lines = end_Y - start_y;
      const unsigned char* const* images =
        static_cast<const unsigned char* const*>(exr_image->images);
      // Reserve room for the 2-int scanline header (y coordinate, data size).
      data_list[i].resize(2 * sizeof(int));
      size_t data_header_size = data_list[i].size();
      bool ret = EncodePixelData(data_list[i],
                                 images,
                                 exr_header->requested_pixel_types,
                                 exr_header->compression_type,
                                 0, // increasing y
                                 exr_image->width,
                                 exr_image->height,
                                 exr_image->width,
                                 start_y,
                                 num_lines,
                                 pixel_data_size,
                                 channels,
                                 channel_offset_list,
                                 compression_param);
      if (!ret) {
        invalid_data = true;
        continue; // "break" cannot be used with OpenMP
      }
      assert(data_list[i].size() > data_header_size);
      int data_len = static_cast<int>(data_list[i].size() - data_header_size);
      memcpy(&data_list[i][0], &start_y, sizeof(int));
      memcpy(&data_list[i][4], &data_len, sizeof(int));
      // Put header fields into the EXR on-disk byte order.
      swap4(reinterpret_cast<int*>(&data_list[i][0]));
      swap4(reinterpret_cast<int*>(&data_list[i][4]));
#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
        }
      }));
    }
    for (auto &t : workers) {
      t.join();
    }
#else
    }  // omp parallel
#endif
    if (invalid_data) {
      if (err) {
        (*err) += "Failed to encode scanline data.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }
    // Record each block's file offset (stored byte-swapped for disk).
    for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
      offsets[i] = offset;
      tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
      offset += data_list[i].size() + doffset;
    }
    total_size = static_cast<size_t>(offset);
  }
  return TINYEXR_SUCCESS;
}
// can save a single or multi-part image (no deep* formats)
// Serializes `num_parts` image/header pairs into one freshly malloc()ed
// buffer stored in *memory_out (caller owns it and must free()).
// Returns the total size in bytes on success, 0 on failure (with *err
// set when `err` is non-NULL).
static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images,
                                        const EXRHeader** exr_headers,
                                        unsigned int num_parts,
                                        unsigned char** memory_out, const char** err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory_out == NULL) {
    SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                    err);
    return 0;
  }
  {
    // Validate per-part compression settings up front, before any output
    // is produced.
    for (unsigned int i = 0; i < num_parts; ++i) {
      if (exr_headers[i]->compression_type < 0) {
        SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                        err);
        return 0;
      }
#if !TINYEXR_USE_PIZ
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
        SetErrorMessage("PIZ compression is not supported in this build",
                        err);
        return 0;
      }
#endif
#if !TINYEXR_USE_ZFP
      if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
        SetErrorMessage("ZFP compression is not supported in this build",
                        err);
        return 0;
      }
#else
      // FIX: was `exr_header->num_channels` — `exr_header` is not declared
      // in this function (compile error when TINYEXR_USE_ZFP is enabled).
      // Use the current part's header.
      for (int c = 0; c < exr_headers[i]->num_channels; ++c) {
        if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) {
          SetErrorMessage("Pixel type must be FLOAT for ZFP compression",
                          err);
          return 0;
        }
      }
#endif
    }
  }

  std::vector<unsigned char> memory;

  // Magic number.
  {
    const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
    memory.insert(memory.end(), header, header + 4);
  }

  // Version field.
  // using value from the first header
  int long_name = exr_headers[0]->long_name;
  {
    char marker[] = { 2, 0, 0, 0 };
    /* @todo
    if (exr_header->non_image) {
      marker[1] |= 0x8;
    }
    */
    // tiled
    if (num_parts == 1 && exr_images[0].tiles) {
      marker[1] |= 0x2;
    }
    // long_name
    if (long_name) {
      marker[1] |= 0x4;
    }
    // multipart
    if (num_parts > 1) {
      marker[1] |= 0x10;
    }
    memory.insert(memory.end(), marker, marker + 4);
  }

  // Compute per-part chunk counts and initialize offset-table layouts.
  int total_chunk_count = 0;
  std::vector<int> chunk_count(num_parts);
  std::vector<OffsetData> offset_data(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (!exr_images[i].tiles) {
      // Scanline part: one chunk per `num_scanlines` rows (rounded up).
      int num_scanlines = NumScanlines(exr_headers[i]->compression_type);
      chunk_count[i] =
          (exr_images[i].height + num_scanlines - 1) / num_scanlines;
      InitSingleResolutionOffsets(offset_data[i], chunk_count[i]);
      total_chunk_count += chunk_count[i];
    } else {
      // Tiled part: chunk count depends on the tile/level layout.
      std::vector<int> num_x_tiles, num_y_tiles;
      PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
      chunk_count[i] =
          InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles);
      total_chunk_count += chunk_count[i];
    }
  }

  // Write attributes to memory buffer.
  std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts);
  {
    std::set<std::string> partnames;
    for (unsigned int i = 0; i < num_parts; ++i) {
      // channels
      {
        std::vector<unsigned char> data;
        for (int c = 0; c < exr_headers[i]->num_channels; c++) {
          tinyexr::ChannelInfo info;
          info.p_linear = 0;
          info.pixel_type = exr_headers[i]->requested_pixel_types[c];
          info.x_sampling = 1;
          info.y_sampling = 1;
          info.name = std::string(exr_headers[i]->channels[c].name);
          channels[i].push_back(info);
        }
        tinyexr::WriteChannelInfo(data, channels[i]);
        tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
                                        static_cast<int>(data.size()));
      }
      {
        int comp = exr_headers[i]->compression_type;
        swap4(&comp);
        WriteAttributeToMemory(
            &memory, "compression", "compression",
            reinterpret_cast<const unsigned char*>(&comp), 1);
      }
      {
        int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 };
        swap4(&data[0]);
        swap4(&data[1]);
        swap4(&data[2]);
        swap4(&data[3]);
        WriteAttributeToMemory(
            &memory, "dataWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data), sizeof(int) * 4);
        int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 };
        swap4(&data0[0]);
        swap4(&data0[1]);
        swap4(&data0[2]);
        swap4(&data0[3]);
        // Note: must be the same across parts (currently, using value from the first header)
        WriteAttributeToMemory(
            &memory, "displayWindow", "box2i",
            reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4);
      }
      {
        unsigned char line_order = 0;  // @fixme { read line_order from EXRHeader }
        WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
                               &line_order, 1);
      }
      {
        // Note: must be the same across parts
        float aspectRatio = 1.0f;
        swap4(&aspectRatio);
        WriteAttributeToMemory(
            &memory, "pixelAspectRatio", "float",
            reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float));
      }
      {
        float center[2] = { 0.0f, 0.0f };
        swap4(&center[0]);
        swap4(&center[1]);
        WriteAttributeToMemory(
            &memory, "screenWindowCenter", "v2f",
            reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float));
      }
      {
        float w = 1.0f;
        swap4(&w);
        WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
                               reinterpret_cast<const unsigned char*>(&w),
                               sizeof(float));
      }
      if (exr_images[i].tiles) {
        // tiledesc: 2 x uint32 (tile size) + 1 byte (level mode | rounding).
        unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3);
        if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u);
        unsigned int datai[3] = { 0, 0, 0 };
        unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]);
        datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x);
        datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y);
        data[8] = tile_mode;
        swap4(reinterpret_cast<unsigned int*>(&data[0]));
        swap4(reinterpret_cast<unsigned int*>(&data[4]));
        WriteAttributeToMemory(
            &memory, "tiles", "tiledesc",
            reinterpret_cast<const unsigned char*>(data), 9);
      }
      // must be present for multi-part files - according to spec.
      if (num_parts > 1) {
        // name (must be unique per part)
        {
          size_t len = 0;
          if ((len = strlen(exr_headers[i]->name)) > 0) {
            partnames.insert(std::string(exr_headers[i]->name));
            if (partnames.size() != i + 1) {
              SetErrorMessage("'name' attributes must be unique for a multi-part file", err);
              return 0;
            }
            WriteAttributeToMemory(
                &memory, "name", "string",
                reinterpret_cast<const unsigned char*>(exr_headers[i]->name),
                static_cast<int>(len));
          } else {
            SetErrorMessage("Invalid 'name' attribute for a multi-part file", err);
            return 0;
          }
        }
        // type
        {
          const char* type = "scanlineimage";
          if (exr_images[i].tiles) type = "tiledimage";
          WriteAttributeToMemory(
              &memory, "type", "string",
              reinterpret_cast<const unsigned char*>(type),
              static_cast<int>(strlen(type)));
        }
        // chunkCount
        {
          WriteAttributeToMemory(
              &memory, "chunkCount", "int",
              reinterpret_cast<const unsigned char*>(&chunk_count[i]),
              4);
        }
      }
      // Custom attributes
      if (exr_headers[i]->num_custom_attributes > 0) {
        for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) {
          tinyexr::WriteAttributeToMemory(
              &memory, exr_headers[i]->custom_attributes[j].name,
              exr_headers[i]->custom_attributes[j].type,
              reinterpret_cast<const unsigned char*>(
                  exr_headers[i]->custom_attributes[j].value),
              exr_headers[i]->custom_attributes[j].size);
        }
      }
      { // end of header
        memory.push_back(0);
      }
    }
  }
  if (num_parts > 1) {
    // end of header list
    memory.push_back(0);
  }

  // First chunk begins after the headers plus the full offset table.
  tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64);
  tinyexr_uint64 total_size = 0;
  std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts);
  for (unsigned int i = 0; i < num_parts; ++i) {
    std::string e;
    int ret = EncodeChunk(&exr_images[i], exr_headers[i],
                          channels[i],
                          chunk_count[i],
                          // starting offset of current chunk after part-number
                          chunk_offset,
                          num_parts > 1,
                          offset_data[i],  // output: block offsets, must be initialized
                          data_lists[i],   // output
                          total_size,      // output
                          &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return 0;
    }
    chunk_offset = total_size;
  }

  // Allocating required memory
  if (total_size == 0) {  // something went wrong
    tinyexr::SetErrorMessage("Output memory size is zero", err);
    return 0;
  }
  (*memory_out) = static_cast<unsigned char*>(malloc(size_t(total_size)));
  if (*memory_out == NULL) {
    // FIX: the allocation result was previously used unchecked, writing
    // through NULL on OOM.
    tinyexr::SetErrorMessage("Cannot allocate memory", err);
    return 0;
  }

  // Writing header
  memcpy((*memory_out), &memory[0], memory.size());
  unsigned char* memory_ptr = *memory_out + memory.size();
  size_t sum = memory.size();

  // Writing offset data for chunks
  for (unsigned int i = 0; i < num_parts; ++i) {
    if (exr_images[i].tiles) {
      // RIPMAP stores a full x-by-y grid of levels; other modes store a
      // single level sequence.
      int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ?
          offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels);
      for (int level_index = 0; level_index < num_levels; ++level_index) {
        for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) {
          size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size();
          sum += num_bytes;
          assert(sum <= total_size);
          memcpy(memory_ptr,
                 reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]),
                 num_bytes);
          memory_ptr += num_bytes;
        }
      }
    } else {
      size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]);
      sum += num_bytes;
      assert(sum <= total_size);
      std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0];
      memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes);
      memory_ptr += num_bytes;
    }
  }

  // Writing chunk data
  for (unsigned int i = 0; i < num_parts; ++i) {
    for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) {
      if (num_parts > 1) {
        // Multi-part chunks are prefixed with a 4-byte part number.
        sum += 4;
        assert(sum <= total_size);
        unsigned int part_number = i;
        swap4(&part_number);
        memcpy(memory_ptr, &part_number, 4);
        memory_ptr += 4;
      }
      sum += data_lists[i][j].size();
      assert(sum <= total_size);
      memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size());
      memory_ptr += data_lists[i][j].size();
    }
  }
  assert(sum == total_size);
  return size_t(total_size);  // OK
}
} // tinyexr
// Serialize a single-part EXR image to memory.
// Thin wrapper over the N-part writer; returns the serialized size in
// bytes (0 on failure, with *err set). Caller must free() *memory_out.
size_t SaveEXRImageToMemory(const EXRImage* exr_image,
                            const EXRHeader* exr_header,
                            unsigned char** memory_out, const char** err) {
  const EXRHeader* headers[1] = { exr_header };
  return tinyexr::SaveEXRNPartImageToMemory(exr_image, headers, 1, memory_out,
                                            err);
}
// Serialize a single-part EXR image and write it to `filename`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (with *err set when
// `err` is non-NULL).
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
                       const char *filename, const char **err) {
  // FIX: also reject a NULL exr_header — it was dereferenced below
  // (compression_type) without being checked.
  if (exr_image == NULL || exr_header == NULL || filename == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif
#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    fclose(fp);  // FIX: the stream was leaked on this early-return path
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);
  fclose(fp);
  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  return TINYEXR_SUCCESS;
}
// Serialize a multi-part (>= 2 parts) EXR image set to memory.
// Validates arguments, then delegates to the N-part writer. Returns the
// serialized size in bytes (0 on failure, with *err set).
size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images,
                                     const EXRHeader** exr_headers,
                                     unsigned int num_parts,
                                     unsigned char** memory_out, const char** err) {
  const bool bad_args = (exr_images == NULL) || (exr_headers == NULL) ||
                        (num_parts < 2) || (memory_out == NULL);
  if (bad_args) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory",
                             err);
    return 0;
  }
  return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts,
                                            memory_out, err);
}
// Serialize a multi-part (>= 2 parts) EXR image set and write it to
// `filename`. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int SaveEXRMultipartImageToFile(const EXRImage* exr_images,
                                const EXRHeader** exr_headers,
                                unsigned int num_parts,
                                const char* filename,
                                const char** err) {
  // FIX: also reject a NULL filename — it was passed to
  // UTF8ToWchar()/fopen() unchecked.
  if (exr_images == NULL || exr_headers == NULL || num_parts < 2 ||
      filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "wb");
#endif
#else
  fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers,
                                                  num_parts, &mem, err);
  if (mem_size == 0) {
    fclose(fp);  // FIX: the stream was leaked on this early-return path
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }
  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);
  fclose(fp);
  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }
  return TINYEXR_SUCCESS;
}
// Load a deep (scanline, ZIP/ZIPS-compressed) EXR image from `filename`
// into `deep_image`. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
// NOTE(review): buffers attached to deep_image are not released on the
// error paths below — callers should treat a failed load as leaving
// deep_image in an indeterminate state; consider a cleanup pass. TODO confirm.
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
  // FIX: also reject a NULL filename before handing it to fopen().
  if (deep_image == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
#ifdef _WIN32
  FILE *fp = NULL;
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#endif
  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);
  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  std::vector<char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);
  const char *head = &buf[0];
  const char *marker = &buf[0];
  // Header check.
  {
    const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
    if (memcmp(marker, header, 4) != 0) {
      tinyexr::SetErrorMessage("Invalid magic number", err);
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }
  // Version, scanline.
  {
    // Require ver 2.0, scanline, with the deep bit (0x800) set,
    // i.e. bytes [2, 8, 0, 0].
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
      tinyexr::SetErrorMessage("Unsupported version or scanline", err);
      return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
    }
    marker += 4;
  }
  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int num_scanline_blocks = 1;  // 16 for ZIP compression.
  int compression_type = -1;
  int num_channels = -1;
  std::vector<tinyexr::ChannelInfo> channels;
  // Read attributes
  size_t size = filesize - tinyexr::kEXRVersionSize;
  for (;;) {
    if (0 == size) {
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      marker++;
      size--;
      break;
    }
    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      std::stringstream ss;
      ss << "Failed to parse attribute\n";
      tinyexr::SetErrorMessage(ss.str(), err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;
    if (attr_name.compare("compression") == 0) {
      compression_type = data[0];
      if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
        std::stringstream ss;
        ss << "Unsupported compression type : " << compression_type;
        tinyexr::SetErrorMessage(ss.str(), err);
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }
      if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
        num_scanline_blocks = 16;
      }
    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int
      if (!tinyexr::ReadChannelInfo(channels, data)) {
        tinyexr::SetErrorMessage("Failed to parse channel info", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      num_channels = static_cast<int>(channels.size());
      if (num_channels < 1) {
        tinyexr::SetErrorMessage("Invalid channels format", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    } else if (attr_name.compare("dataWindow") == 0) {
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));
      tinyexr::swap4(&dx);
      tinyexr::swap4(&dy);
      tinyexr::swap4(&dw);
      tinyexr::swap4(&dh);
    } else if (attr_name.compare("displayWindow") == 0) {
      // Parsed but currently unused.
      int x;
      int y;
      int w;
      int h;
      memcpy(&x, &data.at(0), sizeof(int));
      memcpy(&y, &data.at(4), sizeof(int));
      memcpy(&w, &data.at(8), sizeof(int));
      memcpy(&h, &data.at(12), sizeof(int));
      tinyexr::swap4(&x);
      tinyexr::swap4(&y);
      tinyexr::swap4(&w);
      tinyexr::swap4(&h);
    }
  }
  // NOTE(review): asserts are no protection in release builds; malformed
  // input reaching here is undefined behavior — consider explicit checks.
  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(num_channels >= 1);
  int data_width = dw - dx + 1;
  int data_height = dh - dy + 1;
  // (removed an unused `std::vector<float> image` scratch allocation of
  // data_width * data_height * 4 floats — it was never read or written)
  // Read offset tables.
  int num_blocks = data_height / num_scanline_blocks;
  if (num_blocks * num_scanline_blocks < data_height) {
    num_blocks++;
  }
  std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));
  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    tinyexr::tinyexr_int64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
    marker += sizeof(tinyexr::tinyexr_int64);  // = 8
    offsets[y] = offset;
  }
#if TINYEXR_USE_PIZ
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
    // OK
  } else {
    tinyexr::SetErrorMessage("Unsupported compression format", err);
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  }
  deep_image->image = static_cast<float ***>(
      malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  for (int c = 0; c < num_channels; c++) {
    deep_image->image[c] = static_cast<float **>(
        malloc(sizeof(float *) * static_cast<size_t>(data_height)));
    // Per-row sample buffers are allocated later, once each scanline
    // block's sample count is known. (An empty per-row loop here was
    // removed — it did nothing.)
  }
  deep_image->offset_table = static_cast<int **>(
      malloc(sizeof(int *) * static_cast<size_t>(data_height)));
  for (int y = 0; y < data_height; y++) {
    deep_image->offset_table[y] = static_cast<int *>(
        malloc(sizeof(int) * static_cast<size_t>(data_width)));
  }
  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    const unsigned char *data_ptr =
        reinterpret_cast<const unsigned char *>(head + offsets[y]);
    // int: y coordinate
    // int64: packed size of pixel offset table
    // int64: packed size of sample data
    // int64: unpacked size of sample data
    // compressed pixel offset table
    // compressed sample data
    int line_no;
    tinyexr::tinyexr_int64 packedOffsetTableSize;
    tinyexr::tinyexr_int64 packedSampleDataSize;
    tinyexr::tinyexr_int64 unpackedSampleDataSize;
    memcpy(&line_no, data_ptr, sizeof(int));
    memcpy(&packedOffsetTableSize, data_ptr + 4,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&packedSampleDataSize, data_ptr + 12,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&unpackedSampleDataSize, data_ptr + 20,
           sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap4(&line_no);
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));
    std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));
    // decode pixel offset table.
    {
      unsigned long dstLen =
          static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
      if (!tinyexr::DecompressZip(
              reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
              &dstLen, data_ptr + 28,
              static_cast<unsigned long>(packedOffsetTableSize))) {
        // FIX: was `return false;` — in this int-returning function that
        // evaluates to 0 (== TINYEXR_SUCCESS), reporting success on a
        // decompression failure.
        return TINYEXR_ERROR_INVALID_DATA;
      }
      assert(dstLen == pixelOffsetTable.size() * sizeof(int));
      for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
        deep_image->offset_table[y][i] = pixelOffsetTable[i];
      }
    }
    std::vector<unsigned char> sample_data(
        static_cast<size_t>(unpackedSampleDataSize));
    // decode sample data.
    {
      unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
      if (dstLen) {
        if (!tinyexr::DecompressZip(
                reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
                data_ptr + 28 + packedOffsetTableSize,
                static_cast<unsigned long>(packedSampleDataSize))) {
          // FIX: was `return false;` (see above).
          return TINYEXR_ERROR_INVALID_DATA;
        }
        assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
      }
    }
    // decode sample
    int sampleSize = -1;
    std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
    {
      // Byte offset of each channel within one interleaved sample.
      int channel_offset = 0;
      for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
        channel_offset_list[i] = channel_offset;
        if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) {  // UINT
          channel_offset += 4;
        } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) {  // half
          channel_offset += 2;
        } else if (channels[i].pixel_type ==
                   TINYEXR_PIXELTYPE_FLOAT) {  // float
          channel_offset += 4;
        } else {
          assert(0);
        }
      }
      sampleSize = channel_offset;
    }
    assert(sampleSize >= 2);
    assert(static_cast<size_t>(
               pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
               sampleSize) == sample_data.size());
    int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;
    //
    // Alloc memory
    //
    //
    // pixel data is stored as image[channels][pixel_samples]
    //
    {
      tinyexr::tinyexr_uint64 data_offset = 0;
      for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
        deep_image->image[c][y] = static_cast<float *>(
            malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));
        if (channels[c].pixel_type == 0) {  // UINT
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            unsigned int ui;
            unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(int)));
            tinyexr::cpy4(&ui, src_ptr);
            deep_image->image[c][y][x] = static_cast<float>(ui);  // @fixme
          }
          data_offset +=
              sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
        } else if (channels[c].pixel_type == 1) {  // half
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            tinyexr::FP16 f16;
            const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(short)));
            tinyexr::cpy2(&(f16.u), src_ptr);
            tinyexr::FP32 f32 = half_to_float(f16);
            deep_image->image[c][y][x] = f32.f;
          }
          data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
        } else {  // float
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            float f;
            const float *src_ptr = reinterpret_cast<float *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(float)));
            tinyexr::cpy4(&f, src_ptr);
            deep_image->image[c][y][x] = f;
          }
          data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
        }
      }
    }
  }  // y
  deep_image->width = data_width;
  deep_image->height = data_height;
  deep_image->channel_names = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
    deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deep_image->num_channels = num_channels;
  return TINYEXR_SUCCESS;
}
// Reset an EXRImage to a known-empty state: all counts/levels zero and
// all storage pointers NULL. Safe to call with NULL (no-op).
void InitEXRImage(EXRImage *exr_image) {
  if (!exr_image) {
    return;
  }
  exr_image->width = 0;
  exr_image->height = 0;
  exr_image->num_channels = 0;
  exr_image->num_tiles = 0;
  exr_image->level_x = 0;
  exr_image->level_y = 0;
  exr_image->images = NULL;
  exr_image->tiles = NULL;
  exr_image->next_level = NULL;
}
// Release an error-message string handed out via the `err` out-parameters
// of the tinyexr API. Accepts NULL (no-op).
void FreeEXRErrorMessage(const char *msg) {
  if (msg != NULL) {
    free(const_cast<char *>(msg));
  }
}
// Zero-initialize an EXRHeader in place. Safe to call with NULL (no-op).
void InitEXRHeader(EXRHeader *exr_header) {
  if (!exr_header) {
    return;
  }
  memset(exr_header, 0, sizeof(*exr_header));
}
// Release all heap storage owned by an EXRHeader (channel list, pixel
// type arrays, custom attributes) and clear its name buffer.
// Returns TINYEXR_SUCCESS, or TINYEXR_ERROR_INVALID_ARGUMENT on NULL.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  // free(NULL) is a defined no-op, so the former `if (ptr) free(ptr)`
  // guards were redundant and have been removed.
  free(exr_header->channels);
  free(exr_header->pixel_types);
  free(exr_header->requested_pixel_types);
  for (int i = 0; i < exr_header->num_custom_attributes; i++) {
    free(exr_header->custom_attributes[i].value);
  }
  free(exr_header->custom_attributes);
  // Clear the embedded (non-heap) name buffer.
  EXRSetNameAttr(exr_header, NULL);
  return TINYEXR_SUCCESS;
}
// Store `name` into the header's fixed 256-byte name buffer, truncating
// to 255 characters so the result stays NUL-terminated. Passing NULL (or
// an empty string) clears the name. Safe to call with a NULL header.
void EXRSetNameAttr(EXRHeader* exr_header, const char* name) {
  if (exr_header == NULL) {
    return;
  }
  memset(exr_header->name, 0, 256);
  if (name == NULL) {
    return;
  }
  size_t len = strlen(name);
  if (len > 255) {
    len = 255;  // leave room for the terminating NUL
  }
  if (len > 0) {
    memcpy(exr_header->name, name, len);
  }
}
int EXRNumLevels(const EXRImage* exr_image) {
if (exr_image == NULL) return 0;
if (exr_image->images) return 1; // scanlines
int levels = 1;
const EXRImage* level_image = exr_image;
while ((level_image = level_image->next_level)) ++levels;
return levels;
}
// Release all pixel storage owned by an EXRImage, including any chained
// coarser levels (freed recursively first). Returns TINYEXR_SUCCESS, or
// TINYEXR_ERROR_INVALID_ARGUMENT on NULL.
int FreeEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  // Chained levels are heap-allocated EXRImage objects; free their
  // contents, then the objects themselves.
  if (exr_image->next_level) {
    FreeEXRImage(exr_image->next_level);
    delete exr_image->next_level;
  }
  // Scanline storage: one buffer per channel.
  if (exr_image->images) {
    for (int c = 0; c < exr_image->num_channels; c++) {
      free(exr_image->images[c]);  // free(NULL) is a no-op
    }
    free(exr_image->images);
  }
  // Tiled storage: per-tile channel buffers.
  if (exr_image->tiles) {
    for (int tid = 0; tid < exr_image->num_tiles; tid++) {
      if (exr_image->tiles[tid].images) {
        for (int c = 0; c < exr_image->num_channels; c++) {
          free(exr_image->tiles[tid].images[c]);
        }
        free(exr_image->tiles[tid].images);
      }
    }
    free(exr_image->tiles);
  }
  return TINYEXR_SUCCESS;
}
// Read `filename` fully into memory and parse its (single-part) EXR
// header. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);
  if (filesize == 0) {
    // FIX: an empty file previously reached `&buf[0]` on an empty vector
    // (undefined behavior) and `buf.at(0)` (throws std::out_of_range).
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }
  return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
                                  err);
}
// Parse all part headers of a multi-part EXR from memory. On success,
// *exr_headers receives a malloc()ed array of malloc()ed EXRHeader
// objects (caller frees each with FreeEXRHeader plus free(), then the
// array) and *num_headers the part count.
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
                                      int *num_headers,
                                      const EXRVersion *exr_version,
                                      const unsigned char *memory, size_t size,
                                      const char **err) {
  if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
      exr_version == NULL) {
    // Invalid argument
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Data size too short", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;
  std::vector<tinyexr::HeaderInfo> infos;
  for (;;) {
    tinyexr::HeaderInfo info;
    info.clear();
    std::string err_str;
    bool empty_header = false;
    int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
                             marker, marker_size);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage(err_str, err);
      return ret;
    }
    if (empty_header) {
      marker += 1;  // skip '\0'
      break;
    }
    // `chunkCount` must exist in the header.
    if (info.chunk_count == 0) {
      tinyexr::SetErrorMessage(
          "`chunkCount' attribute is not found in the header.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    infos.push_back(info);
    // FIX: reject a header length that exceeds the remaining buffer
    // (the unsigned subtraction below would otherwise underflow).
    if (info.header_len > marker_size) {
      tinyexr::SetErrorMessage("Invalid EXR header size.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    // move to next header.
    marker += info.header_len;
    // FIX: was `size -= info.header_len;` — the bound passed to
    // ParseEXRHeader above is `marker_size`, so that is the value that
    // must shrink as `marker` advances; decrementing `size` left a stale
    // (too-large) bound and risked reading past the buffer.
    marker_size -= info.header_len;
  }
  // allocate memory for EXRHeader and create array of EXRHeader pointers.
  (*exr_headers) =
      static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
  for (size_t i = 0; i < infos.size(); i++) {
    EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));
    memset(exr_header, 0, sizeof(EXRHeader));
    ConvertHeader(exr_header, infos[i]);
    exr_header->multipart = exr_version->multipart ? 1 : 0;
    (*exr_headers)[i] = exr_header;
  }
  (*num_headers) = static_cast<int>(infos.size());
  return TINYEXR_SUCCESS;
}
// Read `filename` fully into memory and parse all multi-part EXR headers
// (see ParseEXRMultipartHeaderFromMemory for output ownership).
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
                                    const EXRVersion *exr_version,
                                    const char *filename, const char **err) {
  if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);
  if (filesize == 0) {
    // FIX: an empty file previously reached `&buf[0]` on an empty vector
    // (undefined behavior) and `&buf.at(0)` (throws std::out_of_range).
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }
  return ParseEXRMultipartHeaderFromMemory(
      exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}
// Parse the 8-byte EXR "version" preamble (magic number + version/flags)
// from memory into `version`. Returns TINYEXR_SUCCESS or an error code.
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
                              size_t size) {
  if (version == NULL || memory == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }
  const unsigned char *marker = memory;
  // Magic number check.
  {
    const char header[] = { 0x76, 0x2f, 0x31, 0x01 };
    if (memcmp(marker, header, 4) != 0) {
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }
  version->tiled = false;
  version->long_name = false;
  version->non_image = false;
  version->multipart = false;
  // Parse version header: byte 4 = version number, byte 5 = flag bits.
  {
    // must be 2 (only OpenEXR version 2 is supported)
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }
    // FIX: removed a dead `if (version == NULL)` re-check here — NULL is
    // already rejected at the top of the function.
    version->version = 2;
    if (marker[1] & 0x2) {  // 9th bit
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit
      version->long_name = true;
    }
    if (marker[1] & 0x8) {  // 11th bit
      version->non_image = true;  // (deep image)
    }
    if (marker[1] & 0x10) {  // 12th bit
      version->multipart = true;
    }
  }
  return TINYEXR_SUCCESS;
}
// Read the first 8 bytes of `filename` and parse the EXR version
// preamble. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)  // MSVC, MinGW gcc or clang
  errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (err != 0) {
    // TODO(syoyo): return wfopen_s erro code
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  size_t file_size;
  // Compute size
  fseek(fp, 0, SEEK_END);
  file_size = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);
  if (file_size < tinyexr::kEXRVersionSize) {
    fclose(fp);  // FIX: the stream was leaked on this early-return path
    return TINYEXR_ERROR_INVALID_FILE;
  }
  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);
  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }
  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}
// Decodes all parts of a multi-part EXR image already resident in `memory`.
// `exr_headers` must hold `num_parts` headers previously parsed with
// ParseEXRMultipartHeaderFrom*(), each with a nonzero header_len.
// On success each exr_images[i] is filled with the decoded pixels of part i.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message via `err`).
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
const EXRHeader **exr_headers,
unsigned int num_parts,
const unsigned char *memory,
const size_t size, const char **err) {
if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
tinyexr::SetErrorMessage(
"Invalid argument for LoadEXRMultipartImageFromMemory()", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
// compute total header size.
size_t total_header_size = 0;
for (unsigned int i = 0; i < num_parts; i++) {
if (exr_headers[i]->header_len == 0) {
tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
return TINYEXR_ERROR_INVALID_ARGUMENT;
}
total_header_size += exr_headers[i]->header_len;
}
// Position `marker` just past magic number, version header and all part
// headers; from here on it walks the concatenated chunk offset tables.
const char *marker = reinterpret_cast<const char *>(
memory + total_header_size + 4 +
4); // +8 for magic number and version header.
marker += 1; // Skip empty header.
// NOTE 1:
// In multipart image, There is 'part number' before chunk data.
// 4 byte : part number
// 4+ : chunk
//
// NOTE 2:
// EXR spec says 'part number' is 'unsigned long' but actually this is
// 'unsigned int(4 bytes)' in OpenEXR implementation...
// http://www.openexr.com/openexrfilelayout.pdf
// Load chunk offset table.
std::vector<tinyexr::OffsetData> chunk_offset_table_list;
chunk_offset_table_list.reserve(num_parts);
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1);
tinyexr::OffsetData& offset_data = chunk_offset_table_list.back();
// Single-resolution (scanline or one-level tiled) parts use a flat
// offset table; multi-level tiled parts use the per-level table below.
if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) {
tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count);
std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0];
for (size_t c = 0; c < offset_table.size(); c++) {
// Offsets are stored little-endian in the file; swap8 fixes byte order
// on big-endian hosts.
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, 8);
tinyexr::swap8(&offset);
// Reject offsets pointing past the end of the supplied buffer.
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_table[c] = offset + 4; // +4 to skip 'part number'
marker += 8;
}
}
else {
{
// Derive the per-level tile counts and validate the resulting block
// count against the header's chunk_count before reading offsets.
std::vector<int> num_x_tiles, num_y_tiles;
tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]);
int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles);
if (num_blocks != exr_headers[i]->chunk_count) {
tinyexr::SetErrorMessage("Invalid offset table size.", err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Read one 8-byte offset per tile, iterating level / row / column.
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
tinyexr::tinyexr_uint64 offset;
memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
tinyexr::swap8(&offset);
if (offset >= size) {
tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
offset_data.offsets[l][dy][dx] = offset + 4; // +4 to skip 'part number'
marker += sizeof(tinyexr::tinyexr_uint64); // = 8
}
}
}
}
}
// Decode image.
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
tinyexr::OffsetData &offset_data = chunk_offset_table_list[i];
// First check 'part number' is identitical to 'i'
// (each chunk in a multipart file is prefixed with the index of the part
// it belongs to; a mismatch indicates a corrupt or reordered file).
for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
const unsigned char *part_number_addr =
memory + offset_data.offsets[l][dy][dx] - 4; // -4 to move to 'part number' field.
unsigned int part_no;
memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4
tinyexr::swap4(&part_no);
if (part_no != i) {
tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
err);
return TINYEXR_ERROR_INVALID_DATA;
}
}
// Decompress and copy out the pixel data for this part.
std::string e;
int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data,
memory, size, &e);
if (ret != TINYEXR_SUCCESS) {
if (!e.empty()) {
tinyexr::SetErrorMessage(e, err);
}
return ret;
}
}
return TINYEXR_SUCCESS;
}
// Loads a multi-part EXR image from `filename` into `exr_images`, using the
// `num_parts` pre-parsed headers in `exr_headers`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message via `err`).
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
                                  const EXRHeader **exr_headers,
                                  unsigned int num_parts, const char *filename,
                                  const char **err) {
  // BUGFIX: also reject a NULL filename; the original passed it straight to
  // fopen()/std::string(), which is undefined behavior.
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang
  errno_t errcode =
      _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb");
  if (errcode != 0) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  // Unknown compiler
  fp = fopen(filename, "rb");
#endif
#else
  fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
  // Compute size.
  fseek(fp, 0, SEEK_END);
  long file_pos = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  // BUGFIX: an empty (or too-short) file previously reached `buf.at(0)` on a
  // zero-sized vector, throwing std::out_of_range instead of reporting an
  // error; also covers ftell() failure (negative return).
  if (file_pos < static_cast<long>(tinyexr::kEXRVersionSize)) {
    fclose(fp);
    tinyexr::SetErrorMessage("File too short " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  size_t filesize = static_cast<size_t>(file_pos);
  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  // BUGFIX: a short read was previously only caught by assert() (a no-op in
  // release builds); report it as an error instead.
  size_t ret = fread(&buf[0], 1, filesize, fp);
  fclose(fp);
  if (ret != filesize) {
    tinyexr::SetErrorMessage("fread error on " + std::string(filename), err);
    return TINYEXR_ERROR_INVALID_FILE;
  }
  return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
                                         &buf.at(0), filesize, err);
}
// Saves an interleaved float image (`components` = 1, 3 or 4) to an EXR file.
// Channels are written in (A)BGR order as most EXR viewers expect; pixels are
// stored as half when save_as_fp16 > 0, otherwise as full float.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code (message via `err`).
int SaveEXR(const float *data, int width, int height, int components,
            const int save_as_fp16, const char *outfilename, const char **err) {
  if ((components == 1) || components == 3 || components == 4) {
    // OK
  } else {
    std::stringstream ss;
    ss << "Unsupported component value : " << components << std::endl;
    tinyexr::SetErrorMessage(ss.str(), err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  // Robustness: reject non-positive dimensions up front (the original would
  // silently allocate zero-sized buffers or index with garbage).
  if ((width < 1) || (height < 1)) {
    tinyexr::SetErrorMessage("Invalid image width or height", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }
  EXRHeader header;
  InitEXRHeader(&header);
  if ((width < 16) && (height < 16)) {
    // No compression for small image.
    header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
  } else {
    header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
  }
  EXRImage image;
  InitEXRImage(&image);
  image.num_channels = components;
  // BUGFIX: compute the pixel count in size_t; `width * height` was evaluated
  // in int and could overflow for very large images.
  const size_t pixel_count =
      static_cast<size_t>(width) * static_cast<size_t>(height);
  std::vector<float> images[4];
  if (components == 1) {
    images[0].resize(pixel_count);
    memcpy(images[0].data(), data, sizeof(float) * pixel_count);
  } else {
    images[0].resize(pixel_count);
    images[1].resize(pixel_count);
    images[2].resize(pixel_count);
    images[3].resize(pixel_count);
    // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
    for (size_t i = 0; i < pixel_count; i++) {
      images[0][i] = data[static_cast<size_t>(components) * i + 0];
      images[1][i] = data[static_cast<size_t>(components) * i + 1];
      images[2][i] = data[static_cast<size_t>(components) * i + 2];
      if (components == 4) {
        images[3][i] = data[static_cast<size_t>(components) * i + 3];
      }
    }
  }
  float *image_ptr[4] = { 0, 0, 0, 0 };
  if (components == 4) {
    image_ptr[0] = &(images[3].at(0)); // A
    image_ptr[1] = &(images[2].at(0)); // B
    image_ptr[2] = &(images[1].at(0)); // G
    image_ptr[3] = &(images[0].at(0)); // R
  } else if (components == 3) {
    image_ptr[0] = &(images[2].at(0)); // B
    image_ptr[1] = &(images[1].at(0)); // G
    image_ptr[2] = &(images[0].at(0)); // R
  } else if (components == 1) {
    image_ptr[0] = &(images[0].at(0)); // A
  }
  image.images = reinterpret_cast<unsigned char **>(image_ptr);
  image.width = width;
  image.height = height;
  header.num_channels = components;
  header.channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most of EXR viewers expect this channel order.
  if (components == 4) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
    strncpy_s(header.channels[1].name, "B", 255);
    strncpy_s(header.channels[2].name, "G", 255);
    strncpy_s(header.channels[3].name, "R", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
    strncpy(header.channels[1].name, "B", 255);
    strncpy(header.channels[2].name, "G", 255);
    strncpy(header.channels[3].name, "R", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
    header.channels[1].name[strlen("B")] = '\0';
    header.channels[2].name[strlen("G")] = '\0';
    header.channels[3].name[strlen("R")] = '\0';
  } else if (components == 3) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "B", 255);
    strncpy_s(header.channels[1].name, "G", 255);
    strncpy_s(header.channels[2].name, "R", 255);
#else
    strncpy(header.channels[0].name, "B", 255);
    strncpy(header.channels[1].name, "G", 255);
    strncpy(header.channels[2].name, "R", 255);
#endif
    header.channels[0].name[strlen("B")] = '\0';
    header.channels[1].name[strlen("G")] = '\0';
    header.channels[2].name[strlen("R")] = '\0';
  } else {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
  }
  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image
    if (save_as_fp16 > 0) {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format
    } else {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e.
                                   // no precision reduction)
    }
  }
  int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
  // BUGFIX: free the header allocations on every path; the original returned
  // early on failure and leaked channels/pixel_types/requested_pixel_types.
  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);
  return ret;
}
#ifdef __clang__
// -Wzero-as-null-pointer-constant
#pragma clang diagnostic pop
#endif
#endif // TINYEXR_IMPLEMENTATION_DEFINED
#endif // TINYEXR_IMPLEMENTATION
|
pzlange.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#define A(m, n) (plasma_complex64_t*)plasma_tile_addr(A, m, n)
/***************************************************************************//**
* Parallel tile calculation of max, one, infinity or Frobenius matrix norm
* for a general matrix.
******************************************************************************/
// Parallel tile norm of a general matrix A.
//   norm     - PlasmaMaxNorm, PlasmaOneNorm, PlasmaInfNorm or
//              PlasmaFrobeniusNorm
//   work     - caller-provided scratch array; required layout depends on
//              the norm (per-tile maxima, per-column/row partial sums, or
//              scale/sumsq pairs - see each case below)
//   value    - output, receives the computed norm
//   sequence - task sequence; on entry errors the routine returns early
//   request  - request handle passed through to the core tasks
// NOTE(review): an unrecognized `norm` falls through the switch and leaves
// *value untouched - presumably validated by the caller; confirm.
void plasma_pzlange(plasma_enum_t norm,
plasma_desc_t A, double *work, double *value,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Return if failed sequence.
if (sequence->status != PlasmaSuccess)
return;
switch (norm) {
// Declarations at switch scope: each is only assigned inside the case
// that uses it (no initialization happens before the first case label).
double stub;
double *workspace;
double *scale;
double *sumsq;
//================
// PlasmaMaxNorm
//================
case PlasmaMaxNorm:
// Phase 1: one task per tile writes the tile's max into the
// A.mt x A.nt (column-major) array `work`.
for (int m = 0; m < A.mt; m++) {
int mvam = plasma_tile_mview(A, m);
int ldam = plasma_tile_mmain(A, m);
for (int n = 0; n < A.nt; n++) {
int nvan = plasma_tile_nview(A, n);
plasma_core_omp_zlange(PlasmaMaxNorm,
mvam, nvan,
A(m, n), ldam,
&stub, &work[A.mt*n+m],
sequence, request);
}
}
// All tile tasks must finish before reducing their results.
#pragma omp taskwait
// Phase 2: max over the per-tile maxima.
plasma_core_omp_dlange(PlasmaMaxNorm,
A.mt, A.nt,
work, A.mt,
&stub, value,
sequence, request);
break;
//================
// PlasmaOneNorm
//================
case PlasmaOneNorm:
// Phase 1: per-tile partial column sums; tile (m,n) writes its nvan
// sums starting at work[A.n*m + n*A.nb].
for (int m = 0; m < A.mt; m++) {
int mvam = plasma_tile_mview(A, m);
int ldam = plasma_tile_mmain(A, m);
for (int n = 0; n < A.nt; n++) {
int nvan = plasma_tile_nview(A, n);
plasma_core_omp_zlange_aux(PlasmaOneNorm,
mvam, nvan,
A(m, n), ldam,
&work[A.n*m+n*A.nb],
sequence, request);
}
}
#pragma omp taskwait
// Phase 2: reduce as the infinity norm of the A.n x A.mt array of
// partial sums (row-wise sum then max == max column sum of A).
workspace = work + A.mt*A.n;
plasma_core_omp_dlange(PlasmaInfNorm,
A.n, A.mt,
work, A.n,
workspace, value,
sequence, request);
break;
//================
// PlasmaInfNorm
//================
case PlasmaInfNorm:
// Phase 1: per-tile partial row sums; tile (m,n) writes its mvam sums
// starting at work[A.m*n + m*A.mb].
for (int m = 0; m < A.mt; m++) {
int mvam = plasma_tile_mview(A, m);
int ldam = plasma_tile_mmain(A, m);
for (int n = 0; n < A.nt; n++) {
int nvan = plasma_tile_nview(A, n);
plasma_core_omp_zlange_aux(PlasmaInfNorm,
mvam, nvan,
A(m, n), ldam,
&work[A.m*n+m*A.mb],
sequence, request);
}
}
#pragma omp taskwait
// Phase 2: max row sum over the A.m x A.nt array of partial sums.
workspace = work + A.nt*A.m;
plasma_core_omp_dlange(PlasmaInfNorm,
A.m, A.nt,
work, A.m,
workspace, value,
sequence, request);
break;
//======================
// PlasmaFrobeniusNorm
//======================
case PlasmaFrobeniusNorm:
// Phase 1: scaled sum-of-squares per tile (LAPACK zlassq style); the
// first A.mt*A.nt doubles of `work` hold scales, the next hold sumsqs.
scale = work;
sumsq = work + A.mt*A.nt;
for (int m = 0; m < A.mt; m++) {
int mvam = plasma_tile_mview(A, m);
int ldam = plasma_tile_mmain(A, m);
for (int n = 0; n < A.nt; n++) {
int nvan = plasma_tile_nview(A, n);
plasma_core_omp_zgessq(mvam, nvan,
A(m, n), ldam,
&scale[A.mt*n+m], &sumsq[A.mt*n+m],
sequence, request);
}
}
#pragma omp taskwait
// Phase 2: combine all (scale, sumsq) pairs into the final norm.
plasma_core_omp_dgessq_aux(A.mt*A.nt,
scale, sumsq,
value,
sequence, request);
break;
}
}
|
test_vadd_4.c | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#ifdef __cilk
#include <cilk/cilk.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#define RESTRICT
#ifndef RESTRICT
#define RESTRICT
#endif
/*
 * Returns the accumulated element-wise difference between vectors a and b,
 * used by main() as an error metric against the sequential baseline.
 * BUGFIX: the original summed SIGNED differences, so errors of opposite
 * sign cancelled and a wrong result could still report an error of ~0.
 * Accumulate absolute differences instead.
 */
double vdiff(int n, const float * RESTRICT a, const float * RESTRICT b)
{
    double d = 0.0;
    for(int i = 0; i < n; i++) {
        d += fabs((double)a[i] - (double)b[i]);
    }
    return d;
}
#ifndef RESTRICT
#define RESTRICT
#endif
/* Sequential baseline: element-wise vector sum, c[i] = a[i] + b[i]. */
void vadd0(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)
{
    int idx = 0;
    while (idx < n) {
        c[idx] = a[idx] + b[idx];
        ++idx;
    }
}
/* Host-parallel vector addition: uses "parallel for simd" when the compiler
 * implements OpenMP 4.0 (_OPENMP >= 201307), plain "parallel for" for older
 * OpenMP, or a sequential loop (with a compile-time warning) otherwise. */
void vadd1(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)
{
#if defined(_OPENMP) && (_OPENMP >= 201307)
#pragma omp parallel for simd
#elif defined(_OPENMP)
#warning No OpenMP simd support!
#pragma omp parallel for
#else
#warning No OpenMP support!
#endif
for(int i = 0; i < n; i++)
c[i] = a[i] + b[i];
}
/* Device-offloaded vector addition (OpenMP 4.0 target construct): maps n, a
 * and b to the device, maps c back, and runs a "parallel for simd" on the
 * target. Falls back to a host "parallel for" (with a compile-time warning)
 * when target/simd support is unavailable. */
void vadd2(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)
{
#if defined(_OPENMP) && (_OPENMP >= 201307)
//#pragma omp target teams distribute map(to:n,a[0:n],b[0:n]) map(from:c[0:n])
#pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n])
#pragma omp parallel for simd
#else
#warning No OpenMP target/simd support!
#pragma omp parallel for
#endif
for(int i = 0; i < n; i++)
c[i] = a[i] + b[i];
}
/* Cilk-parallel vector addition: uses _Cilk_for when the compiler provides
 * Cilk Plus (__cilk defined), otherwise a plain sequential loop (with a
 * compile-time warning). */
void vadd3(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c)
{
#ifdef __cilk
_Cilk_for(int i = 0; i < n; i++)
#else
#warning No Cilk support. Using sequential for loop.
for(int i = 0; i < n; i++)
#endif
c[i] = a[i] + b[i];
}
#ifndef _OPENMP
#include <time.h>
/*
 * BUGFIX: main() calls omp_get_wtime() unconditionally, but <omp.h> is only
 * included when _OPENMP is defined, so non-OpenMP builds failed to compile
 * and link. Provide a C11 wall-clock replacement with the same signature.
 */
static double omp_get_wtime(void)
{
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    return (double)ts.tv_sec + 1.0e-9 * (double)ts.tv_nsec;
}
#endif
/*
 * Benchmark driver: fills two input vectors of length n (argv[1], default
 * 1000), runs each vadd variant 10 times, and prints per-variant timings
 * plus the accumulated difference ("error") against the sequential vadd0.
 */
int main(int argc, char * argv[])
{
    int n = (argc > 1 ) ? atoi(argv[1]) : 1000;
    if (n < 1) n = 1000;  /* guard against non-numeric or negative argv[1] */
    float * x  = calloc(n,sizeof(float)); assert(x !=NULL);
    float * y  = calloc(n,sizeof(float)); assert(y !=NULL);
    float * z0 = calloc(n,sizeof(float)); assert(z0!=NULL);
    float * z1 = calloc(n,sizeof(float)); assert(z1!=NULL);
    float * z2 = calloc(n,sizeof(float)); assert(z2!=NULL);
    float * z3 = calloc(n,sizeof(float)); assert(z3!=NULL);
#if 0 && defined(_OPENMP) && (_OPENMP >= 201307)
    int nthrd = omp_get_max_threads();
    int ndevs = omp_get_num_devices();
    printf("OpenMP threads = %d devices = %d\n", nthrd, ndevs);
#endif
    for (int i=0; i<n; i++) {
        x[i] = (float)i;
    }
    for (int i=0; i<n; i++) {
        y[i] = (float)i;
    }
    for (int iter=0; iter<10; iter++) {
        double t0 = omp_get_wtime();
        vadd0(n,x,y,z0);
        double t1 = omp_get_wtime();
        vadd1(n,x,y,z1);
        double t2 = omp_get_wtime();
        vadd2(n,x,y,z2);
        double t3 = omp_get_wtime();
        vadd3(n,x,y,z3);
        double t4 = omp_get_wtime();
        printf("%20s time = %lf \n", "for", t1-t0);
        printf("%20s time = %lf (error=%lf) \n", "OpenMP for", t2-t1, vdiff(n,z0,z1));
        printf("%20s time = %lf (error=%lf) \n", "OpenMP offload for", t3-t2, vdiff(n,z0,z2));
#ifdef __cilk
        printf("%20s time = %lf (error=%lf) \n", "_Cilk_for", t4-t3, vdiff(n,z0,z3));
#endif
        /* prevent compiler from optimizing away anything */
        double junk = t0+t1+t2+t3+t4;
        for (int i=0; i<n; i++) {
            junk += z0[i] + z1[i] + z2[i] + z3[i];
        }
        printf("junk=%lf\n", junk);
    }
    free(z3);
    free(z2);
    free(z1);
    free(z0);
    free(y);
    free(x);
    printf("Success\n");
    return 0;
}
|
pr79431.c | /* PR c/79431 */
/* Compiler testcase for GCC PR c/79431: '#pragma omp declare target' with a
 * clause list applied to a block-scope automatic variable. NOTE(review):
 * this looks intentionally invalid (it exercises the compiler's diagnostic
 * path), so the body must stay exactly as-is for the test to keep meaning. */
void
foo (void)
{
int a;
#pragma omp declare target (a)
}
|
example_09-StructOfArrays-CellLinkedList-OuterLoop-LoadBalanced.c | /*
* SPDX-License-Identifier: BSD-3-Clause
*
* example_09-StructOfArrays-CellLinkedList-OuterLoop-LoadBalanced.c :
* Example of SPH Density Calculation using
* fast neighbor search the main density loop via
* Cell Linked List method, Struct of Arrays (SoA)
* data layout, OpenMP parallelization at the
* cell-pair level, SIMD directives in the kernel
* and in the inner-most loop. It also implements
* load balancing by moving the parallelism from
* iterating over cells to iterate over cell pairs.
*
* (C) Copyright 2021 José Hugo Elsas
* Author: José Hugo Elsas <jhelsas@gmail.com>
*
* Command Line Options:
* -runs <int> : Set the number of repetitions (runs) for
* calculating the density. The value of
* the density is based on the last
* iteration.
* Default value: 1
* -run_seed <int>: Flag to set an alternative seed use for
* for the PRNG. Instead of feeding seed
* to the PRNG directly, it feeds
* seed + iteration, as to generate different
* configurations for each iteration.
* Default value: 0 - (possible 0/1)
* -seed <int>: Set the seed to use for the SPH particles
* uniform position generation in the box
* Default value: 123123123
*
* -N <int>: Set the number of SPH particles to be used
* Default value: 1e5 = 100,000
* -h <float>: Set the value of the smoothing kernel
* parameter h, which corresponds to half
* of the support of the kernel.
* Default value: 0.05
*
* -Nx <int>: Set the number of Cells in the X direction
* Default value: 10
* -Ny <int>: Set the number of Cells in the Y direction
* Default value: 10
* -Nz <int>: Set the number of Cells in the Z direction
* Default value: 10
*
* -Xmin <float>: Set the lower bound in the X direction for
* the Cell Linked List box
* Default value: 0.0
* -Ymin <float>: Set the lower bound in the Y direction for
* the Cell Linked List box
* Default value: 0.0
* -Zmin <float>: Set the lower bound in the Z direction for
* the Cell Linked List box
* Default value: 0.0
*
* -Xmax <float>: Set the upper bound in the X direction for
* the Cell Linked List box
* Default value: 1.0
* -Ymax <float>: Set the upper bound in the Y direction for
* the Cell Linked List box
* Default value: 1.0
* -Zmax <float>: Set the upper bound in the Z direction for
* the Cell Linked List box
* Default value: 1.0
*/
#include <math.h>
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <limits.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/time.h>
#include <inttypes.h>
#include <omp.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_heapsort.h>
#include "sph_data_types.h"
#include "sph_linked_list.h"
#include "sph_utils.h"
#ifndef M_PI
#define M_PI (3.14159265358979323846)
#endif
#define COMPUTE_BLOCKS 5
int main_loop(int run, bool run_seed, int64_t N, double h, long int seed,
void *swap_arr, linkedListBox *box, SPHparticle *lsph, double *times);
int compute_density_3d_load_ballanced(int N, double h, SPHparticle *lsph, linkedListBox *box);
int compute_density_3d_chunk_noomp(int64_t node_begin, int64_t node_end,
int64_t nb_begin, int64_t nb_end,double h,
double* restrict x, double* restrict y,
double* restrict z, double* restrict nu,
double* restrict rho);
double w_bspline_3d_constant(double h);
#pragma omp declare simd
double w_bspline_3d_simd(double q);
/*
 * Program entry point: parses options, allocates the cell-linked-list box
 * and the SoA particle arrays, runs the density benchmark `runs` times and
 * reports timing statistics and the resulting densities.
 */
int main(int argc, char **argv){
  bool run_seed = false;     // By default the behavior is to use the same seed
  int runs = 1,err;          // it only runs once
  long int seed = 123123123; // The default seed is 123123123
  int64_t N = 100000;        // The default number of particles is N = 1e5 = 100,000
  double h=0.05;             // The default kernel smoothing length is h = 0.05
  linkedListBox *box;        // Uninitialized Box containing the cells for the cell linked list method
  SPHparticle *lsph;         // Uninitialized array of SPH particles

  box = (linkedListBox*)malloc(1*sizeof(linkedListBox)); // Create a box representing the entire 3d domain
  if(box == NULL){                                       // BUGFIX: result was previously dereferenced unchecked
    fprintf(stderr,"error allocating linkedListBox\n");
    return EXIT_FAILURE;
  }

  // allow for command line customization of the run
  arg_parse(argc,argv,&N,&h,&seed,&runs,&run_seed,box); // Parse the command line options
                                                        // line arguments and override default values
  if(runs < 1)                                          // guard against a nonsensical run count
    runs = 1;

  err = SPHparticle_SoA_malloc(N,&lsph);
  if(err)
    fprintf(stderr,"error in SPHparticle_SoA_malloc\n");

  void *swap_arr = malloc(N*sizeof(double));
  if(swap_arr == NULL){                                 // BUGFIX: result was previously used unchecked
    fprintf(stderr,"error allocating swap array\n");
    return EXIT_FAILURE;
  }

  // BUGFIX: `times` was a VLA sized by the user-controlled `runs`; a large
  // value could overflow the stack. Allocate it on the heap instead.
  double *times = (double*)malloc((size_t)runs*COMPUTE_BLOCKS*sizeof(double));
  if(times == NULL){
    fprintf(stderr,"error allocating timings array\n");
    return EXIT_FAILURE;
  }

  for(int run=0;run<runs;run+=1)
    main_loop(run,run_seed,N,h,seed,swap_arr,box,lsph,times);

  bool is_cll = true;
  const char *prefix = "ex09,cll,SoA,outer,simd,loadBallance";
  print_time_stats(prefix,is_cll,N,h,seed,runs,lsph,box,times);
  print_sph_particles_density(prefix,is_cll,N,h,seed,runs,lsph,box);

  SPHparticleSOA_safe_free(N,&lsph);
  safe_free_box(box);
  free(swap_arr);
  free(times);

  return 0;
}
/*
* Function main_loop:
* Runs the main loop of the program, including the particle array generation,
* density calculation and the timings annotations.
*
* Arguments:
* run <int> : index (or value) or the present iteration
* run_seed <bool> : boolean defining whether to use run index for seed or not
* N <int> : Number of SPH particles to be used in the run
* h <double> : Smoothing Length for the Smoothing Kernel w_bspline
* seed <long int> : seed for GSL PRNG generator to generate particle positions
* box <linkedListBox> : Box of linked list cells, encapsulating the 3d domain
* lsph <SPHparticle> : Array (pointer) of SPH particles to be updated
* times <double> : Array to store the computation timings to be updated
* Returns:
* 0 : error code returned
* lsph <SPHparticle> : SPH particle array is updated in the rho field by reference
* times <double> : Times is updated by reference
*/
// One benchmark iteration: generate particle positions, build the cell
// linked list (hash -> sort -> reorder -> interval tables), compute the
// densities, and record the five stage timings into
// times[COMPUTE_BLOCKS*run + 0..4].
int main_loop(int run, bool run_seed, int64_t N, double h, long int seed,
void *swap_arr, linkedListBox *box, SPHparticle *lsph, double *times)
{
int err;
// Optionally vary the seed per run so each iteration gets a fresh
// particle configuration.
if(run_seed)
err = gen_unif_rdn_pos_box(N,seed+run,box,lsph);
else
err = gen_unif_rdn_pos_box(N,seed,box,lsph);
if(err)
fprintf(stderr,"error in gen_unif_rdn_pos\n");
// ------------------------------------------------------ //
double t0,t1,t2,t3,t4,t5;
t0 = omp_get_wtime();
err = compute_hash_MC3D(N,lsph,box); // Compute Morton Z 3D hash based on the
if(err) // cell index for each of the X, Y and Z
fprintf(stderr,"error in compute_hash_MC3D\n"); // directions, in which a given particle reside
t1 = omp_get_wtime();
// NOTE(review): the element size 2*sizeof(int64_t) suggests lsph->hash
// stores (hash, original index) pairs - confirm against SPHparticle.
qsort(lsph->hash,N,2*sizeof(int64_t),compare_int64_t); // Sort the Particle Hash Hashes, getting the shuffled
// index necessary to re-shuffle the remaining arrays
t2 = omp_get_wtime();
err = reorder_lsph_SoA(N,lsph,swap_arr); // Reorder all arrays according to the sorted hash,
if(err) // As to have a quick way to retrieve a cell
fprintf(stderr,"error in reorder_lsph_SoA\n"); // given its hash.
t3 = omp_get_wtime();
err = setup_interval_hashtables(N,lsph,box); // Annotate the begining and end of each cell
if(err) // on the cell linked list method for fast
fprintf(stderr,"error in setup_interval_hashtables\n"); // neighbor search
t4 = omp_get_wtime();
err = compute_density_3d_load_ballanced(N,h,lsph,box); // Compute the density of the particles based
if(err) // on the cell linked list method for fast
fprintf(stderr,"error in compute_density\n"); // neighbor search
// ------------------------------------------------------ //
t5 = omp_get_wtime();
times[COMPUTE_BLOCKS*run+0] = t1-t0; // Time for compute morton Z 3d hash
times[COMPUTE_BLOCKS*run+1] = t2-t1; // Time for sorting the particles' hashes
times[COMPUTE_BLOCKS*run+2] = t3-t2; // Time for reordering all other arrays accordingly
times[COMPUTE_BLOCKS*run+3] = t4-t3; // Time for setting up the interval hash tables
times[COMPUTE_BLOCKS*run+4] = t5-t4; // Time for computing the SPH particle densities
return 0;
}
/*
* Function compute_density_3d_load_ballanced:
* Computes the SPH density from the particles using cell linked list with
* vectorization at the compute_density_3d_chunk level, but the parallelization
* done at the level of the outer-most loop of the compute_density_3d_cll_outerOmp
* function, not at the chunk level.
*
* The parallelization is done at the level of cell pair instead of cells, with
* the indexes for the cell pairs pre-computed before parallelization.
*
* Arguments:
* N <int> : Number of SPH particles to be used in the run
* h <double> : Smoothing Length for the Smoothing Kernel w_bspline
* lsph <SPHparticle> : Array (pointer) of SPH particles to be updated
* Returns:
* 0 : error code returned
* lsph <SPHparticle> : SPH particle array is updated in the rho field by reference
*/
/*
 * Computes the SPH densities via cell linked list, parallelizing over cell
 * PAIRS (pre-computed before the parallel region) rather than cells, for
 * better load balance.
 * Returns 0 on success, nonzero on allocation failure; rho is updated in
 * place by reference.
 */
int compute_density_3d_load_ballanced(int N, double h, SPHparticle *lsph, linkedListBox *box){
  int64_t *node_begin = NULL, *node_end = NULL;   // per-pair receiver-cell particle ranges
  int64_t *nb_begin   = NULL, *nb_end   = NULL;   // per-pair neighbor-cell particle ranges

  const int64_t max_cell_pair_count = count_box_pairs(box); // number of interacting cell pairs

  node_begin = (int64_t*)malloc(max_cell_pair_count*sizeof(int64_t));
  node_end   = (int64_t*)malloc(max_cell_pair_count*sizeof(int64_t));
  nb_begin   = (int64_t*)malloc(max_cell_pair_count*sizeof(int64_t));
  nb_end     = (int64_t*)malloc(max_cell_pair_count*sizeof(int64_t));
  if(node_begin == NULL || node_end == NULL || nb_begin == NULL || nb_end == NULL){
    // BUGFIX: the original dereferenced unchecked malloc results.
    free(node_begin); free(node_end); free(nb_begin); free(nb_end);
    return 1;
  }

  setup_box_pairs(box,node_begin,node_end,nb_begin,nb_end); // set the values for cell pairs

  memset(lsph->rho, 0, N*sizeof(double));                   // Pre-initialize the density to zero

  // BUGFIX: the loop index was size_t while the bound is int64_t (mixed
  // signedness comparison); a signed index also satisfies the canonical
  // loop form required by pre-3.0 OpenMP implementations.
  #pragma omp parallel for                                  // execute in parallel
  for(int64_t i=0;i<max_cell_pair_count;i+=1){              // iterate over cell pairs' array
    compute_density_3d_chunk_noomp(node_begin[i],node_end[i], // compute the cell pair contribution
                                   nb_begin[i],nb_end[i],
                                   h,lsph->x,lsph->y,lsph->z,
                                   lsph->nu,lsph->rho);
  }

  free(node_begin);
  free(node_end);
  free(nb_begin);
  free(nb_end);

  return 0;
}
/*
* Function compute_density_3d_noomp:
* Computes the SPH density contribution to the node_ cell from the nb_ cell.
* Vectorization in the inner-most loop, but no parallelization.
*
* Arguments:
* node_begin <int64_t> : Begin index for the cell the contribution is made to
* node_end <int64_t> : End index for the cell the contribution is made to
* nb_begin <int64_t> : Begin index for the cell the contribution is made from
* nb_end <int64_t> : End index for the cell the contribution is made from
* h <double> : Smoothing Length for the Smoothing Kernel w_bspline
* x <double*> : Array of particles' X positions
* y <double*> : Array of particles' Y positions
* z <double*> : Array of particles' Z positions
* nu <double*> : Array of particles' density weights (i.e. masses)
* Returns:
* 0 : error code returned
* rho <double*> : Array of particles' densities
*/
/*
 * Adds the SPH density contribution of the particles in [nb_begin, nb_end)
 * to the particles in [node_begin, node_end). The inner loop is a SIMD
 * candidate; there is no thread-level parallelism here (the caller supplies
 * it). rho is accumulated in place; always returns 0.
 */
int compute_density_3d_chunk_noomp(int64_t node_begin, int64_t node_end,
                                   int64_t nb_begin, int64_t nb_end,double h,
                                   double* restrict x, double* restrict y,
                                   double* restrict z, double* restrict nu,
                                   double* restrict rho){
  const double one_over_h = 1./h;                         // reciprocal smoothing length
  const double norm = w_bspline_3d_constant(h);           // kernel normalization constant

  for(int64_t i = node_begin; i < node_end; i += 1){      // receiver particle loop
    const double xi = x[i];                               // receiver position, X
    const double yi = y[i];                               // receiver position, Y
    const double zi = z[i];                               // receiver position, Z
    double acc = 0.0;                                     // un-normalized density accumulator

    #pragma omp simd                                      // hint the compiler to vectorize
    for(int64_t j = nb_begin; j < nb_end; j += 1){        // neighbor particle loop
      const double dx = xi - x[j];                        // separation, X
      const double dy = yi - y[j];                        // separation, Y
      const double dz = zi - z[j];                        // separation, Z
      double q = 0.;
      q += dx*dx;                                         // squared distance, X term
      q += dy*dy;                                         // squared distance, Y term
      q += dz*dz;                                         // squared distance, Z term
      q = sqrt(q)*one_over_h;                             // normalized distance
      acc += nu[j]*w_bspline_3d_simd(q);                  // weighted kernel contribution
    }

    rho[i] += acc*norm;                                   // fold normalized chunk into rho
  }

  return 0;
}
/*
* Function w_bspline_3d_constant:
* Returns the 3d normalization constant for the cubic b-spline SPH smoothing kernel
*
* Arguments:
* h <double> : Smoothing Length for the Smoothing Kernel w_bspline
* Returns:
* 3d bspline normalization density <double>
*/
/*
 * 3d normalization constant for the cubic b-spline SPH kernel:
 * 3 / (2 * pi * h^3), where h is the smoothing length.
 */
double w_bspline_3d_constant(double h){
  const double pi = 3.14159265358979323846;  /* same double value as M_PI */
  return 3./(2.*pi*h*h*h);
}
/*
* Function w_bspline_3d_simd:
* Returns the un-normalized value of the cubic b-spline SPH smoothing kernel
*
* Arguments:
* q <double> : Distance between particles normalized by the smoothing length h
* Returns:
* wq <double> : Unnormalized value of the kernel
*
* Observation:
* Why not else if(q<2.)?
* Because if you use "else if", the compiler refuses to vectorize,
* This results in a large slowdown, as of 2.5x slower for example_04
*/
#pragma omp declare simd
/*
 * Un-normalized cubic b-spline kernel value for normalized distance q.
 * Two independent `if`s (not else-if) are kept deliberately: the original
 * notes that else-if blocks vectorization of the caller's inner loop.
 */
double w_bspline_3d_simd(double q){
  const double inner = (0.6666666666666666 - q*q + 0.5*q*q*q);   // spline piece for q in [0,1)
  const double outer = 0.16666666666666666*(2.-q)*(2.-q)*(2.-q); // spline piece for q in [1,2)
  double w = 0;                                                  // zero outside the support (q >= 2)
  if(q<2.)
    w = outer;
  if(q<1.)
    w = inner;                                                   // overrides `outer` when q < 1
  return w;
}
spmm.h | /*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/spmm.h
* \brief SPMM CPU kernel function header.
*/
#ifndef DGL_ARRAY_CPU_SPMM_H_
#define DGL_ARRAY_CPU_SPMM_H_
#include <dgl/array.h>
#include <dgl/bcast.h>
#include <limits>
#include <algorithm>
namespace dgl {
namespace aten {
namespace cpu {
/*!
* \brief CPU kernel of SpMM on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes.
*/
// Row-parallel CSR SpMM with sum reduction: for each destination row `rid`,
// out[rid, k] = sum over incoming edges j of Op::Call(ufeat[src, k'],
// efeat[edge, k'']), with k'/k'' resolved through the broadcast offsets.
// Each OpenMP thread owns distinct rows, so no atomics are needed.
template <typename IdType, typename DType, typename Op>
void SpMMSumCsr(
const BcastOff& bcast,
const CSRMatrix& csr,
NDArray ufeat, NDArray efeat,
NDArray out) {
// has_idx: csr.data maps positions to edge ids; otherwise position == id.
const bool has_idx = !IsNullArray(csr.data);
const IdType* indptr = csr.indptr.Ptr<IdType>();
const IdType* indices = csr.indices.Ptr<IdType>();
const IdType* edges = csr.data.Ptr<IdType>();
const DType* X = ufeat.Ptr<DType>();
const DType* W = efeat.Ptr<DType>();
int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = out.Ptr<DType>();
#pragma omp parallel for
for (IdType rid = 0; rid < csr.num_rows; ++rid) {
// [row_start, row_end) spans the incoming edges of destination rid.
const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
DType* out_off = O + rid * dim;
for (int64_t k = 0; k < dim; ++k) {
DType accum = 0;
for (IdType j = row_start; j < row_end; ++j) {
const IdType cid = indices[j];
const IdType eid = has_idx? edges[j] : j;
// When broadcasting, map output feature k to the operand offsets;
// otherwise operands share the output's feature index.
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
// Op decides which operands it consumes (e.g. copy-lhs ignores rhs).
const DType* lhs_off = Op::use_lhs? X + cid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
accum += Op::Call(lhs_off, rhs_off);
}
out_off[k] = accum;
}
}
}
/*!
* \brief CPU kernel of SpMM on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes. To avoid possible data hazard,
* we use atomic operators in the reduction phase.
*/
// Edge-parallel COO SpMM with sum reduction: iterates nonzeros and scatters
// Op::Call(ufeat[row, .], efeat[edge, .]) into out[col, .]. Different edges
// may target the same destination, hence the atomic accumulation.
template <typename IdType, typename DType, typename Op>
void SpMMSumCoo(
const BcastOff& bcast,
const COOMatrix& coo,
NDArray ufeat, NDArray efeat,
NDArray out) {
// has_idx: coo.data maps positions to edge ids; otherwise position == id.
const bool has_idx = !IsNullArray(coo.data);
const IdType* row = coo.row.Ptr<IdType>();
const IdType* col = coo.col.Ptr<IdType>();
const IdType* edges = coo.data.Ptr<IdType>();
const DType* X = ufeat.Ptr<DType>();
const DType* W = efeat.Ptr<DType>();
int64_t dim = bcast.out_len,
lhs_dim = bcast.lhs_len,
rhs_dim = bcast.rhs_len;
DType* O = out.Ptr<DType>();
const int64_t nnz = coo.row->shape[0];
// fill zero elements
// (required: the loop below only adds into O, it never overwrites)
memset(O, 0, out.GetSize());
// spmm
#pragma omp parallel for
for (IdType i = 0; i < nnz; ++i) {
const IdType rid = row[i];
const IdType cid = col[i];
const IdType eid = has_idx? edges[i] : i;
// Destination slice: reduction target is the COLUMN endpoint.
DType* out_off = O + cid * dim;
for (int64_t k = 0; k < dim; ++k) {
// Broadcast-aware operand offsets for output feature k.
const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
const DType* lhs_off = Op::use_lhs? X + rid * lhs_dim + lhs_add : nullptr;
const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
const DType val = Op::Call(lhs_off, rhs_off);
// Atomic: multiple threads may accumulate into the same destination.
#pragma omp atomic
out_off[k] += val;
}
}
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes, which refers the source node indices
* correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max reducer.
* \param arge Arg-Min/Max on edges. which refers the source node indices
* correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max reducer.
* \note it uses node parallel strategy, different threads are responsible
* for the computation of different nodes.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsr(
    const BcastOff& bcast,
    const CSRMatrix& csr,
    NDArray ufeat, NDArray efeat,
    NDArray out, NDArray argu, NDArray arge) {
  // True when the CSR carries an explicit edge-id array in csr.data.
  const bool has_idx = !IsNullArray(csr.data);
  const IdType* indptr = static_cast<IdType*>(csr.indptr->data);
  const IdType* indices = static_cast<IdType*>(csr.indices->data);
  const IdType* edges = has_idx ? static_cast<IdType*>(csr.data->data) : nullptr;
  const DType* X = Op::use_lhs? static_cast<DType*>(ufeat->data) : nullptr;
  const DType* W = Op::use_rhs? static_cast<DType*>(efeat->data) : nullptr;
  const int64_t dim = bcast.out_len,
                lhs_dim = bcast.lhs_len,
                rhs_dim = bcast.rhs_len;
  DType* O = static_cast<DType*>(out->data);
  IdType* argX = Op::use_lhs? static_cast<IdType*>(argu->data) : nullptr;
  IdType* argW = Op::use_rhs? static_cast<IdType*>(arge->data) : nullptr;
  // Node-parallel: each thread owns whole destination rows, so no atomics
  // are needed in the reduction.
#pragma omp parallel for
  for (IdType rid = 0; rid < csr.num_rows; ++rid) {
    const IdType row_start = indptr[rid], row_end = indptr[rid + 1];
    DType* out_off = O + rid * dim;
    // FIX: argX/argW are nullptr when the corresponding operand is unused;
    // pointer arithmetic on a null pointer is undefined behavior in C++.
    // Guard the offset computation (this matches the guarded form already
    // used in SpMMCmpCoo).
    IdType* argx_off = Op::use_lhs? argX + rid * dim : nullptr;
    IdType* argw_off = Op::use_rhs? argW + rid * dim : nullptr;
    for (int64_t k = 0; k < dim; ++k) {
      // Start from the reducer's identity (lowest() for max, max() for min).
      DType accum = Cmp::zero;
      IdType ax = 0, aw = 0;
      for (IdType j = row_start; j < row_end; ++j) {
        const IdType cid = indices[j];
        const IdType eid = has_idx? edges[j] : j;
        // Map output position k to per-operand offsets when broadcasting.
        const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
        const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
        const DType* lhs_off = Op::use_lhs? X + cid * lhs_dim + lhs_add : nullptr;
        const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
        const DType val = Op::Call(lhs_off, rhs_off);
        // Keep the extreme value and remember which node/edge produced it.
        if (Cmp::Call(accum, val)) {
          accum = val;
          if (Op::use_lhs)
            ax = cid;
          if (Op::use_rhs)
            aw = eid;
        }
      }
      out_off[k] = accum;
      if (Op::use_lhs)
        argx_off[k] = ax;
      if (Op::use_rhs)
        argw_off[k] = aw;
    }
  }
}
/*!
* \brief CPU kernel of SpMM-Min/Max on Coo format.
* \param bcast Broadcast information.
* \param coo The Coo matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes, which refers the source node indices
* correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max reducer.
* \param arge Arg-Min/Max on edges. which refers the source node indices
* correspond to the minimum/maximum values of reduction result on
* destination nodes. It's useful in computing gradients of Min/Max reducer.
 * \note it uses edge parallel strategy: different threads are responsible
 *       for the computation of different edges (non-zeros). To avoid possible
 *       data hazards, we use a critical section in the reduction phase.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCoo(
    const BcastOff& bcast,
    const COOMatrix& coo,
    NDArray ufeat, NDArray efeat,
    NDArray out, NDArray argu, NDArray arge) {
  // True when the COO carries an explicit edge-id array in coo.data;
  // otherwise the storage position i itself serves as the edge id.
  const bool has_idx = !IsNullArray(coo.data);
  const IdType* row = static_cast<IdType*>(coo.row->data);
  const IdType* col = static_cast<IdType*>(coo.col->data);
  const IdType* edges = has_idx? static_cast<IdType*>(coo.data->data) : nullptr;
  const DType* X = Op::use_lhs? static_cast<DType*>(ufeat->data) : nullptr;
  const DType* W = Op::use_rhs? static_cast<DType*>(efeat->data) : nullptr;
  const int64_t dim = bcast.out_len,
                lhs_dim = bcast.lhs_len,
                rhs_dim = bcast.rhs_len;
  DType* O = static_cast<DType*>(out->data);
  IdType* argX = Op::use_lhs? static_cast<IdType*>(argu->data) : nullptr;
  IdType* argW = Op::use_rhs? static_cast<IdType*>(arge->data) : nullptr;
  const int64_t nnz = coo.row->shape[0];
  // fill zero elements: seed the output with the reducer's identity
  // (lowest() for max, max() for min) so any real value replaces it.
  std::fill(O, O + out.NumElements(), Cmp::zero);
  // spmm: parallelize over non-zeros; multiple edges may target the same
  // destination row, so the compare-and-update below is a critical section
  // (the value update and the arg updates must be seen atomically together).
#pragma omp parallel for
  for (IdType i = 0; i < nnz; ++i) {
    const IdType rid = row[i];
    const IdType cid = col[i];
    const IdType eid = has_idx? edges[i] : i;
    DType* out_off = O + cid * dim;
    // Offsets are guarded: argX/argW are nullptr when the operand is unused.
    IdType* argx_off = Op::use_lhs? argX + cid * dim : nullptr;
    IdType* argw_off = Op::use_rhs? argW + cid * dim : nullptr;
    for (int64_t k = 0; k < dim; ++k) {
      // Map output position k to per-operand offsets when broadcasting.
      const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k;
      const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k;
      const DType* lhs_off = Op::use_lhs? X + rid * lhs_dim + lhs_add : nullptr;
      const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr;
      const DType val = Op::Call(lhs_off, rhs_off);
#pragma omp critical
      if (Cmp::Call(out_off[k], val)) {
        out_off[k] = val;
        if (Op::use_lhs)
          argx_off[k] = rid;
        if (Op::use_rhs)
          argw_off[k] = eid;
      }
    }
  }
}
namespace op {

//////////////////////////////// binary operators on CPU ////////////////////////////////

/*! \brief z = x + y. */
template <typename DType>
struct Add {
  static constexpr bool use_lhs = true;
  static constexpr bool use_rhs = true;
  inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
    return *lhs_off + *rhs_off;
  }
};

/*! \brief z = x - y. */
template <typename DType>
struct Sub {
  static constexpr bool use_lhs = true;
  static constexpr bool use_rhs = true;
  inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
    return *lhs_off - *rhs_off;
  }
};

/*! \brief z = x * y. */
template <typename DType>
struct Mul {
  static constexpr bool use_lhs = true;
  static constexpr bool use_rhs = true;
  inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
    return *lhs_off * *rhs_off;
  }
};

/*! \brief z = x / y. */
template <typename DType>
struct Div {
  static constexpr bool use_lhs = true;
  static constexpr bool use_rhs = true;
  inline static DType Call(const DType* lhs_off, const DType* rhs_off) {
    return *lhs_off / *rhs_off;
  }
};

/*! \brief z = x (node feature passthrough; rhs is ignored). */
template <typename DType>
struct CopyLhs {
  static constexpr bool use_lhs = true;
  static constexpr bool use_rhs = false;
  inline static DType Call(const DType* lhs_off, const DType* ) {
    return *lhs_off;
  }
};

/*! \brief z = y (edge feature passthrough; lhs is ignored). */
template <typename DType>
struct CopyRhs {
  static constexpr bool use_lhs = false;
  static constexpr bool use_rhs = true;
  inline static DType Call(const DType* , const DType* rhs_off) {
    return *rhs_off;
  }
};

//////////////////////////////// Reduce operators on CPU ////////////////////////////////

/*! \brief Max reducer: zero is the identity; Call is the replace predicate. */
template <typename DType>
struct Max {
  static constexpr DType zero = std::numeric_limits<DType>::lowest();
  // Return true if accum should be replaced by val.
  // FIX: the return type is bool, not DType — this is a predicate used in
  // `if (Cmp::Call(...))` by the SpMMCmp* kernels; declaring it as DType
  // silently converted the comparison result.
  inline static bool Call(DType accum, DType val) {
    return accum < val;
  }
};

/*! \brief Min reducer: zero is the identity; Call is the replace predicate. */
template <typename DType>
struct Min {
  static constexpr DType zero = std::numeric_limits<DType>::max();
  // Return true if accum should be replaced by val (see note on Max::Call).
  inline static bool Call(DType accum, DType val) {
    return accum > val;
  }
};

// Dispatch a runtime operator name to the corresponding Op type above.
#define SWITCH_OP(op, Op, ...)                                      \
  do {                                                              \
    if ((op) == "add") {                                            \
      typedef dgl::aten::cpu::op::Add<DType> Op;                    \
      { __VA_ARGS__ }                                               \
    } else if ((op) == "sub") {                                     \
      typedef dgl::aten::cpu::op::Sub<DType> Op;                    \
      { __VA_ARGS__ }                                               \
    } else if ((op) == "mul") {                                     \
      typedef dgl::aten::cpu::op::Mul<DType> Op;                    \
      { __VA_ARGS__ }                                               \
    } else if ((op) == "div") {                                     \
      typedef dgl::aten::cpu::op::Div<DType> Op;                    \
      { __VA_ARGS__ }                                               \
    } else if ((op) == "copy_u") {                                  \
      typedef dgl::aten::cpu::op::CopyLhs<DType> Op;                \
      { __VA_ARGS__ }                                               \
    } else if ((op) == "copy_e") {                                  \
      typedef dgl::aten::cpu::op::CopyRhs<DType> Op;                \
      { __VA_ARGS__ }                                               \
    } else {                                                        \
      LOG(FATAL) << "Unsupported SpMM binary operator: " << op;     \
    }                                                               \
  } while (0)

}  // namespace op
} // namespace cpu
} // namespace aten
} // namespace dgl
#endif // DGL_ARRAY_CPU_SPMM_H_
|
GB_binop__lxor_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_bool)
// A.*B function (eWiseMult): GB (_AemultB_01__lxor_bool)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_bool)
// A.*B function (eWiseMult): GB (_AemultB_03__lxor_bool)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_bool)
// A*D function (colscale): GB (_AxD__lxor_bool)
// D*A function (rowscale): GB (_DxB__lxor_bool)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_bool)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_bool)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_bool)
// C=scalar+B GB (_bind1st__lxor_bool)
// C=scalar+B' GB (_bind1st_tran__lxor_bool)
// C=A+scalar GB (_bind2nd__lxor_bool)
// C=A'+scalar GB (_bind2nd_tran__lxor_bool)
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
bool aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
bool bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_BOOL || GxB_NO_LXOR_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; cij = (aij != bij) (LXOR).
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // numerical work is done by the shared template, specialized via macros
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with LXOR.
GrB_Info GB (_Cdense_accumB__lxor_bool)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // B is sliced into B_ntasks tasks over B_nthreads threads
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with LXOR.
GrB_Info GB (_Cdense_accumb__lxor_bool)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): the return below is unreachable (the inner block already
    // returns); a harmless artifact of the code generator — any fix belongs
    // in the Generator/ source, since this file is auto-generated.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__lxor_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C shares A's pattern; only its values Cx are computed by the template
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__lxor_bool)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C shares B's pattern; only its values Cx are computed by the template
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, on the union of the patterns of A and B.
GrB_Info GB (_AaddB__lxor_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspace, allocated/used inside the template
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, on the intersection of the patterns.
GrB_Info GB (_AemultB_01__lxor_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// For LXOR, GB_BINOP_FLIP is 0 (the op is commutative), so only the
// non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__lxor_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are bitmap/full.
GrB_Info GB (_AemultB_03__lxor_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as bitmap.
GrB_Info GB (_AemultB_bitmap__lxor_bool)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the operator with the scalar bound as 1st argument.
GrB_Info GB (_bind1st__lxor_bool)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap pattern of B, consulted via GBB
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip positions not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        bool bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the operator with the scalar bound as 2nd argument.
GrB_Info GB (_bind2nd__lxor_bool)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap pattern of A, consulted via GBB
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        bool aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
// The elementwise work is GB_CAST_OP, defined just above this function.
GrB_Info GB (_bind1st_tran__lxor_bool)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function (a no-op here,
    // since the redefinition is identical)
    #undef GB_ATYPE
    #define GB_ATYPE \
        bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
// The elementwise work is GB_CAST_OP, defined just above this function.
GrB_Info GB (_bind2nd_tran__lxor_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
flow.c | #include "flow.h"
#include "../../comms.h"
#include "../../params.h"
#include "../flow_interface.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// Solve a single timestep on the given mesh
// Advances the 2D hydro state by one timestep: equation of state, momentum
// update (pressure + artificial viscosity), energy update, timestep
// selection, then advection of mass/energy and momentum. The call order is
// significant — each stage consumes fields updated by the previous one.
void solve_hydro_2d(Mesh* mesh, int tt, double* pressure, double* density,
                    double* density_old, double* energy, double* velocity_x,
                    double* velocity_y, double* momentum_x, double* momentum_y,
                    double* Qxx, double* Qyy, double* mass_flux_x,
                    double* mass_flux_y, double* momentum_x_flux_x,
                    double* momentum_x_flux_y, double* momentum_y_flux_x,
                    double* momentum_y_flux_y, double* reduce_array) {
  // Only the master rank reports the timestep to avoid duplicated output.
  if (mesh->rank == MASTER) {
    printf("Timestep: %.12e\n", mesh->dt);
  }

  // Recompute pressure from density and energy (gamma-law closure).
  equation_of_state(mesh->local_nx, mesh->local_ny, pressure, density, energy);

  // Accelerate momenta/velocities from the pressure gradients.
  pressure_acceleration(mesh->local_nx, mesh->local_ny, mesh, mesh->dt,
                        momentum_x, momentum_y, velocity_x, velocity_y,
                        pressure, density, mesh->edgedx, mesh->edgedy,
                        mesh->celldx, mesh->celldy);

  // Apply artificial viscous stresses and re-derive velocities.
  artificial_viscosity(mesh->local_nx, mesh->local_ny, mesh, mesh->dt, Qxx, Qyy,
                       velocity_x, velocity_y, momentum_x, momentum_y, density,
                       mesh->edgedx, mesh->edgedy, mesh->celldx, mesh->celldy);

  // Update energy from shock heating and compression/expansion work.
  shock_heating_and_work(mesh->local_nx, mesh->local_ny, mesh, mesh->dt_h,
                         energy, pressure, velocity_x, velocity_y, density, Qxx,
                         Qyy, mesh->celldx, mesh->celldy);

  // Choose the next timestep; tt == 0 marks the first step for the limiter.
  set_timestep(mesh->local_nx, mesh->local_ny, Qxx, Qyy, density, energy, mesh,
               reduce_array, tt == 0, mesh->celldx, mesh->celldy);

  // Perform advection
  advect_mass_and_energy(mesh->local_nx, mesh->local_ny, mesh, tt, mesh->dt,
                         mesh->dt_h, density, energy, density_old, mass_flux_x,
                         mass_flux_y, momentum_x_flux_x, momentum_x_flux_y,
                         velocity_x, velocity_y, mesh->edgedx, mesh->edgedy,
                         mesh->celldx, mesh->celldy);
  advect_momentum(mesh->local_nx, mesh->local_ny, tt, mesh, mesh->dt_h,
                  mesh->dt, velocity_x, velocity_y, momentum_x_flux_x,
                  momentum_x_flux_y, momentum_y_flux_x, momentum_y_flux_y,
                  momentum_x, momentum_y, density, mass_flux_x, mass_flux_y,
                  mesh->edgedx, mesh->edgedy, mesh->celldx, mesh->celldy);
}
// Calculate the pressure from GAMma law equation of state
void equation_of_state(const int nx, const int ny, double* pressure,
                       const double* density, const double* energy) {
  START_PROFILING(&compute_profile);
// Ideal-gas (gamma-law) closure: p = (GAM - 1) * rho * e, evaluated for
// every cell in the nx*ny grid (ghost cells included).
#pragma omp parallel for
  for (int row = 0; row < ny; ++row) {
#pragma omp simd
    for (int col = 0; col < nx; ++col) {
      const int cell = row * nx + col;
      pressure[cell] = (GAM - 1.0) * density[cell] * energy[cell];
    }
  }
  STOP_PROFILING(&compute_profile, __func__);
}
// Calculates the timestep from the current state
// Computes the next timestep from a sound-speed (CFL-style) constraint over
// all interior cells, then relaxes it so dt cannot jump abruptly.
// NOTE(review): reduce_array is unused in this body — presumably the global
// reduction is handled inside reduce_all_min; confirm against its definition.
void set_timestep(const int nx, const int ny, double* Qxx, double* Qyy,
                  const double* density, const double* energy, Mesh* mesh,
                  double* reduce_array, const int first_step,
                  const double* celldx, const double* celldy) {
  const int pad = mesh->pad;
  // Start from the largest allowed dt; the loop can only shrink it.
  double local_min_dt = mesh->max_dt;
  START_PROFILING(&compute_profile);
  // Check the minimum timestep from the sound speed in the nx and ny directions
#pragma omp parallel for reduction(min : local_min_dt)
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // Constrain based on the sound speed within the system
      const double c_s = sqrt(GAM * (GAM - 1.0) * energy[(ii * nx + jj)]);
      // The signal speed is augmented by the artificial viscous stress terms
      // (2*Q/rho) in each direction before dividing into the cell width.
      const double thread_min_dt_x =
          celldx[jj] /
          sqrt(c_s * c_s + 2.0 * Qxx[(ii * nx + jj)] / density[(ii * nx + jj)]);
      const double thread_min_dt_y =
          celldy[ii] /
          sqrt(c_s * c_s + 2.0 * Qyy[(ii * nx + jj)] / density[(ii * nx + jj)]);
      const double thread_min_dt = min(thread_min_dt_x, thread_min_dt_y);
      local_min_dt = min(local_min_dt, thread_min_dt);
    }
  }
  STOP_PROFILING(&compute_profile, __func__);

  // Reduce across all ranks so every rank advances with the same dt.
  double global_min_dt = reduce_all_min(local_min_dt);

  // Ensure that the timestep does not jump too far from one step to the next
  const double final_min_dt = min(global_min_dt, C_M * mesh->dt_h);
  // dt is averaged with the previous half-step value for smoothness; dt_h
  // tracks the raw limited value (except on the very first step).
  mesh->dt = 0.5 * (C_T * final_min_dt + mesh->dt_h);
  mesh->dt_h = (first_step) ? mesh->dt : C_T * final_min_dt;
}
// Calculate change in momentum caused by pressure gradients, and then extract
// the velocities using edge centered density approximations
// Updates momenta from the pressure gradients and then re-derives velocities
// using edge-centered density approximations. velocity_x/momentum_x live on
// x-edges (stride nx + 1); velocity_y/momentum_y on y-edges (stride nx).
void pressure_acceleration(const int nx, const int ny, Mesh* mesh,
                           const double dt, double* momentum_x,
                           double* momentum_y, double* velocity_x,
                           double* velocity_y, const double* pressure,
                           const double* density, const double* edgedx,
                           const double* edgedy, const double* celldx,
                           const double* celldy) {
  START_PROFILING(&compute_profile);
  const int pad = mesh->pad;
#pragma omp parallel for
  for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
      // Update the momenta using the pressure gradients
      momentum_x[(ii * (nx + 1) + jj)] -=
          dt * (pressure[(ii * nx + jj)] - pressure[(ii * nx + jj) - 1]) /
          edgedx[jj];
      momentum_y[(ii * nx + jj)] -=
          dt * (pressure[(ii * nx + jj)] - pressure[(ii * nx + jj) - nx]) /
          edgedy[ii];
      // Calculate the zone edge centered density
      // (volume-weighted average of the two cells sharing the edge)
      const double density_edge_x =
          (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
           density[(ii * nx + jj) - 1] * celldx[jj - 1] * celldy[ii]) /
          (2.0 * edgedx[jj] * celldy[ii]);
      const double density_edge_y =
          (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
           density[(ii * nx + jj) - nx] * celldx[jj] * celldy[ii - 1]) /
          (2.0 * celldx[jj] * edgedy[ii]);
      // Find the velocities from the momenta and edge centered mass densities
      // (guarding against division by zero in vacuum regions)
      velocity_x[(ii * (nx + 1) + jj)] =
          (density_edge_x == 0.0)
              ? 0.0
              : momentum_x[(ii * (nx + 1) + jj)] / density_edge_x;
      velocity_y[(ii * nx + jj)] =
          (density_edge_y == 0.0) ? 0.0
                                  : momentum_y[(ii * nx + jj)] / density_edge_y;
    }
  }
  STOP_PROFILING(&compute_profile, __func__);

  // Refresh halo/boundary values; x-velocity reflects across x boundaries,
  // y-velocity across y boundaries.
  handle_boundary_2d(nx + 1, ny, mesh, velocity_x, INVERT_X, PACK);
  handle_boundary_2d(nx, ny + 1, mesh, velocity_y, INVERT_Y, PACK);
}
// Computes artificial viscous stresses (Qxx, Qyy) from the compressive part
// of the velocity differences, then applies them to the momenta and
// re-derives velocities (same edge-density recipe as pressure_acceleration).
void artificial_viscosity(const int nx, const int ny, Mesh* mesh,
                          const double dt, double* Qxx, double* Qyy,
                          double* velocity_x, double* velocity_y,
                          double* momentum_x, double* momentum_y,
                          const double* density, const double* edgedx,
                          const double* edgedy, const double* celldx,
                          const double* celldy) {
  START_PROFILING(&compute_profile);
  const int pad = mesh->pad;

  // Calculate the artificial viscous stresses
  // PLPC Hydro Paper
#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // min(0, dv) keeps only compressive (negative) velocity differences,
      // so Q vanishes in expanding flow.
      const double u_i = min(0.0, velocity_x[(ii * (nx + 1) + jj) + 1] -
                                      velocity_x[(ii * (nx + 1) + jj)]);
      const double u_ii =
          0.5 * (fabs(min(0.0, (velocity_x[(ii * (nx + 1) + jj) + 2] -
                                velocity_x[(ii * (nx + 1) + jj) + 1])) -
                      min(0.0, (velocity_x[(ii * (nx + 1) + jj) + 1] -
                                velocity_x[(ii * (nx + 1) + jj)]))) +
                 fabs(min(0.0, (velocity_x[(ii * (nx + 1) + jj) + 1] -
                                velocity_x[(ii * (nx + 1) + jj)])) -
                      min(0.0, (velocity_x[(ii * (nx + 1) + jj)] -
                                velocity_x[(ii * (nx + 1) + jj) - 1]))));
      const double v_i = min(0.0, velocity_y[(ii * nx + jj) + nx] -
                                      velocity_y[(ii * nx + jj)]);
      const double v_ii =
          0.5 * (fabs(min(0.0, (velocity_y[(ii * nx + jj) + 2 * nx] -
                                velocity_y[(ii * nx + jj) + nx])) -
                      min(0.0, (velocity_y[(ii * nx + jj) + nx] -
                                velocity_y[(ii * nx + jj)]))) +
                 fabs(min(0.0, (velocity_y[(ii * nx + jj) + nx] -
                                velocity_y[(ii * nx + jj)])) -
                      min(0.0, (velocity_y[(ii * nx + jj)] -
                                velocity_y[(ii * nx + jj) - nx]))));
      Qxx[(ii * nx + jj)] = -C_Q * density[(ii * nx + jj)] * u_i * u_ii;
      Qyy[(ii * nx + jj)] = -C_Q * density[(ii * nx + jj)] * v_i * v_ii;
    }
  }
  STOP_PROFILING(&compute_profile, __func__);

  // Exchange halos before Q is differenced across cell boundaries below.
  handle_boundary_2d(nx, ny, mesh, Qxx, NO_INVERT, PACK);
  handle_boundary_2d(nx, ny, mesh, Qyy, NO_INVERT, PACK);

  START_PROFILING(&compute_profile);

  // Update the momenta by the artificial viscous stresses
#pragma omp parallel for
  for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
      momentum_x[(ii * (nx + 1) + jj)] -=
          dt * (Qxx[(ii * nx + jj)] - Qxx[(ii * nx + jj) - 1]) / celldx[jj];
      momentum_y[(ii * nx + jj)] -=
          dt * (Qyy[(ii * nx + jj)] - Qyy[(ii * nx + jj) - nx]) / celldy[ii];

      // Calculate the zone edge centered density
      const double density_edge_x =
          (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
           density[(ii * nx + jj) - 1] * celldx[jj - 1] * celldy[ii]) /
          (2.0 * edgedx[jj] * celldy[ii]);
      const double density_edge_y =
          (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
           density[(ii * nx + jj) - nx] * celldx[jj] * celldy[ii - 1]) /
          (2.0 * celldx[jj] * edgedy[ii]);

      // Find the velocities from the momenta and edge centered mass densities
      // (guarding against division by zero in vacuum regions)
      velocity_x[(ii * (nx + 1) + jj)] =
          (density_edge_x == 0.0)
              ? 0.0
              : momentum_x[(ii * (nx + 1) + jj)] / density_edge_x;
      velocity_y[(ii * nx + jj)] =
          (density_edge_y == 0.0) ? 0.0
                                  : momentum_y[(ii * nx + jj)] / density_edge_y;
    }
  }
  STOP_PROFILING(&compute_profile, __func__);

  handle_boundary_2d(nx + 1, ny, mesh, velocity_x, INVERT_X, PACK);
  handle_boundary_2d(nx, ny + 1, mesh, velocity_y, INVERT_Y, PACK);
}
// Calculates the work done due to forces within the element
// Updates cell energy from shock heating (artificial viscosity acting on the
// velocity divergence) and from compression/expansion work over dt_h.
void shock_heating_and_work(const int nx, const int ny, Mesh* mesh,
                            const double dt_h, double* energy,
                            const double* pressure, const double* velocity_x,
                            const double* velocity_y, const double* density,
                            const double* Qxx, const double* Qyy,
                            const double* celldx, const double* celldy) {
  START_PROFILING(&compute_profile);
  const int pad = mesh->pad;
#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // Per-direction velocity divergence across the cell.
      const double div_vel_x = (velocity_x[(ii * (nx + 1) + jj) + 1] -
                                velocity_x[(ii * (nx + 1) + jj)]) /
                               celldx[jj];
      const double div_vel_y =
          (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj)]) /
          celldy[ii];
      const double div_vel_dt = (div_vel_x + div_vel_y) * dt_h;
      // Energy after shock heating by the viscous stresses.
      const double e_q = energy[(ii * nx + jj)] -
                         dt_h * (Qxx[(ii * nx + jj)] * div_vel_x +
                                 Qyy[(ii * nx + jj)] * div_vel_y) /
                             density[(ii * nx + jj)];

      /// A working formulation that is second order in time for Pressure!?
      const double density_c = density[(ii * nx + jj)] / (1.0 + div_vel_dt);
      const double e_c =
          e_q -
          (pressure[(ii * nx + jj)] * div_vel_dt) / density[(ii * nx + jj)];
      // Work term averages the current pressure and a corrected estimate.
      const double work = 0.5 * div_vel_dt * (pressure[(ii * nx + jj)] +
                                              (GAM - 1.0) * e_c * density_c) /
                          density[(ii * nx + jj)];
      // Zero-density (vacuum) cells carry no energy.
      energy[(ii * nx + jj)] =
          (density[(ii * nx + jj)] == 0.0) ? 0.0 : e_q - work;
    }
  }
  STOP_PROFILING(&compute_profile, __func__);

  handle_boundary_2d(nx, ny, mesh, energy, NO_INVERT, PACK);
}
// Perform advection with monotonicity improvement
//
// Snapshots the pre-advection density, then performs the x and y mass/energy
// flux sweeps. The sweep order alternates with timestep parity (presumably to
// reduce directional splitting bias — see x/y_mass_and_energy_flux). The
// sweep run first (flag == 1) normalises energy by the old density; the
// second sweep normalises by the freshly advected density.
void advect_mass_and_energy(const int nx, const int ny, Mesh* mesh,
                            const int tt, const double dt, const double dt_h,
                            double* density, double* energy,
                            double* density_old, double* mass_flux_x,
                            double* mass_flux_y, double* eF_x, double* eF_y,
                            const double* velocity_x, const double* velocity_y,
                            const double* edgedx, const double* edgedy,
                            const double* celldx, const double* celldy) {
  // Keep a copy of the density field before either sweep mutates it
  START_PROFILING(&compute_profile);
#pragma omp parallel for
  for (int cell = 0; cell < nx * ny; ++cell) {
    density_old[cell] = density[cell];
  }
  STOP_PROFILING(&compute_profile, "storing_old_density");

  const int x_sweep_first = ((tt % 2) == 0);
  if (x_sweep_first) {
    x_mass_and_energy_flux(nx, ny, 1, mesh, dt, dt_h, density, density_old,
                           energy, velocity_x, mass_flux_x, eF_x, celldx,
                           edgedx, celldy, edgedy);
    y_mass_and_energy_flux(nx, ny, 0, mesh, dt, dt_h, density, density_old,
                           energy, velocity_y, mass_flux_y, eF_y, celldx,
                           edgedx, celldy, edgedy);
  } else {
    y_mass_and_energy_flux(nx, ny, 1, mesh, dt, dt_h, density, density_old,
                           energy, velocity_y, mass_flux_y, eF_y, celldx,
                           edgedx, celldy, edgedy);
    x_mass_and_energy_flux(nx, ny, 0, mesh, dt, dt_h, density, density_old,
                           energy, velocity_x, mass_flux_x, eF_x, celldx,
                           edgedx, celldy, edgedy);
  }
}
// Calculate the flux in the x direction
//
// Pass 1 builds monotonicity-limited mass fluxes (van Leer limiter) and
// energy fluxes (MC limiter) through the x faces; pass 2 applies them as a
// conservative update to density and energy. 'first' selects whether the
// energy update divides by the pre-sweep density (density_old) or by the
// freshly advected density, matching the sweep order chosen by the caller.
void x_mass_and_energy_flux(const int nx, const int ny, const int first,
                            Mesh* mesh, const double dt, const double dt_h,
                            double* density, double* density_old,
                            double* energy, const double* velocity_x,
                            double* mass_flux_x, double* eF_x,
                            const double* celldx, const double* edgedx,
                            const double* celldy, const double* edgedy) {
  const int pad = mesh->pad;
  // Compute the mass fluxes along the x edges
  // In the ghost cells flux is left as 0.0
  START_PROFILING(&compute_profile);
#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
      // Interpolate to make second order in time
      const double invdx = 1.0 / edgedx[jj];
      // Centred (suc0) and one-sided (sur0/sul0) velocity slopes for minmod
      const double suc0 = 0.5 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] -
                                         velocity_x[(ii * (nx + 1) + jj) - 1]);
      const double sur0 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj)] -
                                         velocity_x[(ii * (nx + 1) + jj) - 1]);
      const double sul0 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] -
                                         velocity_x[(ii * (nx + 1) + jj)]);
      // Face velocity advanced half a step along its limited slope
      const double u_tc = velocity_x[(ii * (nx + 1) + jj)] -
                          0.5 * velocity_x[(ii * (nx + 1) + jj)] * dt *
                              minmod(suc0, minmod(sur0, sul0));
      // Van leer limiter
      double limiter = 0.0;
      const double density_diff =
          (density[(ii * nx + jj)] - density[(ii * nx + jj) - 1]);
      if (density_diff) {
        // Upwind smoothness ratio of consecutive density differences
        const double smoothness =
            (u_tc >= 0.0)
                ? (density[(ii * nx + jj) - 1] - density[(ii * nx + jj) - 2]) /
                      density_diff
                : (density[(ii * nx + jj) + 1] - density[(ii * nx + jj)]) /
                      density_diff;
        limiter = (smoothness + fabs(smoothness)) / (1.0 + fabs(smoothness));
      }
      // Calculate the flux
      // First-order upwind flux plus a limited anti-diffusive correction
      const double density_upwind =
          (u_tc >= 0.0) ? density[(ii * nx + jj) - 1] : density[(ii * nx + jj)];
      mass_flux_x[(ii * (nx + 1) + jj)] =
          (u_tc * density_upwind +
           0.5 * fabs(u_tc) * (1.0 - fabs((u_tc * dt_h) / celldx[jj])) *
               limiter * density_diff);
      // Use MC limiter to get slope of energy
      // *_0 slopes are biased left (upwind for u_tc > 0), *_1 biased right
      const double a_x_0 =
          0.5 * invdx * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - 2]);
      const double b_x_0 = 2.0 * invdx * (energy[(ii * nx + jj) - 1] -
                                          energy[(ii * nx + jj) - 2]);
      const double c_x_0 =
          2.0 * invdx * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - 1]);
      const double a_x_1 = 0.5 * invdx * (energy[(ii * nx + jj) + 1] -
                                          energy[(ii * nx + jj) - 1]);
      const double b_x_1 =
          2.0 * invdx * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - 1]);
      const double c_x_1 =
          2.0 * invdx * (energy[(ii * nx + jj) + 1] - energy[(ii * nx + jj)]);
      // Calculate the interpolated densities
      // Energy reconstructed at the face from the upwind cell
      const double edge_e_x =
          (u_tc > 0.0)
              ? energy[(ii * nx + jj) - 1] +
                    0.5 * minmod(minmod(a_x_0, b_x_0), c_x_0) *
                        (celldx[jj - 1] - u_tc * dt_h)
              : energy[(ii * nx + jj)] -
                    0.5 * minmod(minmod(a_x_1, b_x_1), c_x_1) *
                        (celldx[jj] + u_tc * dt_h);
      // Update the fluxes to now include the contribution from energy
      eF_x[(ii * (nx + 1) + jj)] =
          edgedy[ii] * edge_e_x * mass_flux_x[(ii * (nx + 1) + jj)];
    }
  }
  STOP_PROFILING(&compute_profile, "advect_mass_and_energy");
  handle_boundary_2d(nx + 1, ny, mesh, mass_flux_x, INVERT_X, PACK);
  // Calculate the new density values
  START_PROFILING(&compute_profile);
#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // Conservative density update from the face mass fluxes.
      // NOTE(review): the jj+1 face is scaled by edgedy[ii + 1]; on a uniform
      // grid this equals edgedy[ii], but on a stretched grid the intended
      // face metric should be confirmed.
      density[(ii * nx + jj)] -=
          dt_h * (edgedy[ii + 1] * mass_flux_x[(ii * (nx + 1) + jj) + 1] -
                  edgedy[ii] * mass_flux_x[(ii * (nx + 1) + jj)]) /
          (celldx[jj] * celldy[ii]);
      // Updated energy density (rho * e) after the energy flux divergence
      const double density_e =
          (density_old[(ii * nx + jj)] * energy[(ii * nx + jj)] -
           (dt_h *
            (eF_x[(ii * (nx + 1) + jj) + 1] - eF_x[(ii * (nx + 1) + jj)])) /
               (celldx[jj] * celldy[ii]));
      // First sweep divides by the pre-sweep density, second by the new one;
      // void cells are clamped to zero energy
      energy[(ii * nx + jj)] =
          (first)
              ? (density_old[(ii * nx + jj)] == 0.0)
                    ? 0.0
                    : density_e / density_old[(ii * nx + jj)]
              : (density[(ii * nx + jj)] == 0.0)
                    ? 0.0
                    : density_e / density[(ii * nx + jj)];
    }
  }
  STOP_PROFILING(&compute_profile, "advect_mass_and_energy");
  handle_boundary_2d(nx, ny, mesh, density, NO_INVERT, PACK);
  handle_boundary_2d(nx, ny, mesh, energy, NO_INVERT, PACK);
}
// Calculate the flux in the y direction
//
// y-direction counterpart of x_mass_and_energy_flux: pass 1 builds
// limited mass fluxes (van Leer) and energy fluxes (MC limiter) through the
// y faces; pass 2 conservatively updates density and energy. 'first' selects
// whether the energy update divides by density_old or the updated density.
void y_mass_and_energy_flux(const int nx, const int ny, const int first,
                            Mesh* mesh, const double dt, const double dt_h,
                            double* density, double* density_old,
                            double* energy, const double* velocity_y,
                            double* mass_flux_y, double* eF_y,
                            const double* celldx, const double* edgedx,
                            const double* celldy, const double* edgedy) {
  const int pad = mesh->pad;
  // Compute the mass flux along the y edges
  // In the ghost cells flux is left as 0.0
  START_PROFILING(&compute_profile);
#pragma omp parallel for
  for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // Interpolate the velocity to make second order in time
      const double invdy = 1.0 / edgedy[ii];
      // Centred (svc0) and one-sided (svr0/svl0) velocity slopes for minmod
      const double svc0 = 0.5 * invdy * (velocity_y[(ii * nx + jj) + nx] -
                                         velocity_y[(ii * nx + jj) - nx]);
      const double svr0 = 2.0 * invdy * (velocity_y[(ii * nx + jj)] -
                                         velocity_y[(ii * nx + jj) - nx]);
      const double svl0 = 2.0 * invdy * (velocity_y[(ii * nx + jj) + nx] -
                                         velocity_y[(ii * nx + jj)]);
      // Face velocity advanced half a step along its limited slope
      const double v_tc = velocity_y[(ii * nx + jj)] -
                          0.5 * velocity_y[(ii * nx + jj)] * dt *
                              minmod(svc0, minmod(svr0, svl0));
      // Van leer limiter
      const double density_diff =
          (density[(ii * nx + jj)] - density[(ii * nx + jj) - nx]);
      double limiter = 0.0;
      if (density_diff) {
        // NOTE(review): the upwind direction here tests velocity_y directly,
        // while the x sweep tests the time-centred u_tc — confirm this
        // asymmetry is intentional.
        const double smoothness =
            (velocity_y[(ii * nx + jj)] >= 0.0)
                ? (density[(ii * nx + jj) - nx] -
                   density[(ii * nx + jj) - 2 * nx]) /
                      density_diff
                : (density[(ii * nx + jj) + nx] - density[(ii * nx + jj)]) /
                      density_diff;
        limiter = (smoothness + fabs(smoothness)) / (1.0 + fabs(smoothness));
      }
      // Calculate the flux
      // First-order upwind flux plus a limited anti-diffusive correction
      const double density_upwind = (v_tc >= 0.0) ? density[(ii * nx + jj) - nx]
                                                  : density[(ii * nx + jj)];
      mass_flux_y[(ii * nx + jj)] =
          (v_tc * density_upwind +
           0.5 * fabs(v_tc) * (1.0 - fabs((v_tc * dt_h) / celldy[ii])) *
               limiter * density_diff);
      // Use MC limiter to get slope of energy
      // *_0 slopes are biased downward (upwind for v_tc > 0), *_1 upward
      const double a_y_0 = 0.5 * invdy * (energy[(ii * nx + jj)] -
                                          energy[(ii * nx + jj) - 2 * nx]);
      const double b_y_0 = 2.0 * invdy * (energy[(ii * nx + jj) - nx] -
                                          energy[(ii * nx + jj) - 2 * nx]);
      const double c_y_0 =
          2.0 * invdy * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - nx]);
      const double a_y_1 = 0.5 * invdy * (energy[(ii * nx + jj) + nx] -
                                          energy[(ii * nx + jj) - nx]);
      const double b_y_1 =
          2.0 * invdy * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - nx]);
      const double c_y_1 =
          2.0 * invdy * (energy[(ii * nx + jj) + nx] - energy[(ii * nx + jj)]);
      // Energy reconstructed at the face from the upwind cell
      const double edge_e_y =
          (v_tc > 0.0)
              ? energy[(ii * nx + jj) - nx] +
                    0.5 * minmod(minmod(a_y_0, b_y_0), c_y_0) *
                        (celldy[ii - 1] - v_tc * dt_h)
              : energy[(ii * nx + jj)] -
                    0.5 * minmod(minmod(a_y_1, b_y_1), c_y_1) *
                        (celldy[ii] + v_tc * dt_h);
      // Update the fluxes to now include the contribution from energy
      eF_y[(ii * nx + jj)] =
          edgedx[jj] * edge_e_y * mass_flux_y[(ii * nx + jj)];
    }
  }
  STOP_PROFILING(&compute_profile, "advect_mass_and_energy");
  handle_boundary_2d(nx, ny + 1, mesh, mass_flux_y, INVERT_Y, PACK);
  // Calculate the new density values
  START_PROFILING(&compute_profile);
#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // Conservative density update from the face mass fluxes.
      // NOTE(review): the ii+1 face is scaled by edgedx[jj + 1]; equal to
      // edgedx[jj] on a uniform grid — confirm intent for stretched grids.
      density[(ii * nx + jj)] -=
          dt_h * (edgedx[jj + 1] * mass_flux_y[(ii * nx + jj) + nx] -
                  edgedx[jj] * mass_flux_y[(ii * nx + jj)]) /
          (celldx[jj] * celldy[ii]);
      // Updated energy density (rho * e) after the energy flux divergence
      const double density_e =
          (density_old[(ii * nx + jj)] * energy[(ii * nx + jj)] -
           (dt_h * (eF_y[(ii * nx + jj) + nx] - eF_y[(ii * nx + jj)])) /
               (celldx[jj] * celldy[ii]));
      // First sweep divides by the pre-sweep density, second by the new one;
      // void cells are clamped to zero energy
      energy[(ii * nx + jj)] =
          (first)
              ? (density_old[(ii * nx + jj)] == 0.0)
                    ? 0.0
                    : density_e / density_old[(ii * nx + jj)]
              : (density[(ii * nx + jj)] == 0.0)
                    ? 0.0
                    : density_e / density[(ii * nx + jj)];
    }
  }
  STOP_PROFILING(&compute_profile, "advect_mass_and_energy");
  handle_boundary_2d(nx, ny, mesh, density, NO_INVERT, PACK);
  handle_boundary_2d(nx, ny, mesh, energy, NO_INVERT, PACK);
}
// Advect momentum according to the velocity
//
// Updates the staggered momentum fields and the velocities derived from them
// by performing the four momentum flux sweeps (x momentum in x/y, y momentum
// in x/y). The sweep ordering alternates with timestep parity (tt), mirroring
// advect_mass_and_energy. The momentum_*_flux_* arrays are scratch storage;
// density_edge_* reconstruct the mass density at the staggered velocity
// locations so that velocity = momentum / density_edge, with void (zero
// mass) locations forced to zero velocity.
void advect_momentum(const int nx, const int ny, const int tt, Mesh* mesh,
                     const double dt_h, const double dt, double* velocity_x,
                     double* velocity_y, double* momentum_x_flux_x,
                     double* momentum_x_flux_y, double* momentum_y_flux_x,
                     double* momentum_y_flux_y, double* momentum_x,
                     double* momentum_y, const double* density,
                     const double* mass_flux_x, const double* mass_flux_y,
                     const double* edgedx, const double* edgedy,
                     const double* celldx, const double* celldy) {
  const int pad = mesh->pad;
  if (tt % 2) {
    // Odd steps: x momentum in x then y, followed by y momentum in x then y
    START_PROFILING(&compute_profile);
    momentum_x_flux_in_x(nx, ny, mesh, dt_h, velocity_x, momentum_x_flux_x,
                         mass_flux_x, edgedx, edgedy, celldx);
    STOP_PROFILING(&compute_profile, __func__);
    handle_boundary_2d(nx, ny, mesh, momentum_x_flux_x, NO_INVERT, PACK);
    START_PROFILING(&compute_profile);
#pragma omp parallel for
    for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
      for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
        // Conservative x-momentum update from the cell-centred fluxes
        momentum_x[(ii * (nx + 1) + jj)] -=
            dt_h * (momentum_x_flux_x[(ii * nx + jj)] -
                    momentum_x_flux_x[(ii * nx + jj) - 1]) /
            (edgedx[jj] * celldy[ii]);
        // Mass density interpolated to the x face (vertical edge)
        const double density_edge_x =
            (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
             density[(ii * nx + jj) - 1] * celldx[jj - 1] * celldy[ii]) /
            (2.0 * edgedx[jj] * celldy[ii]);
        velocity_x[(ii * (nx + 1) + jj)] =
            (density_edge_x == 0.0)
                ? 0.0
                : momentum_x[(ii * (nx + 1) + jj)] / density_edge_x;
      }
    }
    STOP_PROFILING(&compute_profile, __func__);
    handle_boundary_2d(nx + 1, ny, mesh, velocity_x, INVERT_X, PACK);
    START_PROFILING(&compute_profile);
    momentum_x_flux_in_y(nx, ny, mesh, dt_h, velocity_x, velocity_y,
                         momentum_x_flux_y, mass_flux_y, edgedx, edgedy,
                         celldy);
    STOP_PROFILING(&compute_profile, __func__);
    handle_boundary_2d(nx + 1, ny + 1, mesh, momentum_x_flux_y, NO_INVERT,
                       PACK);
    START_PROFILING(&compute_profile);
    // Calculate the axial momentum
#pragma omp parallel for
    for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
      for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
        // Second x-momentum update from the corner-centred y-direction fluxes
        momentum_x[(ii * (nx + 1) + jj)] -=
            dt_h * (momentum_x_flux_y[(ii * (nx + 1) + jj) + (nx + 1)] -
                    momentum_x_flux_y[(ii * (nx + 1) + jj)]) /
            (celldx[jj] * edgedy[ii]);
      }
    }
    momentum_y_flux_in_x(nx, ny, mesh, dt_h, velocity_x, velocity_y,
                         momentum_y_flux_x, mass_flux_x, edgedx, celldy,
                         celldx);
    STOP_PROFILING(&compute_profile, __func__);
    handle_boundary_2d(nx + 1, ny + 1, mesh, momentum_y_flux_x, NO_INVERT,
                       PACK);
    START_PROFILING(&compute_profile);
#pragma omp parallel for
    for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
      for (int jj = pad; jj < nx - pad; ++jj) {
        // y-momentum update from the corner-centred x-direction fluxes
        momentum_y[(ii * nx + jj)] -=
            dt_h * (momentum_y_flux_x[(ii * (nx + 1) + jj) + 1] -
                    momentum_y_flux_x[(ii * (nx + 1) + jj)]) /
            (edgedx[jj] * celldy[ii]);
        // Mass density interpolated to the y face (horizontal edge)
        const double density_edge_y =
            (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
             density[(ii * nx + jj) - nx] * celldx[jj] * celldy[ii - 1]) /
            (2.0 * celldx[jj] * edgedy[ii]);
        velocity_y[(ii * nx + jj)] =
            (density_edge_y == 0.0)
                ? 0.0
                : momentum_y[(ii * nx + jj)] / density_edge_y;
      }
    }
    STOP_PROFILING(&compute_profile, __func__);
    handle_boundary_2d(nx, ny + 1, mesh, velocity_y, INVERT_Y, PACK);
    START_PROFILING(&compute_profile);
    momentum_y_flux_in_y(nx, ny, mesh, dt_h, velocity_y, momentum_y_flux_y,
                         mass_flux_y, edgedy, celldx, celldy);
    STOP_PROFILING(&compute_profile, __func__);
    handle_boundary_2d(nx, ny, mesh, momentum_y_flux_y, NO_INVERT, PACK);
    START_PROFILING(&compute_profile);
#pragma omp parallel for
    for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
      for (int jj = pad; jj < nx - pad; ++jj) {
        // Final y-momentum update from the cell-centred y-direction fluxes
        momentum_y[(ii * nx + jj)] -= dt_h *
                                      (momentum_y_flux_y[(ii * nx + jj)] -
                                       momentum_y_flux_y[(ii * nx + jj) - nx]) /
                                      (celldx[jj] * edgedy[ii]);
      }
    }
    STOP_PROFILING(&compute_profile, __func__);
  } else {
    // Even steps: reversed ordering — x momentum in y then x, followed by
    // y momentum in y then x
    START_PROFILING(&compute_profile);
    momentum_x_flux_in_y(nx, ny, mesh, dt_h, velocity_x, velocity_y,
                         momentum_x_flux_y, mass_flux_y, edgedx, edgedy,
                         celldy);
    STOP_PROFILING(&compute_profile, __func__);
    handle_boundary_2d(nx + 1, ny + 1, mesh, momentum_x_flux_y, NO_INVERT,
                       PACK);
    START_PROFILING(&compute_profile);
    // Calculate the axial momentum
#pragma omp parallel for
    for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
      for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
        // x-momentum update from the corner-centred y-direction fluxes,
        // then velocity recovery at the x face
        momentum_x[(ii * (nx + 1) + jj)] -=
            dt_h * (momentum_x_flux_y[(ii * (nx + 1) + jj) + (nx + 1)] -
                    momentum_x_flux_y[(ii * (nx + 1) + jj)]) /
            (celldx[jj] * edgedy[ii]);
        const double density_edge_x =
            (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
             density[(ii * nx + jj) - 1] * celldx[jj - 1] * celldy[ii]) /
            (2.0 * edgedx[jj] * celldy[ii]);
        velocity_x[(ii * (nx + 1) + jj)] =
            (density_edge_x == 0.0)
                ? 0.0
                : momentum_x[(ii * (nx + 1) + jj)] / density_edge_x;
      }
    }
    STOP_PROFILING(&compute_profile, __func__);
    handle_boundary_2d(nx + 1, ny, mesh, velocity_x, INVERT_X, PACK);
    START_PROFILING(&compute_profile);
    momentum_x_flux_in_x(nx, ny, mesh, dt_h, velocity_x, momentum_x_flux_x,
                         mass_flux_x, edgedx, edgedy, celldx);
    STOP_PROFILING(&compute_profile, __func__);
    handle_boundary_2d(nx, ny, mesh, momentum_x_flux_x, NO_INVERT, PACK);
    START_PROFILING(&compute_profile);
#pragma omp parallel for
    for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
      for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
        // Second x-momentum update from the cell-centred x-direction fluxes
        momentum_x[(ii * (nx + 1) + jj)] -=
            dt_h * (momentum_x_flux_x[(ii * nx + jj)] -
                    momentum_x_flux_x[(ii * nx + jj) - 1]) /
            (edgedx[jj] * celldy[ii]);
      }
    }
    momentum_y_flux_in_y(nx, ny, mesh, dt_h, velocity_y, momentum_y_flux_y,
                         mass_flux_y, edgedy, celldx, celldy);
    STOP_PROFILING(&compute_profile, __func__);
    handle_boundary_2d(nx, ny, mesh, momentum_y_flux_y, NO_INVERT, PACK);
    START_PROFILING(&compute_profile);
#pragma omp parallel for
    for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
      for (int jj = pad; jj < nx - pad; ++jj) {
        // y-momentum update from the cell-centred y-direction fluxes,
        // then velocity recovery at the y face
        momentum_y[(ii * nx + jj)] -= dt_h *
                                      (momentum_y_flux_y[(ii * nx + jj)] -
                                       momentum_y_flux_y[(ii * nx + jj) - nx]) /
                                      (celldx[jj] * edgedy[ii]);
        const double density_edge_y =
            (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
             density[(ii * nx + jj) - nx] * celldx[jj] * celldy[ii - 1]) /
            (2.0 * celldx[jj] * edgedy[ii]);
        velocity_y[(ii * nx + jj)] =
            (density_edge_y == 0.0)
                ? 0.0
                : momentum_y[(ii * nx + jj)] / density_edge_y;
      }
    }
    STOP_PROFILING(&compute_profile, __func__);
    handle_boundary_2d(nx, ny + 1, mesh, velocity_y, INVERT_Y, PACK);
    START_PROFILING(&compute_profile);
    momentum_y_flux_in_x(nx, ny, mesh, dt_h, velocity_x, velocity_y,
                         momentum_y_flux_x, mass_flux_x, edgedx, celldy,
                         celldx);
    STOP_PROFILING(&compute_profile, __func__);
    handle_boundary_2d(nx + 1, ny + 1, mesh, momentum_y_flux_x, NO_INVERT,
                       PACK);
    START_PROFILING(&compute_profile);
#pragma omp parallel for
    for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
      for (int jj = pad; jj < nx - pad; ++jj) {
        // Final y-momentum update from the corner-centred x-direction fluxes
        momentum_y[(ii * nx + jj)] -=
            dt_h * (momentum_y_flux_x[(ii * (nx + 1) + jj) + 1] -
                    momentum_y_flux_x[(ii * (nx + 1) + jj)]) /
            (edgedx[jj] * celldy[ii]);
      }
    }
    STOP_PROFILING(&compute_profile, __func__);
  }
}
// Calculates the x momentum flux along the x dimension
//
// The x velocity lives on vertical faces, so the flux of x momentum in x is
// evaluated at cell centres by averaging the two adjacent face velocities and
// mass fluxes, then transporting an MC-limited upwind reconstruction of the
// face velocity. Results go into the cell-centred momentum_x_flux_x array.
void momentum_x_flux_in_x(const int nx, const int ny, Mesh* mesh,
                          const double dt_h, double* velocity_x,
                          double* momentum_x_flux_x, const double* mass_flux_x,
                          const double* edgedx, const double* edgedy,
                          const double* celldx) {
  const int pad = mesh->pad;
  // Calculate the cell centered x momentum fluxes in the x direction
#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // Use MC limiter to get slope of velocity
      // *_0 slopes surround the left face (upwind for u_cell_x > 0),
      // *_1 slopes surround the right face
      const double invdx = 1.0 / edgedx[jj];
      const double a_x_0 = 0.5 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] -
                                          velocity_x[(ii * (nx + 1) + jj) - 1]);
      const double b_x_0 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj)] -
                                          velocity_x[(ii * (nx + 1) + jj) - 1]);
      const double c_x_0 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] -
                                          velocity_x[(ii * (nx + 1) + jj)]);
      const double a_x_1 = 0.5 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 2] -
                                          velocity_x[(ii * (nx + 1) + jj)]);
      const double b_x_1 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] -
                                          velocity_x[(ii * (nx + 1) + jj)]);
      const double c_x_1 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 2] -
                                          velocity_x[(ii * (nx + 1) + jj) + 1]);
      // Calculate the interpolated densities
      // Cell-centred advecting velocity and mass flux from face averages
      const double u_cell_x = 0.5 * (velocity_x[(ii * (nx + 1) + jj)] +
                                     velocity_x[(ii * (nx + 1) + jj) + 1]);
      const double F_x =
          edgedy[ii] * 0.5 * (mass_flux_x[(ii * (nx + 1) + jj)] +
                              mass_flux_x[(ii * (nx + 1) + jj) + 1]);
      // Upwind, MC-limited reconstruction of the transported velocity
      const double u_cell_x_interp =
          (u_cell_x > 0.0)
              ? velocity_x[(ii * (nx + 1) + jj)] +
                    0.5 * minmod(minmod(a_x_0, b_x_0), c_x_0) *
                        (celldx[jj - 1] - u_cell_x * dt_h)
              : velocity_x[(ii * (nx + 1) + jj) + 1] -
                    0.5 * minmod(minmod(a_x_1, b_x_1), c_x_1) *
                        (celldx[jj] + u_cell_x * dt_h);
      momentum_x_flux_x[(ii * nx + jj)] = F_x * u_cell_x_interp;
    }
  }
}
// Calculates the x momentum flux in the y dimension
//
// Corner-centred fluxes: the transverse velocity v is averaged to the cell
// corner, and the transported x velocity is reconstructed in y from the
// upwind side using an MC limiter. Results fill momentum_x_flux_y on the
// (nx+1) x (ny+1) corner grid.
void momentum_x_flux_in_y(const int nx, const int ny, Mesh* mesh,
                          const double dt_h, double* velocity_x,
                          double* velocity_y, double* momentum_x_flux_y,
                          const double* mass_flux_y, const double* edgedx,
                          const double* edgedy, const double* celldy) {
  const int pad = mesh->pad;
#pragma omp parallel for
  for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
      // Use MC limiter to get slope of velocity
      // *_0 slopes are below the corner (upwind for v_cell_y > 0),
      // *_1 slopes are above it
      const double invdy = 1.0 / edgedy[ii];
      const double a_y_0 =
          0.5 * invdy * (velocity_x[(ii * (nx + 1) + jj)] -
                         velocity_x[(ii * (nx + 1) + jj) - 2 * (nx + 1)]);
      const double b_y_0 =
          2.0 * invdy * (velocity_x[(ii * (nx + 1) + jj) - (nx + 1)] -
                         velocity_x[(ii * (nx + 1) + jj) - 2 * (nx + 1)]);
      const double c_y_0 =
          2.0 * invdy * (velocity_x[(ii * (nx + 1) + jj)] -
                         velocity_x[(ii * (nx + 1) + jj) - (nx + 1)]);
      const double a_y_1 =
          0.5 * invdy * (velocity_x[(ii * (nx + 1) + jj) + (nx + 1)] -
                         velocity_x[(ii * (nx + 1) + jj) - (nx + 1)]);
      const double b_y_1 =
          2.0 * invdy * (velocity_x[(ii * (nx + 1) + jj)] -
                         velocity_x[(ii * (nx + 1) + jj) - (nx + 1)]);
      const double c_y_1 =
          2.0 * invdy * (velocity_x[(ii * (nx + 1) + jj) + (nx + 1)] -
                         velocity_x[(ii * (nx + 1) + jj)]);
      // Advecting velocity and mass flux averaged to the corner
      const double v_cell_y =
          0.5 * (velocity_y[(ii * nx + jj) - 1] + velocity_y[(ii * nx + jj)]);
      const double F_y = edgedx[jj] * 0.5 * (mass_flux_y[(ii * nx + jj)] +
                                             mass_flux_y[(ii * nx + jj) - 1]);
      // Upwind, MC-limited reconstruction of the transported x velocity
      const double u_corner_y =
          (v_cell_y > 0.0)
              ? velocity_x[(ii * (nx + 1) + jj) - (nx + 1)] +
                    0.5 * minmod(minmod(a_y_0, b_y_0), c_y_0) *
                        (celldy[ii - 1] - v_cell_y * dt_h)
              : velocity_x[(ii * (nx + 1) + jj)] -
                    0.5 * minmod(minmod(a_y_1, b_y_1), c_y_1) *
                        (celldy[ii] + v_cell_y * dt_h);
      momentum_x_flux_y[(ii * (nx + 1) + jj)] = F_y * u_corner_y;
    }
  }
}
// Calculates the y momentum flux in the x dimension
//
// Corner-centred fluxes: the transverse velocity u is averaged to the cell
// corner, and the transported y velocity is reconstructed in x from the
// upwind side using an MC limiter. Results fill momentum_y_flux_x on the
// (nx+1) x (ny+1) corner grid.
void momentum_y_flux_in_x(const int nx, const int ny, Mesh* mesh,
                          const double dt_h, const double* velocity_x,
                          double* velocity_y, double* momentum_y_flux_x,
                          const double* mass_flux_x, const double* edgedx,
                          const double* celldy, const double* celldx) {
  const int pad = mesh->pad;
  // Calculate the corner centered y momentum fluxes in the x direction
  // Calculate the cell centered y momentum fluxes in the y direction
#pragma omp parallel for
  for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
      // Use MC limiter to get slope of velocity
      // *_0 slopes are left of the corner (upwind for u_cell_x > 0),
      // *_1 slopes are to its right
      const double invdx = 1.0 / edgedx[jj];
      const double a_x_0 = 0.5 * invdx * (velocity_y[(ii * nx + jj)] -
                                          velocity_y[(ii * nx + jj) - 2]);
      const double b_x_0 = 2.0 * invdx * (velocity_y[(ii * nx + jj) - 1] -
                                          velocity_y[(ii * nx + jj) - 2]);
      const double c_x_0 = 2.0 * invdx * (velocity_y[(ii * nx + jj)] -
                                          velocity_y[(ii * nx + jj) - 1]);
      const double a_x_1 = 0.5 * invdx * (velocity_y[(ii * nx + jj) + 1] -
                                          velocity_y[(ii * nx + jj) - 1]);
      const double b_x_1 = 2.0 * invdx * (velocity_y[(ii * nx + jj)] -
                                          velocity_y[(ii * nx + jj) - 1]);
      const double c_x_1 = 2.0 * invdx * (velocity_y[(ii * nx + jj) + 1] -
                                          velocity_y[(ii * nx + jj)]);
      // Calculate the interpolated densities
      // Mass flux and advecting velocity averaged to the corner
      const double F_x =
          celldy[ii] * 0.5 * (mass_flux_x[(ii * (nx + 1) + jj)] +
                              mass_flux_x[(ii * (nx + 1) + jj) - (nx + 1)]);
      const double u_cell_x =
          0.5 * (velocity_x[(ii * (nx + 1) + jj)] +
                 velocity_x[(ii * (nx + 1) + jj) - (nx + 1)]);
      // Upwind, MC-limited reconstruction of the transported y velocity
      const double v_cell_x_interp =
          (u_cell_x > 0.0)
              ? velocity_y[(ii * nx + jj) - 1] +
                    0.5 * minmod(minmod(a_x_0, b_x_0), c_x_0) *
                        (celldx[jj - 1] - u_cell_x * dt_h)
              : velocity_y[(ii * nx + jj)] -
                    0.5 * minmod(minmod(a_x_1, b_x_1), c_x_1) *
                        (celldx[jj] + u_cell_x * dt_h);
      momentum_y_flux_x[(ii * (nx + 1) + jj)] = F_x * v_cell_x_interp;
    }
  }
}
// Calculates the y momentum flux in the y dimension
//
// The y velocity lives on horizontal faces, so the flux of y momentum in y is
// evaluated at cell centres by averaging the two adjacent face velocities and
// mass fluxes, then transporting an MC-limited upwind reconstruction of the
// face velocity. Results go into the cell-centred momentum_y_flux_y array.
void momentum_y_flux_in_y(const int nx, const int ny, Mesh* mesh,
                          const double dt_h, double* velocity_y,
                          double* momentum_y_flux_y, const double* mass_flux_y,
                          const double* edgedy, const double* celldx,
                          const double* celldy) {
  const int pad = mesh->pad;
#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // Use MC limiter to get slope of velocity
      // *_0 slopes surround the lower face (upwind for v_cell_y > 0),
      // *_1 slopes surround the upper face
      const double invdy = 1.0 / edgedy[ii];
      const double a_y_0 = 0.5 * invdy * (velocity_y[(ii * nx + jj) + nx] -
                                          velocity_y[(ii * nx + jj) - nx]);
      const double b_y_0 = 2.0 * invdy * (velocity_y[(ii * nx + jj)] -
                                          velocity_y[(ii * nx + jj) - nx]);
      const double c_y_0 = 2.0 * invdy * (velocity_y[(ii * nx + jj) + nx] -
                                          velocity_y[(ii * nx + jj)]);
      const double a_y_1 = 0.5 * invdy * (velocity_y[(ii * nx + jj) + 2 * nx] -
                                          velocity_y[(ii * nx + jj)]);
      const double b_y_1 = 2.0 * invdy * (velocity_y[(ii * nx + jj) + nx] -
                                          velocity_y[(ii * nx + jj)]);
      const double c_y_1 = 2.0 * invdy * (velocity_y[(ii * nx + jj) + 2 * nx] -
                                          velocity_y[(ii * nx + jj) + nx]);
      // Cell-centred advecting mass flux and velocity from face averages
      const double F_y = celldx[jj] * 0.5 * (mass_flux_y[(ii * nx + jj)] +
                                             mass_flux_y[(ii * nx + jj) + nx]);
      const double v_cell_y =
          0.5 * (velocity_y[(ii * nx + jj)] + velocity_y[(ii * nx + jj) + nx]);
      // Upwind, MC-limited reconstruction of the transported velocity
      const double v_cell_y_interp =
          (v_cell_y > 0.0)
              ? velocity_y[(ii * nx + jj)] +
                    0.5 * minmod(minmod(a_y_0, b_y_0), c_y_0) *
                        (celldy[ii - 1] - v_cell_y * dt_h)
              : velocity_y[(ii * nx + jj) + nx] -
                    0.5 * minmod(minmod(a_y_1, b_y_1), c_y_1) *
                        (celldy[ii] + v_cell_y * dt_h);
      momentum_y_flux_y[(ii * nx + jj)] = F_y * v_cell_y_interp;
    }
  }
}
// Prints some conservation values
//
// Sums density (total mass) and density*energy (total energy) over interior
// cells, reduces the partial sums across ranks, and reports the global totals
// on the master rank. reduce_array is accepted for interface compatibility
// but is not used by this implementation.
void print_conservation(const int nx, const int ny, double* density,
                        double* energy, double* reduce_array, Mesh* mesh) {
  const int pad = mesh->pad;
  double total_mass = 0.0;
  double total_energy = 0.0;
#pragma omp parallel for reduction(+ : total_mass, total_energy)
  for (int row = pad; row < ny - pad; ++row) {
#pragma omp simd
    for (int col = pad; col < nx - pad; ++col) {
      const int cell = row * nx + col;
      total_mass += density[cell];
      total_energy += density[cell] * energy[cell];
    }
  }
  const double global_mass = reduce_to_master(total_mass);
  const double global_energy = reduce_to_master(total_energy);
  if (mesh->rank == MASTER) {
    printf("Total mass: %.12e\n", global_mass);
    printf("Total energy: %.12e\n", global_energy);
  }
}
|
ten_tusscher_2004_epi_S2_12.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_12.h"
// Reports this model's resting potential and ODE system size to the solver.
// The GET_CELL_MODEL_DATA macro (from the model header) presumably supplies
// the signature with cell_model, get_initial_v and get_neq — confirm there.
GET_CELL_MODEL_DATA(init_cell_model_data) {
  assert(cell_model);
  // Only fill in the fields the caller asked for
  if(get_initial_v)
    cell_model->initial_v = INITIAL_V;
  if(get_neq)
    cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
// Fills sv with this model's initial state vector for one cell.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
  // Default initial conditions
  /*
  sv[0] = INITIAL_V; // V; millivolt
  sv[1] = 0.f; //M
  sv[2] = 0.75; //H
  sv[3] = 0.75f; //J
  sv[4] = 0.f; //Xr1
  sv[5] = 1.f; //Xr2
  sv[6] = 0.f; //Xs
  sv[7] = 1.f; //S
  sv[8] = 0.f; //R
  sv[9] = 0.f; //D
  sv[10] = 1.f; //F
  sv[11] = 1.f; //FCa
  sv[12] = 1.f; //G
  sv[13] = 0.0002; //Cai
  sv[14] = 0.2f; //CaSR
  sv[15] = 11.6f; //Nai
  sv[16] = 138.3f; //Ki
  */
  // Elnaz's steady-state initial conditions
  // Pre-paced steady state replacing the textbook defaults above; entries
  // follow the same ordering (V, M, H, J, Xr1, Xr2, Xs, S, R, D, F, FCa, G,
  // Cai, CaSR, Nai, Ki). sv_sst has 17 entries, so NEQ is presumably 17 —
  // confirm against the model header.
  real sv_sst[]={-86.6743585456438,0.00126116515238777,0.782285143101146,0.781885737321280,0.000172267497323657,0.486193660951379,0.00291820808108493,0.999998382455018,1.89973078307127e-08,1.86451321167615e-05,0.999780198191440,1.00782702931804,0.999999754763967,2.76599036686923e-05,0.357538249293263,10.7085717792583,139.021384569998};
  for (uint32_t i = 0; i < NEQ; i++)
    sv[i] = sv_sst[i];
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
// Performs one explicit step of the model ODEs for a single cell: snapshots
// the current state, evaluates RHS_cpu (which produces the advanced state in
// its output array), and writes that state back into sv in place.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
  assert(sv);
  real state[NEQ];
  real next_state[NEQ];
  for (int eq = 0; eq < NEQ; eq++) {
    state[eq] = sv[eq];
  }
  RHS_cpu(state, next_state, stim_current, dt);
  for (int eq = 0; eq < NEQ; eq++) {
    sv[eq] = next_state[eq];
  }
}
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.4941061664816,0.000306940351318330,0.000126486160649835,0.000251593758331556,0.231852653636147,0.170492615868249,0.109036079095606,4.44796487754522,0.0111149661882113,1.23956736157302,1099.91017026794,0.000314927815763443,0.381236416535235,0.0193513922111542,0.00539385037460332,9.81890868796030e-06};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
Oy_Fl_Shader_Box.h | #ifndef Oy_Fl_Shader_Box_H
#define Oy_Fl_Shader_Box_H
#if defined(__APPLE__)
# include <OpenGL/gl.h>
# include <OpenGL/glu.h>
# include <OpenGL/glext.h>
#else
# include <GL/gl.h>
# include <GL/glu.h>
# include <GL/glext.h>
#endif
#include <FL/Fl_Gl_Window.H>
#include <FL/Fl.H>
#include <FL/gl.h>
#include "Oy_Fl_Image_Widget.h"
#include "oyranos_devices.h"
#ifndef _DBG_FORMAT_
#define _DBG_FORMAT_ "%s:%d %s() "
#define _DBG_ARGS_ (strrchr(__FILE__,'/') ? strrchr(__FILE__,'/')+1 : __FILE__),__LINE__,__func__
#endif
/* FLTK OpenGL widget that displays an image through a GPU color-management
 * path: the image is uploaded as a 2D texture and run through a fragment
 * shader which samples a 3D colour look-up table (CLUT). */
class Oy_Fl_Shader_Box : public Fl_Gl_Window,
public Oy_Fl_Image_Widget
{
/* texture storage */
char * frame_data;          /* intermediate CPU-side pixel buffer for glTexSubImage2D */
int frame_height, frame_width;  /* power-of-two texture dimensions */
/* widget dimensions */
int W,H;
/* sub texture dimensions */
int sw = 0, sh = 0;         /* actually used region inside the frame texture */
int grid_points;            /* CLUT grid resolution per axis */
int max_texture_size;       /* cached GL_MAX_TEXTURE_SIZE */
oyImage_s * clut_image,     /* 3D LUT as an Oyranos image (w x w*w, uint16) */
* image,                    /* source image */
* display_image;            /* DAG output image for the display */
int load, init, adraw;      /* state flags: reload requested / GL initialised / forced draws pending */
float background[3];        /* background colour, already converted to monitor space */
oyRectangle_s * old_rect;   /* last widget geometry reported to the colour server */
/* Construct the widget, zero all state and probe which FLTK GL modes the
 * platform supports, accumulating the working ones into the final mode(). */
Oy_Fl_Shader_Box(int x, int y, int w, int h)
: Fl_Gl_Window(x,y,w,h), Oy_Fl_Image_Widget(x,y,w,h)
{ frame_data= NULL; frame_width= frame_height= W= H= sw= sh=0; clut_image= image= display_image= NULL;
grid_points= 0; clut= NULL; img_texture= clut_texture= 0; load= init= adraw= 0;
max_texture_size= 0,tick= 0; old_rect= NULL;
/* TEST_GL probes one FL mode flag via can_do(); on success the flag is
 * OR-ed into 'mod' which is applied at the end. */
# define TEST_GL(modus) { \
this->mode(modus); \
if(this->can_do()) { \
mod |= modus; \
this->mode(mod); \
fprintf(stderr, "OpenGL understand: yes %s %d\n", #modus, this->mode() ); \
} else { printf("can_do() false: %d\n", modus); \
fprintf(stderr, "OpenGL understand: no %s %d\n", #modus, this->mode() ); \
} \
}
long mod = 0;
TEST_GL(FL_RGB)
//TEST_GL(FL_DOUBLE)
TEST_GL(FL_ALPHA)
//TEST_GL(FL_DEPTH)
//TEST_GL(FL_MULTISAMPLE)
mode(mod);
};
/* Release Oyranos images, deregister the colour-server region (last flag 1
 * presumably means "remove" -- TODO confirm against colorServerRegionSet)
 * and free the GL objects. NOTE(review): freeGL() issues GL calls; assumes
 * a current GL context at destruction time -- confirm. */
~Oy_Fl_Shader_Box(void) { oyImage_Release( &clut_image );
oyImage_Release( &image ); oyImage_Release( &display_image );
colorServerRegionSet( dynamic_cast<Fl_Widget*>(dynamic_cast<Oy_Fl_Image_Widget*>(this)), NULL, old_rect, 1 );
oyRectangle_Release( &old_rect );
freeGL();
};
void damage( char c )
{
/* User-level damage means the content itself changed: remember that in
 * the dirty flag, then forward the damage bits to the base widget. */
const bool content_changed = (c & FL_DAMAGE_USER1) != 0;
if(content_changed)
dirty = 1;
Oy_Fl_Image_Widget::damage( c );
}
private:
GLushort * clut;            /* raw 16-bit CLUT samples (owned by clut_image) */
GLuint img_texture;         /* texture unit 0: the image */
GLuint clut_texture;        /* texture unit 1: the 3D LUT */
GLfloat clut_scale;         /* texel-centre scale: (n-1)/n */
GLfloat clut_offset;        /* texel-centre offset: 1/(2n) */
GLuint cmm_prog;            /* linked shader program */
GLuint cmm_shader;          /* compiled fragment shader */
static const char * cmm_shader_source_rgba;  /* RGB(A) colour-managed shader */
static const char * cmm_shader_source_4c;    /* 4-channel (e.g. CMYK preview) shader */
/* Print the GLSL info log of a shader OR program object to stderr.
 * Fix: the original always used glGetShaderiv()/glGetShaderInfoLog(),
 * but this function is also called with program objects (see the
 * print_log(cmm_prog) call after glLinkProgram), which raises
 * GL_INVALID_OPERATION and loses the link log.  Dispatch on the object
 * type instead.  Also guard the malloc result. */
void print_log (GLuint obj)
{
int len = 0;
int nwritten = 0;
char *log;
if (glIsProgram (obj))
glGetProgramiv (obj, GL_INFO_LOG_LENGTH, &len);
else
glGetShaderiv (obj, GL_INFO_LOG_LENGTH, &len);
if (len > 0) {
log = (char*) malloc (len);
if (!log) return; /* nothing useful to report without a buffer */
if (glIsProgram (obj))
glGetProgramInfoLog (obj, len, &nwritten, log);
else
glGetShaderInfoLog (obj, len, &nwritten, log);
fprintf (stderr, "%s\n", log);
free (log);
}
}
int check_error (const char *text)
{
GLenum err = glGetError ();
if (err != GL_NO_ERROR)
{
if (text)
fprintf (stderr, _DBG_FORMAT_ "%s:\n",_DBG_ARGS_, text);
while (err != GL_NO_ERROR)
{
const char * t = "";
switch(err) {
case GL_INVALID_ENUM: t = "GL_INVALID_ENUM"; break;
case GL_INVALID_OPERATION: t = "GL_INVALID_OPERATION"; break; }
fprintf (stderr, _DBG_FORMAT_ "GL ERROR %s %#x\n",_DBG_ARGS_, t, (int) err);
err = glGetError ();
}
return 1;
}
return 0;
}
/* Compile and link the colour-management fragment shader (4-channel or
 * RGBA variant depending on cchannels), set its uniforms and bind the
 * image/clut samplers to texture units 0 and 1. */
void initShaders (int cchannels )
{
GLint loc;
/* compile shader program */
if(oy_debug == 0 && oy_display_verbose == 0)
fprintf(stderr,"iS");
cmm_shader = glCreateShader (GL_FRAGMENT_SHADER);
if(cchannels == 4)
glShaderSource (cmm_shader, 1, &cmm_shader_source_4c, NULL);
else
glShaderSource (cmm_shader, 1, &cmm_shader_source_rgba, NULL);
glCompileShader (cmm_shader);
print_log (cmm_shader);
cmm_prog = glCreateProgram ();
glAttachShader (cmm_prog, cmm_shader);
glLinkProgram (cmm_prog);
print_log (cmm_prog);
/* program must be active before glUniform* calls take effect */
glUseProgram (cmm_prog);
loc = glGetUniformLocation (cmm_prog, "scale");
glUniform1f (loc, clut_scale);
loc = glGetUniformLocation (cmm_prog, "offset");
glUniform1f (loc, clut_offset);
/* texture 0 = image */
glActiveTexture (GL_TEXTURE0 + 0);
glBindTexture (GL_TEXTURE_2D, img_texture);
loc = glGetUniformLocation (cmm_prog, "image");
glUniform1i (loc, 0);
/* texture 1 = clut */
glActiveTexture (GL_TEXTURE0 + 1);
glBindTexture (GL_TEXTURE_3D, clut_texture);
loc = glGetUniformLocation (cmm_prog, "clut");
glUniform1i (loc, 1);
}
/* Allocate and configure the 2D image texture sized to the intermediate
 * frame buffer (frame_width x frame_height) and upload frame_data. */
void initImageTexture ()
{
/* NOTE(review): GetPixelLayout is called before the !image check;
 * presumably it tolerates NULL and returns 0 -- confirm. */
oyPixel_t pt = oyImage_GetPixelLayout( image, oyLAYOUT );
if(!image)
pt = OY_TYPE_123_8;
oyDATATYPE_e data_type = oyToDataType_m( pt );
int channels = oyToChannels_m( pt );
int gl_channels = oyToGlChannelType(channels);
int gl_data_type = oyToGlDataType(data_type);
int gl_type = oyToGlPixelType(pt);
if(oy_debug == 0 && oy_display_verbose == 0)
fprintf(stderr,"iIT");
/* texture 0 (image) */
glGenTextures (1, &img_texture);
if(oy_display_verbose || !img_texture) fprintf(stderr,_DBG_FORMAT_ "image_texture: %d\n", _DBG_ARGS_, img_texture);
if(img_texture <= 0) return;
glActiveTexture (GL_TEXTURE0 + 0);
glBindTexture (GL_TEXTURE_2D, img_texture);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D (GL_TEXTURE_2D, 0, gl_type, frame_width, frame_height,
0, gl_channels, gl_data_type, frame_data);
/* size may be too big */
if(check_error("glTexImage2D failed"))
{
fprintf( stderr,_DBG_FORMAT_"texture:(%dx%d)%dc %s %s %s %s\n", _DBG_ARGS_,
frame_width,frame_height, channels, oyDataTypeToText(data_type),
printType( gl_type ), printDataType(gl_data_type), printChannelType( gl_channels));
}
glBindTexture (GL_TEXTURE_2D, 0);
if(0&&oy_display_verbose)
oyImage_ToFile( display_image, "display_image.ppm", 0 );
}
/* Allocate the 3D LUT texture on unit 1 and upload the CLUT samples.
 * The cchannels parameter is currently unused here -- the upload is
 * always GL_RGB/GL_UNSIGNED_SHORT. */
void initClutTexture(int cchannels)
{
glGenTextures (1, &clut_texture);
if(oy_display_verbose || !clut_texture) fprintf(stderr,_DBG_FORMAT_ "clut_texture: %d\n", _DBG_ARGS_, clut_texture);
if(clut_texture <= 0) return;
if(oy_debug == 0 && oy_display_verbose == 0)
fprintf(stderr,"iCT");
/* texture 1 = clut */
glActiveTexture (GL_TEXTURE0 + 1);
glBindTexture (GL_TEXTURE_3D, clut_texture);
glTexParameteri (GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri (GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri (GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri (GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri (GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
/*
* Don't use FP luts, but only 16-bit integer ones, since the ATI card
* does not support GL_LINEAR for GL_RGB32F_ARB, but ony GL_NEAREST
*/
int n = oyImage_GetWidth(clut_image);
glTexImage3D (GL_TEXTURE_3D, 0, GL_RGB16, n, n, n,
0, GL_RGB, GL_UNSIGNED_SHORT, clut);
check_error("init glTexImage3D(clut) failed");
glBindTexture (GL_TEXTURE_3D, 0);
}
void initGL()
{
oyProfile_s * p = oyImage_GetProfile(image);
int cchannels = oyProfile_GetChannelsCount( p );
oyProfile_Release( &p );
if(oy_debug == 0 && oy_display_verbose == 0)
fprintf(stderr,"iGL");
initImageTexture();
initClutTexture( cchannels );
initShaders ( cchannels );
}
/* Delete the shader program, shader and textures.
 * Fix: reset each handle to 0 after deletion.  freeGL() is called both
 * from the draw() reload path (freeGL(); initGL();) and again from the
 * destructor, so without zeroing, stale object names would be passed to
 * glDeleteProgram()/glDeleteShader() a second time, which raises
 * GL_INVALID_VALUE.  Zeroed handles make repeated calls harmless. */
void freeGL()
{
if( cmm_prog > 0 )
glDeleteProgram( cmm_prog );
cmm_prog = 0;
if(cmm_shader > 0)
glDeleteShader( cmm_shader );
cmm_shader = 0;
if(clut_texture > 0)
glDeleteTextures( 1, &clut_texture);
clut_texture = 0;
if( img_texture )
glDeleteTextures( 1, &img_texture);
img_texture = 0;
}
/* Query Oyranos for the first monitor device and return its ICC profile.
 * The caller owns the returned reference (release with oyProfile_Release).
 * Returns NULL when no monitor device or profile is found. */
oyProfile_s * getFirstMonitorDeviceProfile()
{
oyProfile_s * prof = NULL;
oyOptions_s * cs_options = NULL,
* options = NULL;
oyConfigs_s * devices = NULL;
oyConfig_s * c = NULL;
int n = 0;
/* get XCM_ICC_COLOR_SERVER_TARGET_PROFILE_IN_X_BASE */
int error = oyOptions_SetFromString( &cs_options,
"//" OY_TYPE_STD "/config/icc_profile.x_color_region_target", "yes", OY_CREATE_NEW );
error = oyOptions_SetFromString( &options,
"//" OY_TYPE_STD "/config/command",
"properties", OY_CREATE_NEW );
error = oyDevicesGet( 0, "monitor", options, &devices );
n = oyConfigs_Count( devices );
if(n && error <= 0)
{
c = oyConfigs_Get( devices, 0 );
oyDeviceAskProfile2( c, cs_options, &prof );
/* NOTE(review): GetMem + immediate free looks like it only forces the
 * profile data to be loaded/validated -- confirm intent. */
size_t size = 0;
void * data = oyProfile_GetMem( prof, &size, 0, oyAllocateFunc_);
if(size && data)
oyDeAllocateFunc_( data );
oyConfig_Release( &c );
}
oyConfigs_Release( &devices );
oyOptions_Release( &options );
oyOptions_Release( &cs_options );
return prof;
}
/* Convert one sRGB float triple to the first monitor's colour space.
 * Writes three floats into rgb_moni. */
void sRgbToMonitor( float srgb_in[3], float * rgb_moni )
{
oyProfile_s * monitor_icc = getFirstMonitorDeviceProfile();
// now convert some colors from sRGB to monitor space
// find the appropriate profile flags
uint32_t icc_profile_flags =oyICCProfileSelectionFlagsFromOptions( OY_CMM_STD,
"//" OY_TYPE_STD "/icc_color", NULL, 0 );
oyProfile_s * srgb = oyProfile_FromStd( oyASSUMED_WEB, icc_profile_flags, 0 );
// setup the color context
oyConversion_s * cc = oyConversion_CreateBasicPixelsFromBuffers(
srgb, srgb_in, OY_TYPE_123_FLOAT,
monitor_icc, rgb_moni, OY_TYPE_123_FLOAT,
NULL, 1 );
oyConversion_Correct( cc, "//" OY_TYPE_STD "/icc_color", oyOPTIONATTRIBUTE_ADVANCED, NULL );
// convert by running the conversion graph
oyConversion_RunPixels( cc, NULL );
oyProfile_Release( &monitor_icc );
oyProfile_Release( &srgb );
oyConversion_Release( &cc );
}
/* Validate clut_image (expected layout: w x w*w pixels of oyUINT16, i.e.
 * a cube of w grid points), cache its first scanline as the CLUT data,
 * compute shader scale/offset and schedule a reload + redraw.
 * Returns 0 on success, 1 on an unusable clut image. */
int load3DTexture( )
{
int w,h;
oyPixel_t pt;
oyDATATYPE_e data_type;
if(oy_debug == 0 && oy_display_verbose == 0)
fprintf(stderr,"l3DT");
pt = oyImage_GetPixelLayout( clut_image, oyLAYOUT );
data_type = oyToDataType_m(pt);
w = grid_points = oyImage_GetWidth( clut_image );
h = oyImage_GetHeight( clut_image );
int is_allocated = 0;
/* NOTE(review): only a warning is printed when no data is obtained;
 * execution continues and may schedule a reload with clut == NULL
 * (loadClut() guards against that later) -- confirm intended. */
clut = (GLushort*)oyImage_GetLineF( clut_image )( clut_image, 0,0, 0, &is_allocated );
if(!clut)
fprintf( stderr,_DBG_FORMAT_"obtained no data from clut image\n%s\n", _DBG_ARGS_, oyStruct_GetText( (oyStruct_s*)clut_image, oyNAME_NAME, 0));
if( h != w*w || data_type != oyUINT16 )
{
fprintf( stderr,_DBG_FORMAT_
"Cannot use clut file: %dx%d %s\nneed %dx%d oyUINT16\n",
_DBG_ARGS_, w,h,oyDataTypeToText(data_type), w,w*w);
return 1;
}
/* sample at texel centres: scale (w-1)/w, offset 1/(2w) */
clut_scale = (double) (w - 1) / w;
clut_offset = 1.0 / (2 * w);
if( oy_display_verbose )
{
fprintf( stderr,_DBG_FORMAT_
"loaded clut: grid_points:%d h:%d %s need %dx%d oyUINT16\n",
_DBG_ARGS_,
grid_points, h,oyDataTypeToText(data_type), w,w*w);
}
++load;
valid(0);
redraw();
return 0;
}
/* Build a default 64^3 identity CLUT in the image's (or sRGB fallback)
 * colour space, convert it to the monitor profile and install it as
 * clut_image via load3DTexture().  Returns 0 on success. */
int load3DTextureDefault( oyOptions_s * module_options )
{
int error = 0;
int icc_profile_flags = oyICCProfileSelectionFlagsFromOptions(
OY_CMM_STD, "//" OY_TYPE_STD "/icc_color",
NULL, 0 );
fprintf( stderr, "loading default monitor profile and image profile\n" );
{
int width = 64,
size, l,a,b,j;
uint16_t * buf = 0;
uint16_t in[3];
char comment[80];
oyProfile_s * p = NULL;
int cchannels = 3;
size = width*width;
buf = (uint16_t*)calloc(size*width*3, sizeof(uint16_t));
/* fill the identity ramp; 'l' is implicitly private as the loop index */
#pragma omp parallel for private(in,a,b,j)
for(l = 0; l < width; ++l)
{
in[0] = (int)((double) l / (width - 1) * 65535.0 + 0.5);
for(a = 0; a < width; ++a) {
in[1] = (int)((double) a / (width - 1) * 65535.0 + 0.5);
for(b = 0; b < width; ++b)
{
in[2] = (int)((double) b / (width - 1) * 65535.0 + 0.5);
for(j = 0; j < 3; ++j)
/* BGR */
/* NOTE(review): 'a*+width*3' parses as a*(+width)*3, i.e. the same
 * value as a*width*3 -- the stray '+' is harmless but worth cleaning. */
buf[b*size*3+a*+width*3+l*3+j] = in[j];
}
}
}
if(image)
{
p = oyImage_GetProfile(image);
if(!p)
{
WARNc_S("Could not open profile from image");
error = 1;
}
else
{
fprintf(stderr, "found image profile: %s\n", oyProfile_GetText(p,oyNAME_DESCRIPTION));
cchannels = oyProfile_GetChannelsCount( p );
}
if( cchannels != 3 &&
cchannels != 4 )
{
fprintf(stderr, "found unsupported channel count: %d\n", oyProfile_GetChannelsCount(p));
oyProfile_Release( &p );
}
}
if(!p)
{
p = oyProfile_FromStd( oyASSUMED_WEB, icc_profile_flags, 0 );
fprintf(stderr, "falling back to default profile: %s\n", oyProfile_GetText(p,oyNAME_DESCRIPTION));
}
oyImage_s * cimage = oyImage_Create( width,width*width, buf,
oyChannels_m(cchannels<=3?3:cchannels)|oyDataType_m(oyUINT16),
p, 0 );
oyProfile_Release( &p );
sprintf( comment, "clut with %d levels", width );
oyDATATYPE_e data_type = oyUINT16;
p = getFirstMonitorDeviceProfile();
int flags = 0;
oyConversion_s * cc = oyConversion_CreateFromImage (
cimage, module_options,
p, data_type, flags, 0 );
error = oyConversion_RunPixels( cc, 0 );
if(clut_image) oyImage_Release( &clut_image );
clut_image = oyConversion_GetImage( cc, OY_OUTPUT );
oyConversion_Release( &cc );
oyImage_Release( &cimage );
/* NOTE(review): buf is freed here although cimage was created over it;
 * presumably oyImage_Create copied or no longer needs the data -- confirm. */
free(buf);
//error = oyImage_WritePPM( clut_image, "dbg-clut.ppm", comment);
if(!clut_image)
{ WARNc_S("Could not open clut image");
} else
error = load3DTexture( );
}
return error;
}
/* Load a CLUT from an image file and install it via load3DTexture().
 * Returns 1 when the file cannot be opened, else load3DTexture()'s result. */
int load3DTextureFromFile( const char * file_name )
{
if(file_name == NULL)
{
fprintf(stderr, _DBG_FORMAT_"Cannot open clut file\n", _DBG_ARGS_);
return 1;
}
oyImage_Release( &clut_image );
oyImage_FromFile( file_name, 0, &clut_image, 0 );
if(clut_image == NULL)
{
fprintf(stderr, _DBG_FORMAT_"Cannot open clut file: %s\n", _DBG_ARGS_,
file_name);
return 1;
}
return load3DTexture( );
}
/* Drop the intermediate CPU-side frame buffer and reset its geometry. */
void releaseFrame()
{
if(frame_data && frame_width && frame_height)
free(frame_data);
frame_data = NULL;
frame_width = frame_height = 0;
}
/* (Re)allocate the intermediate frame buffer for a requested width/height,
 * clamped to the widget size and rounded up to power-of-two texture
 * dimensions.  Returns non zero when the frame geometry changed. */
int createFrame( int width, int height )
{
int changed = 0;
int old_frame_width = frame_width,
old_frame_height = frame_height;
if(width > W)
width = W;
if(height > H)
height = H;
#if 0
frame_width = width + width%128;
frame_height = height + height%128;
#else
/* multiply of 2 is up to twice as fast */
frame_width = 8;
frame_height = 8;
while (frame_width < width)
{
frame_width *= 2;
if(frame_width > width)
break;
}
while (frame_height < height)
{
frame_height *= 2;
if(frame_height > height)
break;
}
#endif
changed = old_frame_width != frame_width || old_frame_height != frame_height;
if(frame_width == 0 || frame_height == 0)
{
fprintf( stderr,_DBG_FORMAT_"%dx%d no frame dimensions(%dx%d)\n", _DBG_ARGS_, width,height, frame_width, frame_height);
return changed;
}
if(!changed)
return changed;
if((frame_width > max_texture_size ||
frame_height > max_texture_size ) && max_texture_size)
fprintf( stderr,_DBG_FORMAT_"frame size is likely too big frame(%dx%d) sub(%dx%d) max:%d\n", _DBG_ARGS_, frame_width, frame_height, sw,sh, max_texture_size);
else if(oy_display_verbose)
fprintf( stderr,_DBG_FORMAT_"frame size (%dx%d) max:%d\n", _DBG_ARGS_, frame_width, frame_height, max_texture_size);
/* NOTE(review): GetPixelLayout runs before the !image check; presumably
 * it tolerates NULL -- confirm. */
oyPixel_t pt = oyImage_GetPixelLayout( image, oyLAYOUT );
if(!image)
pt = OY_TYPE_123_8;
oyDATATYPE_e data_type = oyToDataType_m( pt );
int channels = oyToChannels_m( pt );
int sample_size = oyDataTypeGetSize( data_type );
if(frame_data) free(frame_data);
frame_data = (char*)calloc(frame_width*frame_height*channels*sample_size, sizeof(char));
if(!frame_data)
fprintf( stderr,_DBG_FORMAT_"failed to allocate frame_data (%dx%d)%dc\n", _DBG_ARGS_, frame_width, frame_height, channels);
return changed;
}
/* Rebuild the Oyranos conversion graph: create a fresh display_image
 * (carrying over the old image tags) and wire image -> display_image
 * with ICC processing skipped (the shader does the colour transform). */
void loadDAG()
{
oyPixel_t pt = oyImage_GetPixelLayout( image, oyLAYOUT );
oyDATATYPE_e data_type = oyToDataType_m( pt );
int channels = oyToChannels_m( pt );
oyConversion_s * cc;
if(oy_debug == 0 && oy_display_verbose == 0)
fprintf(stderr,"lDAG");
oyOptions_s * old_tags = oyImage_GetTags( display_image ), * tags;
oyImage_Release( &display_image );
display_image = oyImage_Create( 640, 480,
0,
oyChannels_m(channels) | oyDataType_m(data_type),
oyImage_GetProfile( image ), // TODO: who owns the profile ref?
0 );
tags = oyImage_GetTags( display_image );
oyOptions_AppendOpts( tags, old_tags );
oyOptions_Release( &old_tags );
oyOptions_Release( &tags );
if(oy_display_verbose)
fprintf( stderr,_DBG_FORMAT_"Wid:%dx%d+%d+%d Parent:%dx%d+%d+%d texture:(%dx%d)%dc %s\n",
_DBG_ARGS_,
W,H,Oy_Fl_Image_Widget::x(),Oy_Fl_Image_Widget::y(),
Oy_Fl_Image_Widget::parent()->w(), Oy_Fl_Image_Widget::parent()->h(),
Oy_Fl_Image_Widget::parent()->x(), Oy_Fl_Image_Widget::parent()->y(),
oyImage_GetWidth(display_image),oyImage_GetHeight(display_image), channels, oyDataTypeToText(data_type));
cc = oyConversion_FromImageForDisplay( image, display_image,
0, OY_SKIP_ICC, data_type, 0, 0 );
conversion( cc );
}
/* Re-upload the CLUT samples into the existing 3D texture.  Requires
 * clut data, a generated clut_texture and a linked shader program. */
void loadClut()
{
/* clut must be present */
/* GL needs first to be initialised */
if(!clut || !clut_texture || !cmm_prog)
{ fprintf( stderr,_DBG_FORMAT_ "%s %s %s\n", _DBG_ARGS_, clut?"":"no clut", clut_texture?"":"not clut_texture initialised", cmm_prog?"":"no cmm_prog initialised" );
return;
}
if(oy_debug == 0 && oy_display_verbose == 0)
fprintf(stderr,"lClut");
//glActiveTexture (GL_TEXTURE0 + 1);
glBindTexture (GL_TEXTURE_3D, clut_texture);
/*
* Don't use FP luts, but only 16-bit integer ones, since the ATI card
* does not support GL_LINEAR for GL_RGB32F_ARB, but ony GL_NEAREST
*/
int n = oyImage_GetWidth(clut_image);
glTexImage3D (GL_TEXTURE_3D, 0, GL_RGB16, n, n, n,
0, GL_RGB, GL_UNSIGNED_SHORT, clut);
if(check_error("load glTexImage3D(clut) failed"))
fprintf( stderr,_DBG_FORMAT_"clut_image::width: %d\n", _DBG_ARGS_,
n );
/* everything below this return is intentionally dead code (author TODO) */
return; // TODO make the following work
/* shader parameter update */
GLint loc;
loc = glGetUniformLocation (cmm_prog, "scale");
glUniform1f (loc, clut_scale);
loc = glGetUniformLocation (cmm_prog, "offset");
glUniform1f (loc, clut_offset);
glBindTexture (GL_TEXTURE_3D, 0);
}
/* Rebuild the processing context after a (re)load request: refresh the
 * DAG, probe the draw image size and (re)create or release the
 * intermediate frame buffer.  Returns non zero when a redraw is needed. */
int loadContext()
{
/* update textures and shader */
if(oy_debug == 0 && oy_display_verbose == 0)
fprintf(stderr,"lContext");
/* prepare Oyranos */
loadDAG();
oyPixel_t pt = oyImage_GetPixelLayout( display_image, oyLAYOUT );
oyDATATYPE_e data_type = oyToDataType_m( pt );
int dirty = 0;
oyImage_s * draw_image = NULL;
// centerd == 0x01 skip_server_region_upload == 0x02;
if(drawPrepare( &draw_image, data_type, 1 | 2 ) == -1)
++dirty;
/* NOTE(review): these locals shadow the members sw/sh on purpose or by
 * accident -- the members are only updated later in draw(); confirm. */
int sw = OY_MIN(oyImage_GetWidth( draw_image), W);
int sh = OY_MIN(oyImage_GetHeight(draw_image), H);
oyImage_Release( &draw_image );
/* intermediate buffer */
if(init == 0)
createFrame( sw, sh );
else
releaseFrame();
return dirty;
}
/* performance tick counter and the second it was last reset */
int tick;
double second;
/* Main GL draw callback.  Sequence: track widget resizes, (re)initialise
 * GL and the CLUT on demand, run the Oyranos DAG into the intermediate
 * frame buffer, upload it as a sub-texture and finally render a textured
 * quad (with checker-board backdrop for alpha images). */
void draw()
{
if(!context_valid())
return;
int W_ = Oy_Fl_Image_Widget::w(),
H_ = Oy_Fl_Image_Widget::h();
int need_draw = 0;
/* widget geometry changed: inform the colour server and remember it */
if( W_ != W ||
H_ != H )
{
if(!old_rect) old_rect = oyRectangle_New(NULL);
colorServerRegionSet( dynamic_cast<Fl_Widget*>(dynamic_cast<Oy_Fl_Image_Widget*>(this)), NULL, old_rect, 0 );
W = W_;
H = H_;
oyRectangle_SetGeo( old_rect, 0,0,W,H );
}
/*if(oy_display_verbose)
fprintf( stderr, _DBG_FORMAT_"Widget:%dx%d+%d+%d Parent:%dx%d+%d+%d\n",_DBG_ARGS_,
W,H,Oy_Fl_Image_Widget::x(),Oy_Fl_Image_Widget::y(),
Oy_Fl_Image_Widget::parent()->w(), Oy_Fl_Image_Widget::parent()->h(),
Oy_Fl_Image_Widget::parent()->x(), Oy_Fl_Image_Widget::parent()->y());*/
if(0&&oy_debug == 0 && oy_display_verbose == 0 && load == 0)
fprintf( stderr, _DBG_FORMAT_"\nw window change or no frame data or invalid\nf frame size changes\n0 skip redraw\n. do redraw\nP dirty before oyDrawScreenImage()\np oyDrawScreenImage() returns dirty\nD damaged\n",_DBG_ARGS_);
if(oy_debug == 0 && oy_display_verbose == 0 && load == 0 && !valid())
fprintf( stderr,"v");
if(max_texture_size == 0)
glGetIntegerv( GL_MAX_TEXTURE_SIZE, &max_texture_size );
/* a pending (re)load: rebuild the context and the background colour */
if(load > 0)
{
valid(1);
need_draw = loadContext();
float bg[3] = {0.5, 0.5, 0.5};
sRgbToMonitor( bg, background );
}
if(clut)
{
if(init == 0)
{
initGL(); /* textures and shaders */
++init;
adraw = 2;
} else if(load)
{
freeGL();
initGL(); /* textures and shaders */
loadClut();
}
}
load = 0;
oyImage_s * draw_image = NULL;
oyPixel_t pt;
oyDATATYPE_e data_type = oyUINT8;
int channels = 0;
int sample_size = 0;
if(conversion())
{
pt = oyImage_GetPixelLayout( display_image, oyLAYOUT );
data_type = oyToDataType_m( pt );
sample_size = oyDataTypeGetSize( data_type );
/* detect Oyranos DAG changes and process the graph if needed */
// centerd == 0x01 skip_server_region_upload == 0x02;
int result = drawPrepare( &draw_image, data_type, 1 | 2 );
if(!draw_image)
{ fprintf( stderr, _DBG_FORMAT_ "no draw image\n", _DBG_ARGS_ );
return;
}
if(result < 0)
++need_draw;
pt = oyImage_GetPixelLayout( draw_image, oyLAYOUT );
channels = oyToChannels_m( pt );
if(oy_debug && draw_image)
fprintf(stdout, _DBG_FORMAT_"pixellayout: %d width: %d channels: %d\n",
_DBG_ARGS_,
pt, oyImage_GetWidth(draw_image), oyToChannels_m(pt) );
#ifdef __APPLE__
glDrawBuffer(GL_FRONT_AND_BACK);
#endif // !MESA
} else
fprintf( stderr, _DBG_FORMAT_ "conversion not ready\n", _DBG_ARGS_ );
if(draw_image)
{
/* nothing changed and no forced draws pending: skip this frame */
if(need_draw == 0 && !adraw)
{
if(oy_debug == 0 && oy_display_verbose == 0)
fprintf(stderr,"0");
oyImage_Release( &draw_image );
return;
} else
{
/* glTexSubImage2D texture dimensions */
sw = OY_MIN(oyImage_GetWidth( draw_image), W);
sh = OY_MIN(oyImage_GetHeight(draw_image), H);
if( frame_width < sw || frame_height < sh )
{
if(oy_debug == 0 && oy_display_verbose == 0)
fprintf(stderr,"f");
/* update intermediate buffer */
if(createFrame( sw, sh ))
{
glBindTexture (GL_TEXTURE_2D, img_texture);
int gl_channels = oyToGlChannelType(channels);
int gl_data_type = oyToGlDataType(data_type);
int gl_type = oyToGlPixelType(pt);
/* tell GL about buffer geometry changes */
glTexImage2D (GL_TEXTURE_2D, 0, gl_type, frame_width, frame_height,
0, gl_channels, gl_data_type, frame_data);
if(check_error("glTexImage2D failed (image too large?)"))
{
fprintf( stderr,_DBG_FORMAT_"texture:(%dx%d)%dc %s %s %s %s\n", _DBG_ARGS_,
frame_width,frame_height, channels, oyDataTypeToText(data_type),
printType( gl_type ), printDataType(gl_data_type), printChannelType( gl_channels));
}
glBindTexture (GL_TEXTURE_2D, 0);
}
}
if(adraw)
--adraw;
}
if(frame_data)
{
/* buffer alignment of 4 byte is what most GPUs prefer */
if((sw*channels*sample_size)%4)
glPixelStorei(GL_UNPACK_ALIGNMENT,1);
else
glPixelStorei(GL_UNPACK_ALIGNMENT,4);
/* fill the intermediate buffer from Oyranos image, flipping vertically
 * (GL texture origin is bottom-left) and byte swapping if needed */
if(draw_image && frame_data)
{
if(sh > frame_height)
fprintf( stderr, _DBG_FORMAT_"draw_image height: %d frame_height: %d\n",
_DBG_ARGS_, sh, frame_height );
pt = oyImage_GetPixelLayout( image, oyLAYOUT );
for(int y = 0; y < sh; ++y)
{
int is_allocated = 0, height = 0;
void * image_data = oyImage_GetLineF(draw_image)( draw_image, y, &height, -1, &is_allocated );
memcpy( &frame_data[sw*(sh-y-1)*channels*sample_size], image_data,
sw*channels*sample_size );
if(pt & OY_BYTESWAPED && sample_size > 1)
oyByteSwap(&frame_data[sw*(sh-y-1)*channels*sample_size], sample_size, sw*channels);
if(is_allocated)
free( image_data );
}
if(pt & OY_BYTESWAPED && sample_size > 1)
printf("%u %s\n", pt, oyPixelPrint(pt, malloc));
}
#if 1
/* performance test */
double s = oySeconds();
if(tick)
{
if(oy_debug == 0 && oy_display_verbose == 0)
fprintf(stderr,".");
if((int)s > (int)second)
{
fprintf(stderr, " %f t/s\n", tick/(s-second));
second = s;
tick = 0;
}
} else second = s;
++tick;
#endif
}
oyImage_Release( &draw_image );
}
/* border values */
float bw = OY_MAX( 0, W - sw),
bh = OY_MAX( 0, H - sh);
/* maintain texture ratio */
float tw = (W - bw)/(float)frame_width;
float th = (H - bh)/(float)frame_height;
if(oy_display_verbose)
fprintf( stderr, _DBG_FORMAT_"frame: (%dx%d)%dc WxH(%dx%d border %g,%g) parent:%dx%d\n"
"img:%d clut:%d(scale:%f offset:%f) prog:%tx shader:%tx texture ratio:%gx%g align:%d\n",
_DBG_ARGS_, frame_width,frame_height,channels, W,H, bw,bh,
Oy_Fl_Image_Widget::parent()->w(),
Oy_Fl_Image_Widget::parent()->h(),
img_texture, clut_texture, clut_scale, clut_offset,
(ptrdiff_t)cmm_prog, (ptrdiff_t)cmm_shader, tw,th,
(sw*channels*sample_size)%4);
// - not needed, but might improve speed - START //
glShadeModel (GL_FLAT);
glDisable (GL_DITHER);
glDisable (GL_BLEND);
glDisable (GL_DEPTH_TEST);
glMatrixMode (GL_PROJECTION);
glLoadIdentity ();
// - not needed, but might improve speed - END //
glMatrixMode (GL_MODELVIEW);
glLoadIdentity ();
glViewport( 0,0, W,H );
glOrtho( -W,W, -H,H, -1.0,1.0);
/* draw background */
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glClearColor (0.5, 0.5, 0.5, 0.0);
glClearColor (background[0], background[1], background[2], 0.0);
glColor3f(1.0, 1.0, 1.0);
/* diagonal cross marks the empty widget area */
glBegin(GL_LINE_STRIP); glVertex2f(W, H); glVertex2f(-W,-H); glEnd();
glBegin(GL_LINE_STRIP); glVertex2f(W,-H); glVertex2f(-W, H); glEnd();
if(!frame_data)
{ fprintf(stderr,_DBG_FORMAT_ "frame_data not yet ready\n",_DBG_ARGS_); return; }
if(channels == 4)
{
/* draw a checker board as background for alpha images */
Oy_Fl_Image_Widget * ow = dynamic_cast<Oy_Fl_Image_Widget*>(this);
Oy_Fl_Window_Base * win = dynamic_cast<Oy_Fl_Window_Base*>(ow->window());
double scale = win->scale();
int len = 32 * scale;
int start_w = -W+bw,
start_h = -H+bh;
/* checker board */
glColor3f(.0, .0, .0);
glBegin (GL_QUADS);
for(int w = start_w; w < W-bw; w += len)
for(int h = start_h; h < H-bh; h += len)
{
if(w+len < (W-bw) &&
h+len < (H-bh))
{
/* tr */
glVertex2f ( w+len/2, h+len/2);
glVertex2f ( w+len/2, h+len );
glVertex2f ( w+len, h+len );
glVertex2f ( w+len, h+len/2);
/* bl */
glVertex2f ( w, h );
glVertex2f ( w, h+len/2);
glVertex2f ( w+len/2, h+len/2);
glVertex2f ( w+len/2, h );
} else
{
/* tr */
glVertex2f ( OY_MIN( w+len/2, W-bw), OY_MIN( h+len/2, H-bh) );
glVertex2f ( OY_MIN( w+len/2, W-bw), OY_MIN( h+len , H-bh) );
glVertex2f ( OY_MIN( w+len , W-bw), OY_MIN( h+len , H-bh) );
glVertex2f ( OY_MIN( w+len , W-bw), OY_MIN( h+len/2, H-bh) );
/* bl */
glVertex2f ( w , h );
glVertex2f ( w , OY_MIN( h+len/2, H-bh) );
glVertex2f ( OY_MIN( w+len/2, W-bw), OY_MIN( h+len/2, H-bh) );
glVertex2f ( OY_MIN( w+len/2, W-bw), h );
}
}
glEnd ();
}
glEnable (GL_TEXTURE_2D);
glEnable (GL_TEXTURE_3D);
glTexEnvf (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
int gl_data_type = oyToGlDataType(data_type);
int gl_channels = oyToGlChannelType(channels);
/* upload texture 0 (image) */
glBindTexture (GL_TEXTURE_2D, img_texture);
glTexSubImage2D (GL_TEXTURE_2D, 0, 0, 0, sw, sh,
gl_channels, gl_data_type, frame_data);
glBindTexture (GL_TEXTURE_2D, 0);
if(oy_display_verbose)
fprintf(stderr, _DBG_FORMAT_"subImage(%dx%d)%dc frame(%dx%d)\n", _DBG_ARGS_, sw,sh,channels, frame_width, frame_height );
/* draw surface */
if(gl_channels == GL_RGBA)
{
glEnable (GL_BLEND);
glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA);
}
glBegin (GL_QUADS);
glTexCoord2f (0.0, 0.0); glVertex2f (-W + bw, -H + bh);
glTexCoord2f (0.0, th ); glVertex2f (-W + bw, H - bh);
glTexCoord2f (tw, th ); glVertex2f ( W - bw, H - bh);
glTexCoord2f (tw, 0.0); glVertex2f ( W - bw, -H + bh);
glEnd ();
if(gl_channels == GL_RGBA)
glDisable (GL_BLEND);
glDisable (GL_TEXTURE_2D);
glDisable (GL_TEXTURE_3D);
}
/* FLTK event hook: any event schedules a redraw; the image widget base
 * gets first chance at the event, the GL window handles the rest. */
int handle (int e)
{
redraw();
int handled = Oy_Fl_Image_Widget::handle( e );
if(handled == 0)
handled = Fl_Gl_Window::handle( e );
return handled;
}
/* Wake the FLTK main loop for this widget and mark the GL window dirty. */
void redraw()
{
Fl::awake(this);
Fl_Gl_Window::redraw();
}
public:
/* Load the source image, verify a supported channel count (1, 3 or 4) and
 * load the CLUT: the default monitor CLUT when clut_name is empty, else a
 * CLUT file.  Returns 0 on success, non zero on any failure.
 * Fix: guard clut_name against NULL before dereferencing clut_name[0]
 * (a NULL name is now treated like an empty name). */
int setImage ( const char * file_name,
oyOptions_s * cc_options,
const char * clut_name )
{
oyImage_Release( &image );
conversion( NULL );
int error = oyImage_FromFile( file_name, 0, &image, 0 );
if(oy_display_verbose)
fprintf(stderr, _DBG_FORMAT_"loaded image: %s\n%s\n", _DBG_ARGS_,
file_name, oyStruct_GetText( (oyStruct_s*)image, oyNAME_NAME, 0));
if(!image)
{
error = 1;
return error;
}
else
{
oyPixel_t pt = oyImage_GetPixelLayout( image, oyLAYOUT );
int channels = oyToChannels_m( pt );
if(channels == 1 || channels == 3 || channels == 4)
{
load = 1;
valid(0);
redraw();
} else
{
fprintf(stderr, _DBG_FORMAT_"image channels not supported: %d %s\n", _DBG_ARGS_,
channels, file_name);
return 1;
}
}
int lerror = 0;
if(clut_name == NULL || clut_name[0] == 0)
lerror = load3DTextureDefault( cc_options );
else
lerror = load3DTextureFromFile( clut_name );
if(!error && !lerror && oy_display_verbose)
fprintf(stderr, _DBG_FORMAT_"successfully loaded clut: %s\n", _DBG_ARGS_,
clut_name );
return error + lerror;
}
/* Human readable name for a GL data type enum.  Unknown values are
 * formatted as their decimal number.  Uses a static buffer: the returned
 * pointer is invalidated by the next call (not reentrant). */
const char * printDataType( int gl_data_type )
{
static char t[64] = {0};
const char * name = NULL;
switch( gl_data_type )
{
case GL_UNSIGNED_SHORT: name = "GL_UNSIGNED_SHORT"; break;
case GL_UNSIGNED_BYTE: name = "GL_UNSIGNED_BYTE"; break;
case GL_HALF_FLOAT: name = "GL_HALF_FLOAT"; break;
case GL_FLOAT: name = "GL_FLOAT"; break;
case GL_DOUBLE: name = "GL_DOUBLE"; break;
default: break;
}
if(name)
strcpy(t, name);
else
sprintf( t, "%d", gl_data_type );
return t;
}
/* Human readable name for a GL pixel format enum (static buffer, not
 * reentrant).  Unknown values are formatted as their decimal number. */
const char * printChannelType( int type )
{
static char t[64] = {0};
const char * name = NULL;
switch( type )
{
case GL_LUMINANCE: name = "GL_LUMINANCE"; break;
case GL_LUMINANCE_ALPHA: name = "GL_LUMINANCE_ALPHA"; break;
case GL_RGB: name = "GL_RGB"; break;
case GL_RGBA: name = "GL_RGBA"; break;
default: break;
}
if(name)
strcpy(t, name);
else
sprintf( t, "%d", type );
return t;
}
/* Human readable name for a GL internal texture format enum (static
 * buffer, not reentrant).  Unknown values are printed as numbers. */
const char * printType( int type )
{
static char t[64] = {0};
const char * name = NULL;
switch( type )
{
case GL_LUMINANCE8: name = "GL_LUMINANCE8"; break;
//case GL_LUMINANCE_ALPHA8: name = "GL_LUMINANCE_ALPHA8"; break;
case GL_RGB8: name = "GL_RGB8"; break;
case GL_RGBA8: name = "GL_RGBA8"; break;
case GL_LUMINANCE16: name = "GL_LUMINANCE16"; break;
//case GL_LUMINANCE_ALPHA16: name = "GL_LUMINANCE_ALPHA16"; break;
case GL_RGB16: name = "GL_RGB16"; break;
case GL_RGBA16: name = "GL_RGBA16"; break;
case GL_RGB16F: name = "GL_RGB16F"; break;
case GL_RGBA16F: name = "GL_RGBA16F"; break;
case GL_RGB32F: name = "GL_RGB32F"; break;
case GL_RGBA32F: name = "GL_RGBA32F"; break;
default: break;
}
if(name)
strcpy(t, name);
else
sprintf( t, "%d", type );
return t;
}
/* Map an Oyranos channel count to the matching GL pixel format enum.
 * Returns 0 (and warns) for unsupported counts. */
static int oyToGlChannelType( int channels )
{
if(channels == 1)
return GL_LUMINANCE;
//if(channels == 2)
//return GL_LUMINANCE_ALPHA;
if(channels == 3)
return GL_RGB;
if(channels == 4)
return GL_RGBA;
fprintf(stderr, "channels not supported: %d", channels);
return 0;
}
// Map an Oyranos sample data type to the matching OpenGL pixel data type.
// Returns 0 (no valid GL enum) and prints a diagnostic for unsupported types.
static int oyToGlDataType( oyDATATYPE_e type )
{
switch(type)
{
case oyUINT8: return GL_UNSIGNED_BYTE;
case oyUINT16: return GL_UNSIGNED_SHORT;
case oyUINT32: return GL_UNSIGNED_INT;
case oyHALF: return GL_HALF_FLOAT;
case oyFLOAT: return GL_FLOAT;
case oyDOUBLE: return GL_DOUBLE;
default: fprintf(stderr, "data type is not supported: %s", oyDataTypeToText(type)); return 0;
}
}
// Map an Oyranos pixel layout to an OpenGL internal texture format.
// The byte-swap flag is masked off first, since it does not affect the
// chosen internal format. Returns 0 and prints a diagnostic (including the
// textual pixel layout, freed again after use) for unsupported layouts.
static GLuint oyToGlPixelType( unsigned int type )
{
switch( type & (~OY_BYTESWAPED) )
{
case OY_TYPE_1_8: return GL_LUMINANCE8;
//case OY_TYPE_1A_8: return GL_LUMINANCE_ALPHA8;
case OY_TYPE_123_8: return GL_RGB8;
case OY_TYPE_123A_8: return GL_RGBA8;
case OY_TYPE_1_16: return GL_LUMINANCE16;
case OY_TYPE_123_16: return GL_RGB16;
case OY_TYPE_123A_16: return GL_RGBA16;
case OY_TYPE_123_HALF: return GL_RGB16F;
case OY_TYPE_123A_HALF: return GL_RGBA16F;
case OY_TYPE_123_FLOAT: return GL_RGB32F;
case OY_TYPE_123A_FLOAT: return GL_RGBA32F;
default: { char * t = oyPixelPrint(type,malloc); fprintf(stderr, "pixel type is not supported: %u %s", type, t?t:"????"); if(t)free(t); return 0; }
}
}
};
// GLSL fragment shader for RGBA images: the texel is mapped into CLUT range
// via scale/offset, the RGB part is looked up in the 3D colour table and the
// alpha channel is passed through unchanged.
const char * Oy_Fl_Shader_Box::cmm_shader_source_rgba =
"uniform sampler2D image; \n"
"uniform sampler3D clut; \n"
"uniform float scale; \n"
"uniform float offset; \n"
" \n"
"void main() \n"
"{ \n"
" vec4 img = texture2D(image, gl_TexCoord[0].xy).rgba; \n"
" \n"
" // interpolate CLUT \n"
" img = img * scale + offset; \n"
" gl_FragColor = texture3D(clut, img.rgb); \n"
" gl_FragColor.a = img.a; \n"
" \n"
" // w/o color management \n"
" // gl_FragColor = vec4(img, 0.5); \n"
"} \n"
;
// GLSL fragment shader for four channel images: each colour channel is
// inverted and reduced by the fourth channel; alpha is forced to 1.0.
// NOTE(review): the clut/scale/offset uniforms are declared but never used
// in this shader -- confirm whether CLUT lookup was intended here too.
const char * Oy_Fl_Shader_Box::cmm_shader_source_4c =
"uniform sampler2D image; \n"
"uniform sampler3D clut; \n"
"uniform float scale; \n"
"uniform float offset; \n"
" \n"
"void main() \n"
"{ \n"
" vec4 img = texture2D(image, gl_TexCoord[0].xy).rgba; \n"
" gl_FragColor.r = 1.0-img.r-img.a; \n"
" gl_FragColor.g = 1.0-img.g-img.a; \n"
" gl_FragColor.b = 1.0-img.b-img.a; \n"
" gl_FragColor.a = 1.0; \n"
"} \n"
;
#endif /* Oy_Fl_Shader_Box_H */
|
nonNested.c |
// OpenMP Non-Nested Example
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
// Demonstrates that nested parallelism is inert by default: the program never
// enables nesting, so (per its own messages) the inner region does not fan
// out into four threads.
int main( int argc, char** argv ) {
// Not turning on nesting or turning off dynamic threads.
// Outer Level Parallel Region - 2 Threads
#pragma omp parallel num_threads( 2 )
{
printf( "Outer Level - You will see this twice.\n" );
// Inner Level Parallel Region - 2 Threads Each
#pragma omp parallel num_threads( 2 )
{
printf( "Inner Level - You will NOT see this four times! Important steps are not complete!\n" );
}
}
return 0;
}
// End nonNested.c - EWG SDG
|
sniffle.h | // copyright 2016 john howard (orthopteroid@gmail.com)
// MIT license
//
// Genetic Algorithim Maximizer
#ifndef PSYCHICSNIFFLE_SNIFFLE_H
#define PSYCHICSNIFFLE_SNIFFLE_H
#include <iostream>
#include <cmath>
#include <cstring>
#include <omp.h>
#include <functional>
#include <assert.h>
#include "nselector.h"
#include "samplertable.h"
#include "splice.h"
#include "taus88.h"
//////////////////////////////////
using namespace util;
namespace sniffle {
// The byte-analyser constructs distributions of the population's state bytes
// for byte-level gene selection and jump-mutation.
// Byte-level gene model: for every state position a weight table over the 256
// possible byte values, plus a sampler table used to draw byte values
// distributed according to those weights.
template<typename StateType>
struct ByteAnalyser {
const static int StateSize = sizeof(StateType);

// Reinterpret a state object as its raw bytes (one "gene" per byte).
uint8_t *GetByteArr(StateType &state) { return (uint8_t *) &state; }

// distr[ss][b]: weight of byte value b at state position ss.
uint8_t distr[StateSize][256];
// dSampler[ss][0..dSamplerN[ss]-1]: sampling table rebuilt from distr[ss];
// a uniform random index into it yields a byte value distributed per distr[ss].
uint8_t dSampler[StateSize][65535];
uint16_t dSamplerN[StateSize];

// local-maxima strategy: occasionally invert byte distributions
int iteration;
bool negated;

// Debug dump: one row per state byte; letters encode the weight magnitude.
void dumpStats() {
for (int ss = 0; ss < StateSize; ss++) {
for (int b = 0; b < 256; b++) putchar('A' + 25 * (distr[ss][b]) / 255);
putchar('\n');
}
putchar('\n');
}

// Smallest (max[s1] - min[s2]) weight spread over all position pairs,
// normalized to [0,1].
// NOTE(review): the uint8_t difference wraps when max[s1] < min[s2] --
// confirm whether that is intended.
float_t calcSmallestChannelDifference()
{
uint8_t min[StateSize], max[StateSize];
#pragma omp parallel for
for (int ss = 0; ss < StateSize; ss++)
{
min[ss] = 255;
max[ss] = 0;
for (int b = 0; b < 256; b++) {
min[ss] = std::min(min[ss], distr[ss][b]);
max[ss] = std::max(max[ss], distr[ss][b]);
}
}
// smallest difference between channels
uint8_t deltaE = (uint8_t)~0;
for (int s1 = 0; s1 < StateSize; s1++)
for (int s2 = 0; s2 < StateSize; s2++)
deltaE = std::min(deltaE, (uint8_t)(max[s1] - min[s2]));
return (float_t)deltaE / 255.f;
}

// One generation step: every ~10th call the weight tables are bitwise
// inverted to escape local maxima; otherwise any pending inversion is undone,
// all weights decay by one ("breathe out"), and byte values seen in the
// sampled elite states are amplified with a small spill-over to neighbouring
// values ("breathe in"). Finally the sampler tables are rebuilt.
void crank(StateType *stateArr, int *eliteArr, const int eliteSamples) {
#if 0
dumpStats();
#endif
if( ++iteration % 10 == 0 ) {
negated = true;
#pragma omp parallel for
for (int ss = 0; ss < StateSize; ss++)
for (int b = 0; b < 256; b++)
distr[ss][b] = ~distr[ss][b];
} else {
if( negated ) {
negated = !negated;
#pragma omp parallel for
for (int ss = 0; ss < StateSize; ss++)
for (int b = 0; b < 256; b++)
distr[ss][b] = ~distr[ss][b];
}
// attenuate - BREATHE OUT
#pragma omp parallel for
for (int ss = 0; ss < StateSize; ss++)
for (int b = 0; b < 256; b++)
if (distr[ss][b] > 1) distr[ss][b] -= 1;
// amplify by sampling from the elite group - BREATHE IN
#pragma omp parallel for
for (int i = 0; i < eliteSamples; i++) {
for (int ss = 0; ss < StateSize; ss++) {
int b = GetByteArr(stateArr[eliteArr[i]])[ss];
if (distr[ss][b] < 250) distr[ss][b] += 5;
// slightly distribute locality
// (bugfix: the previous guards `b > 0` / `b < 255` still let
// b - bo / b + bo index outside distr[ss][0..255] for bo up to 3)
for (int bo = 1; bo < 4; bo++) {
if (b - bo >= 0 && distr[ss][b - bo] < 250) distr[ss][b - bo] += 1;
if (b + bo <= 255 && distr[ss][b + bo] < 250) distr[ss][b + bo] += 1;
}
}
}
}
// recalc
#pragma omp parallel for
for (int ss = 0; ss < StateSize; ss++) {
dSamplerN[ss] = buildSamplerTable<uint8_t, 65535, uint8_t, 256>(&(dSampler[ss][0]), &(distr[ss][0]));
}
}

// Reset to a uniform model.
void reset() {
iteration = 0;
negated = false;
// The value used here to initialize the distribution may have an effect
// on the convergence rate. A higher value will require more attenuation
// cycles before the S/R ratio get stronger.
memset(distr, UINT8_MAX >> 2, StateSize * 256); // a uniform distribution
#pragma omp parallel for
for (int ss = 0; ss < StateSize; ss++) {
for (int i = 0; i < 256; i++)
dSampler[ss][i] = i; // a uniform distribution
// bugfix: this was `~0` (65535), but only the first 256 sampler entries
// are initialized above, so draws made before the first crank() could
// read uninitialized table slots. 256 keeps the draw uniform.
dSamplerN[ss] = 256;
}
}

// jump mutation - kudos to Andrew Schwartzmeyer:
// replace one randomly chosen byte with a draw from its distribution.
void mutatebyte(uint8_t *p, Taus88& fnRand) {
int byte = fnRand() % StateSize;
p[byte] = dSampler[byte][ fnRand() % dSamplerN[byte] ];
}

// Fill a whole state with draws from the per-position distributions.
void randomize(uint8_t *p, Taus88& fnRand) {
for (int ss = 0; ss < StateSize; ss++) {
p[ss] = dSampler[ss][ fnRand() % dSamplerN[ss] ];
}
}
};
//////////////////////////////////
// The null-analyser performs no analysis of the state population:
// cranking and resetting are no-ops, and mutation/randomization draw
// bytes straight from the random generator.
template<typename StateType>
struct NullAnalyser {
    const static int StateSize = sizeof(StateType);

    // No population model is kept, so nothing to update or reset.
    void crank(StateType *stateArr, int *eliteArr, const int eliteSamples) { }
    void reset() {}

    // Overwrite one randomly chosen byte with a fresh random value.
    void mutatebyte(uint8_t *p, Taus88& fnRand) {
        int idx = fnRand() % StateSize;
        p[idx] = fnRand();
    }

    // Fill the whole state with random bytes.
    void randomize(uint8_t *p, Taus88& fnRand) {
        for (int i = 0; i < StateSize; ++i)
            p[i] = fnRand();
    }
};
//////////////////////////////////
// Population-based maximizer. Two state buffers are kept (indices pa/pb into
// state[][]): the current generation is read from state[pa] and the next one
// is bred into state[pb], then the buffers are swapped. Fitness values come
// in through crank(f); the StateAnalyser template parameter supplies the
// mutation/randomization strategy.
template<typename StateType, uint Population, template <typename ST> typename StateAnalyser>
struct Maximizer {
const static int StateSize = sizeof(StateType);
// Group boundaries (fractions of the population) for the breeding strategy
// in crank(); group 1 is the single best state copied verbatim.
const static int Group2End = Population * .30;
const static int Group3End = Population * .50;
const static int Group4End = Population * .70;
const static int Group5End = Population * .80;
const static int Group6End = Population * .90;
const static int EliteSamples = 5 + Group3End * .05;
int eliteSamples[EliteSamples];
int pa, pb;
StateType state[2][Population];
float_t e[Population];
// Sampler table over fitness: drawing a uniform index < eSamplerN yields a
// population index biased towards fitter states.
uint16_t eSampler[65535];
uint16_t eSamplerN;
StateAnalyser<StateType> stateAnalyser;
Taus88State taus88State;
// Raw byte views of states and of the old/new population buffers.
uint8_t *GetByteArr(StateType &state) { return (uint8_t *) &state; }
StateType *GetStateArr() { return &(state[pa][0]); }
uint8_t *newPop(int i = 0) { return GetByteArr(state[pb][i]); }
uint8_t *oldPop(int i = 0) { return GetByteArr(state[pa][i]); }
Maximizer() {
taus88State.seed();
}
// Debug dump: one row per state byte across the whole population.
void dumpStats() {
for (int ss = 0; ss < StateSize; ss++) {
for (int p = 0; p < Population; p++)
putchar('A' + 25 * oldPop(p)[ss] / 255);
putchar('\n');
}
putchar('\n');
}
// Re-randomize the population (optionally preserving the first `preserve`
// states) and reset the analyser.
void reset(int preserve = 0) {
pa = 0;
pb = 1;
stateAnalyser.reset();
#pragma omp parallel
{
Taus88 taus88(taus88State);
#pragma omp for
for (int i = preserve; i < Population; i++) {
stateAnalyser.randomize(oldPop(i), taus88);
}
}
}
// Breed the next generation from the fitness values f[0..Population-1].
void crank(float *f) {
#if 0
dumpStats();
#endif
// find max, and clobber it to prevent saturation
int imax = 0;
int imin = 0;
for (int i = 1; i < Population; i++) {
if (f[i] > f[imax]) imax = i;
if (f[i] < f[imin]) imin = i;
}
// all states tied with the best get the worst fitness, so the elite
// sampler is not dominated by duplicates of one state
for (int i = 1; i < Population; i++) {
if (f[i] == f[imax]) f[i] = f[imin];
}
// calc sampler table
eSamplerN = buildSamplerTable<uint16_t, 65535, float_t, Population>(eSampler, f);
// sample elites
eliteSamples[0] = imax; // add best only once to prevent saturation
#pragma omp parallel
{
Taus88 taus88(taus88State);
#pragma omp for
for (int i = 1; i < EliteSamples; i++)
eliteSamples[i] = eSampler[ taus88() % eSamplerN ];
}
stateAnalyser.crank(GetStateArr(), eliteSamples, EliteSamples);
/////////////////////////////
// build next generation
// NOTE(review): the groups below start at GroupXEnd + 1, so the states at
// indices Group2End, Group3End, Group4End, Group5End and Group6End are
// never written and keep stale bytes from an earlier generation -- confirm
// whether these off-by-one gaps are intended.
memcpy(newPop(0), oldPop(imax), (uint) StateSize);
#pragma omp parallel
{
Taus88 taus88(taus88State);
NSelector<2> nselector( eSamplerN );
#pragma omp for nowait
for (int i = 1; i < Group2End; i++) {
// g2: preserve elites
int p = eSampler[ taus88() % eSamplerN ];
memcpy(newPop(i), oldPop(p), (uint) StateSize);
}
#pragma omp for nowait
for (int i = Group2End +1; i < Group3End; i++) {
// g3: semi-preserve elites
int p = eSampler[ taus88() % eSamplerN ];
memcpy(newPop(i), oldPop(p), (uint) StateSize);
stateAnalyser.mutatebyte(newPop(i), taus88);
}
#pragma omp for nowait
for (int i = Group3End +1; i < Group4End; i++) {
// g4: some favourables are spliced with best
int b = eSampler[ taus88() % eSamplerN ];
splice<StateSize>(newPop(i), oldPop(0), oldPop(b), (uint) taus88());
}
#pragma omp for nowait
for (int i = Group4End +1; i < Group5End; i++) {
// g5: some favourables are spliced with best (other way)
int a = eSampler[ taus88() % eSamplerN ];
splice<StateSize>(newPop(i), oldPop(a), oldPop(0), (uint) taus88());
}
#pragma omp for nowait
for (int i = Group5End +1; i < Group6End; i++) {
nselector.reset();
// g6: favourables that are only spliced
int a = eSampler[ nselector.select(taus88) ];
int b = eSampler[ nselector.select(taus88) ];
splice<StateSize>(newPop(i), oldPop(a), oldPop(b), (uint) taus88());
}
#pragma omp for nowait
for (int i = Group6End +1; i < Population; i++) {
// g7: randomize rest using byteAnalyser
stateAnalyser.randomize(newPop(i), taus88);
}
}
// flip old/new generation buffers
std::swap(pa, pb);
}
};
}
#endif //PSYCHICSNIFFLE_SNIFFLE_H
|
scheduled-clauseModificado.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/* Demo: a 'schedule(dynamic,chunk)' worksharing loop that also inspects the
 * OpenMP internal control variables (dyn-var, nthreads-var, thread-limit-var,
 * run-sched-var) from inside and outside the parallel region.
 *
 * argv[1] = number of iterations (capped at 200), argv[2] = chunk size.
 * Returns 0 on success; exits with -1 when arguments are missing.
 *
 * Fix: the implicit 'int' return type on main() is invalid since C99, and the
 * function fell off the end without a return statement.
 *
 * NOTE(review): when _OPENMP is undefined, omp_sched_t and the omp_get_*
 * queries used below are undeclared -- the fallback above only stubs
 * omp_get_thread_num(); confirm the intended non-OpenMP build story. */
int main(int argc, char **argv) {
  int i, n = 200, chunk, a[n], suma = 0, modifier, dyn, max, limit;
  omp_sched_t kind;

  if (argc < 3) {
    fprintf(stderr,"\nFalta iteraciones o chunk \n");
    exit(-1);
  }

  /* a[] was sized while n == 200, so capping n keeps all indices in bounds */
  n = atoi(argv[1]); if (n > 200) n = 200; chunk = atoi(argv[2]);
  for (i = 0; i < n; i++) a[i] = i;

  /* firstprivate: each thread starts from the initial suma;
   * lastprivate: values from the sequentially last iteration are copied out */
  #pragma omp parallel for firstprivate(suma) \
          lastprivate(suma,dyn,max,limit) schedule(dynamic,chunk)
  for (i = 0; i < n; i++)
  {
    suma = suma + a[i];
    printf(" thread %d suma a[%d]=%d suma=%d \n",
           omp_get_thread_num(), i, a[i], suma);
    dyn = omp_get_dynamic();
    max = omp_get_max_threads();
    limit = omp_get_thread_limit();
    omp_get_schedule(&kind, &modifier);
  }

  printf("Dentro de 'parallel for' dyn-var=%d\n", dyn);
  printf("Dentro de 'parallel for' nthreads-var=%d\n", max);
  printf("Dentro de 'parallel for' thread-limit-var=%d\n", limit);
  printf("Dentro de 'parallel for' run-sched-var=%d\n", kind);
  omp_get_schedule(&kind, &modifier);
  printf("Fuera de 'parallel for' suma=%d\n", suma);
  printf("Fuera de 'parallel for' dyn-var=%d\n", omp_get_dynamic());
  printf("Fuera de 'parallel for' nthreads-var=%d\n", omp_get_max_threads());
  printf("Fuera de 'parallel for' thread-limit-var=%d\n", omp_get_thread_limit());
  printf("Fuera de 'parallel for' run-sched-var=%d\n", kind);
  return 0;
}
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
friend class ASTStmtReader;
/// \brief Kind of the directive.
OpenMPDirectiveKind Kind;
/// \brief Starting location of the directive (directive keyword).
SourceLocation StartLoc;
/// \brief Ending location of the directive.
SourceLocation EndLoc;
/// \brief Numbers of clauses.
const unsigned NumClauses;
/// \brief Number of child expressions/stmts.
const unsigned NumChildren;
/// \brief Offset from this to the start of clauses.
/// There are NumClauses pointers to clauses, they are followed by
/// NumChildren pointers to child stmts/exprs (if the directive type
/// requires an associated stmt, then it has to be the first of them).
const unsigned ClausesOffset;
/// \brief Get the clauses storage.
/// Clauses live in trailing storage placed ClausesOffset bytes after the
/// (derived) directive object itself.
MutableArrayRef<OMPClause *> getClauses() {
OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
reinterpret_cast<char *>(this) + ClausesOffset);
return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
}
protected:
/// \brief Build instance of directive of class \a K.
///
/// \param SC Statement class.
/// \param K Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
///
// ClausesOffset is sizeof(T) rounded up to the clause-pointer alignment, so
// the trailing clause storage begins directly after the derived object.
template <typename T>
OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses, unsigned NumChildren)
: Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
NumChildren(NumChildren),
ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {}
/// \brief Sets the list of variables for this clause.
///
/// \param Clauses The list of clauses for the directive.
///
void setClauses(ArrayRef<OMPClause *> Clauses);
/// \brief Set the associated statement for the directive.
///
/// /param S Associated statement.
///
// The associated statement is always stored as the first child.
void setAssociatedStmt(Stmt *S) {
assert(hasAssociatedStmt() && "no associated statement.");
*child_begin() = S;
}
public:
/// \brief Iterates over a filtered subrange of clauses applied to a
/// directive.
///
/// This iterator visits only clauses of type SpecificClause.
template <typename SpecificClause>
class specific_clause_iterator
    : public llvm::iterator_adaptor_base<
          specific_clause_iterator<SpecificClause>,
          ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
          const SpecificClause *, ptrdiff_t, const SpecificClause *,
          const SpecificClause *> {
ArrayRef<OMPClause *>::const_iterator End;
// Advance the wrapped iterator until it points at a SpecificClause or End.
void SkipToNextClause() {
while (this->I != End && !isa<SpecificClause>(*this->I))
++this->I;
}
public:
explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
    : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
      End(Clauses.end()) {
SkipToNextClause();
}
const SpecificClause *operator*() const {
return cast<SpecificClause>(*this->I);
}
const SpecificClause *operator->() const { return **this; }
specific_clause_iterator &operator++() {
++this->I;
SkipToNextClause();
return *this;
}
};
/// Returns a range over the clauses of type SpecificClause in \p Clauses.
template <typename SpecificClause>
static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
return {specific_clause_iterator<SpecificClause>(Clauses),
        specific_clause_iterator<SpecificClause>(
            llvm::makeArrayRef(Clauses.end(), 0))};
}
/// Returns a range over this directive's clauses of type SpecificClause.
template <typename SpecificClause>
llvm::iterator_range<specific_clause_iterator<SpecificClause>>
getClausesOfKind() const {
return getClausesOfKind<SpecificClause>(clauses());
}
/// Gets a single clause of the specified kind associated with the
/// current directive iff there is only one clause of this kind (and assertion
/// is fired if there is more than one clause is associated with the
/// directive). Returns nullptr if no clause of this kind is associated with
/// the directive.
template <typename SpecificClause>
const SpecificClause *getSingleClause() const {
auto Clauses = getClausesOfKind<SpecificClause>();
if (Clauses.begin() != Clauses.end()) {
assert(std::next(Clauses.begin()) == Clauses.end() &&
       "There are at least 2 clauses of the specified kind");
return *Clauses.begin();
}
return nullptr;
}
/// Returns true if the current directive has one or more clauses of a
/// specific kind.
template <typename SpecificClause>
bool hasClausesOfKind() const {
auto Clauses = getClausesOfKind<SpecificClause>();
return Clauses.begin() != Clauses.end();
}
/// \brief Returns starting location of directive kind.
SourceLocation getLocStart() const { return StartLoc; }
/// \brief Returns ending location of directive.
SourceLocation getLocEnd() const { return EndLoc; }
/// \brief Set starting location of directive kind.
///
/// \param Loc New starting location of directive.
///
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// \brief Set ending location of directive.
///
/// \param Loc New ending location of directive.
///
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// \brief Get number of clauses.
unsigned getNumClauses() const { return NumClauses; }
/// \brief Returns specified clause.
///
/// \param i Number of clause.
///
OMPClause *getClause(unsigned i) const { return clauses()[i]; }
/// \brief Returns true if directive has associated statement.
bool hasAssociatedStmt() const { return NumChildren > 0; }
/// \brief Returns statement associated with the directive.
Stmt *getAssociatedStmt() const {
assert(hasAssociatedStmt() && "no associated statement.");
return const_cast<Stmt *>(*child_begin());
}
/// \brief Returns the captured statement associated with the
/// component region within the (combined) directive.
//
// \param RegionKind Component region kind.
// Walks the chain of nested CapturedStmts (one per capture region of the
// directive kind, in order) until the requested region kind is reached.
CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
assert(std::any_of(
           CaptureRegions.begin(), CaptureRegions.end(),
           [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
       "RegionKind not found in OpenMP CaptureRegions.");
auto *CS = cast<CapturedStmt>(getAssociatedStmt());
for (auto ThisCaptureRegion : CaptureRegions) {
if (ThisCaptureRegion == RegionKind)
return CS;
CS = cast<CapturedStmt>(CS->getCapturedStmt());
}
llvm_unreachable("Incorrect RegionKind specified for directive.");
}
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
static bool classof(const Stmt *S) {
return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
       S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
}
// Child stmts/exprs are stored directly after the clause pointers.
child_range children() {
if (!hasAssociatedStmt())
return child_range(child_iterator(), child_iterator());
Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
return child_range(ChildStorage, ChildStorage + NumChildren);
}
ArrayRef<OMPClause *> clauses() { return getClauses(); }
ArrayRef<OMPClause *> clauses() const {
return const_cast<OMPExecutableDirective *>(this)->getClauses();
}
};
/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief true if the construct has inner cancel directive.
bool HasCancel;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending Location of the directive.
/// \param NumClauses Number of clauses.
///
// The single child slot (NumChildren == 1) holds the associated statement.
OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
    : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                             StartLoc, EndLoc, NumClauses, 1),
      HasCancel(false) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPParallelDirective(unsigned NumClauses)
    : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                             SourceLocation(), SourceLocation(), NumClauses,
                             1),
      HasCancel(false) {}
/// \brief Set cancel state.
// HasCancel defaults to false; it is only set through this private setter
// (reachable via friends such as ASTStmtReader).
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
       ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
/// \brief Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
/// \brief Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelDirectiveClass;
}
};
/// \brief This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Number of collapsed loops as specified by 'collapse' clause.
unsigned CollapsedNum;
/// \brief Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
/// expressions stored in OMPLoopDirective.
/// The first 9 children are necessary for all the loop directives,
/// the next 8 are specific to the worksharing ones, and the next 11 are
/// used for combined constructs containing two pragmas associated to loops.
/// After the fixed children, three arrays of length CollapsedNum are
/// allocated: loop counters, their updates and final values.
/// PrevLowerBound and PrevUpperBound are used to communicate blocking
/// information in composite constructs which require loop blocking
/// DistInc is used to generate the increment expression for the distribute
/// loop when combined with a further nested loop
/// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
/// for loop when combined with a previous distribute loop in the same pragma
/// (e.g. 'distribute parallel for')
///
enum {
AssociatedStmtOffset = 0,
IterationVariableOffset = 1,
LastIterationOffset = 2,
CalcLastIterationOffset = 3,
PreConditionOffset = 4,
CondOffset = 5,
InitOffset = 6,
IncOffset = 7,
PreInitsOffset = 8,
// The '...End' enumerators do not correspond to child expressions - they
// specify the offset to the end (and start of the following counters/
// updates/finals arrays).
DefaultEnd = 9,
// The following 8 exprs are used by worksharing and distribute loops only.
IsLastIterVariableOffset = 9,
LowerBoundVariableOffset = 10,
UpperBoundVariableOffset = 11,
StrideVariableOffset = 12,
EnsureUpperBoundOffset = 13,
NextLowerBoundOffset = 14,
NextUpperBoundOffset = 15,
NumIterationsOffset = 16,
// Offset to the end for worksharing loop directives.
WorksharingEnd = 17,
PrevLowerBoundVariableOffset = 17,
PrevUpperBoundVariableOffset = 18,
DistIncOffset = 19,
PrevEnsureUpperBoundOffset = 20,
CombinedLowerBoundVariableOffset = 21,
CombinedUpperBoundVariableOffset = 22,
CombinedEnsureUpperBoundOffset = 23,
CombinedInitOffset = 24,
CombinedConditionOffset = 25,
CombinedNextLowerBoundOffset = 26,
CombinedNextUpperBoundOffset = 27,
// Offset to the end (and start of the following counters/updates/finals
// arrays) for combined distribute loop directives.
CombinedDistributeEnd = 28,
};
/// \brief Get the counters storage.
MutableArrayRef<Expr *> getCounters() {
Expr **Storage = reinterpret_cast<Expr **>(
&(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the private counters storage.
MutableArrayRef<Expr *> getPrivateCounters() {
Expr **Storage = reinterpret_cast<Expr **>(&*std::next(
child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the updates storage.
MutableArrayRef<Expr *> getInits() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the updates storage.
MutableArrayRef<Expr *> getUpdates() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
protected:
/// \brief Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
/// \param NumClauses Number of clauses.
/// \param NumSpecialChildren Number of additional directive-specific stmts.
///
template <typename T>
OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses,
unsigned NumSpecialChildren = 0)
: OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses,
numLoopChildren(CollapsedNum, Kind) +
NumSpecialChildren),
CollapsedNum(CollapsedNum) {}
/// \brief Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
if (isOpenMPLoopBoundSharingDirective(Kind))
return CombinedDistributeEnd;
if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
isOpenMPDistributeDirective(Kind))
return WorksharingEnd;
return DefaultEnd;
}
/// \brief Children number.
static unsigned numLoopChildren(unsigned CollapsedNum,
OpenMPDirectiveKind Kind) {
return getArraysOffset(Kind) + 5 * CollapsedNum; // Counters,
// PrivateCounters, Inits,
// Updates and Finals
}
void setIterationVariable(Expr *IV) {
*std::next(child_begin(), IterationVariableOffset) = IV;
}
void setLastIteration(Expr *LI) {
*std::next(child_begin(), LastIterationOffset) = LI;
}
void setCalcLastIteration(Expr *CLI) {
*std::next(child_begin(), CalcLastIterationOffset) = CLI;
}
void setPreCond(Expr *PC) {
*std::next(child_begin(), PreConditionOffset) = PC;
}
void setCond(Expr *Cond) {
*std::next(child_begin(), CondOffset) = Cond;
}
void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
void setPreInits(Stmt *PreInits) {
*std::next(child_begin(), PreInitsOffset) = PreInits;
}
void setIsLastIterVariable(Expr *IL) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), IsLastIterVariableOffset) = IL;
}
void setLowerBoundVariable(Expr *LB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), LowerBoundVariableOffset) = LB;
}
void setUpperBoundVariable(Expr *UB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), UpperBoundVariableOffset) = UB;
}
void setStrideVariable(Expr *ST) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), StrideVariableOffset) = ST;
}
void setEnsureUpperBound(Expr *EUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
}
void setNextLowerBound(Expr *NLB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), NextLowerBoundOffset) = NLB;
}
void setNextUpperBound(Expr *NUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), NextUpperBoundOffset) = NUB;
}
void setNumIterations(Expr *NI) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), NumIterationsOffset) = NI;
}
void setPrevLowerBoundVariable(Expr *PrevLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB;
}
void setPrevUpperBoundVariable(Expr *PrevUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB;
}
void setDistInc(Expr *DistInc) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), DistIncOffset) = DistInc;
}
void setPrevEnsureUpperBound(Expr *PrevEUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB;
}
void setCombinedLowerBoundVariable(Expr *CombLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB;
}
void setCombinedUpperBoundVariable(Expr *CombUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB;
}
void setCombinedEnsureUpperBound(Expr *CombEUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
*std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB;
}
/// \brief Set the iteration-variable init expression for the inner loop of
/// a combined distribute construct (see DistCombinedHelperExprs::Init).
void setCombinedInit(Expr *CombInit) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), CombinedInitOffset) = CombInit;
}
/// \brief Set the loop condition for the inner loop of a combined
/// distribute construct (see DistCombinedHelperExprs::Cond).
void setCombinedCond(Expr *CombCond) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), CombinedConditionOffset) = CombCond;
}
/// \brief Set the lower-bound update expression for the outer loop of a
/// combined construct (see DistCombinedHelperExprs::NLB).
void setCombinedNextLowerBound(Expr *CombNLB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB;
}
/// \brief Set the upper-bound update expression for the outer loop of a
/// combined construct (see DistCombinedHelperExprs::NUB).
void setCombinedNextUpperBound(Expr *CombNUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB;
}
/// Out-of-line setters for the per-dimension helper sequences of the
/// collapsed loop nest (counters, their privates, inits, updates, finals).
void setCounters(ArrayRef<Expr *> A);
void setPrivateCounters(ArrayRef<Expr *> A);
void setInits(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
public:
/// The expressions built to support OpenMP loops in combined/composite
/// pragmas (e.g. pragma omp distribute parallel for)
struct DistCombinedHelperExprs {
  /// DistributeLowerBound - used when composing 'omp distribute' with
  /// 'omp for' in a same construct.
  Expr *LB;
  /// DistributeUpperBound - used when composing 'omp distribute' with
  /// 'omp for' in a same construct.
  Expr *UB;
  /// DistributeEnsureUpperBound - used when composing 'omp distribute'
  /// with 'omp for' in a same construct, EUB depends on DistUB
  Expr *EUB;
  /// Distribute loop iteration variable init used when composing 'omp
  /// distribute'
  /// with 'omp for' in a same construct
  Expr *Init;
  /// Distribute Loop condition used when composing 'omp distribute'
  /// with 'omp for' in a same construct
  Expr *Cond;
  /// Update of LowerBound for statically scheduled omp loops for
  /// outer loop in combined constructs (e.g. 'distribute parallel for')
  Expr *NLB;
  /// Update of UpperBound for statically scheduled omp loops for
  /// outer loop in combined constructs (e.g. 'distribute parallel for')
  Expr *NUB;
};
/// \brief The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
  /// \brief Loop iteration variable.
  Expr *IterationVarRef;
  /// \brief Loop last iteration number.
  Expr *LastIteration;
  /// \brief Loop number of iterations.
  Expr *NumIterations;
  /// \brief Calculation of last iteration.
  Expr *CalcLastIteration;
  /// \brief Loop pre-condition.
  Expr *PreCond;
  /// \brief Loop condition.
  Expr *Cond;
  /// \brief Loop iteration variable init.
  Expr *Init;
  /// \brief Loop increment.
  Expr *Inc;
  /// \brief IsLastIteration - local flag variable passed to runtime.
  Expr *IL;
  /// \brief LowerBound - local variable passed to runtime.
  Expr *LB;
  /// \brief UpperBound - local variable passed to runtime.
  Expr *UB;
  /// \brief Stride - local variable passed to runtime.
  Expr *ST;
  /// \brief EnsureUpperBound -- expression UB = min(UB, NumIterations).
  Expr *EUB;
  /// \brief Update of LowerBound for statically scheduled 'omp for' loops.
  Expr *NLB;
  /// \brief Update of UpperBound for statically scheduled 'omp for' loops.
  Expr *NUB;
  /// \brief PreviousLowerBound - local variable passed to runtime in the
  /// enclosing schedule or null if that does not apply.
  Expr *PrevLB;
  /// \brief PreviousUpperBound - local variable passed to runtime in the
  /// enclosing schedule or null if that does not apply.
  Expr *PrevUB;
  /// \brief DistInc - increment expression for distribute loop when found
  /// combined with a further loop level (e.g. in 'distribute parallel for')
  /// expression IV = IV + ST
  Expr *DistInc;
  /// \brief PrevEUB - expression similar to EUB but to be used when loop
  /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
  /// when ensuring that the UB is either the calculated UB by the runtime or
  /// the end of the assigned distribute chunk)
  /// expression UB = min (UB, PrevUB)
  Expr *PrevEUB;
  /// \brief Counters Loop counters.
  SmallVector<Expr *, 4> Counters;
  /// \brief PrivateCounters Loop counters.
  SmallVector<Expr *, 4> PrivateCounters;
  /// \brief Expressions for loop counters inits for CodeGen.
  SmallVector<Expr *, 4> Inits;
  /// \brief Expressions for loop counters update for CodeGen.
  SmallVector<Expr *, 4> Updates;
  /// \brief Final loop counter values for CodeGen.
  SmallVector<Expr *, 4> Finals;
  /// Init statement for all captured expressions.
  Stmt *PreInits;

  /// Expressions used when combining OpenMP loop pragmas
  DistCombinedHelperExprs DistCombinedFields;

  /// \brief Check if all the expressions are built (does not check the
  /// worksharing ones).
  bool builtAll() {
    return IterationVarRef != nullptr && LastIteration != nullptr &&
           NumIterations != nullptr && PreCond != nullptr &&
           Cond != nullptr && Init != nullptr && Inc != nullptr;
  }

  /// \brief Initialize all the fields to null.
  /// \param Size Number of elements in the counters/finals/updates arrays.
  void clear(unsigned Size) {
    IterationVarRef = nullptr;
    LastIteration = nullptr;
    CalcLastIteration = nullptr;
    PreCond = nullptr;
    Cond = nullptr;
    Init = nullptr;
    Inc = nullptr;
    IL = nullptr;
    LB = nullptr;
    UB = nullptr;
    ST = nullptr;
    EUB = nullptr;
    NLB = nullptr;
    NUB = nullptr;
    NumIterations = nullptr;
    PrevLB = nullptr;
    PrevUB = nullptr;
    DistInc = nullptr;
    PrevEUB = nullptr;
    Counters.resize(Size);
    PrivateCounters.resize(Size);
    Inits.resize(Size);
    Updates.resize(Size);
    Finals.resize(Size);
    for (unsigned i = 0; i < Size; ++i) {
      Counters[i] = nullptr;
      PrivateCounters[i] = nullptr;
      Inits[i] = nullptr;
      Updates[i] = nullptr;
      Finals[i] = nullptr;
    }
    PreInits = nullptr;
    DistCombinedFields.LB = nullptr;
    DistCombinedFields.UB = nullptr;
    DistCombinedFields.EUB = nullptr;
    DistCombinedFields.Init = nullptr;
    DistCombinedFields.Cond = nullptr;
    DistCombinedFields.NLB = nullptr;
    DistCombinedFields.NUB = nullptr;
  }
};
/// \brief Get number of collapsed loops (the value of the 'collapse' depth).
unsigned getCollapsedNumber() const { return CollapsedNum; }
/// \brief Return the loop iteration variable of the collapsed loop nest.
Expr *getIterationVariable() const {
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), IterationVariableOffset)));
}
/// \brief Return the expression for the loop's last iteration number.
Expr *getLastIteration() const {
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), LastIterationOffset)));
}
/// \brief Return the expression computing the last iteration.
Expr *getCalcLastIteration() const {
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CalcLastIterationOffset)));
}
/// \brief Return the loop pre-condition expression.
Expr *getPreCond() const {
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), PreConditionOffset)));
}
/// \brief Return the loop condition expression.
Expr *getCond() const {
  const Expr *Cond =
      reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset));
  return const_cast<Expr *>(Cond);
}
/// \brief Return the loop iteration variable init expression.
Expr *getInit() const {
  return const_cast<Expr *>(
      reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
}
/// \brief Return the loop increment expression.
Expr *getInc() const {
  return const_cast<Expr *>(
      reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
}
/// \brief Return the init statement for all captured expressions
/// (const and non-const overloads).
const Stmt *getPreInits() const {
  return *std::next(child_begin(), PreInitsOffset);
}
Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); }
/// \brief Return the IsLastIteration flag variable passed to the runtime
/// (worksharing/taskloop/distribute directives only).
Expr *getIsLastIterVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), IsLastIterVariableOffset)));
}
/// \brief Return the LowerBound local variable passed to the runtime.
Expr *getLowerBoundVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), LowerBoundVariableOffset)));
}
/// \brief Return the UpperBound local variable passed to the runtime.
Expr *getUpperBoundVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), UpperBoundVariableOffset)));
}
/// \brief Return the Stride local variable passed to the runtime.
Expr *getStrideVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), StrideVariableOffset)));
}
/// \brief Return the EnsureUpperBound expression UB = min(UB, NumIterations).
Expr *getEnsureUpperBound() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), EnsureUpperBoundOffset)));
}
/// \brief Return the LowerBound update expression for statically scheduled
/// loops.
Expr *getNextLowerBound() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), NextLowerBoundOffset)));
}
/// \brief Return the UpperBound update expression for statically scheduled
/// loops.
Expr *getNextUpperBound() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), NextUpperBoundOffset)));
}
/// \brief Return the expression for the number of iterations of the
/// collapsed loop nest.
Expr *getNumIterations() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), NumIterationsOffset)));
}
/// \brief Return the lower bound variable from the enclosing schedule.
Expr *getPrevLowerBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), PrevLowerBoundVariableOffset)));
}
/// \brief Return the upper bound variable from the enclosing schedule.
Expr *getPrevUpperBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), PrevUpperBoundVariableOffset)));
}
/// \brief Return the distribute-loop increment expression IV = IV + ST.
Expr *getDistInc() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), DistIncOffset)));
}
/// \brief Return the expression UB = min(UB, PrevUB) used when scheduling
/// relies on PrevLB/PrevUB.
Expr *getPrevEnsureUpperBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), PrevEnsureUpperBoundOffset)));
}
/// \brief Return the combined-construct lower bound
/// (see DistCombinedHelperExprs::LB).
Expr *getCombinedLowerBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedLowerBoundVariableOffset)));
}
/// \brief Return the combined-construct upper bound
/// (see DistCombinedHelperExprs::UB).
Expr *getCombinedUpperBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedUpperBoundVariableOffset)));
}
/// \brief Return the combined-construct EUB expression
/// (see DistCombinedHelperExprs::EUB).
Expr *getCombinedEnsureUpperBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedEnsureUpperBoundOffset)));
}
/// \brief Return the combined-construct iteration-variable init
/// (see DistCombinedHelperExprs::Init).
Expr *getCombinedInit() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedInitOffset)));
}
/// \brief Return the combined-construct loop condition
/// (see DistCombinedHelperExprs::Cond).
Expr *getCombinedCond() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedConditionOffset)));
}
/// \brief Return the combined-construct lower-bound update
/// (see DistCombinedHelperExprs::NLB).
Expr *getCombinedNextLowerBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedNextLowerBoundOffset)));
}
/// \brief Return the combined-construct upper-bound update
/// (see DistCombinedHelperExprs::NUB).
Expr *getCombinedNextUpperBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedNextUpperBoundOffset)));
}
/// \brief Return the innermost loop body of the collapsed loop nest.
/// Sema has already validated the canonical loop form, so every level is a
/// ForStmt (possibly wrapped in containers that we strip).
const Stmt *getBody() const {
  Stmt *CurStmt = getAssociatedStmt()->IgnoreContainers(true);
  CurStmt = cast<ForStmt>(CurStmt)->getBody();
  // Descend the remaining CollapsedNum - 1 nested loop levels.
  for (unsigned Level = CollapsedNum; Level > 1; --Level) {
    CurStmt = CurStmt->IgnoreContainers();
    CurStmt = cast<ForStmt>(CurStmt)->getBody();
  }
  return CurStmt;
}
/// \brief Loop counters of the collapsed loop nest.
ArrayRef<Expr *> counters() { return getCounters(); }
ArrayRef<Expr *> counters() const {
  return const_cast<OMPLoopDirective *>(this)->getCounters();
}
/// \brief Private copies of the loop counters.
ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
ArrayRef<Expr *> private_counters() const {
  return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
}
/// \brief Loop counter init expressions for CodeGen.
ArrayRef<Expr *> inits() { return getInits(); }
ArrayRef<Expr *> inits() const {
  return const_cast<OMPLoopDirective *>(this)->getInits();
}
/// \brief Loop counter update expressions for CodeGen.
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
  return const_cast<OMPLoopDirective *>(this)->getUpdates();
}
/// \brief Final loop counter values for CodeGen.
ArrayRef<Expr *> finals() { return getFinals(); }
ArrayRef<Expr *> finals() const {
  return const_cast<OMPLoopDirective *>(this)->getFinals();
}
/// \brief True for every OpenMP directive kind that carries an associated
/// loop nest (all subclasses of OMPLoopDirective).
static bool classof(const Stmt *T) {
  switch (T->getStmtClass()) {
  case OMPSimdDirectiveClass:
  case OMPForDirectiveClass:
  case OMPForSimdDirectiveClass:
  case OMPParallelForDirectiveClass:
  case OMPParallelForSimdDirectiveClass:
  case OMPTaskLoopDirectiveClass:
  case OMPTaskLoopSimdDirectiveClass:
  case OMPDistributeDirectiveClass:
  case OMPTargetParallelForDirectiveClass:
  case OMPDistributeParallelForDirectiveClass:
  case OMPDistributeParallelForSimdDirectiveClass:
  case OMPDistributeSimdDirectiveClass:
  case OMPTargetParallelForSimdDirectiveClass:
  case OMPTargetSimdDirectiveClass:
  case OMPTeamsDistributeDirectiveClass:
  case OMPTeamsDistributeSimdDirectiveClass:
  case OMPTeamsDistributeParallelForSimdDirectiveClass:
  case OMPTeamsDistributeParallelForDirectiveClass:
    return true;
  default:
    return false;
  }
}
};
/// \brief This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt,
                                  const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
  // ASTStmtReader needs access to the private constructors and setters.
  friend class ASTStmtReader;

  /// \brief true if current directive has inner cancel directive.
  bool HasCancel;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                  unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc, unsigned CollapsedNum,
                                 ArrayRef<OMPClause *> Clauses,
                                 Stmt *AssociatedStmt, const HelperExprs &Exprs,
                                 bool HasCancel);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                      unsigned CollapsedNum, EmptyShell);

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  // ASTStmtReader needs access to the private constructors and setters.
  friend class ASTStmtReader;

  /// \brief true if current directive has inner cancel directive.
  bool HasCancel;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               StartLoc, EndLoc, NumClauses, 1),
        HasCancel(false) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        HasCancel(false) {}

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;

  /// \brief true if current directive has inner cancel directive.
  bool HasCancel;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               StartLoc, EndLoc, 0, 1),
        HasCancel(false) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               SourceLocation(), SourceLocation(), 0, 1),
        HasCancel(false) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt, bool HasCancel);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};
/// \brief This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSingleDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};
/// \brief This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               StartLoc, EndLoc, 0, 1) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPMasterDirective()
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMasterDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterDirectiveClass;
  }
};
/// \brief This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
  // ASTStmtReader needs access to the private constructors and setters.
  friend class ASTStmtReader;

  /// \brief Name of the directive.
  DeclarationNameInfo DirName;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
                       SourceLocation EndLoc, unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               StartLoc, EndLoc, NumClauses, 1),
        DirName(Name) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPCriticalDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        DirName() {}

  /// \brief Set name of the directive.
  ///
  /// \param Name Name of the directive.
  ///
  void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPCriticalDirective *
  Create(const ASTContext &C, const DeclarationNameInfo &Name,
         SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCriticalDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// \brief Return name of the directive.
  ///
  DeclarationNameInfo getDirectiveName() const { return DirName; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCriticalDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  // The AST reader needs access to the private constructors and setters to
  // rebuild this node during deserialization.
  friend class ASTStmtReader;
  /// \brief true if current region has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         StartLoc, EndLoc, CollapsedNum, NumClauses),
        HasCancel(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses),
        HasCancel(false) {}
  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
  // The AST reader needs access to the private constructors to rebuild this
  // node during deserialization.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForSimdDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors and setters to
  // rebuild this node during deserialization.
  friend class ASTStmtReader;
  /// \brief true if current directive has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                               unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1),
        HasCancel(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, SourceLocation(),
                               SourceLocation(), NumClauses, /*NumChildren=*/1),
        HasCancel(false) {}
  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors and setters to
  // rebuild this node during deserialization.
  friend class ASTStmtReader;
  /// \brief true if this directive has inner cancel directive.
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc,
                               EndLoc, NumClauses, /*NumChildren=*/1),
        HasCancel(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1),
        HasCancel(false) {}
  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true, if current directive has inner cancel directive.
  ///
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt, bool HasCancel);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors to rebuild this
  // node during deserialization.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
                               StartLoc, EndLoc, /*NumClauses=*/0,
                               /*NumChildren=*/0) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPTaskyieldDirective()
      : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
                               SourceLocation(), SourceLocation(),
                               /*NumClauses=*/0, /*NumChildren=*/0) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskyieldDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskyieldDirectiveClass;
  }
};
/// \brief This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors to rebuild this
  // node during deserialization.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
                               StartLoc, EndLoc, /*NumClauses=*/0,
                               /*NumChildren=*/0) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPBarrierDirective()
      : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
                               SourceLocation(), SourceLocation(),
                               /*NumClauses=*/0, /*NumChildren=*/0) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPBarrierDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPBarrierDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors to rebuild this
  // node during deserialization.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
                               StartLoc, EndLoc, /*NumClauses=*/0,
                               /*NumChildren=*/0) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPTaskwaitDirective()
      : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
                               SourceLocation(), SourceLocation(),
                               /*NumClauses=*/0, /*NumChildren=*/0) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskwaitDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskwaitDirectiveClass;
  }
};
/// This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
class OMPTaskgroupDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors to rebuild this
  // node during deserialization.
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                        unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/1) {}
  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskgroupDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}
public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTaskgroupDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C,
                                            unsigned NumClauses, EmptyShell);
  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskgroupDirectiveClass;
  }
};
/// \brief This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments- variables 'a'
/// and 'b'.
/// 'omp flush' directive does not have clauses but have an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors to rebuild this
  // node during deserialization.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/0) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPFlushDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/0) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};
/// \brief This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors to rebuild this
  // node during deserialization.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPOrderedDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses, EmptyShell);
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};
/// \brief This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors and setters to
  // rebuild this node during deserialization.
  friend class ASTStmtReader;
  /// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart;
  /// \brief Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first(postfix) form of the expression and false
  /// otherwise.
  bool IsPostfixUpdate;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}
  // Child-slot layout (relative to child_begin()): the setters below fill
  // slots 1-4 with 'x', the update expression, 'v' and 'expr' respectively.
  // Slot 0 is never touched here — presumably it holds the associated
  // statement; confirm against OMPExecutableDirective.
  /// \brief Set 'x' part of the associated expression/statement.
  void setX(Expr *X) { *std::next(child_begin()) = X; }
  /// \brief Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
  /// \brief Set 'v' part of the associated expression/statement.
  void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
  /// \brief Set 'expr' part of the associated expression/statement.
  void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }
public:
  /// \brief Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// \brief Get 'x' part of the associated expression/statement.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }
  /// \brief Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  /// \brief Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// \brief Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }
  /// \brief Get 'v' part of the associated expression/statement.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }
  /// \brief Get 'expr' part of the associated expression/statement.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 4));
  }
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors to rebuild this
  // node during deserialization.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors to rebuild this
  // node during deserialization.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                             EmptyShell);
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDataDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors to rebuild this
  // node during deserialization.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/0) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetEnterDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/0) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPTargetEnterDataDirective *Create(const ASTContext &C,
                                             SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             ArrayRef<OMPClause *> Clauses);
  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned N, EmptyShell);
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors to rebuild this
  // node during deserialization.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/0) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetExitDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/0) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPTargetExitDataDirective *Create(const ASTContext &C,
                                            SourceLocation StartLoc,
                                            SourceLocation EndLoc,
                                            ArrayRef<OMPClause *> Clauses);
  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned N, EmptyShell);
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if' with
/// condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
  // The AST reader needs access to the private constructors to rebuild this
  // node during deserialization.
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               OMPD_target_parallel, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
                               OMPD_target_parallel, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
  /// \brief Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPTargetParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief true if current region has inner cancel directive.
  /// Defaults to false; set via setHasCancel() by Create() and, presumably,
  /// by the AST reader during deserialization (it is a friend).
  bool HasCancel;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}
  /// \brief Build an empty directive (used during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}
  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// NB: parameter order here is NumClauses then CollapsedNum — the reverse
  /// of the constructors.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);
  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// LLVM-style RTTI: true iff \a T is an OMPTargetParallelForDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/1) {}
  /// \brief Build an empty directive (used during deserialization).
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// The single child slot (NumChildren is 1) holds the associated statement.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  /// LLVM-style RTTI: true iff \a T is an OMPTeamsDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Kind of the region this cancellation point applies to
  /// (e.g. OMPD_for); OMPD_unknown until set via setCancelRegion().
  OpenMPDirectiveKind CancelRegion;
  /// \brief Build directive with the given start and end location.
  ///
  /// This directive is stand-alone: it takes no clauses and has no
  /// associated statement (both counts are 0).
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, StartLoc, EndLoc,
                               /*NumClauses=*/0, /*NumChildren=*/0),
        CancelRegion(OMPD_unknown) {}
  /// \brief Build an empty directive (used during deserialization).
  ///
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, SourceLocation(),
                               SourceLocation(), /*NumClauses=*/0,
                               /*NumChildren=*/0),
        CancelRegion(OMPD_unknown) {}
  /// \brief Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);
  /// \brief Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  /// LLVM-style RTTI: true iff \a T is an OMPCancellationPointDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};
/// \brief This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Kind of the region this cancel applies to (e.g. OMPD_for);
  /// OMPD_unknown until set via setCancelRegion().
  OpenMPDirectiveKind CancelRegion;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/0),
        CancelRegion(OMPD_unknown) {}
  /// \brief Build an empty directive (used during deserialization).
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPCancelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/0),
        CancelRegion(OMPD_unknown) {}
  /// \brief Set cancel region for current cancel directive.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// \brief Get cancellation region for the current cancel directive.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  /// LLVM-style RTTI: true iff \a T is an OMPCancelDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive (used during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// NB: parameter order here is NumClauses then CollapsedNum — the reverse
  /// of the constructors.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses,
                                           unsigned CollapsedNum, EmptyShell);

  /// LLVM-style RTTI: true iff \a T is an OMPTaskLoopDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}
  /// \brief Build an empty directive (used during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, SourceLocation(), SourceLocation(),
                         CollapsedNum, NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// NB: parameter order here is NumClauses then CollapsedNum — the reverse
  /// of the constructors.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);

  /// LLVM-style RTTI: true iff \a T is an OMPTaskLoopSimdDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive (used during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// NB: parameter order here is NumClauses then CollapsedNum — the reverse
  /// of the constructors.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum, EmptyShell);

  /// LLVM-style RTTI: true iff \a T is an OMPDistributeDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target update' directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to' with
/// argument 'a', clause 'from' with argument 'b' and clause 'device' with
/// argument '1'.
///
class OMPTargetUpdateDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass,
                               OMPD_target_update, StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/0) {}
  /// \brief Build an empty directive (used during deserialization).
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetUpdateDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass,
                               OMPD_target_update, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/0) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// Note this is a stand-alone directive: it carries clauses only and has
  /// no associated statement (NumChildren is 0 in both constructors).
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPTargetUpdateDirective *Create(const ASTContext &C,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc,
                                          ArrayRef<OMPClause *> Clauses);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses The number of clauses.
  ///
  static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses, EmptyShell);

  /// LLVM-style RTTI: true iff \a T is an OMPTargetUpdateDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
  }
};
/// \brief This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass,
                         OMPD_distribute_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive (used during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass,
                         OMPD_distribute_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// NB: parameter order here is NumClauses then CollapsedNum — the reverse
  /// of the constructors.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// LLVM-style RTTI: true iff \a T is an OMPDistributeParallelForDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum,
                                        unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         OMPD_distribute_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}
  /// Build an empty directive (used during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum,
                                                 unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// NB: parameter order here is NumClauses then CollapsedNum — the reverse
  /// of the constructors.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  /// LLVM-style RTTI: true iff \a T is an
  /// OMPDistributeParallelForSimdDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variables 'x'
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}
  /// Build an empty directive (used during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeSimdDirective(unsigned CollapsedNum,
                                      unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         OMPD_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// NB: parameter order here is NumClauses then CollapsedNum — the reverse
  /// of the constructors.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  /// LLVM-style RTTI: true iff \a T is an OMPDistributeSimdDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has clauses
/// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen'
/// with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                    unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass,
                         OMPD_target_parallel_for_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}
  /// Build an empty directive (used during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum,
                                             unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass,
                         OMPD_target_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// NB: parameter order here is NumClauses then CollapsedNum — the reverse
  /// of the constructors.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// LLVM-style RTTI: true iff \a T is an OMPTargetParallelForSimdDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target simd' directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target simd' has clauses 'private'
/// with the variable 'a', 'map' with the variable 'b' and 'safelen' with
/// the variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetSimdDirectiveClass,
                         OMPD_target_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}
  /// Build an empty directive (used during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// NB: parameter order here is NumClauses then CollapsedNum — the reverse
  /// of the constructors.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  /// LLVM-style RTTI: true iff \a T is an OMPTargetSimdDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass,
                         OMPD_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}
  /// Build an empty directive (used during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDistributeDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass,
                         OMPD_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// NB: parameter order here is NumClauses then CollapsedNum — the reverse
  /// of the constructors.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  /// LLVM-style RTTI: true iff \a T is an OMPTeamsDistributeDirective.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass,
OMPD_teams_distribute_simd, StartLoc, EndLoc,
CollapsedNum, NumClauses) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Source locations are invalid; the befriended ASTStmtReader is expected
/// to fill the directive in during deserialization.
explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass,
OMPD_teams_distribute_simd, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTeamsDistributeSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Note: \a NumClauses precedes \a CollapsedNum here — the reverse of the
/// constructors' argument order.
static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI hook: true iff \a T is a teams-distribute-simd
/// directive.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
}
};
/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForSimdDirective final
: public OMPLoopDirective {
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass,
OMPD_teams_distribute_parallel_for_simd, StartLoc,
EndLoc, CollapsedNum, NumClauses) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Source locations are invalid; the befriended ASTStmtReader is expected
/// to fill the directive in during deserialization.
explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass,
OMPD_teams_distribute_parallel_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum,
NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTeamsDistributeParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Note: \a NumClauses precedes \a CollapsedNum here — the reverse of the
/// constructors' argument order.
static OMPTeamsDistributeParallelForSimdDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI hook: true iff \a T is a
/// teams-distribute-parallel-for-simd directive.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass;
}
};
/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass,
OMPD_teams_distribute_parallel_for, StartLoc, EndLoc,
CollapsedNum, NumClauses) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Source locations are invalid; the befriended ASTStmtReader is expected
/// to fill the directive in during deserialization.
explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass,
OMPD_teams_distribute_parallel_for, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTeamsDistributeParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Note: \a NumClauses precedes \a CollapsedNum here — the reverse of the
/// constructors' argument order.
static OMPTeamsDistributeParallelForDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI hook: true iff \a T is a teams-distribute-parallel-for
/// directive.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
}
};
/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
// NOTE(review): the trailing 1 presumably reserves the single child slot
// for the associated statement (cf. Create's AssociatedStmt parameter) —
// confirm against OMPExecutableDirective's constructor.
OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass,
OMPD_target_teams, StartLoc, EndLoc, NumClauses,
1) {}
/// Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
/// Source locations are invalid; the befriended ASTStmtReader is expected
/// to fill the directive in during deserialization.
explicit OMPTargetTeamsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass,
OMPD_target_teams, SourceLocation(),
SourceLocation(), NumClauses, 1) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetTeamsDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// LLVM-style RTTI hook: true iff \a T is a target-teams directive.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
}
};
/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass,
OMPD_target_teams_distribute, StartLoc, EndLoc,
CollapsedNum, NumClauses) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Source locations are invalid; the befriended ASTStmtReader is expected
/// to fill the directive in during deserialization.
explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass,
OMPD_target_teams_distribute, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetTeamsDistributeDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Note: \a NumClauses precedes \a CollapsedNum here — the reverse of the
/// constructors' argument order.
static OMPTargetTeamsDistributeDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI hook: true iff \a T is a target-teams-distribute
/// directive.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass;
}
};
/// This represents '#pragma omp target teams distribute parallel for' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForDirective final
: public OMPLoopDirective {
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this,
OMPTargetTeamsDistributeParallelForDirectiveClass,
OMPD_target_teams_distribute_parallel_for, StartLoc,
EndLoc, CollapsedNum, NumClauses) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Source locations are invalid; the befriended ASTStmtReader is expected
/// to fill the directive in during deserialization.
explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(
this, OMPTargetTeamsDistributeParallelForDirectiveClass,
OMPD_target_teams_distribute_parallel_for, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetTeamsDistributeParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Note: \a NumClauses precedes \a CollapsedNum here — the reverse of the
/// constructors' argument order.
static OMPTargetTeamsDistributeParallelForDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI hook: true iff \a T is a
/// target-teams-distribute-parallel-for directive.
static bool classof(const Stmt *T) {
return T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForDirectiveClass;
}
};
/// This represents '#pragma omp target teams distribute parallel for simd'
/// combined directive.
///
/// \code
/// #pragma omp target teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for simd' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForSimdDirective final
: public OMPLoopDirective {
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this,
OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
OMPD_target_teams_distribute_parallel_for_simd,
StartLoc, EndLoc, CollapsedNum, NumClauses) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Source locations are invalid; the befriended ASTStmtReader is expected
/// to fill the directive in during deserialization.
explicit OMPTargetTeamsDistributeParallelForSimdDirective(
unsigned CollapsedNum, unsigned NumClauses)
: OMPLoopDirective(
this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetTeamsDistributeParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Note: \a NumClauses precedes \a CollapsedNum here — the reverse of the
/// constructors' argument order.
static OMPTargetTeamsDistributeParallelForSimdDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI hook: true iff \a T is a
/// target-teams-distribute-parallel-for-simd directive.
static bool classof(const Stmt *T) {
return T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForSimdDirectiveClass;
}
};
/// This represents '#pragma omp target teams distribute simd' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute simd'
/// has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass,
OMPD_target_teams_distribute_simd, StartLoc, EndLoc,
CollapsedNum, NumClauses) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Source locations are invalid; the befriended ASTStmtReader is expected
/// to fill the directive in during deserialization.
explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum,
unsigned NumClauses)
: OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass,
OMPD_target_teams_distribute_simd, SourceLocation(),
SourceLocation(), CollapsedNum, NumClauses) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetTeamsDistributeSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
/// Note: \a NumClauses precedes \a CollapsedNum here — the reverse of the
/// constructors' argument order.
static OMPTargetTeamsDistributeSimdDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// LLVM-style RTTI hook: true iff \a T is a target-teams-distribute-simd
/// directive.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
}
};
} // end namespace clang
#endif
|
for_simple.c | /* PMSIS includes */
#include "pmsis.h"
#include "omp.h"
#define NB_CORES (8)
static int32_t core_iterations[NB_CORES] = { 0 };
static uint32_t errors = 0;
/* Cluster main entry, executed by core 0.
 *
 * Forks an OpenMP team of NB_CORES threads, distributes 64 iterations over
 * them, and records per-thread iteration counts in core_iterations[].
 * Any anomaly (out-of-range thread id, a core with zero iterations) bumps
 * the file-scope 'errors' counter checked by the caller. 'arg' is unused.
 */
void cluster_delegate(void *arg)
{
(void) arg;  /* required by the task-entry signature, intentionally unused */
printf("Cluster master core entry\n");
#pragma omp parallel num_threads(NB_CORES)
{
printf("[%d %d] Fork entry\n", pi_cluster_id(), omp_get_thread_num() );
#pragma omp for
for (int i=0; i<64; i++)
{
int32_t core_id = omp_get_thread_num();
/* BUG FIX: valid ids are 0..NB_CORES-1. The original test used '>',
 * so core_id == NB_CORES fell into the else branch and wrote one
 * element past the end of core_iterations[]. */
if (core_id >= NB_CORES)
{
/* 'errors' is shared by the whole team: increment atomically. */
#pragma omp atomic
errors++;
}
else
{
/* one slot per thread id, so no two threads touch the same index */
core_iterations[core_id]++;
}
printf("[%d %d] for entry index %d\n", pi_cluster_id(), omp_get_thread_num(), i );
}
}
/* Sequential epilogue: every core must have executed at least one chunk. */
for (int i = 0; i < NB_CORES; i++)
{
if (core_iterations[i] == 0)
{
errors++;
printf("Core #%d has no iteration\n", i);
}
}
printf("Cluster master core exit\n");
}
void helloworld(void)
{
printf("Entering main controller\n");
uint32_t errors = 0;
uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id();
printf("[%d %d] Hello World!\n", cluster_id, core_id);
struct pi_device cluster_dev = {0};
struct pi_cluster_conf cl_conf = {0};
/* Init cluster configuration structure. */
pi_cluster_conf_init(&cl_conf);
cl_conf.id = 0; /* Set cluster ID. */
/* Configure & open cluster. */
pi_open_from_conf(&cluster_dev, &cl_conf);
if (pi_cluster_open(&cluster_dev))
{
printf("Cluster open failed !\n");
pmsis_exit(-1);
}
/* Prepare cluster task and send it to cluster. */
struct pi_cluster_task cl_task = {0};
cl_task.entry = cluster_delegate;
cl_task.arg = NULL;
pi_cluster_send_task_to_cl(&cluster_dev, &cl_task);
pi_cluster_close(&cluster_dev);
if (errors)
{
printf("Test failed!\n");
}
else
{
printf("Test success!\n");
}
pmsis_exit(errors);
}
/* Program entry point: print the banner, then hand control to the PMSIS
 * runtime with helloworld() as the boot task; its status becomes ours. */
int main(void)
{
printf("\n\n\t *** PMSIS HelloWorld ***\n\n");
int status = pmsis_kickoff((void *) helloworld);
return status;
}
|
deconvolution_packnto1_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transposed convolution (deconvolution): packn-lane packed fp16 input ->
// unpacked fp16 output, accumulating in fp32 for precision (the "s" variant).
// Iterates output pixels and gathers the contributing input pixels.
static void deconvolution_packnto1_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
// fp16 lanes per vector register: VLEN bytes / sizeof(__fp16)
const int packn = csrr_vlenb() / 2;
const word_type vl = vsetvl_e16m1(packn);
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// effective kernel footprint once dilation gaps are included
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
const int maxk = kernel_w * kernel_h;
const float* bias_data_ptr = bias_data;
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
// scalar part of the accumulator starts at the bias (if any)
float sum = 0.f;
if (bias_data_ptr)
{
sum = bias_data_ptr[p];
}
vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);
// weights for output channel p: maxk taps per input channel, packn wide
const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
for (int y = 0; y < kernel_h; y++)
{
// gather formulation: find the input row whose stride_h-upsampled
// position reaches output row i through kernel tap y; skip taps
// that fall between strides or outside the input
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
// same mapping along the width axis
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn;
int k = y * kernel_w + x;
vfloat16m1_t _val = vle16_v_f16m1(sptr, vl);
vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);
// widening multiply-accumulate: fp16 x fp16 summed into fp32
_sum = vfwmacc_vv_f32m2(_sum, _val, _w, vl);
}
}
kptr += maxk * packn;
}
#if C906
// TODO
// scalar horizontal-reduction workaround (vector reduce path disabled
// on C906). NOTE(review): the inner 'i' shadows the output-row index;
// harmless since it only indexes ss, but worth renaming upstream.
std::vector<float> ss(packn);
vse32_v_f32m2((float*)ss.data(), _sum, vl);
for (int i = 0; i < packn; i++)
{
sum += ss[i];
}
#else
// fold the packn fp32 partial sums plus the scalar 'sum' into one value
sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m2_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));
#endif
sum = activation_ss(sum, activation_type, activation_params);
outptr[j] = sum;
}
outptr += outw;
}
}
}
// Same transposed-convolution kernel as the fp16s variant above, but the
// "sa" (fp16 arithmetic) version: bias and accumulation stay in fp16
// throughout, trading a little precision for speed.
static void deconvolution_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
// fp16 lanes per vector register: VLEN bytes / sizeof(__fp16)
const int packn = csrr_vlenb() / 2;
const word_type vl = vsetvl_e16m1(packn);
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// effective kernel footprint once dilation gaps are included
const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
const int maxk = kernel_w * kernel_h;
const __fp16* bias_data_ptr = bias_data_fp16;
// num_output
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
// scalar accumulator starts at the (fp16) bias, if present
__fp16 sum = 0.f;
if (bias_data_ptr)
{
sum = bias_data_ptr[p];
}
vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);
// weights for output channel p: maxk taps per input channel, packn wide
const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;
// channels
for (int q = 0; q < channels; q++)
{
const Mat m = bottom_blob.channel(q);
for (int y = 0; y < kernel_h; y++)
{
// gather formulation: map output row i back through tap y to the
// input row; skip taps between strides or outside the input
int sys = (i + y * dilation_h - (kernel_extent_h - 1));
if (sys < 0 || sys % stride_h != 0)
continue;
int sy = sys / stride_h;
if (sy >= h)
continue;
for (int x = 0; x < kernel_w; x++)
{
// same mapping along the width axis
int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
if (sxs < 0 || sxs % stride_w != 0)
continue;
int sx = sxs / stride_w;
if (sx >= w)
continue;
const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn;
int k = y * kernel_w + x;
vfloat16m1_t _val = vle16_v_f16m1(sptr, vl);
vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);
// fp16 multiply-accumulate (no widening in this variant)
_sum = vfmacc_vv_f16m1(_sum, _val, _w, vl);
}
}
kptr += maxk * packn;
}
// fold the packn fp16 partial sums plus the scalar 'sum' into one value
sum = vfmv_f_s_f16m1_f16(vfredsum_vs_f16m1_f16m1(vfloat16m1_t(), _sum, vfmv_s_f_f16m1(vfloat16m1_t(), sum, vl), vl));
sum = activation_ss(sum, activation_type, activation_params);
outptr[j] = sum;
}
outptr += outw;
}
}
}
|
4.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <math.h>
/* Inclusive parallel prefix sum (Hillis–Steele scan) of v[0..n-1].
 * v2 is caller-provided scratch of the same length. The two buffers are
 * swapped each round instead of copied, so the result may end up in either
 * allocation; the returned pointer is the buffer holding the result. */
static double *prefix_scan(double *v, double *v2, int n)
{
/* number of doubling rounds = ceil(log2(n)); computed with integer
 * shifts, so no <math.h>/libm and no log2(0) edge case for n < 2 */
int ln = 0;
while (ln < 31 && (1 << ln) < n)
ln++;
int i, j;
for (i = 0; i < ln; i++) {
int is = 1 << i;       /* shift distance 2^i — exact, unlike pow() */
#pragma omp parallel for private(j) shared(v2, v)
for (j = 0; j < n; j++) {
v2[j] = (j >= is) ? v[j] + v[j - is] : v[j];
}
/* BUG FIX: the original memcpy'd v2 back into v without including
 * <string.h> (implicit declaration); swapping the buffer roles is
 * both correct and O(1). */
double *tmp = v;
v = v2;
v2 = tmp;
}
return v;
}
/* Reads n and n doubles from 4_in.txt, writes n and the inclusive prefix
 * sums (4 decimals) to 4_out.txt. Returns 0 on success, 1 on I/O or
 * allocation failure (the original ignored all fopen/fscanf failures). */
int main() {
FILE *in = fopen("4_in.txt", "r");
FILE *out = fopen("4_out.txt", "w");
int n = 0;
double *v = NULL, *v2 = NULL;
int rc = 1;
int i;
if (in == NULL || out == NULL)
goto done;                     /* could not open input/output file */
if (fscanf(in, "%d", &n) != 1 || n < 0)
goto done;                     /* malformed or negative count */
v = (double*) calloc(n, sizeof(double));
v2 = (double*) calloc(n, sizeof(double));
if (n > 0 && (v == NULL || v2 == NULL))
goto done;                     /* allocation failure */
for (i = 0; i < n; i++) {
if (fscanf(in, "%lf", &(v[i])) != 1)
goto done;                     /* fewer values than promised */
v2[i] = v[i];
}
{
double *res = prefix_scan(v, v2, n);
fprintf(out, "%d\n", n);
for (i = 0; i < n; i++) {
fprintf(out, "%.4lf\n", res[i]);
}
}
rc = 0;
done:
/* goto-based cleanup: both buffers and both streams on every path */
free(v);
free(v2);
if (in) fclose(in);
if (out) fclose(out);
return rc;
} |
GB_unop__identity_fp32_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_uint16)
// op(A') function: GB (_unop_tran__identity_fp32_uint16)
// C type: float
// A type: uint16_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Element-wise Cx [p] = (float) Ax [p] over anz entries, parallelized with
// a static OpenMP schedule across nthreads threads.
GrB_Info GB (_unop_apply__identity_fp32_uint16)
(
float *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/full case: every entry of Ax is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
// (entries with Ab [p] == 0 are absent and skipped)
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The actual loop body lives in the shared template GB_unop_transpose.c,
// which is specialized here via the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fp32_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pfor-no-schedule.c | #include <omp.h>
#include <stdio.h>
#define N 100000
int i, chunk,id;
float a[N], b[N], c[N];
/* Vector add c = a + b over N elements, printing one progress line per
 * reporting chunk from the thread that executes it. */
int main ()
{
/* Some initializations */
for (i=0; i < N; i++)
a[i] = b[i] = i * 1.0;
/* FIX: the original 'parallel for' recomputed chunk = N / num_threads and
 * re-queried the thread id on every iteration, and would divide/mod by
 * zero whenever omp_get_num_threads() > N. Split into parallel + for so
 * each thread computes its reporting stride exactly once, in locals
 * (no private-globals needed). */
#pragma omp parallel shared(a,b,c)
{
int nthreads = omp_get_num_threads();
int tid = omp_get_thread_num();
int stride = N / nthreads;
if (stride == 0)
stride = 1;              /* guard against idx % 0 */
#pragma omp for
for (int idx = 0; idx < N; idx++)
{
c[idx] = a[idx] + b[idx];
if ( (idx%stride)==0 ) printf("Iteration #%d in thread #%d\n",idx, tid);
}
}
return 0;
}
|
5493.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Array initialization: seed A with the deterministic pattern
   (row + col) / nj so runs are reproducible. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int r, c;
  for (r = 0; r < ni; r++) {
    for (c = 0; c < nj; c++) {
      A[r][c] = ((DATA_TYPE) (r + c) / nj);
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code.  Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Prints every element of B to stderr, breaking the line every
   20 values. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int r, c;
  for (r = 0; r < ni; r++) {
    for (c = 0; c < nj; c++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[r][c]);
      if ((r * NJ + c) % 20 == 0)
        fprintf(stderr, "\n");
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: 2-D 3x3 stencil convolution, B = conv(A).
   The whole function is timed, including the call and return. */
static
void kernel_conv2d(int ni,
                   int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  /* Fixed: the original pragma contained unexpanded autotuning
     placeholders -- collapse(#P12) schedule(#P9, #P11) num_threads(#P11)
     -- which are not valid OpenMP and cannot compile, and it was
     followed by a stray empty "#pragma omp".  Parallelize the outer
     loop and let the runtime pick schedule/thread count. */
  #pragma omp parallel for private(i, j)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}
// PolyBench driver: allocate, initialize, time the kernel, and print the
// live-out array so dead-code elimination cannot remove the computation.
int main(int argc, char** argv)
{
    /* Retrieve problem size. */
    int ni = NI;
    int nj = NJ;
    /* Variable declaration/allocation. */
    POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
    POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
    /* Initialize array(s). */
    init_array (ni, nj, POLYBENCH_ARRAY(A));
    /* Start timer. */
    //polybench_start_instruments;
    polybench_timer_start();
    /* Run kernel. */
    kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
    /* Stop and print timer. */
    polybench_timer_stop();
    polybench_timer_print();
    //polybench_stop_instruments;
    //polybench_print_instruments;
    /* Prevent dead-code elimination. All live-out data must be printed
       by the function call in argument. */
    polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
    /* Be clean. */
    POLYBENCH_FREE_ARRAY(A);
    POLYBENCH_FREE_ARRAY(B);
    return 0;
}
|
GB_binop__land_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_fp64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__land_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__land_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_fp64)
// A*D function (colscale): GB (_AxD__land_fp64)
// D*A function (rowscale): GB (_DxB__land_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__land_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__land_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_fp64)
// C=scalar+B GB (_bind1st__land_fp64)
// C=scalar+B' GB (_bind1st_tran__land_fp64)
// C=A+scalar GB (_bind2nd__land_fp64)
// C=A'+scalar GB (_bind2nd_tran__land_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_FP64 || GxB_NO_LAND_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator
GrB_Info GB (_Cdense_ewise3_noaccum__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type is compiled out; use the generic kernel instead
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into an already-dense C
GrB_Info GB (_Cdense_accumB__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // the parallel accumulate loop comes from the shared template
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into an already-dense C
GrB_Info GB (_Cdense_accumb__land_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // Fixed: a duplicate "return (GrB_SUCCESS)" inside the scalar block
    // made this trailing return unreachable; keep the single exit point,
    // consistent with _Cdense_accumB above.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D
GrB_Info GB (_AxD__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same sparsity pattern as A; only its values are written
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D
GrB_Info GB (_DxB__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same sparsity pattern as B; only its values are written
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, using the LAND operator on fp64
GrB_Info GB (_AaddB__land_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspaces, allocated/freed by the template
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B
GrB_Info GB (_AemultB_01__land_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full
GrB_Info GB (_AemultB_02__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (LAND is commutative, so this branch is the one compiled here.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B, M sparse/hyper, A and B bitmap/full
GrB_Info GB (_AemultB_03__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap
GrB_Info GB (_AemultB_bitmap__land_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = land(x,bij) with the scalar bound first.
// Cx and Bx may be aliased; Bb is B->b when B is bitmap (else NULL).
GrB_Info GB (_bind1st__land_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Bx = (double *) Bx_input ;
    double x = (*((double *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // process only entries present in the bitmap (or all, if full)
        if (GBB (Bb, k))
        {
            double bval = Bx [k] ;
            Cx [k] = ((x != 0) && (bval != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = land(aij,y) with the scalar bound second.
// Cx and Ax may be aliased; Ab is A->b when A is bitmap (else NULL).
GrB_Info GB (_bind2nd__land_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // process only entries present in the bitmap (or all, if full)
        if (GBB (Ab, k))
        {
            double aval = Ax [k] ;
            Cx [k] = ((aval != 0) && (y != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply land with the scalar bound first
GrB_Info GB (_bind1st_tran__land_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = op (A', y): transpose A and apply land with the scalar bound second
GrB_Info GB (_bind2nd_tran__land_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the per-entry work is GB_CAST_OP, defined just above this function
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
residualbased_incrementalupdate_variable_property_static_scheme.h | /*
==============================================================================
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNER.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
/* *********************************************************
*
* Last Modified by: $Author: rrossi $
* Date: $Date: 2007-03-06 10:30:34 $
* Revision: $Revision: 1.2 $
*
* ***********************************************************/
#if !defined(KRATOS_NEW_STANDARD_STATIC_VARIABLE_PROPERTY_SCHEME)
#define KRATOS_NEW_STANDARD_STATIC_VARIABLE_PROPERTY_SCHEME
/* System includes */
/* External includes */
#include "boost/smart_ptr.hpp"
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
// #include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "includes/variables.h"
#include "includes/c2c_variables.h"
#include "includes/convection_diffusion_settings.h"
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** Short class definition.
This class provides the implementation of the basic tasks that are needed by the solution strategy.
It is intended to be the place for tailoring the solution strategies to problem specific tasks.
Detail class definition.
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
template<class TSparseSpace,
class TDenseSpace //= DenseSpace<double>
>
class ResidualBasedIncrementalUpdateStaticVariablePropertyScheme : public ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace,TDenseSpace>
{
public:
/**@name Type Definitions */
/*@{ */
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedIncrementalUpdateStaticVariablePropertyScheme);
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
/*@} */
/**@name Life Cycle
*/
/*@{ */
/** Constructor.
*/
ResidualBasedIncrementalUpdateStaticVariablePropertyScheme()
: ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace,TDenseSpace>()
{}
/** Destructor.
*/
virtual ~ResidualBasedIncrementalUpdateStaticVariablePropertyScheme() {}
/*@} */
/**@name Operators
*/
/*@{ */
/**
Performing the Initialize of the solution.
*/
    /**
     * One-time initialization of nodal material data.
     * For every node, looks up temperature-dependent properties from the
     * model part's tables (1: density, 2: specific heat, 3: solid fraction,
     * 4: dF/dT, 5: conductivity) and writes them into the solution-step
     * database, seeding ENTHALPY over the whole buffer.
     * Nodes with DISTANCE <= 0 are treated as material; DISTANCE > 0 as air
     * (hard-coded specific heat 1000, conductivity scaled by 1000).
     * NOTE(review): tables are copied by value here — presumably cheap;
     * confirm ModelPart::GetTable's return semantics before changing.
     */
    void Initialize(
        ModelPart& r_model_part
    ) override
    {
        //Initialize variables
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ConvectionDiffusionSettings::Pointer my_settings = CurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS);
        const Variable<double>& rUnknownVar = my_settings->GetUnknownVariable();
        const Variable<double>& rDensityVar = my_settings->GetDensityVariable();
        const Variable<double>& rDiffusionVar = my_settings->GetDiffusionVariable();
        // const Variable<double>& rTransferCoef = my_settings->GetTransferCoefficientVariable();
        // const double amb_temp = CurrentProcessInfo[AMBIENT_TEMPERATURE];
        ModelPart::TableType rDensityVar_table = r_model_part.GetTable(1);
        ModelPart::TableType C_table = r_model_part.GetTable(2);
        ModelPart::TableType F_table = r_model_part.GetTable(3);
        ModelPart::TableType DF_DT_table = r_model_part.GetTable(4);
        ModelPart::TableType rDiffusionVar_table = r_model_part.GetTable(5);
        //ModelPart::TableType HTC_table = r_model_part.GetTable(7);
        double density_var = r_model_part.GetProcessInfo()[DENSITY];
        const double latent_heat = r_model_part.GetProcessInfo()[LATENT_HEAT];
        const unsigned int buffer_size = r_model_part.GetBufferSize();
        // for(typename ModelPart::NodesContainerType::iterator ind=r_model_part.NodesBegin(); ind != r_model_part.NodesEnd();ind++)
        // each node is written independently, so the loop is parallel-safe
        #pragma omp parallel for
        for (int k = 0; k< static_cast<int> (r_model_part.Nodes().size()); k++)
        {
            ModelPart::NodesContainerType::iterator ind = r_model_part.NodesBegin() + k;
            const double unknown_val = ind->FastGetSolutionStepValue(rUnknownVar);
            const double dist = ind->FastGetSolutionStepValue(DISTANCE);
            /*double htc_var = ind->FastGetSolutionStepValue(rTransferCoef);
            double rho = rDensityVar_table.GetValue(unknown_val);
            double cc =C_table.GetValue(unknown_val);
            ind->FastGetSolutionStepValue(rTransferCoef) = htc_var/(rho*cc); */
            double specific_heat_var =C_table.GetValue(unknown_val);
            //double htc_var = HTC_table.GetValue(unknown_val);
            //ind->FastGetSolutionStepValue(rTransferCoef) = htc_var;
            double conductivity_var = rDiffusionVar_table.GetValue(unknown_val);
            if(dist <= 0)
            {
                // material node: table-driven properties, filled over both
                // the current and previous step of the buffer
                // double density_var = rDensityVar_table.GetValue(unknown_val);
                // double specific_heat_var =C_table.GetValue(unknown_val);
                double solid_fraction_var = F_table.GetValue(unknown_val);
                double solid_fraction_rate_var = DF_DT_table.GetValue(unknown_val);
                //double htc_var = HTC_table.GetValue(unknown_val);
                ind->FastGetSolutionStepValue(rDensityVar) = density_var;
                ind->FastGetSolutionStepValue(rDensityVar,1) = density_var;
                ind->FastGetSolutionStepValue(SPECIFIC_HEAT) = specific_heat_var;
                ind->FastGetSolutionStepValue(SPECIFIC_HEAT,1) = specific_heat_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION) = solid_fraction_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION,1) = solid_fraction_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE) = solid_fraction_rate_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE,1) = solid_fraction_rate_var;
                ind->FastGetSolutionStepValue(rDiffusionVar) = conductivity_var;
                ind->FastGetSolutionStepValue(rDiffusionVar,1) = conductivity_var;
                //ind->FastGetSolutionStepValue(rTransferCoef) = htc_var;
                //assign an initial value to the enthalpy
                const double initial_enthalpy = specific_heat_var*unknown_val + (1.0-ind->FastGetSolutionStepValue(SOLIDFRACTION))*latent_heat;
                for(unsigned int i=0; i<buffer_size; i++)
                    ind->FastGetSolutionStepValue(ENTHALPY,i) = initial_enthalpy;
            }
            else
            {
                // air node: fixed properties; NOTE(review) conductivity is
                // the table value scaled by 1000 — confirm intended scaling
                const double specific_heat_air = 1000.0;
                ind->FastGetSolutionStepValue(rDensityVar) = 1.0;
                ind->FastGetSolutionStepValue(SPECIFIC_HEAT) = specific_heat_air;
                ind->FastGetSolutionStepValue(SOLIDFRACTION) = 0.0;
                ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE) = 0.0;
                ind->FastGetSolutionStepValue(rDiffusionVar) = conductivity_var*1000.0; //0.05
                //ind->FastGetSolutionStepValue(rTransferCoef) = 0.0; ///(density_var*specific_heat_var);
                // ind->FastGetSolutionStepValue(rUnknownVar) = amb_temp;
                //assign an initial value to the enthalpy
                for(unsigned int i=0; i<buffer_size; i++)
                    ind->FastGetSolutionStepValue(ENTHALPY,i) = specific_heat_air*unknown_val;
            }
        }
        // mSchemeIsInitialized = true;
    }
//***************************************************************************
    /**
     * Per-step setup: calls the base scheme, then refreshes every node's
     * material properties from the lookup tables using the current value
     * of the unknown (temperature) variable.
     * Material nodes (DISTANCE <= 0) also get SOLIDFRACTION mirrored into
     * the non-historical database and ENTHALPY reset (delta_enthalpy is
     * currently hard-coded to 0.0 — the incremental formula is commented
     * out); air nodes get fixed properties.
     */
    void InitializeSolutionStep(
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
    ) override
    {
        KRATOS_TRY
        BaseType::InitializeSolutionStep(r_model_part,A,Dx,b);
        //update variables based on resolved unknowns
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ConvectionDiffusionSettings::Pointer my_settings = CurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS);
        const Variable<double>& rUnknownVar = my_settings->GetUnknownVariable();
        const Variable<double>& rDensityVar = my_settings->GetDensityVariable();
        const Variable<double>& rDiffusionVar = my_settings->GetDiffusionVariable();
        //const Variable<double>& rTransferCoef = my_settings->GetTransferCoefficientVariable();
        //const double amb_temp = CurrentProcessInfo[AMBIENT_TEMPERATURE];
        ModelPart::TableType rDensityVar_table = r_model_part.GetTable(1);
        ModelPart::TableType C_table = r_model_part.GetTable(2);
        ModelPart::TableType F_table = r_model_part.GetTable(3);
        ModelPart::TableType DF_DT_table = r_model_part.GetTable(4);
        ModelPart::TableType rDiffusionVar_table = r_model_part.GetTable(5);
        ModelPart::TableType HTC_table = r_model_part.GetTable(7);  // NOTE(review): only used by commented-out code
        double density_var = r_model_part.GetProcessInfo()[DENSITY];
        //const double latent_heat = r_model_part.GetProcessInfo()[LATENT_HEAT];
        //for(typename ModelPart::NodesContainerType::iterator ind=r_model_part.NodesBegin(); ind != r_model_part.NodesEnd();ind++)
        // each node is written independently, so the loop is parallel-safe
        #pragma omp parallel for
        for (int k = 0; k< static_cast<int> (r_model_part.Nodes().size()); k++)
        {
            ModelPart::NodesContainerType::iterator ind = r_model_part.NodesBegin() + k;
            const double unknown_val = ind->FastGetSolutionStepValue(rUnknownVar);
            const double dist = ind->FastGetSolutionStepValue(DISTANCE);
            double specific_heat_var =C_table.GetValue(unknown_val);
            //double htc_var = HTC_table.GetValue(unknown_val);
            //ind->FastGetSolutionStepValue(rTransferCoef) = htc_var;
            if(dist <= 0)
            {
                //double density_var = rDensityVar_table.GetValue(unknown_val);
                double solid_fraction_var = F_table.GetValue(unknown_val);
                double solid_fraction_rate_var = DF_DT_table.GetValue(unknown_val);
                double conductvity_var = rDiffusionVar_table.GetValue(unknown_val);
                ind->FastGetSolutionStepValue(rDensityVar) = density_var;
                ind->FastGetSolutionStepValue(SPECIFIC_HEAT) = specific_heat_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION) = solid_fraction_var;
                ind->GetValue(SOLIDFRACTION) = solid_fraction_var; //also save in database without history
                ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE) = solid_fraction_rate_var;
                ind->FastGetSolutionStepValue(rDiffusionVar) = conductvity_var;
                //ind->FastGetSolutionStepValue(rTransferCoef) = htc_var;
                //here compute the ENTHALPY = int(c dT,0,T) + L(T)
                //which should be computed incrementally as Hn+1 = Hn + 1/2*(Tn+1 - Tn)*(cn+1 - cn) + L(Tn+1) - L(Tn)
                // const double Delta_T = unknown_val - ind->GetValue(rUnknownVar);
                // const double delta_solid_fraction = 0.0; //(1-Sn+1) - (1-Sn)
                const double delta_enthalpy = 0.0; //Delta_T*specific_heat_var + delta_solid_fraction*latent_heat;
                ind->FastGetSolutionStepValue(ENTHALPY) = /*ind->FastGetSolutionStepValue(ENTHALPY,1) +*/ delta_enthalpy;
                ind->GetValue(ENTHALPY) = ind->FastGetSolutionStepValue(ENTHALPY);
                ind->GetValue(SOLIDFRACTION) = solid_fraction_var;
                ind->GetValue(SOLIDFRACTION_RATE) = solid_fraction_rate_var;
            }
            else
            {
                // air node: fixed properties, enthalpy proportional to the unknown
                const double conductivity_var = rDiffusionVar_table.GetValue(unknown_val);
                const double specific_heat_air = 1000.0;
                ind->FastGetSolutionStepValue(rDensityVar) = 1.0; //density_var; //1.0;
                ind->FastGetSolutionStepValue(SPECIFIC_HEAT) = specific_heat_air;
                ind->FastGetSolutionStepValue(SOLIDFRACTION) = 0.0;
                ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE) = 0.0;
                ind->FastGetSolutionStepValue(rDiffusionVar) = conductivity_var*1000.0; //0.05
                //ind->FastGetSolutionStepValue(rTransferCoef) = 0.0; //htc_var/(50.0); //*density_var*specific_heat_air);
                ind->FastGetSolutionStepValue(ENTHALPY) = specific_heat_air*unknown_val; // * (ind->FastGetSolutionStepValue(rUnknownVar)) ;
                ind->GetValue(ENTHALPY) = specific_heat_air*unknown_val;
                ind->GetValue(SOLIDFRACTION) = 0.0;
                ind->GetValue(SOLIDFRACTION_RATE) = 0.0;
            }
        }
        KRATOS_CATCH("")
    }
/**
Performing the update of the solution.
*/
//***************************************************************************
    /**
     * Performing the update of the solution.
     * First applies Dx to every free degree of freedom, then refreshes
     * each node's SOLIDFRACTION / SOLIDFRACTION_RATE from the tables and
     * recomputes ENTHALPY:
     *   material nodes: delta H = dT * c + d(solid fraction) * latent heat
     *     (NOTE(review): the accumulation with the previous enthalpy is
     *     commented out, so ENTHALPY holds only the increment — confirm)
     *   air nodes: H = c_air * T.
     */
    void Update(
        ModelPart& r_model_part,
        DofsArrayType& rDofSet,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
    ) override
    {
        KRATOS_TRY
        // standard incremental update: x += Dx for free dofs only
        for(typename DofsArrayType::iterator i_dof = rDofSet.begin() ; i_dof != rDofSet.end() ; ++i_dof)
        {
            if(i_dof->IsFree())
            {
                i_dof->GetSolutionStepValue() += Dx[i_dof->EquationId()];
            }
        }
        //update variables based on resolved unknowns
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ConvectionDiffusionSettings::Pointer my_settings = CurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS);
        const Variable<double>& rUnknownVar = my_settings->GetUnknownVariable();
        //const Variable<double>& rDensityVar = my_settings->GetDensityVariable();
        //const Variable<double>& rDiffusionVar = my_settings->GetDiffusionVariable();
        //const Variable<double>& rTransferCoef = my_settings->GetTransferCoefficientVariable();
        //const double amb_temp = CurrentProcessInfo[AMBIENT_TEMPERATURE];
        ModelPart::TableType rDensityVar_table = r_model_part.GetTable(1);
        ModelPart::TableType C_table = r_model_part.GetTable(2);
        ModelPart::TableType F_table = r_model_part.GetTable(3);
        ModelPart::TableType DF_DT_table = r_model_part.GetTable(4);
        ModelPart::TableType rDiffusionVar_table = r_model_part.GetTable(5);
        // ModelPart::TableType HTC_table = r_model_part.GetTable(7);
        //double density_var = r_model_part.GetProcessInfo()[DENSITY];
        const double latent_heat = r_model_part.GetProcessInfo()[LATENT_HEAT];
        //for(typename ModelPart::NodesContainerType::iterator ind=r_model_part.NodesBegin(); ind != r_model_part.NodesEnd();ind++)
        // each node is written independently, so the loop is parallel-safe
        #pragma omp parallel for
        for (int k = 0; k< static_cast<int> (r_model_part.Nodes().size()); k++)
        {
            ModelPart::NodesContainerType::iterator ind = r_model_part.NodesBegin() + k;
            const double unknown_val = ind->FastGetSolutionStepValue(rUnknownVar);
            const double dist = ind->FastGetSolutionStepValue(DISTANCE);
            // double specific_heat_var =C_table.GetValue(unknown_val);
            // double htc_var = HTC_table.GetValue(unknown_val);
            // ind->FastGetSolutionStepValue(rTransferCoef) = htc_var;
            if(dist <= 0)
            {
                //double density_var = rDensityVar_table.GetValue(unknown_val);
                double solid_fraction_var = F_table.GetValue(unknown_val);
                double solid_fraction_rate_var = DF_DT_table.GetValue(unknown_val);
                // double conductvity_var = rDiffusionVar_table.GetValue(unknown_val);
                // ind->FastGetSolutionStepValue(rDensityVar) = density_var;
                // ind->FastGetSolutionStepValue(SPECIFIC_HEAT) = specific_heat_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION) = solid_fraction_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE) = solid_fraction_rate_var;
                // ind->FastGetSolutionStepValue(rDiffusionVar) = conductvity_var;
                // ind->FastGetSolutionStepValue(rTransferCoef) = htc_var;
                //here compute the ENTHALPY = int(c dT,0,T) + L(T)
                //which should be computed incrementally as Hn+1 = Hn + 1/2*(Tn+1 - Tn)*(cn+1 - cn) + L(Tn+1) - L(Tn)
                const double Delta_T = unknown_val - ind->GetValue(rUnknownVar);
                const double avg_c = ind->FastGetSolutionStepValue(SPECIFIC_HEAT);
                const double delta_solid_fraction = ind->GetValue(SOLIDFRACTION) - ind->FastGetSolutionStepValue(SOLIDFRACTION); //(1-Sn+1) - (1-Sn)
                const double delta_enthalpy = Delta_T*avg_c + delta_solid_fraction*latent_heat;
                ind->FastGetSolutionStepValue(ENTHALPY) = /*ind->GetValue(ENTHALPY) +*/ delta_enthalpy;
            }
            else
            {
                const double specific_heat_air = ind->FastGetSolutionStepValue(SPECIFIC_HEAT); //1000.0;
                // ind->FastGetSolutionStepValue(rDensityVar) = 1.0;
                // ind->FastGetSolutionStepValue(SPECIFIC_HEAT) = specific_heat_air;
                // ind->FastGetSolutionStepValue(SOLIDFRACTION) = 0.0;
                // ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE) = 0.0;
                // ind->FastGetSolutionStepValue(rDiffusionVar) = 1.0;
                // ind->FastGetSolutionStepValue(rTransferCoef) = ind->FastGetSolutionStepValue(rTransferCoef)/(density_var*specific_heat_air);
                ind->FastGetSolutionStepValue(ENTHALPY) = specific_heat_air*unknown_val; // * (ind->FastGetSolutionStepValue(rUnknownVar)) ;
            }
        }
        KRATOS_CATCH("")
    }
//***************************************************************************
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
/*@} */
/**@name Protected Operators*/
/*@{ */
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
/*@} */
/**@name Private Operators*/
/*@{ */
/*@} */
/**@name Private Operations*/
/*@{ */
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class Scheme */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_NEW_STANDARD_STATIC_SCHEME defined */
|
simd4.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* { dg-additional-options "-std=c99" { target c } } */
struct S *p; /* { dg-message "forward declaration" "" { target c++ } } */
float f;
int j;
void
foo (void)
{
#pragma omp simd linear(p) linear(f : 1) /* 'p' points to incomplete 'struct S'; matched by the dg-error directives at end of file (absolute line refs) */
  for (int i = 0; i < 10; i++)
    ;
#pragma omp simd linear(j : 7.0) /* { dg-error "step expression must be integral" } */
  for (int i = 0; i < 10; i++)
    ;
}
/* { dg-error "linear clause applied to" "" { target *-*-* } 12 } */
/* { dg-error "(incomplete|undefined) type" "" { target *-*-* } 12 } */
|
IJMatrix_parcsr.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixCreateParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
   /* Build the underlying ParCSR object for an IJ matrix.  The IJ layer keeps
      global row/column ranges; the ParCSR object wants them shifted so the
      first global row/column is zero. */
   MPI_Comm            comm      = hypre_IJMatrixComm(matrix);
   HYPRE_BigInt       *row_part  = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt       *col_part  = hypre_IJMatrixColPartitioning(matrix);
   HYPRE_BigInt        first_row = hypre_IJMatrixGlobalFirstRow(matrix);
   HYPRE_BigInt        first_col = hypre_IJMatrixGlobalFirstCol(matrix);
   hypre_ParCSRMatrix *par_matrix;
   HYPRE_BigInt       *row_starts;
   HYPRE_BigInt       *col_starts;
   HYPRE_Int           num_procs;
   HYPRE_Int           k;

   hypre_MPI_Comm_size(comm, &num_procs);

   /* Zero-base the row range.  Subtracting a zero first_row is a no-op, so
      the two branches of the original collapse into a single loop. */
   row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   for (k = 0; k < 2; k++)
   {
      row_starts[k] = row_part[k] - first_row;
   }

   if (row_part == col_part)
   {
      /* Rows and columns share one partitioning: reuse the shifted range. */
      col_starts = row_starts;
   }
   else
   {
      col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
      for (k = 0; k < 2; k++)
      {
         col_starts[k] = col_part[k] - first_col;
      }
   }

   par_matrix = hypre_ParCSRMatrixCreate(comm,
                                         hypre_IJMatrixGlobalNumRows(matrix),
                                         hypre_IJMatrixGlobalNumCols(matrix),
                                         row_starts, col_starts, 0, 0, 0);

   hypre_IJMatrixObject(matrix) = par_matrix;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetRowSizesParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix *matrix,
                                const HYPRE_Int *sizes)
{
   /* Record the user-supplied (estimated) entries-per-local-row in the
      auxiliary matrix, creating the auxiliary object on first use. */
   HYPRE_BigInt *row_part    = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_part    = hypre_IJMatrixColPartitioning(matrix);
   HYPRE_Int     nrows_local = (HYPRE_Int)(row_part[1] - row_part[0]);
   HYPRE_Int     ncols_local = (HYPRE_Int)(col_part[1] - col_part[0]);
   HYPRE_Int    *row_space   = NULL;
   HYPRE_Int     k;
   hypre_AuxParCSRMatrix *aux_matrix =
      (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   /* Reuse an existing row-space array when the aux matrix already has one. */
   if (aux_matrix)
   {
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
   }
   if (row_space == NULL)
   {
      row_space = hypre_CTAlloc(HYPRE_Int, nrows_local, HYPRE_MEMORY_HOST);
   }

   for (k = 0; k < nrows_local; k++)
   {
      row_space[k] = sizes[k];
   }

   if (aux_matrix == NULL)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, nrows_local, ncols_local, row_space);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* GPU builds also track the total number of on-processor entries. */
   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = 0;
   for (k = 0; k < nrows_local; k++)
   {
      hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) += sizes[k];
   }
#endif

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetDiagOffdSizesParCSR
* sets diag_i inside the diag part of the ParCSRMatrix
* and offd_i inside the offd part,
* requires exact row sizes for diag and offd
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix *matrix,
                                     const HYPRE_Int *diag_sizes,
                                     const HYPRE_Int *offd_sizes)
{
   /* Store exact per-row entry counts for the diag and offd parts in the
      auxiliary matrix.  With exact sizes known, the row-by-row aux workspace
      is not required later (NeedAux is cleared at the end). */
   HYPRE_BigInt *row_part    = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_part    = hypre_IJMatrixColPartitioning(matrix);
   HYPRE_Int     nrows_local = (HYPRE_Int)(row_part[1] - row_part[0]);
   HYPRE_Int     ncols_local = (HYPRE_Int)(col_part[1] - col_part[0]);
   hypre_AuxParCSRMatrix *aux_matrix =
      (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   if (aux_matrix == NULL)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, nrows_local, ncols_local, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   /* Lazily allocate the two size arrays, then copy the user input. */
   if (hypre_AuxParCSRMatrixDiagSizes(aux_matrix) == NULL)
   {
      hypre_AuxParCSRMatrixDiagSizes(aux_matrix) =
         hypre_TAlloc(HYPRE_Int, nrows_local, HYPRE_MEMORY_HOST);
   }
   if (hypre_AuxParCSRMatrixOffdSizes(aux_matrix) == NULL)
   {
      hypre_AuxParCSRMatrixOffdSizes(aux_matrix) =
         hypre_TAlloc(HYPRE_Int, nrows_local, HYPRE_MEMORY_HOST);
   }

   hypre_TMemcpy(hypre_AuxParCSRMatrixDiagSizes(aux_matrix), diag_sizes,
                 HYPRE_Int, nrows_local, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   hypre_TMemcpy(hypre_AuxParCSRMatrixOffdSizes(aux_matrix), offd_sizes,
                 HYPRE_Int, nrows_local, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetMaxOnProcElmtsParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetMaxOnProcElmtsParCSR(hypre_IJMatrix *matrix,
                                      HYPRE_Int max_on_proc_elmts)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* GPU builds only: record the user-declared bound on the number of
      on-processor entries, creating the auxiliary matrix if needed. */
   MPI_Comm  comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int my_id;
   hypre_AuxParCSRMatrix *aux_matrix;

   hypre_MPI_Comm_rank(comm, &my_id);

   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (aux_matrix == NULL)
   {
      HYPRE_BigInt *row_part    = hypre_IJMatrixRowPartitioning(matrix);
      HYPRE_BigInt *col_part    = hypre_IJMatrixColPartitioning(matrix);
      HYPRE_Int     nrows_local = (HYPRE_Int)(row_part[1] - row_part[0]);
      HYPRE_Int     ncols_local = (HYPRE_Int)(col_part[1] - col_part[0]);

      hypre_AuxParCSRMatrixCreate(&aux_matrix, nrows_local, ncols_local, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = max_on_proc_elmts;
#endif
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetMaxOffProcElmtsParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix,
                                       HYPRE_Int max_off_proc_elmts)
{
   /* Record the user-declared bound on the number of off-processor entries
      (entries this rank will set in rows owned by other ranks). */
   MPI_Comm  comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int my_id;
   hypre_AuxParCSRMatrix *aux_matrix;

   hypre_MPI_Comm_rank(comm, &my_id);

   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (aux_matrix == NULL)
   {
      HYPRE_BigInt *row_part    = hypre_IJMatrixRowPartitioning(matrix);
      HYPRE_BigInt *col_part    = hypre_IJMatrixColPartitioning(matrix);
      HYPRE_Int     nrows_local = (HYPRE_Int)(row_part[1] - row_part[0]);
      HYPRE_Int     ncols_local = (HYPRE_Int)(col_part[1] - col_part[0]);

      hypre_AuxParCSRMatrixCreate(&aux_matrix, nrows_local, ncols_local, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_AuxParCSRMatrixUsrOffProcElmts(aux_matrix) = max_off_proc_elmts;
#endif
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixInitializeParCSR
*
* initializes AuxParCSRMatrix and ParCSRMatrix as necessary
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix)
{
   /* Convenience wrapper: initialize using the library-wide default memory
      location taken from the global hypre handle. */
   return hypre_IJMatrixInitializeParCSR_v2(matrix, hypre_HandleMemoryLocation(hypre_handle()));
}
HYPRE_Int
hypre_IJMatrixInitializeParCSR_v2(hypre_IJMatrix *matrix, HYPRE_MemoryLocation memory_location)
{
   /* Initialize (allocate) the ParCSR object and the auxiliary matrix so
      that Set/AddToValues calls can be made.
    *
    * matrix          - the IJ matrix to initialize
    * memory_location - where the ParCSR data arrays live; the aux matrix is
    *                   placed on host or device depending on the execution
    *                   policy derived from this location.
    * Returns hypre_error_flag. */
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   /* Aux data lives on host iff the execution policy for memory_location is host. */
   HYPRE_MemoryLocation memory_location_aux =
      hypre_GetExecPolicy1(memory_location) == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;
   if (hypre_IJMatrixAssembleFlag(matrix) == 0)
   {
      /* Not yet assembled: create any missing objects, then size the CSR
         row pointers from the user-declared diag/offd row sizes. */
      if (!par_matrix)
      {
         hypre_IJMatrixCreateParCSR(matrix);
         par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
      }
      HYPRE_Int local_num_rows = hypre_ParCSRMatrixNumRows(par_matrix);
      HYPRE_Int i;
      hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
      hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
      if (!aux_matrix)
      {
         hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, hypre_ParCSRMatrixNumCols(par_matrix), NULL);
         hypre_IJMatrixTranslator(matrix) = aux_matrix;
      }
      hypre_ParCSRMatrixInitialize_v2(par_matrix, memory_location);
      hypre_AuxParCSRMatrixInitialize_v2(aux_matrix, memory_location_aux);
      if (memory_location_aux == HYPRE_MEMORY_HOST)
      {
         /* If exact diag/offd row sizes were given (SetDiagOffdSizes), build
            the CSR row-pointer arrays by prefix sum and allocate j/data. */
         if (hypre_AuxParCSRMatrixDiagSizes(aux_matrix))
         {
            for (i = 0; i < local_num_rows; i++)
            {
               hypre_CSRMatrixI(diag)[i+1] = hypre_CSRMatrixI(diag)[i] + hypre_AuxParCSRMatrixDiagSizes(aux_matrix)[i];
            }
            hypre_CSRMatrixNumNonzeros(diag) = hypre_CSRMatrixI(diag)[local_num_rows];
            hypre_CSRMatrixInitialize(diag);
         }
         if (hypre_AuxParCSRMatrixOffdSizes(aux_matrix))
         {
            for (i = 0; i < local_num_rows; i++)
            {
               hypre_CSRMatrixI(offd)[i+1] = hypre_CSRMatrixI(offd)[i] + hypre_AuxParCSRMatrixOffdSizes(aux_matrix)[i];
            }
            hypre_CSRMatrixNumNonzeros(offd) = hypre_CSRMatrixI(offd)[local_num_rows];
            hypre_CSRMatrixInitialize(offd);
         }
      }
      if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         /* Direct-insertion mode: start each row's diag/offd fill cursor at
            the beginning of that row in the CSR arrays. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < local_num_rows; i++)
         {
            hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[i] = hypre_CSRMatrixI(diag)[i];
            hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[i] = hypre_CSRMatrixI(offd)[i];
         }
      }
   }
   else if ( memory_location_aux == HYPRE_MEMORY_HOST )
   {
      /* AB 4/06 - the assemble routine destroys the aux matrix - so we need
         to recreate if initialize is called again
      */
      if (!aux_matrix)
      {
         hypre_AuxParCSRMatrixCreate(&aux_matrix, hypre_ParCSRMatrixNumRows(par_matrix),
                                     hypre_ParCSRMatrixNumCols(par_matrix), NULL);
         hypre_AuxParCSRMatrixMemoryLocation(aux_matrix) = HYPRE_MEMORY_HOST;
         hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
         hypre_IJMatrixTranslator(matrix) = aux_matrix;
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetRowCountsParCSR
*
* gets the number of columns for rows specified by the user
*
*****************************************************************************/
HYPRE_Int hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix,
                                            HYPRE_Int       nrows,
                                            HYPRE_BigInt   *rows,
                                            HYPRE_Int      *ncols)
{
   /* For each global row id in rows[0..nrows), store in ncols[i] the number
      of stored entries of that row (diag part + offd part), or 0 (with an
      optional warning) when the row is not owned by this rank. */
   MPI_Comm            comm             = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix       = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_BigInt       *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   hypre_CSRMatrix    *diag   = hypre_ParCSRMatrixDiag(par_matrix);
   HYPRE_Int          *diag_i = hypre_CSRMatrixI(diag);
   hypre_CSRMatrix    *offd   = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int          *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_BigInt        row_index;
   HYPRE_Int           i, my_id, pstart, index;
   HYPRE_Int           print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_rank(comm, &my_id);

   pstart = 0;

   /* BUG FIX: 'index' is assigned in every iteration, so it must be private
      in the OpenMP loop.  It was previously shared, which is a data race
      (unsequenced writes from multiple threads) and could produce wrong
      counts for locally owned rows. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, row_index, index) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < nrows; i++)
   {
      row_index = rows[i];
      if (row_index >= row_partitioning[pstart] &&
          row_index < row_partitioning[pstart+1])
      {
         /* compute local row number; row length = diag length + offd length */
         index = (HYPRE_Int)(row_index - row_partitioning[pstart]);
         ncols[i] = diag_i[index+1]-diag_i[index]+offd_i[index+1]-offd_i[index];
      }
      else
      {
         ncols[i] = 0;
         if (print_level)
         {
            hypre_printf ("Warning! Row %b is not on Proc. %d!\n",
                          row_index, my_id);
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetValuesParCSR
*
* gets values of an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix,
                               HYPRE_Int       nrows,
                               HYPRE_Int      *ncols,
                               HYPRE_BigInt   *rows,
                               HYPRE_BigInt   *cols,
                               HYPRE_Complex  *values)
{
   /* Read values out of an assembled IJ matrix.
    *
    * Two modes, selected by the sign of nrows:
    *   nrows >= 0 : for each (row, col) pair supplied in rows/cols, look the
    *                entry up and write it to values (0.0 when not stored);
    *   nrows <  0 : |nrows| whole rows are extracted - cols and values are
    *                OUTPUTS filled with each row's stored columns/values,
    *                and ncols[i] may be adjusted to the actual row lengths.
    * Only locally owned rows are served; others produce a warning. */
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix);
   hypre_CSRMatrix *diag;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   hypre_CSRMatrix *offd;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_BigInt *col_map_offd;
   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(par_matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_Int i, j, n, ii, indx, pstart;
   HYPRE_Int num_procs, my_id;
   HYPRE_BigInt col_0, col_n, row, col_indx, first;
   HYPRE_Int row_local, row_size;
   HYPRE_Int warning = 0;
   HYPRE_Int *counter;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   /* Reading requires an assembled matrix; flag the error but note that
      execution still continues below (original behavior preserved). */
   if (assemble_flag == 0)
   {
      hypre_error_in_arg(1);
      if (print_level)
      {
         hypre_printf("Error! Matrix not assembled yet! HYPRE_IJMatrixGetValues\n");
      }
   }
   /* [col_0, col_n] is this rank's (zero-based) diag column range; 'first'
      restores the global numbering offset. */
   col_0 = col_starts[0];
   col_n = col_starts[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
   diag = hypre_ParCSRMatrixDiag(par_matrix);
   diag_i = hypre_CSRMatrixI(diag);
   diag_j = hypre_CSRMatrixJ(diag);
   diag_data = hypre_CSRMatrixData(diag);
   offd = hypre_ParCSRMatrixOffd(par_matrix);
   offd_i = hypre_CSRMatrixI(offd);
   /* NOTE(review): offd_j/offd_data/col_map_offd stay uninitialized on a
      single rank; the code appears to rely on the offd part being empty in
      that case so the loops that read them never execute - confirm. */
   if (num_procs > 1)
   {
      offd_j = hypre_CSRMatrixJ(offd);
      offd_data = hypre_CSRMatrixData(offd);
      col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
   }
   if (nrows < 0)
   {
      /* Whole-row extraction mode. */
      nrows = -nrows;
      /* counter[i] = offset into cols/values where row i's data begins;
         counter[nrows] is the caller-provided total capacity. */
      counter = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
      counter[0] = 0;
      for (i=0; i < nrows; i++)
      {
         counter[i+1] = counter[i]+ncols[i];
      }
      indx = 0;
      for (i=0; i < nrows; i++)
      {
         row = rows[i];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            row_size = diag_i[row_local+1] - diag_i[row_local] +
                       offd_i[row_local+1] - offd_i[row_local];
            /* Caller's buffer would overflow; flag the error (note: the
               copy below still proceeds - original behavior preserved). */
            if (counter[i]+row_size > counter[nrows])
            {
               hypre_error_in_arg(1);
               if (print_level)
               {
                  hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n");
               }
            }
            if (ncols[i] < row_size)
            {
               warning = 1;
            }
            /* Copy diag entries (translate local j back to global cols). */
            for (j = diag_i[row_local]; j < diag_i[row_local+1]; j++)
            {
               cols[indx] = (HYPRE_BigInt)diag_j[j] + col_0;
               values[indx++] = diag_data[j];
            }
            /* Copy offd entries (col_map_offd already holds global ids). */
            for (j = offd_i[row_local]; j < offd_i[row_local+1]; j++)
            {
               cols[indx] = col_map_offd[offd_j[j]];
               values[indx++] = offd_data[j];
            }
            counter[i+1] = indx;
         }
         else
         {
            if (print_level)
            {
               hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
            }
         }
      }
      /* Row lengths differed from what the caller expected: rewrite ncols. */
      if (warning)
      {
         for (i=0; i < nrows; i++)
         {
            ncols[i] = counter[i+1] - counter[i];
         }
         if (print_level)
         {
            hypre_printf ("Warning! ncols has been changed!\n");
         }
      }
      hypre_TFree(counter, HYPRE_MEMORY_HOST);
   }
   else
   {
      /* Individual-entry lookup mode: cols is an INPUT here. */
      indx = 0;
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (n == 0) /* empty row */
         {
            continue;
         }
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            for (i=0; i < n; i++)
            {
               col_indx = cols[indx] - first;
               /* Default to 0.0; overwritten below if the entry is stored. */
               values[indx] = 0.0;
               if (col_indx < col_0 || col_indx > col_n)
               /* search in offd */
               {
                  for (j=offd_i[row_local]; j < offd_i[row_local+1]; j++)
                  {
                     if (col_map_offd[offd_j[j]] == col_indx)
                     {
                        values[indx] = offd_data[j];
                        break;
                     }
                  }
               }
               else /* search in diag */
               {
                  col_indx = col_indx - col_0;
                  for (j=diag_i[row_local]; j < diag_i[row_local+1]; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)col_indx)
                     {
                        values[indx] = diag_data[j];
                        break;
                     }
                  }
               }
               indx++;
            }
         }
         else
         {
            if (print_level)
            {
               hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
            }
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetValuesParCSR
*
* sets values in an IJMatrix before assembly,
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix       *matrix,
                               HYPRE_Int             nrows,
                               HYPRE_Int            *ncols,
                               const HYPRE_BigInt   *rows,
                               const HYPRE_Int      *row_indexes,
                               const HYPRE_BigInt   *cols,
                               const HYPRE_Complex  *values )
{
   /* Set (overwrite) entries of an IJ matrix before assembly.
    *
    * nrows/ncols/rows  - number of rows touched and entries per row
    *                     (ncols == NULL means one entry per row)
    * row_indexes[ii]   - offset of row ii's data within cols/values
    * cols/values       - global column ids and the values to store
    *
    * Two regimes:
    *   - matrix already assembled: only existing entries may be overwritten;
    *     referencing a non-stored entry is an error;
    *   - not assembled: entries are inserted, either into per-row aux arrays
    *     (need_aux) or directly into the preallocated CSR structure.
    * Only locally owned rows are handled here (off-processor entries are the
    * business of AddToValues). Returns hypre_error_flag. */
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   //HYPRE_Int row_len;
   HYPRE_BigInt col_0, col_n, row;
   HYPRE_Int i, ii, j, n, not_found;
   //HYPRE_Int col_indx, cnt1;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_BigInt first;
   HYPRE_Int pstart;
   /*HYPRE_Int current_num_elmts;*/
   /*HYPRE_Int max_off_proc_elmts;*/
   //HYPRE_Int off_proc_i_indx;
   //HYPRE_BigInt *off_proc_i;
   //HYPRE_BigInt *off_proc_j;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   /*HYPRE_Complex *off_proc_data;*/
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   /* [col_0, col_n] is this rank's owned global column range; columns
      outside it belong to the offd part. */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
   /* Negative nrows is invalid input; flag it (execution still continues
      below - original behavior preserved). */
   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      if (print_level)
      {
         hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
      }
   }
   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
      /* Assembled regime: the sparsity pattern is fixed, so every (row,col)
         requested must already be stored; otherwise it is an error. */
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int num_cols_offd;
      HYPRE_Int j_offd;
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         /* processor owns the row */
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }
            size = diag_i[row_local+1] - diag_i[row_local] +
                   offd_i[row_local+1] - offd_i[row_local];
            /* More input entries than stored entries cannot all fit. */
            if (n > size) /* Should we change this and allow this?
                             This could be same column index, i.e. only last
                             value is set, previous ones overwritten. */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }
            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;
            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
               /* insert into offd */
               {
                  /* Map the global column to its compressed offd index. */
                  j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
                                                 num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* Relies on the diagonal entry being stored first in the
                     diag row (hypre's assembled-row layout). */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     /* return -1;*/
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] = values[indx];
               }
               else /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                     {
                        diag_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     /* return -1; */
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
      }
   }
   else
   {
      /* Pre-assembly regime: insert new entries (or overwrite ones set
         earlier in this phase). */
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         /* processor owns the row */
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            if (need_aux)
            {
               /* Row sizes unknown: collect entries in growable per-row
                  aux arrays (global column ids, unsorted). */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size; /* remaining free slots in this row */
               if (size < n)
               {
                  /* Not enough room: stage the possible overflow entries in
                     temporary buffers, merged after a realloc below. */
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size; /* reuse 'size' as the row's fill cursor */
               for (i=0; i < n; i++)
               {
                  /* Overwrite if the column was already set in this row. */
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        /* Row full: park the entry in the temp buffers. */
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }
               row_length[row_local] = size+tmp_indx;
               if (tmp_indx)
               {
                  /* Grow the row exactly to its new length and append the
                     overflow entries. */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size+tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }
               cnt = size;
               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }
               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               /* Exact row sizes were given: write straight into the
                  preallocated CSR arrays, using per-row fill cursors kept
                  in the aux matrix (IndxDiag/IndxOffd). */
               HYPRE_BigInt *big_offd_j;
               HYPRE_Int col_j;
               offd_indx =hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx =hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  /* offd columns are stored as global ids (BigJ) until
                     assembly maps them to compressed local indices. */
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                                hypre_CSRMatrixMemoryLocation(offd));
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;
               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     /* Overwrite if already present in this row's offd part. */
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           /* Row's declared offd size exceeded. */
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     col_j = (HYPRE_Int)(cols[indx]-col_0);
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           /* Row's declared diag size exceeded. */
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }
               /* Persist the advanced fill cursors for this row. */
               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetConstantValuesParCSR
*
* sets all values in an already assembled IJMatrix to a constant value.
*
*****************************************************************************/
void
hypre_IJMatrixSetConstantValuesParCSRHost( hypre_IJMatrix *matrix,
                                           HYPRE_Complex value )
{
   /* Host kernel: overwrite every stored entry of the assembled matrix
      (both diag and offd parts) with 'value'. */
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   hypre_CSRMatrix    *diag       = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix    *offd       = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Complex      *diag_data  = hypre_CSRMatrixData(diag);
   HYPRE_Complex      *offd_data  = hypre_CSRMatrixData(offd);
   HYPRE_Int           nnz_diag   = hypre_CSRMatrixNumNonzeros(diag);
   HYPRE_Int           nnz_offd   = hypre_CSRMatrixNumNonzeros(offd);
   HYPRE_Int           k;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < nnz_diag; k++)
   {
      diag_data[k] = value;
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < nnz_offd; k++)
   {
      offd_data[k] = value;
   }
}
HYPRE_Int
hypre_IJMatrixSetConstantValuesParCSR( hypre_IJMatrix *matrix,
                                       HYPRE_Complex value )
{
   /* Set every stored entry of an already assembled IJ matrix to 'value',
      dispatching to the device or host kernel.  Guard clause: the matrix
      must be assembled, since the kernels write the fixed CSR data arrays. */
   if (!hypre_IJMatrixAssembleFlag(matrix))
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        "Matrix not assembled! Required to set constant values!");
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)) == HYPRE_EXEC_DEVICE)
   {
      hypre_IJMatrixSetConstantValuesParCSRDevice(matrix, value);
   }
   else
#endif
   {
      hypre_IJMatrixSetConstantValuesParCSRHost(matrix, value);
   }

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
/* Accumulates (adds) coefficient values into an IJMatrix.
 *
 * For each of the nrows rows: entries belonging to a row owned by this
 * process are added in place -- into the assembled ParCSR structure (pattern
 * must already contain the entry), into the auxiliary row storage, or
 * directly into the CSR arrays being built, depending on assembly state.
 * Entries for rows owned by other processes are buffered in the aux
 * matrix's off-proc arrays and communicated later during assembly.
 *
 * row_indexes[ii] gives the offset into cols/values where row ii's data
 * starts; ncols may be NULL, in which case each row contributes 1 entry.
 * Returns hypre_error_flag (nonzero on error). */
HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix       *matrix,
                                 HYPRE_Int             nrows,
                                 HYPRE_Int            *ncols,
                                 const HYPRE_BigInt   *rows,
                                 const HYPRE_Int      *row_indexes,
                                 const HYPRE_BigInt   *cols,
                                 const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   HYPRE_BigInt row;
   HYPRE_BigInt col_0, col_n;
   HYPRE_Int i, ii, j, n, not_found;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_BigInt first;
   HYPRE_Int pstart;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   /* [col_0, col_n] is this process's inclusive column-ownership range;
    * columns outside it go into the offd part */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
   if (hypre_IJMatrixAssembleFlag(matrix))
   {
      /* Matrix is assembled: the sparsity pattern is fixed, so every
       * (row, col) added locally must already exist in diag/offd. */
      HYPRE_Int num_cols_offd;
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int j_offd;
      /* AB - 4/06 - need to get this object*/
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }
            /* total existing nonzeros in this local row (diag + offd) */
            size = diag_i[row_local+1] - diag_i[row_local] +
                   offd_i[row_local+1] - offd_i[row_local];
            if (n > size) /* Should we change this and allow this?
                             This could be same column index, i.e. only last
                             value is set, previous ones overwritten. */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }
            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;
            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
               /* insert into offd */
               {
                  /* map global column to local offd column index */
                  j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
                                                 num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                     /* return -1; */
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* by convention the diagonal entry is stored first in
                   * each diag row; it must be present */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] += values[indx];
               }
               else /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                     {
                        diag_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
         /* not my row */
         else
         {
            /* buffer the entries in the aux matrix's off-proc arrays;
             * they are sent to the owning process during assembly */
            if (!aux_matrix)
            {
               size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]);
               hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
               hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
               hypre_IJMatrixTranslator(matrix) = aux_matrix;
            }
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* first off-proc entry: allocate initial buffers
                * (off_proc_i holds (row, count) pairs, hence 2x) */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the buffers to make room for this row's entries */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            /* AB - 4/6 - the row should be negative to indicate an add */
            /* UMY - 12/28/09 - now positive since we eliminated the feature of
               setting on other processors */
            /* off_proc_i[off_proc_i_indx++] = row; */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }
   /* not assembled */
   else
   {
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            if (need_aux)
            {
               /* accumulate into the aux matrix's per-row arrays; entries
                * that exceed the current row allocation are staged in tmp_j/
                * tmp_data and appended after a realloc */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;
               if (size < n)
               {
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;
               for (i=0; i < n; i++)
               {
                  /* if the column already exists in this row, just add */
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }
               row_length[row_local] = size+tmp_indx;
               if (tmp_indx)
               {
                  /* grow the row and append the overflow entries */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size+tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }
               cnt = size;
               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }
               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               HYPRE_BigInt *big_offd_j;
               /* offd_indx/diag_indx: next free slot in this row */
               offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  /* offd columns are kept as global (big) indices until
                   * assembly compresses them */
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                                hypre_CSRMatrixMemoryLocation(offd));
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;
               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n",
                                           row);
                           }
                           /* return 1;*/
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     HYPRE_Int col_j = (HYPRE_Int)( cols[indx] - col_0);
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }
               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
         /* not my row */
         else
         {
            /* same off-proc buffering as in the assembled branch above */
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
                  max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
                  = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixDestroyParCSR
*
* frees an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
   /* Frees both the assembled ParCSR object and the auxiliary
    * (translator) matrix attached to this IJMatrix. */
   hypre_ParCSRMatrix    *par_csr = (hypre_ParCSRMatrix *)    hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux     = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   hypre_ParCSRMatrixDestroy(par_csr);
   hypre_AuxParCSRMatrixDestroy(aux);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleOffProcValsParCSR
*
* This is for handling set and get values calls to off-proc. entries -
* it is called from matrix assemble. There is an alternate version for
* when the assumed partition is being used.
*
*****************************************************************************/
/* Communicates buffered off-processor (row, col, value) triples to their
 * owning processes during assembly, using the assumed-partition data
 * exchange.  Each process first contacts the assumed owner of each row to
 * learn the actual owner, then packs rows/columns/values into a single
 * byte buffer per destination and exchanges them; received entries are
 * unpacked and applied via hypre_IJMatrixAddToValuesParCSR (host) or
 * hypre_IJMatrixSetAddValuesParCSRDevice (device). */
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix       *matrix,
                                         HYPRE_Int             off_proc_i_indx,
                                         HYPRE_Int             max_off_proc_elmts,
                                         HYPRE_Int             current_num_elmts,
                                         HYPRE_MemoryLocation  memory_location,
                                         HYPRE_BigInt         *off_proc_i,
                                         HYPRE_BigInt         *off_proc_j,
                                         HYPRE_Complex        *off_proc_data )
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int i, j, k, in_i;
   HYPRE_Int myid;
   HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
   HYPRE_Int max_response_size;
   HYPRE_BigInt global_num_cols;
   HYPRE_BigInt global_first_col;
   HYPRE_BigInt global_first_row;
   HYPRE_Int ex_num_contacts = 0, num_rows = 0;
   HYPRE_BigInt range_start, range_end;
   HYPRE_Int num_elements;
   HYPRE_Int storage;
   HYPRE_Int indx;
   HYPRE_BigInt row;
   HYPRE_Int num_ranges, row_index = 0;
   HYPRE_Int num_recvs;
   HYPRE_BigInt upper_bound;
   HYPRE_Int counter;
   HYPRE_Int num_real_procs;
   HYPRE_Int /*current_proc,*/ original_proc_indx;
   HYPRE_BigInt *row_list=NULL;
   HYPRE_Int *row_list_num_elements=NULL;
   HYPRE_Int *a_proc_id=NULL, *orig_order=NULL;
   HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
   HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
   HYPRE_BigInt *ex_contact_buf = NULL;
   HYPRE_Int *recv_starts=NULL;
   HYPRE_BigInt *response_buf = NULL;
   HYPRE_Int *response_buf_starts=NULL;
   HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL;
   HYPRE_Int *argsort_contact_procs = NULL;
   HYPRE_Int obj_size_bytes, complex_size;
   HYPRE_BigInt big_int_size;
   HYPRE_Int tmp_int;
   HYPRE_BigInt tmp_big_int;
   HYPRE_BigInt *col_ptr;
   HYPRE_BigInt *big_int_data = NULL;
   HYPRE_Int big_int_data_size = 0, complex_data_size = 0;
   void *void_contact_buf = NULL;
   void *index_ptr;
   void *recv_data_ptr;
   HYPRE_Complex tmp_complex;
   HYPRE_Complex *col_data_ptr;
   HYPRE_Complex *complex_data = NULL;
   hypre_DataExchangeResponse response_obj1, response_obj2;
   hypre_ProcListElements send_proc_obj;
   hypre_IJAssumedPart *apart;
   hypre_MPI_Comm_rank(comm, &myid);
   global_num_cols = hypre_IJMatrixGlobalNumCols(matrix);
   global_first_col = hypre_IJMatrixGlobalFirstCol(matrix);
   global_first_row = hypre_IJMatrixGlobalFirstRow(matrix);
   if (memory_location == HYPRE_MEMORY_DEVICE)
   {
      /* copy device-resident triples to host and expand off_proc_i into
       * the host (row, count) pair format with count == 1 per entry */
      HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST);
      HYPRE_BigInt *off_proc_i_h = hypre_TAlloc(HYPRE_BigInt, 2*current_num_elmts, HYPRE_MEMORY_HOST);
      HYPRE_BigInt *off_proc_j_h = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST);
      HYPRE_Complex *off_proc_data_h = hypre_TAlloc(HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(tmp, off_proc_i, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(off_proc_j_h, off_proc_j, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(off_proc_data_h, off_proc_data, HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
      for (i = 0; i < current_num_elmts; i++)
      {
         off_proc_i_h[2*i] = tmp[i];
         off_proc_i_h[2*i+1] = 1;
      }
      off_proc_i_indx = current_num_elmts * 2;
      off_proc_i = off_proc_i_h;
      off_proc_j = off_proc_j_h;
      off_proc_data = off_proc_data_h;
      hypre_TFree(tmp, HYPRE_MEMORY_HOST);
   }
   /* call hypre_IJMatrixAddToValuesParCSR directly inside this function
    * with one chunk of data */
   HYPRE_Int off_proc_nelm_recv_cur = 0;
   HYPRE_Int off_proc_nelm_recv_max = 0;
   HYPRE_BigInt *off_proc_i_recv = NULL;
   HYPRE_BigInt *off_proc_j_recv = NULL;
   HYPRE_Complex *off_proc_data_recv = NULL;
   HYPRE_BigInt *off_proc_i_recv_d = NULL;
   HYPRE_BigInt *off_proc_j_recv_d = NULL;
   HYPRE_Complex *off_proc_data_recv_d = NULL;
   /* off_proc_i stores (row, count) pairs, so #rows is half its length */
   num_rows = off_proc_i_indx/2;
   /* verify that we have created the assumed partition */
   if (hypre_IJMatrixAssumedPart(matrix) == NULL)
   {
      hypre_IJMatrixCreateAssumedPartition(matrix);
   }
   apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix);
   /*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL)
   {
      hypre_ParCSRMatrixCreateAssumedPartition(par_matrix);
   }
   apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/
   row_list = hypre_CTAlloc(HYPRE_BigInt, num_rows, HYPRE_MEMORY_HOST);
   row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   orig_order = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   /* get the assumed processor id for each row */
   if (num_rows > 0 )
   {
      for (i=0; i < num_rows; i++)
      {
         row = off_proc_i[i*2];
         //if (row < 0) row = -row - 1;
         row_list[i] = row;
         row_list_num_elements[i] = off_proc_i[i*2+1];
         hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
                                              global_num_cols, &proc_id);
         a_proc_id[i] = proc_id;
         orig_order[i] = i;
      }
      /* now we need to find the actual order of each row - sort on row -
         this will result in proc ids sorted also...*/
      hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, num_rows -1);
      /* calculate the number of contacts */
      ex_num_contacts = 1;
      last_proc = a_proc_id[0];
      for (i=1; i < num_rows; i++)
      {
         if (a_proc_id[i] > last_proc)
         {
            ex_num_contacts++;
            last_proc = a_proc_id[i];
         }
      }
   }
   /* now we will go through a create a contact list - need to contact assumed
      processors and find out who the actual row owner is - we will contact with
      a range (2 numbers) */
   ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST);
   ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1, HYPRE_MEMORY_HOST);
   ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts*2, HYPRE_MEMORY_HOST);
   counter = 0;
   range_end = -1;
   for (i=0; i< num_rows; i++)
   {
      if (row_list[i] > range_end)
      {
         /* assumed proc */
         proc_id = a_proc_id[i];
         /* end of prev. range */
         if (counter > 0)
         {
            ex_contact_buf[counter*2 - 1] = row_list[i-1];
         }
         /*start new range*/
         ex_contact_procs[counter] = proc_id;
         ex_contact_vec_starts[counter] = counter*2;
         ex_contact_buf[counter*2] = row_list[i];
         counter++;
         hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col, global_num_cols,
                                           &range_start, &range_end);
      }
   }
   /* finish the starts */
   ex_contact_vec_starts[counter] = counter*2;
   /* finish the last range */
   if (counter > 0)
   {
      ex_contact_buf[counter*2 - 1] = row_list[num_rows - 1];
   }
   /* don't allocate space for responses */
   /* create response object - can use same fill response as used in the commpkg
      routine */
   response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
   response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
   response_obj1.data2 = NULL;
   max_response_size = 6; /* 6 means we can fit 3 ranges*/
   /* first exchange: ask assumed owners for the real owner of each range */
   hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
                          ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
                          sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1,
                          comm, (void**) &response_buf, &response_buf_starts);
   /* now response_buf contains a proc_id followed by a range upper bound */
   hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST);
   /*how many ranges were returned?*/
   num_ranges = response_buf_starts[ex_num_contacts];
   num_ranges = num_ranges/2;
   prev_id = -1;
   j = 0;
   counter = 0;
   num_real_procs = 0;
   /* loop through ranges - create a list of actual processor ids*/
   for (i=0; i<num_ranges; i++)
   {
      upper_bound = response_buf[i*2+1];
      counter = 0;
      tmp_id = response_buf[i*2];
      /* loop through row_list entries - counting how many are in the range */
      while (j < num_rows && row_list[j] <= upper_bound)
      {
         real_proc_id[j] = tmp_id;
         j++;
         counter++;
      }
      if (counter > 0 && tmp_id != prev_id)
      {
         num_real_procs++;
      }
      prev_id = tmp_id;
   }
   /* now we have the list of real processor ids (real_proc_id) - and the number
      of distinct ones - so now we can set up data to be sent - we have
      HYPRE_Int data and HYPRE_Complex data. that we will need to pack
      together */
   /* first find out how many rows and elements we need to send per proc - so we
      can do storage */
   ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
   num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
   num_elements_total = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
   counter = 0;
   if (num_real_procs > 0 )
   {
      ex_contact_procs[0] = real_proc_id[0];
      num_rows_per_proc[0] = 1;
      num_elements_total[0] = row_list_num_elements[orig_order[0]];
      /* loop through real procs - these are sorted (row_list is sorted also)*/
      for (i=1; i < num_rows; i++)
      {
         if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
         {
            num_rows_per_proc[counter] += 1; /*another row */
            num_elements_total[counter] += row_list_num_elements[orig_order[i]];
         }
         else /* new processor */
         {
            counter++;
            ex_contact_procs[counter] = real_proc_id[i];
            num_rows_per_proc[counter] = 1;
            num_elements_total[counter] = row_list_num_elements[orig_order[i]];
         }
      }
   }
   /* to pack together, we need to use the largest obj. size of
      (HYPRE_Int) and (HYPRE_Complex) - if these are much different, then we are
      wasting some storage, but I do not think that it will be a
      large amount since this function should not be used on really
      large amounts of data anyway*/
   big_int_size = sizeof(HYPRE_BigInt);
   complex_size = sizeof(HYPRE_Complex);
   obj_size_bytes = hypre_max(big_int_size, complex_size);
   /* set up data to be sent to send procs */
   /* for each proc, ex_contact_buf contains #rows, row #,
      no. elements, col indices, col data, row #, no. elements, col
      indices, col data, etc. */
   /* first calculate total storage and make vec_starts arrays */
   storage = 0;
   ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST);
   /* starts are stored negative (offset by 1) so the packing loop below can
      detect "first visit to this proc" and write the row count */
   ex_contact_vec_starts[0] = -1;
   for (i=0; i < num_real_procs; i++)
   {
      storage += 1 + 2 * num_rows_per_proc[i] + 2* num_elements_total[i];
      ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */
   }
   hypre_TFree(num_elements_total, HYPRE_MEMORY_HOST);
   /*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
   void_contact_buf = hypre_CTAlloc(char, storage*obj_size_bytes, HYPRE_MEMORY_HOST);
   index_ptr = void_contact_buf; /* step through with this index */
   /* for each proc: #rows, row #, no. elements,
      col indices, col data, row #, no. elements, col indices, col data, etc. */
   /* un-sort real_proc_id - we want to access data arrays in order, so
      cheaper to do this*/
   us_real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
   for (i=0; i < num_rows; i++)
   {
      us_real_proc_id[orig_order[i]] = real_proc_id[i];
   }
   hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST);
   counter = 0; /* index into data arrays */
   prev_id = -1;
   for (i=0; i < num_rows; i++)
   {
      proc_id = us_real_proc_id[i];
      /* can't use row list[i] - you lose the negative signs that differentiate
         add/set values */
      row = off_proc_i[i*2];
      num_elements = row_list_num_elements[i];
      /* find position of this processor */
      indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
      in_i = ex_contact_vec_starts[indx];
      index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes);
      /* first time for this processor - add the number of rows to the buffer */
      if (in_i < 0)
      {
         in_i = -in_i - 1;
         /* re-calc. index_ptr since in_i was negative */
         index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes);
         tmp_int = num_rows_per_proc[indx];
         hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
         in_i++;
      }
      /* add row # */
      hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in_i++;
      /* add number of elements */
      hypre_TMemcpy( index_ptr, &num_elements, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in_i++;
      /* now add col indices */
      for (j=0; j< num_elements; j++)
      {
         tmp_big_int = off_proc_j[counter+j]; /* col number */
         hypre_TMemcpy( index_ptr, &tmp_big_int, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
         in_i ++;
      }
      /* now add data */
      for (j=0; j< num_elements; j++)
      {
         tmp_complex = off_proc_data[counter++]; /* value */
         hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
         in_i++;
      }
      /* increment the indexes to keep track of where we are - we
       * adjust below to be actual starts*/
      ex_contact_vec_starts[indx] = in_i;
   }
   /* some clean up */
   hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST);
   hypre_TFree(orig_order, HYPRE_MEMORY_HOST);
   hypre_TFree(row_list, HYPRE_MEMORY_HOST);
   hypre_TFree(row_list_num_elements, HYPRE_MEMORY_HOST);
   hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST);
   /* shift the running end-positions down one slot to turn them into
      actual start offsets */
   for (i=num_real_procs; i > 0; i--)
   {
      ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1];
   }
   ex_contact_vec_starts[0] = 0;
   /* now send the data */
   /***********************************/
   /* first get the integer info in send_proc_obj */
   /* the response we expect is just a confirmation*/
   response_buf = NULL;
   response_buf_starts = NULL;
   /*build the response object*/
   /* use the send_proc_obj for the info kept from contacts */
   /*estimate initial storage allocation */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = num_real_procs + 5;
   send_proc_obj.id =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = storage + 20;
   send_proc_obj.v_elements =
      hypre_TAlloc(char, obj_size_bytes*send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
   response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
   response_obj2.data1 = NULL;
   response_obj2.data2 = &send_proc_obj;
   max_response_size = 0;
   /* second exchange: ship the packed row/col/value buffers to the real
      owners; received messages accumulate in send_proc_obj */
   hypre_DataExchangeList(num_real_procs, ex_contact_procs,
                          void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
                          0, &response_obj2, max_response_size, 2,
                          comm, (void **) &response_buf, &response_buf_starts);
   hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
   /* Now we can unpack the send_proc_objects and call set
      and add to values functions. We unpack messages in a
      deterministic order, using processor rank */
   num_recvs = send_proc_obj.length;
   argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
   for(i=0; i < num_recvs; i++)
   {
      argsort_contact_procs[i] = i;
   }
   /* This sorts the id array, but the original indices are stored in
    * argsort_contact_procs */
   hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs-1 );
   /* alias */
   recv_data_ptr = send_proc_obj.v_elements;
   recv_starts = send_proc_obj.vec_starts;
   for (i=0; i < num_recvs; i++)
   {
      /* Find the current processor in order, and reset recv_data_ptr to that processor's message */
      original_proc_indx = argsort_contact_procs[i];
      /*current_proc = send_proc_obj.id[i];*/
      indx = recv_starts[original_proc_indx];
      recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx*obj_size_bytes);
      /* get the number of rows for this recv */
      hypre_TMemcpy( &num_rows, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
      indx++;
      for (j=0; j < num_rows; j++) /* for each row: unpack info */
      {
         /* row # */
         hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;
         /* num elements for this row */
         hypre_TMemcpy( &num_elements, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;
         /* col indices */ /* Need to check this again !!!! */
         if (big_int_size == obj_size_bytes)
         {
            /* elements are already naturally aligned - alias in place */
            col_ptr = (HYPRE_BigInt *) recv_data_ptr;
            recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes);
         }
         else /* copy data */
         {
            if (big_int_data_size < num_elements)
            {
               big_int_data = hypre_TReAlloc(big_int_data, HYPRE_BigInt, num_elements + 10, HYPRE_MEMORY_HOST);
            }
            for (k=0; k< num_elements; k++)
            {
               hypre_TMemcpy( &big_int_data[k], recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
               recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
            }
            col_ptr = big_int_data;
         }
         /* col data */
         if (complex_size == obj_size_bytes)
         {
            col_data_ptr = (HYPRE_Complex *) recv_data_ptr;
            recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes);
         }
         else /* copy data */
         {
            if (complex_data_size < num_elements)
            {
               complex_data =
                  hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10, HYPRE_MEMORY_HOST);
            }
            for (k=0; k< num_elements; k++)
            {
               hypre_TMemcpy( &complex_data[k], recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
               recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
            }
            col_data_ptr = complex_data;
         }
         if (memory_location == HYPRE_MEMORY_HOST)
         {
            /* apply this row's received entries immediately */
            hypre_IJMatrixAddToValuesParCSR(matrix, 1, &num_elements, &row, &row_index, col_ptr, col_data_ptr);
         }
         else
         {
            /* device path: accumulate all received entries into host
               staging arrays, applied in one batch after the loop */
            HYPRE_Int nelm_new = off_proc_nelm_recv_cur + num_elements;
            if (nelm_new > off_proc_nelm_recv_max)
            {
               off_proc_nelm_recv_max = nelm_new * 2;
               off_proc_i_recv = hypre_TReAlloc(off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
               off_proc_j_recv = hypre_TReAlloc(off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
               off_proc_data_recv = hypre_TReAlloc(off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
            }
            HYPRE_Int i;
            for (i = 0; i < num_elements; i++)
            {
               off_proc_i_recv[off_proc_nelm_recv_cur + i] = row;
            }
            hypre_TMemcpy(off_proc_j_recv + off_proc_nelm_recv_cur, col_ptr, HYPRE_BigInt, num_elements,
                          HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
            hypre_TMemcpy(off_proc_data_recv + off_proc_nelm_recv_cur, col_data_ptr, HYPRE_Complex, num_elements,
                          HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
            off_proc_nelm_recv_cur = nelm_new;
         }
         indx += (num_elements*2);
      }
   }
   if (memory_location == HYPRE_MEMORY_DEVICE)
   {
      /* push the staged entries to the device and apply them in one call */
      off_proc_i_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
      off_proc_j_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
      off_proc_data_recv_d = hypre_TAlloc(HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(off_proc_i_recv_d, off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(off_proc_j_recv_d, off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(off_proc_data_recv_d, off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_cur,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      hypre_IJMatrixSetAddValuesParCSRDevice(matrix, off_proc_nelm_recv_cur, NULL, off_proc_i_recv_d, NULL, off_proc_j_recv_d,
                                             off_proc_data_recv_d, "add");
#endif
   }
   hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
   hypre_TFree(argsort_contact_procs, HYPRE_MEMORY_HOST);
   if (big_int_data)
   {
      hypre_TFree(big_int_data, HYPRE_MEMORY_HOST);
   }
   if (complex_data)
   {
      hypre_TFree(complex_data, HYPRE_MEMORY_HOST);
   }
   if (memory_location == HYPRE_MEMORY_DEVICE)
   {
      /* these were the host copies made at function entry */
      hypre_TFree(off_proc_i, HYPRE_MEMORY_HOST);
      hypre_TFree(off_proc_j, HYPRE_MEMORY_HOST);
      hypre_TFree(off_proc_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(off_proc_i_recv, HYPRE_MEMORY_HOST);
   hypre_TFree(off_proc_j_recv, HYPRE_MEMORY_HOST);
   hypre_TFree(off_proc_data_recv, HYPRE_MEMORY_HOST);
   hypre_TFree(off_proc_i_recv_d, HYPRE_MEMORY_DEVICE);
   hypre_TFree(off_proc_j_recv_d, HYPRE_MEMORY_DEVICE);
   hypre_TFree(off_proc_data_recv_d, HYPRE_MEMORY_DEVICE);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------
* hypre_FillResponseIJOffProcVals
* Fill response function for the previous function (2nd data exchange)
*--------------------------------------------------------------------*/
HYPRE_Int
hypre_FillResponseIJOffProcVals(void *p_recv_contact_buf,
HYPRE_Int contact_size,
HYPRE_Int contact_proc,
void *ro,
MPI_Comm comm,
void **p_send_response_buf,
HYPRE_Int *response_message_size )
{
HYPRE_Int myid;
HYPRE_Int index, count, elength;
HYPRE_Int object_size;
void *index_ptr;
hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*) ro;
hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*) response_obj->data2;
/* contact data is untyped (indices and values interleaved by the caller),
 * so size each slot to hold the larger of the two element types */
object_size = hypre_max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex));
hypre_MPI_Comm_rank(comm, &myid );
/*check to see if we need to allocate more space in send_proc_obj for vec starts
 * and id */
if (send_proc_obj->length == send_proc_obj->storage_length)
{
send_proc_obj->storage_length +=20; /*add space for 20 more contact*/
send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts,HYPRE_Int,
send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
if( send_proc_obj->id != NULL)
{
send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
}
}
/*initialize*/
count = send_proc_obj->length;
index = send_proc_obj->vec_starts[count]; /* current number of elements */
if( send_proc_obj->id != NULL)
{
send_proc_obj->id[count] = contact_proc;
}
/*do we need more storage for the elements?*/
if (send_proc_obj->element_storage_length < index + contact_size)
{
elength = hypre_max(contact_size, 100); /* grow by at least 100 slots to amortize reallocs */
elength += index;
send_proc_obj->v_elements = hypre_TReAlloc((char*)send_proc_obj->v_elements,
char, elength*object_size, HYPRE_MEMORY_HOST);
send_proc_obj->element_storage_length = elength;
}
/*populate send_proc_obj*/
/* append the raw contact bytes at the current end of v_elements;
 * note v_elements is addressed in bytes (char) scaled by object_size */
index_ptr = (void *) ((char *) send_proc_obj->v_elements + index*object_size);
hypre_TMemcpy(index_ptr, p_recv_contact_buf , char, object_size*contact_size, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
send_proc_obj->vec_starts[count+1] = index + contact_size;
send_proc_obj->length++;
/* output - no message to return (confirmation) */
*response_message_size = 0;
return hypre_error_flag;
}
/*--------------------------------------------------------------------*/
HYPRE_Int hypre_FindProc(HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length)
{
   /* Binary search over a partitioning array with (list_length + 1) entries:
    * returns the index p in [0, list_length) such that
    * list[p] <= value < list[p+1], or -1 when value lies outside
    * [list[0], list[list_length]).  (Reading list[list_length] is
    * intentional: partitioning arrays carry one extra end marker.) */
   HYPRE_Int low, high, m;

   low  = 0;
   high = list_length;
   if (value >= list[high] || value < list[low])
   {
      return -1;
   }

   /* invariant: list[low] <= value < list[high] */
   while (low + 1 < high)
   {
      /* overflow-safe midpoint; (low + high)/2 could overflow HYPRE_Int */
      m = low + (high - low) / 2;
      if (value < list[m])
      {
         high = m;
      }
      else
      {
         /* value >= list[m] necessarily holds here, so no second test needed
          * (the original redundant "else if (value >= list[m])" is removed) */
         low = m;
      }
   }

   return low;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleParCSR
*
* assembles IJMatrix from AuxParCSRMatrix auxiliary structure
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
HYPRE_Int *diag_j;
HYPRE_Int *offd_j = NULL;
HYPRE_Complex *diag_data;
HYPRE_Complex *offd_data = NULL;
HYPRE_Int i, j, j0;
HYPRE_Int num_cols_offd;
HYPRE_Int *diag_pos;
HYPRE_BigInt *col_map_offd;
HYPRE_Int *rownnz;
HYPRE_Int *row_length;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int my_id, num_procs;
HYPRE_Int num_rows;
HYPRE_Int num_rownnz;
HYPRE_Int i_diag, i_offd;
HYPRE_BigInt col_0, col_n;
HYPRE_Int nnz_offd;
HYPRE_BigInt *big_offd_j;
HYPRE_BigInt *tmp_j;
HYPRE_Complex temp;
HYPRE_BigInt base = hypre_IJMatrixGlobalFirstCol(matrix);
HYPRE_Int off_proc_i_indx;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int current_num_elmts;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int offd_proc_elmts;
//HYPRE_Int new_off_proc_i_indx;
//HYPRE_Int cancel_indx;
//HYPRE_Int col_indx;
//HYPRE_Int current_indx;
//HYPRE_Int current_i;
//HYPRE_Int row_len;
HYPRE_Int max_num_threads;
HYPRE_Int aux_flag, aux_flag_global;
HYPRE_ANNOTATE_FUNC_BEGIN;
max_num_threads = hypre_NumThreads();
/* first find out if anyone has an aux_matrix, and create one if you don't
 * have one, but other procs do */
aux_flag = 0;
aux_flag_global = 0;
if (aux_matrix)
{
aux_flag = 1;
}
hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
if (aux_flag_global && (!aux_flag))
{
hypre_MPI_Comm_rank(comm, &my_id);
/* NOTE(review): this indexes the partitioning by my_id, while the
 * assembly branch below uses row_partitioning[1] - row_partitioning[0];
 * verify which partitioning convention (global vs. local 2-entry array)
 * this build uses */
num_rows = (HYPRE_Int)(row_partitioning[my_id+1] - row_partitioning[my_id]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
if (aux_matrix)
{
/* first delete all cancelled elements */
/*cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
if (cancel_indx)
{
current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
col_indx = 0;
current_i = 0;
current_indx = 0;
new_off_proc_i_indx = off_proc_i_indx;
for (i=0; i < off_proc_i_indx; i= i+2)
{
row_len = off_proc_i[i+1];
for (j=0; j < off_proc_i[i+1]; j++)
{
if (off_proc_j[col_indx] == -1)
{
col_indx++;
row_len--;
current_num_elmts--;
}
else
{
off_proc_j[current_indx] = off_proc_j[col_indx];
off_proc_data[current_indx++] = off_proc_data[col_indx++];
}
}
if (row_len)
{
off_proc_i[current_i] = off_proc_i[i];
off_proc_i[current_i+1] = row_len;
current_i += 2;
}
else
{
new_off_proc_i_indx -= 2;
}
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = new_off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
}*/
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
hypre_MPI_SUM, comm);
/* at least one rank holds entries for rows it does not own:
 * exchange them so each owner can apply them locally */
if (offd_proc_elmts)
{
max_off_proc_elmts=hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
hypre_IJMatrixAssembleOffProcValsParCSR(
matrix,off_proc_i_indx, max_off_proc_elmts, current_num_elmts,
HYPRE_MEMORY_HOST,
off_proc_i, off_proc_j, off_proc_data);
}
}
/* only move data into the ParCSR structure on the first assembly */
if (hypre_IJMatrixAssembleFlag(matrix) == 0)
{
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
/* move data into ParCSRMatrix if not there already */
if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
{
HYPRE_Int *diag_array;
HYPRE_Int *offd_array;
/* Update nonzero rows of aux_matrix */
hypre_AuxParCSRMatrixSetRownnz(aux_matrix);
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
num_rownnz = hypre_AuxParCSRMatrixLocalNumRownnz(aux_matrix);
rownnz = hypre_AuxParCSRMatrixRownnz(aux_matrix);
diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
diag_pos = hypre_TAlloc(HYPRE_Int, num_rownnz, HYPRE_MEMORY_HOST);
i_diag = i_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, j, i_diag, i_offd)
#endif
{
HYPRE_BigInt *local_j;
HYPRE_Complex *local_data;
HYPRE_Int ii, rest, size, ns, ne;
HYPRE_Int num_threads, my_thread_num;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
/* partition the num_rownnz nonzero rows contiguously among threads:
 * each thread handles rows [ns, ne) */
size = num_rownnz/num_threads;
rest = num_rownnz - size*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(size + 1);
ne = (my_thread_num+1)*(size + 1);
}
else
{
ns = my_thread_num*size + rest;
ne = (my_thread_num+1)*size + rest;
}
/* first pass: count diag/offd entries per thread and remember where
 * the diagonal entry sits in each row (diag_pos) */
i_diag = i_offd = 0;
for (i = ns; i < ne; i++)
{
ii = rownnz ? rownnz[i] : i;
local_j = aux_j[ii];
local_data = aux_data[ii];
diag_pos[i] = -1;
for (j = 0; j < row_length[ii]; j++)
{
if (local_j[j] < col_0 || local_j[j] > col_n)
{
i_offd++;
}
else
{
i_diag++;
if ((HYPRE_Int)(local_j[j] - col_0) == i)
{
diag_pos[i] = j;
}
}
}
}
diag_array[my_thread_num] = i_diag;
offd_array[my_thread_num] = i_offd;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* thread 0 turns the per-thread counts into inclusive prefix sums and
 * (re)allocates the CSR arrays at their final sizes */
if (my_thread_num == 0)
{
i_diag = 0;
i_offd = 0;
for (i = 0; i < num_threads; i++)
{
i_diag += diag_array[i];
i_offd += offd_array[i];
diag_array[i] = i_diag;
offd_array[i] = i_offd;
}
diag_i[num_rows] = i_diag;
offd_i[num_rows] = i_offd;
hypre_TFree(hypre_CSRMatrixJ(diag), hypre_CSRMatrixMemoryLocation(diag));
hypre_TFree(hypre_CSRMatrixData(diag), hypre_CSRMatrixMemoryLocation(diag));
hypre_TFree(hypre_CSRMatrixJ(offd), hypre_CSRMatrixMemoryLocation(offd));
hypre_TFree(hypre_CSRMatrixData(offd), hypre_CSRMatrixMemoryLocation(offd));
hypre_TFree(hypre_CSRMatrixBigJ(offd), hypre_CSRMatrixMemoryLocation(offd));
diag_j = hypre_CTAlloc(HYPRE_Int, i_diag, hypre_CSRMatrixMemoryLocation(diag));
diag_data = hypre_CTAlloc(HYPRE_Complex, i_diag, hypre_CSRMatrixMemoryLocation(diag));
offd_j = hypre_CTAlloc(HYPRE_Int, i_offd, hypre_CSRMatrixMemoryLocation(offd));
offd_data = hypre_CTAlloc(HYPRE_Complex, i_offd, hypre_CSRMatrixMemoryLocation(offd));
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, i_offd, hypre_CSRMatrixMemoryLocation(offd));
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* each thread writes its slice starting at the prefix-sum offset of
 * the previous thread */
if (my_thread_num)
{
i_diag = diag_array[my_thread_num-1];
i_offd = offd_array[my_thread_num-1];
}
else
{
i_diag = 0;
i_offd = 0;
}
/* second pass: copy entries; the diagonal entry (diag_pos) is placed
 * first in its diag row */
for (i = ns; i < ne; i++)
{
ii = rownnz ? rownnz[i] : i;
diag_i[ii] = i_diag;
offd_i[ii] = i_offd;
local_j = aux_j[ii];
local_data = aux_data[ii];
if (diag_pos[i] > -1)
{
diag_j[i_diag] = (HYPRE_Int)(local_j[diag_pos[i]] - col_0);
diag_data[i_diag++] = local_data[diag_pos[i]];
}
for (j = 0; j < row_length[ii]; j++)
{
if (local_j[j] < col_0 || local_j[j] > col_n)
{
big_offd_j[i_offd] = local_j[j];
offd_data[i_offd++] = local_data[j];
}
else if (j != diag_pos[i])
{
diag_j[i_diag] = (HYPRE_Int)(local_j[j] - col_0);
diag_data[i_diag++] = local_data[j];
}
}
}
/* Correct diag_i and offd_i */
/* fill in row pointers for the empty rows skipped by the rownnz map */
if (rownnz != NULL)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = ns; i < (ne-1); i++)
{
for (ii = rownnz[i] + 1; ii < rownnz[i+1]; ii++)
{
diag_i[ii] = diag_i[rownnz[i+1]];
offd_i[ii] = offd_i[rownnz[i+1]];
}
}
if (my_thread_num < (num_threads - 1))
{
for (ii = rownnz[ne-1] + 1; ii < rownnz[ne]; ii++)
{
diag_i[ii] = diag_i[rownnz[ne]];
offd_i[ii] = offd_i[rownnz[ne]];
}
}
else
{
for (ii = rownnz[ne-1] + 1; ii < num_rows; ii++)
{
diag_i[ii] = diag_i[num_rows];
offd_i[ii] = offd_i[num_rows];
}
}
}
} /* end parallel region */
hypre_TFree(diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(offd_array, HYPRE_MEMORY_HOST);
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_data;
hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
if (offd_i[num_rows] > 0)
{
hypre_CSRMatrixJ(offd) = offd_j;
hypre_CSRMatrixBigJ(offd) = big_offd_j;
hypre_CSRMatrixData(offd) = offd_data;
}
hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
hypre_TFree(diag_pos, HYPRE_MEMORY_HOST);
}
else
{
/* move diagonal element into first space */
big_offd_j = hypre_CSRMatrixBigJ(offd);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private (i,j,j0,temp)
#endif
for (i = 0; i < num_rows; i++)
{
j0 = diag_i[i];
for (j = j0; j < diag_i[i+1]; j++)
{
/* swap the diagonal entry into the first position of its row */
if (diag_j[j] == i)
{
temp = diag_data[j0];
diag_data[j0] = diag_data[j];
diag_data[j] = temp;
diag_j[j] = diag_j[j0];
diag_j[j0] = i;
break;
}
}
}
offd_j = hypre_CSRMatrixJ(offd);
if (!offd_j && offd_i[num_rows])
{
offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[num_rows], hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixJ(offd) = offd_j;
}
}
/* generate the nonzero rows inside offd and diag by calling */
hypre_CSRMatrixSetRownnz(diag);
hypre_CSRMatrixSetRownnz(offd);
/* generate col_map_offd */
nnz_offd = offd_i[num_rows];
if (nnz_offd)
{
/* sort-unique the global offd columns to build the local->global map */
tmp_j = hypre_CTAlloc(HYPRE_BigInt, nnz_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < nnz_offd; i++)
{
tmp_j[i] = big_offd_j[i];
}
hypre_BigQsort0(tmp_j,0,nnz_offd-1);
num_cols_offd = 1;
for (i = 0; i < nnz_offd-1; i++)
{
if (tmp_j[i+1] > tmp_j[i])
{
tmp_j[num_cols_offd++] = tmp_j[i+1];
}
}
col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < num_cols_offd; i++)
{
col_map_offd[i] = tmp_j[i];
}
/* translate global offd column indices into local ones */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i)
#endif
for (i = 0; i < nnz_offd; i++)
{
offd_j[i] = hypre_BigBinarySearch(col_map_offd,big_offd_j[i],num_cols_offd);
}
if (base)
{
for (i = 0; i < num_cols_offd; i++)
{
col_map_offd[i] -= base;
}
}
hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd;
hypre_CSRMatrixNumCols(offd) = num_cols_offd;
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(big_offd_j, hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = NULL;
}
hypre_IJMatrixAssembleFlag(matrix) = 1;
}
/* the auxiliary structure is no longer needed once assembled */
hypre_AuxParCSRMatrixDestroy(aux_matrix);
hypre_IJMatrixTranslator(matrix) = NULL;
HYPRE_ANNOTATE_FUNC_END;
return hypre_error_flag;
}
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixSetValuesOMPParCSR
*
* sets values in an IJMatrix before assembly,
* use of this routine requires that the values in rows are different from each
* other, i.e rows[i] != rows[j] for i != j
* to ensure accurate threading
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, first;
//HYPRE_Int cancel_indx;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *big_offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int pstart;
/*HYPRE_Int current_num_elmts;*/
/*HYPRE_Int max_off_proc_elmts;*/
//HYPRE_Int off_proc_i_indx;
//HYPRE_BigInt *off_proc_i;
//HYPRE_BigInt *off_proc_j;
//HYPRE_Int *offproc_cnt;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
//HYPRE_Int max_num_threads;
HYPRE_Int error_flag = 0;
/*HYPRE_Complex *off_proc_data;*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
//max_num_threads = hypre_NumThreads();
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
//offproc_cnt = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
/* pstart = 0: the partitioning arrays are read only at [0] and [1],
 * i.e. the local row/column range of this rank */
pstart = 0;
if (nrows < 0)
{
hypre_error_in_arg(2);
if (print_level)
{
hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
}
return hypre_error_flag;
}
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
{
/* assembled path: only existing entries may be overwritten in place */
HYPRE_BigInt *col_map_offd;
HYPRE_Int num_cols_offd;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
/*if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
}*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int j_offd;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
//HYPRE_Int row_len;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
/* divide the nrows input rows contiguously among threads: [ns, ne);
 * this is safe only because callers guarantee distinct rows[] entries
 * (see the header comment above the function) */
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
size = diag_i[row_local+1] - diag_i[row_local]
+ offd_i[row_local+1] - offd_i[row_local];
if (n > size)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
break;
/*return hypre_error_flag; */
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag; */
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
/* assembly guarantees the diagonal entry sits first in its row */
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag; */
}
diag_data[pos_diag] = values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
}
indx++;
}
}
/* processor does not own the row */
//else /*search for previous occurrences and cancel them */
/*{
if (aux_matrix)
{
col_indx = 0;
for (i=0; i < off_proc_i_indx; i=i+2)
{
row_len = off_proc_i[i+1];
if (off_proc_i[i] == row)
{
for (j=0; j < n; j++)
{
cnt1 = col_indx;
for (k=0; k < row_len; k++)
{
if (off_proc_j[cnt1] == cols[j])
{
off_proc_j[cnt1++] = -1;
offproc_cnt[my_thread_num]++; */
/*cancel_indx++;*/
/* if no repetition allowed */
/* off_proc_j[col_indx] = -1;
col_indx -= k;
break; */
/*}
else
{
cnt1++;
}
}
}
col_indx += row_len;
}
else
{
col_indx += row_len;
}
}*/
/*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
//}
//}
}
} /*end parallel region */
}
else /* matrix not assembled */
{
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
/*if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
}*/
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
/* need_aux: entries go to per-row aux arrays; otherwise they are
 * written straight into the pre-sized ParCSR diag/offd storage */
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
}
else
{
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
offd_data = hypre_CSRMatrixData(offd);
big_offd_j = hypre_CSRMatrixBigJ(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_BigInt *tmp_j = NULL;
HYPRE_BigInt *local_j = NULL;
HYPRE_Complex *tmp_data = NULL;
HYPRE_Complex *local_data = NULL;
HYPRE_Int tmp_indx;
//HYPRE_Int row_len;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
HYPRE_Int old_size, space, cnt;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
/* same contiguous thread partition of the input rows as above */
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
if (need_aux)
{
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
/* not enough free space in this row: stage the overflow
 * in tmp arrays and realloc once afterwards */
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
/* overwrite an existing entry with the same column, if any */
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt,
size+tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
/* append the staged overflow entries */
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int offd_space, diag_space;
HYPRE_Int cnt_diag, cnt_offd;
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
else /* insert into diag */
{
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0);
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
indx++;
}
/* record how far this row has been filled for later calls */
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* processor does not own the row */
/*else
{
if (aux_matrix)
{
col_indx = 0;
for (i=0; i < off_proc_i_indx; i=i+2)
{
row_len = off_proc_i[i+1];
if (off_proc_i[i] == row)
{
for (j=0; j < n; j++)
{
cnt1 = col_indx;
for (k=0; k < row_len; k++)
{
if (off_proc_j[cnt1] == cols[j])
{
off_proc_j[cnt1++] = -1; */
/*cancel_indx++;*/
//offproc_cnt[my_thread_num]++;
/* if no repetition allowed */
/* off_proc_j[col_indx] = -1;
col_indx -= k;
break; */
/* }
else
{
cnt1++;
}
}
}
col_indx += row_len;
}
else
{
col_indx += row_len;
}
}*/
/*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
/*}
}*/
}
} /* end parallel region */
}
/*if (error_flag)
{
return hypre_error_flag;
}
if (aux_matrix)
{
for (i1=0; i1 < max_num_threads; i1++)
{
cancel_indx += offproc_cnt[i1];
}
hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;
}*/
//hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesOMPParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, first;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int pstart;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *big_offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int current_num_elmts;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int off_proc_i_indx;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int **offproc_cnt;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
HYPRE_Int max_num_threads;
HYPRE_Int error_flag = 0;
HYPRE_Int i1;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
max_num_threads = hypre_NumThreads();
par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads, HYPRE_MEMORY_HOST);
for (i1=0; i1 < max_num_threads; i1++)
offproc_cnt[i1] = NULL;
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
pstart = 0;
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */
{
HYPRE_Int num_cols_offd;
HYPRE_BigInt *col_map_offd;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int j_offd;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
HYPRE_Int *my_offproc_cnt = NULL;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
size = diag_i[row_local+1] - diag_i[row_local]
+ offd_i[row_local+1] - offd_i[row_local];
if (n > size)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
break;
/*return hypre_error_flag; */
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
diag_data[pos_diag] += values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
}
indx++;
}
}
/* not my row */
/* need to find solution for threaded version!!!! */
/* could save row number and process later .... */
else
{
if (!my_offproc_cnt)
{
my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] = 200;
my_offproc_cnt[1] = 2;
}
i = my_offproc_cnt[1];
if (i+2 < my_offproc_cnt[0])
{
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
else
{
size = my_offproc_cnt[0];
my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST);
my_offproc_cnt[0] += 200;
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
}
}
} /* end parallel region */
}
/* not assembled */
else
{
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
}
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
}
else
{
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
big_offd_j = hypre_CSRMatrixBigJ(offd);
offd_data = hypre_CSRMatrixData(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_BigInt *tmp_j = NULL;
HYPRE_BigInt *local_j = NULL;
HYPRE_Complex *tmp_data = NULL;
HYPRE_Complex *local_data = NULL;
HYPRE_Int tmp_indx;
HYPRE_Int row_local;
HYPRE_BigInt row;
HYPRE_Int i, j, ii, n;
HYPRE_Int not_found, size, indx;
HYPRE_Int old_size, space, cnt;
HYPRE_Int *my_offproc_cnt = NULL;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
/* compute local row number */
if (need_aux)
{
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt,
size+tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int offd_space, diag_space;
HYPRE_Int cnt_diag, cnt_offd;
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
else /* insert into diag */
{
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0);
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
indx++;
}
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* not my row */
else
{
if (!my_offproc_cnt)
{
my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] = 200;
my_offproc_cnt[1] = 2;
}
i = my_offproc_cnt[1];
if (i+2 < my_offproc_cnt[0])
{
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
else
{
size = my_offproc_cnt[0];
my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST);
my_offproc_cnt[0] += 200;
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
}
}
} /*end parallel region */
}
if (error_flag)
{
return hypre_error_flag;
}
if (!aux_matrix)
{
HYPRE_Int size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
for (i1 = 0; i1 < max_num_threads; i1++)
{
if (offproc_cnt[i1])
{
HYPRE_Int *my_offproc_cnt = offproc_cnt[i1];
HYPRE_Int i, i2, ii, n, indx;
HYPRE_BigInt row;
for (i2 = 2; i2 < my_offproc_cnt[1]; i2+=2)
{
ii = my_offproc_cnt[i2];
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = my_offproc_cnt[i2+1];
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
max_off_proc_elmts
= hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
if (!max_off_proc_elmts)
{
max_off_proc_elmts = hypre_max(n,1000);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(aux_matrix)
= hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
}
else if (current_num_elmts + n > max_off_proc_elmts)
{
max_off_proc_elmts += 3*n;
off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
= max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
}
off_proc_i[off_proc_i_indx++] = row;
off_proc_i[off_proc_i_indx++] = n;
for (i=0; i < n; i++)
{
off_proc_j[current_num_elmts] = cols[indx];
off_proc_data[current_num_elmts++] = values[indx++];
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
}
hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST);
}
}
hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
|
temporal_min_method.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Suneth Warnakulasuriya (https://github.com/sunethwarna)
//
#if !defined(KRATOS_TEMPORAL_MIN_METHOD_H_INCLUDED)
#define KRATOS_TEMPORAL_MIN_METHOD_H_INCLUDED
// System includes
#include <limits>
#include <string>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
// Application includes
#include "custom_methods/temporal_method.h"
#include "custom_utilities/method_utilities.h"
#include "custom_utilities/temporal_method_utilities.h"
namespace Kratos
{
///@addtogroup StatisticsApplication
///@{
///@name Kratos Globals
///@{
namespace TemporalMethods
{
/// @brief Temporal statistics method that tracks the running minimum of a
///        variable's norm over time, together with the simulation time at
///        which that minimum occurred.
///
/// @tparam TContainerType        Container holding the processed items.
/// @tparam TContainerItemType    Item type stored in the container.
/// @tparam TDataRetrievalFunctor Functor template used to read input values from an item.
/// @tparam TDataStorageFunctor   Functor template used to access output values on an item.
template <class TContainerType, class TContainerItemType, template <class T> class TDataRetrievalFunctor, template <class T> class TDataStorageFunctor>
class TemporalMinMethod
{
public:
    /// Norm-based min method: reduces the input variable to a scalar via the
    /// configured norm, then tracks the temporal minimum of that scalar.
    template <class TDataType>
    class NormMethod : public TemporalMethod
    {
    public:
        KRATOS_CLASS_POINTER_DEFINITION(NormMethod);

        /// @brief Constructor.
        /// @param rModelPart            Model part whose data container is processed.
        /// @param rNormType             Name of the norm applied to the input variable.
        /// @param rInputVariable        Variable whose norm minimum is tracked.
        /// @param EchoLevel             Verbosity level for informational output.
        /// @param rOutputVariable       Variable receiving the running minimum.
        /// @param rMinTimeValueVariable Variable receiving the time of the minimum.
        NormMethod(
            ModelPart& rModelPart,
            const std::string& rNormType,
            const Variable<TDataType>& rInputVariable,
            const int EchoLevel,
            const Variable<double>& rOutputVariable,
            const Variable<double>& rMinTimeValueVariable)
            : TemporalMethod(rModelPart, EchoLevel),
              mNormType(rNormType),
              mrInputVariable(rInputVariable),
              mrOutputVariable(rOutputVariable),
              mrMinTimeValueVariable(rMinTimeValueVariable)
        {
            KRATOS_TRY

            // The minimum and its time stamp must be stored in two distinct
            // variables; otherwise one would overwrite the other.
            KRATOS_ERROR_IF(rOutputVariable == rMinTimeValueVariable)
                << "Same variable is given for minimum and its time value "
                   "variable in norm min method for input variable "
                << rInputVariable.Name()
                << ". Please provide two different "
                   "variables. [ variable = "
                << rOutputVariable.Name() << " ].\n";

            KRATOS_CATCH("");
        }

        /// For every item in the container, compute the norm of the input
        /// variable; if it is smaller than the currently stored minimum,
        /// record it together with the current total time.
        void CalculateStatistics() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            const auto& norm_method =
                MethodUtilities::GetNormMethod(mrInputVariable, mNormType);

            const double total_time = this->GetTotalTime();
            const int number_of_items = r_container.size();
            // Items are independent, so the update runs in parallel.
#pragma omp parallel for
            for (int i = 0; i < number_of_items; ++i)
            {
                TContainerItemType& r_item = *(r_container.begin() + i);
                const TDataType& r_input_value =
                    TDataRetrievalFunctor<TContainerItemType>()(r_item, mrInputVariable);
                const double input_norm_value = norm_method(r_input_value);
                double& r_output_value =
                    TDataStorageFunctor<TContainerItemType>()(r_item, mrOutputVariable);
                double& r_min_time_value = TDataStorageFunctor<TContainerItemType>()(
                    r_item, mrMinTimeValueVariable);

                // Keep the smallest norm seen so far and the time it occurred.
                if (input_norm_value < r_output_value)
                {
                    r_output_value = input_norm_value;
                    r_min_time_value = total_time;
                }
            }

            KRATOS_INFO_IF("TemporalNormMinMethod", this->GetEchoLevel() > 1)
                << "Calculated temporal norm min for " << mrInputVariable.Name()
                << " input variable with " << mrOutputVariable.Name()
                << " min variable and " << mrMinTimeValueVariable.Name()
                << " time value variable for " << this->GetModelPart().Name() << ".\n";
        }

        /// Initialize output variables: the minimum starts at the largest
        /// representable double so any first sample replaces it; the time
        /// value starts at zero.
        void InitializeStatisticsVariables() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            auto& initializer_method =
                TemporalMethodUtilities::InitializeVariables<TContainerType, TContainerItemType, TDataStorageFunctor>;
            initializer_method(
                r_container, mrOutputVariable, std::numeric_limits<double>::max());
            initializer_method(r_container, mrMinTimeValueVariable, 0.0);

            KRATOS_INFO_IF("TemporalNormMinMethod", this->GetEchoLevel() > 0)
                << "Initialized temporal norm min method for "
                << mrInputVariable.Name() << " input variable with "
                << mrOutputVariable.Name() << " min variable and "
                << mrMinTimeValueVariable.Name() << " time value variable for "
                << this->GetModelPart().Name() << ".\n";
        }

    private:
        const std::string mNormType;                    // name of the norm to apply
        const Variable<TDataType>& mrInputVariable;     // variable whose norm is minimized
        const Variable<double>& mrOutputVariable;       // stores the running minimum
        const Variable<double>& mrMinTimeValueVariable; // stores the time of the minimum
    };

    /// @brief Factory: build one NormMethod instance per entry of the
    ///        "input_variables" list in Params.
    /// @param rModelPart Model part passed to each created method.
    /// @param rNormType  Norm name; "none" is rejected since min requires a scalar norm.
    /// @param EchoLevel  Verbosity forwarded to the created methods.
    /// @param Params     Parameters holding "input_variables", "output_variables"
    ///                   and "output_time_step_variables" string arrays.
    /// @return List of created temporal method objects.
    std::vector<TemporalMethod::Pointer> static CreateTemporalMethodObject(
        ModelPart& rModelPart, const std::string& rNormType, const int EchoLevel, Parameters Params)
    {
        KRATOS_TRY

        Parameters default_parameters = Parameters(R"(
{
"input_variables" : [],
"output_variables" : [],
"output_time_step_variables" : []
})");
        Params.RecursivelyValidateAndAssignDefaults(default_parameters);

        const std::vector<std::string>& input_variable_names_list =
            Params["input_variables"].GetStringArray();
        const std::vector<std::string>& output_variable_1_names_list =
            Params["output_variables"].GetStringArray();
        const std::vector<std::string>& output_variable_2_names_list =
            Params["output_time_step_variables"].GetStringArray();

        std::vector<TemporalMethod::Pointer> method_list;
        if (rNormType == "none") // for non norm types
        {
            KRATOS_ERROR << "none norm type is not defined for Min method.\n";
        }
        else // for values with norms
        {
            // Both output lists must contain double-valued variables.
            MethodUtilities::CheckVariableType<double>(output_variable_1_names_list);
            MethodUtilities::CheckVariableType<double>(output_variable_2_names_list);

            const int number_of_variables = input_variable_names_list.size();
            for (int i = 0; i < number_of_variables; ++i)
            {
                const std::string& r_variable_input_name = input_variable_names_list[i];
                const std::string& r_variable_1_output_name =
                    output_variable_1_names_list[i];
                const std::string& r_variable_2_output_name =
                    output_variable_2_names_list[i];
                ADD_TEMPORAL_NORM_METHOD_TWO_OUTPUT_VARIABLE_OBJECT(
                    rModelPart, rNormType, r_variable_input_name, EchoLevel,
                    r_variable_1_output_name, r_variable_2_output_name, method_list, NormMethod)
            }
        }
        return method_list;

        KRATOS_CATCH("");
    }
};
} // namespace TemporalMethods
} // namespace Kratos
#endif // KRATOS_TEMPORAL_MIN_METHOD_H_INCLUDED |
weightedNorm2Many.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Accumulates the weighted sum of squares of a multi-field array:
//   cpu_wa[0] = sum over all fields f and points i of cpu_w[i] * cpu_a[i + f*offset]^2
// The weight array cpu_w is shared by all fields (indexed by point only).
// Note: the result is the squared quantity; no square root is taken here.
// Nblocks is accepted for interface uniformity but not used by this kernel.
extern "C" void FUNC(weightedNorm2Many)(const dlong & Nblocks, const dlong & N,
                                        const dlong & Nfields,
                                        const dlong & offset,
                                        const dfloat * __restrict__ cpu_w,
                                        const dfloat * __restrict__ cpu_a,
                                        dfloat * __restrict__ cpu_wa){

  dfloat acc = 0;

#ifdef __NEKRS__OMP__
  #pragma omp parallel for collapse(2) reduction(+:acc)
#endif
  for(int fld = 0; fld < Nfields; ++fld){
    for(int pt = 0; pt < N; ++pt){
      const dfloat value  = cpu_a[pt + fld*offset];
      const dfloat weight = cpu_w[pt];
      acc += weight*value*value;
    }
  }

  cpu_wa[0] = acc;
}
|
BrightnessTemperatureBox.c |
// Re-write of find_HII_bubbles.c for being accessible within the MCMC
int ComputeBrightnessTemp(float redshift, struct UserParams *user_params, struct CosmoParams *cosmo_params,
struct AstroParams *astro_params, struct FlagOptions *flag_options,
struct TsBox *spin_temp, struct IonizedBox *ionized_box,
struct PerturbedField *perturb_field, struct BrightnessTemp *box) {
int status;
Try{ // Try block around whole function.
LOG_DEBUG("Starting Brightness Temperature calculation for redshift %f", redshift);
// Makes the parameter structs visible to a variety of functions/macros
// Do each time to avoid Python garbage collection issues
Broadcast_struct_global_PS(user_params,cosmo_params);
Broadcast_struct_global_UF(user_params,cosmo_params);
char wisdom_filename[500];
int i, ii, j, k, n_x, n_y, n_z;
float k_x, k_y, k_z;
double ave;
ave = 0.;
omp_set_num_threads(user_params->N_THREADS);
float *v = (float *) calloc(HII_TOT_FFT_NUM_PIXELS,sizeof(float));
float *vel_gradient = (float *) calloc(HII_TOT_FFT_NUM_PIXELS,sizeof(float));
float *x_pos = calloc(astro_params->N_RSD_STEPS,sizeof(float));
float *x_pos_offset = calloc(astro_params->N_RSD_STEPS,sizeof(float));
float **delta_T_RSD_LOS = (float **)calloc(user_params->N_THREADS,sizeof(float *));
for(i=0;i<user_params->N_THREADS;i++) {
delta_T_RSD_LOS[i] = (float *)calloc(user_params->HII_DIM,sizeof(float));
}
#pragma omp parallel shared(v,perturb_field) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
*((float *)v + HII_R_FFT_INDEX(i,j,k)) = perturb_field->velocity[HII_R_INDEX(i,j,k)];
}
}
}
}
float d1_low, d1_high, d2_low, d2_high, gradient_component, min_gradient_component, subcell_width, x_val1, x_val2, subcell_displacement;
float RSD_pos_new, RSD_pos_new_boundary_low,RSD_pos_new_boundary_high, fraction_within, fraction_outside, cell_distance;
double dvdx, max_v_deriv;
float const_factor, T_rad, pixel_Ts_factor, pixel_x_HI, pixel_deltax, H;
init_ps();
T_rad = T_cmb*(1+redshift);
H = hubble(redshift);
const_factor = 27 * (cosmo_params->OMb*cosmo_params->hlittle*cosmo_params->hlittle/0.023) *
sqrt( (0.15/(cosmo_params->OMm)/(cosmo_params->hlittle)/(cosmo_params->hlittle)) * (1.+redshift)/10.0 );
/////////////////////////////// END INITIALIZATION /////////////////////////////////////////////
LOG_DEBUG("Performed Initialization.");
// ok, lets fill the delta_T box; which will be the same size as the bubble box
#pragma omp parallel shared(const_factor,perturb_field,ionized_box,box,redshift,spin_temp,T_rad) \
private(i,j,k,pixel_deltax,pixel_x_HI,pixel_Ts_factor) num_threads(user_params->N_THREADS)
{
#pragma omp for reduction(+:ave)
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
pixel_deltax = perturb_field->density[HII_R_INDEX(i,j,k)];
pixel_x_HI = ionized_box->xH_box[HII_R_INDEX(i,j,k)];
box->brightness_temp[HII_R_INDEX(i,j,k)] = const_factor*pixel_x_HI*(1+pixel_deltax);
if (flag_options->USE_TS_FLUCT) {
if(flag_options->SUBCELL_RSD) {
// Converting the prefactors into the optical depth, tau. Factor of 1000 is the conversion of spin temperature from K to mK
box->brightness_temp[HII_R_INDEX(i,j,k)] *= (1. + redshift)/(1000.*spin_temp->Ts_box[HII_R_INDEX(i,j,k)]);
}
else {
pixel_Ts_factor = (1 - T_rad / spin_temp->Ts_box[HII_R_INDEX(i,j,k)]);
box->brightness_temp[HII_R_INDEX(i,j,k)] *= pixel_Ts_factor;
}
}
ave += box->brightness_temp[HII_R_INDEX(i,j,k)];
}
}
}
}
LOG_DEBUG("Filled delta_T.");
if(isfinite(ave)==0) {
LOG_ERROR("Average brightness temperature is infinite or NaN!");
// Throw(ParameterError);
Throw(InfinityorNaNError);
}
ave /= (float)HII_TOT_NUM_PIXELS;
x_val1 = 0.;
x_val2 = 1.;
subcell_width = (user_params->BOX_LEN/(float)user_params->HII_DIM)/(float)(astro_params->N_RSD_STEPS);
// now write out the delta_T box
if (global_params.T_USE_VELOCITIES){
ave = 0.;
memcpy(vel_gradient, v, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS);
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, vel_gradient);
#pragma omp parallel shared(vel_gradient) private(n_x,n_y,n_z,k_x,k_y,k_z) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (n_x=0; n_x<user_params->HII_DIM; n_x++){
if (n_x>HII_MIDDLE)
k_x =(n_x-user_params->HII_DIM) * DELTA_K; // wrap around for FFT convention
else
k_x = n_x * DELTA_K;
for (n_y=0; n_y<user_params->HII_DIM; n_y++){
if (n_y>HII_MIDDLE)
k_y =(n_y-user_params->HII_DIM) * DELTA_K;
else
k_y = n_y * DELTA_K;
for (n_z=0; n_z<=HII_MIDDLE; n_z++){
k_z = n_z * DELTA_K;
// take partial deriavative along the line of sight
*((fftwf_complex *) vel_gradient + HII_C_INDEX(n_x,n_y,n_z)) *= k_z*I/(float)HII_TOT_NUM_PIXELS;
}
}
}
}
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, vel_gradient);
// now add the velocity correction to the delta_T maps (only used for T_S >> T_CMB case).
max_v_deriv = fabs(global_params.MAX_DVDR*H);
if(flag_options->SUBCELL_RSD) {
// now add the velocity correction to the delta_T maps
min_gradient_component = 1.0;
#pragma omp parallel shared(vel_gradient,T_rad,redshift,spin_temp,box,max_v_deriv) \
private(i,j,k,gradient_component,dvdx) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
gradient_component = fabs(vel_gradient[HII_R_FFT_INDEX(i,j,k)]/H + 1.0);
if(flag_options->USE_TS_FLUCT) {
// Calculate the brightness temperature, using the optical depth
if(gradient_component < FRACT_FLOAT_ERR) {
// Gradient component goes to zero, optical depth diverges.
// But, since we take exp(-tau), this goes to zero and (1 - exp(-tau)) goes to unity.
// Again, factors of 1000. are conversions from K to mK
box->brightness_temp[HII_R_INDEX(i,j,k)] = 1000.*(spin_temp->Ts_box[HII_R_INDEX(i,j,k)] - T_rad)/(1. + redshift);
}
else {
box->brightness_temp[HII_R_INDEX(i,j,k)] = (1. - exp(- box->brightness_temp[HII_R_INDEX(i,j,k)]/gradient_component ))*\
1000.*(spin_temp->Ts_box[HII_R_INDEX(i,j,k)] - T_rad)/(1. + redshift);
}
}
else {
dvdx = vel_gradient[HII_R_FFT_INDEX(i,j,k)];
// set maximum allowed gradient for this linear approximation
if (fabs(dvdx) > max_v_deriv){
if (dvdx < 0) dvdx = -max_v_deriv;
else dvdx = max_v_deriv;
// nonlin_ct++;
}
box->brightness_temp[HII_R_INDEX(i,j,k)] /= (dvdx/H + 1.0);
}
}
}
}
}
// normalised units of cell length. 0 equals beginning of cell, 1 equals end of cell
// These are the sub-cell central positions (x_pos_offset), and the corresponding normalised value (x_pos) between 0 and 1
for(ii=0;ii<astro_params->N_RSD_STEPS;ii++) {
x_pos_offset[ii] = subcell_width*(float)ii + subcell_width/2.;
x_pos[ii] = x_pos_offset[ii]/( user_params->BOX_LEN/(float)user_params->HII_DIM );
}
// Note to convert the velocity v, to a displacement in redshift space, convert from s -> r + (1+z)*v/H(z)
// To convert the velocity within the array v to km/s, it is a*dD/dt*delta. Where the scale factor a comes from the continuity equation
// The array v as defined in 21cmFAST is (ik/k^2)*dD/dt*delta, as it is defined as a comoving quantity (scale factor is implicit).
// However, the conversion between real and redshift space also picks up a scale factor, therefore the scale factors drop out and therefore
// the displacement of the sub-cells is purely determined from the array, v and the Hubble factor: v/H.
#pragma omp parallel shared(delta_T_RSD_LOS,box,ionized_box,v,x_val1,x_val2,x_pos,x_pos_offset,subcell_width) \
private(i,j,k,ii,d1_low,d2_low,d1_high,d2_high,subcell_displacement,RSD_pos_new,\
RSD_pos_new_boundary_low,RSD_pos_new_boundary_high,cell_distance,fraction_outside,fraction_within) \
num_threads(user_params->N_THREADS)
{
#pragma omp for reduction(+:ave)
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
// Generate the optical-depth for the specific line-of-sight with R.S.D
for(k=0;k<user_params->HII_DIM;k++) {
delta_T_RSD_LOS[omp_get_thread_num()][k] = 0.0;
}
for (k=0; k<user_params->HII_DIM; k++){
if((fabs(box->brightness_temp[HII_R_INDEX(i,j,k)]) >= FRACT_FLOAT_ERR) && \
(ionized_box->xH_box[HII_R_INDEX(i,j,k)] >= FRACT_FLOAT_ERR)) {
if(k==0) {
d1_low = v[HII_R_FFT_INDEX(i,j,user_params->HII_DIM-1)]/H;
d2_low = v[HII_R_FFT_INDEX(i,j,k)]/H;
}
else {
d1_low = v[HII_R_FFT_INDEX(i,j,k-1)]/H;
d2_low = v[HII_R_FFT_INDEX(i,j,k)]/H;
}
// Displacements (converted from velocity) for the original cell centres straddling half of the sub-cells (cell after)
if(k==(user_params->HII_DIM-1)) {
d1_high = v[HII_R_FFT_INDEX(i,j,k)]/H;
d2_high = v[HII_R_FFT_INDEX(i,j,0)]/H;
}
else {
d1_high = v[HII_R_FFT_INDEX(i,j,k)]/H;
d2_high = v[HII_R_FFT_INDEX(i,j,k+1)]/H;
}
for(ii=0;ii<astro_params->N_RSD_STEPS;ii++) {
// linearly interpolate the displacements to determine the corresponding displacements of the sub-cells
// Checking of 0.5 is for determining if we are left or right of the mid-point
// of the original cell (for the linear interpolation of the displacement)
// to use the appropriate cell
if(x_pos[ii] <= 0.5) {
subcell_displacement = d1_low + ( (x_pos[ii] + 0.5 ) - x_val1)*( d2_low - d1_low )/( x_val2 - x_val1 );
}
else {
subcell_displacement = d1_high + ( (x_pos[ii] - 0.5 ) - x_val1)*( d2_high - d1_high )/( x_val2 - x_val1 );
}
// The new centre of the sub-cell post R.S.D displacement.
// Normalised to units of cell width for determining it's displacement
RSD_pos_new = (x_pos_offset[ii] + subcell_displacement)/( user_params->BOX_LEN/((float)user_params->HII_DIM) );
// The sub-cell boundaries of the sub-cell, for determining the fractional
// contribution of the sub-cell to neighbouring cells when
// the sub-cell straddles two cell positions
RSD_pos_new_boundary_low = RSD_pos_new - (subcell_width/2.)/( user_params->BOX_LEN/((float)user_params->HII_DIM) );
RSD_pos_new_boundary_high = RSD_pos_new + (subcell_width/2.)/( user_params->BOX_LEN/((float)user_params->HII_DIM) );
if(RSD_pos_new_boundary_low >= 0.0 && RSD_pos_new_boundary_high < 1.0) {
// sub-cell has remained in the original cell (just add it back to the original cell)
delta_T_RSD_LOS[omp_get_thread_num()][k] += box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
else if(RSD_pos_new_boundary_low < 0.0 && RSD_pos_new_boundary_high < 0.0) {
// sub-cell has moved completely into a new cell (toward the observer)
// determine how far the sub-cell has moved in units of original cell boundary
cell_distance = ceil(fabs(RSD_pos_new_boundary_low))-1.;
// Determine the location of the sub-cell relative to the original cell binning
if(fabs(RSD_pos_new_boundary_high) > cell_distance) {
// sub-cell is entirely contained within the new cell (just add it to the new cell)
// check if the new cell position is at the edge of the box. If so, periodic boundary conditions
if(k<((int)cell_distance+1)) {
delta_T_RSD_LOS[omp_get_thread_num()][k-((int)cell_distance+1) + user_params->HII_DIM] += \
box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
else {
delta_T_RSD_LOS[omp_get_thread_num()][k-((int)cell_distance+1)] += \
box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
}
else {
// sub-cell is partially contained within the cell
// Determine the fraction of the sub-cell which is in either of the two original cells
fraction_outside = (fabs(RSD_pos_new_boundary_low) - cell_distance)\
/(subcell_width/( user_params->BOX_LEN/((float)user_params->HII_DIM) ));
fraction_within = 1. - fraction_outside;
// Check if the first part of the sub-cell is at the box edge
if(k<(((int)cell_distance))) {
delta_T_RSD_LOS[omp_get_thread_num()][k-((int)cell_distance) + user_params->HII_DIM] += \
fraction_within*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
else {
delta_T_RSD_LOS[omp_get_thread_num()][k-((int)cell_distance)] += \
fraction_within*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
// Check if the second part of the sub-cell is at the box edge
if(k<(((int)cell_distance + 1))) {
delta_T_RSD_LOS[omp_get_thread_num()][k-((int)cell_distance+1) + user_params->HII_DIM] += \
fraction_outside*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
else {
delta_T_RSD_LOS[omp_get_thread_num()][k-((int)cell_distance+1)] += \
fraction_outside*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
}
}
else if(RSD_pos_new_boundary_low < 0.0 && (RSD_pos_new_boundary_high > 0.0 && RSD_pos_new_boundary_high < 1.0)) {
// sub-cell has moved partially into a new cell (toward the observer)
// Determine the fraction of the sub-cell which is in either of the two original cells
fraction_within = RSD_pos_new_boundary_high/(subcell_width/( user_params->BOX_LEN/((float)user_params->HII_DIM) ));
fraction_outside = 1. - fraction_within;
// Check the periodic boundaries conditions and move the fraction of each sub-cell to the appropriate new cell
if(k==0) {
delta_T_RSD_LOS[omp_get_thread_num()][user_params->HII_DIM-1] += \
fraction_outside*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
delta_T_RSD_LOS[omp_get_thread_num()][k] += \
fraction_within*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
else {
delta_T_RSD_LOS[omp_get_thread_num()][k-1] += \
fraction_outside*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
delta_T_RSD_LOS[omp_get_thread_num()][k] += \
fraction_within*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
}
else if((RSD_pos_new_boundary_low >= 0.0 && RSD_pos_new_boundary_low < 1.0) && (RSD_pos_new_boundary_high >= 1.0)) {
// sub-cell has moved partially into a new cell (away from the observer)
// Determine the fraction of the sub-cell which is in either of the two original cells
fraction_outside = (RSD_pos_new_boundary_high - 1.)/(subcell_width/( user_params->BOX_LEN/((float)user_params->HII_DIM) ));
fraction_within = 1. - fraction_outside;
// Check the periodic boundaries conditions and move the fraction of each sub-cell to the appropriate new cell
if(k==(user_params->HII_DIM-1)) {
delta_T_RSD_LOS[omp_get_thread_num()][k] += \
fraction_within*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
delta_T_RSD_LOS[omp_get_thread_num()][0] += \
fraction_outside*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
else {
delta_T_RSD_LOS[omp_get_thread_num()][k] += \
fraction_within*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
delta_T_RSD_LOS[omp_get_thread_num()][k+1] += \
fraction_outside*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
}
else {
// sub-cell has moved completely into a new cell (away from the observer)
// determine how far the sub-cell has moved in units of original cell boundary
cell_distance = floor(fabs(RSD_pos_new_boundary_high));
if(RSD_pos_new_boundary_low >= cell_distance) {
// sub-cell is entirely contained within the new cell (just add it to the new cell)
// check if the new cell position is at the edge of the box. If so, periodic boundary conditions
if(k>(user_params->HII_DIM - 1 - (int)cell_distance)) {
delta_T_RSD_LOS[omp_get_thread_num()][k+(int)cell_distance - user_params->HII_DIM] += \
box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
else {
delta_T_RSD_LOS[omp_get_thread_num()][k+(int)cell_distance] += \
box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
}
else {
// sub-cell is partially contained within the cell
// Determine the fraction of the sub-cell which is in either of the two original cells
fraction_outside = (RSD_pos_new_boundary_high - cell_distance)/(subcell_width/( user_params->BOX_LEN/((float)user_params->HII_DIM) ));
fraction_within = 1. - fraction_outside;
// Check if the first part of the sub-cell is at the box edge
if(k>(user_params->HII_DIM - 1 - ((int)cell_distance-1))) {
delta_T_RSD_LOS[omp_get_thread_num()][k+(int)cell_distance-1 - user_params->HII_DIM] += \
fraction_within*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
else {
delta_T_RSD_LOS[omp_get_thread_num()][k+(int)cell_distance-1] += \
fraction_within*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
// Check if the second part of the sub-cell is at the box edge
if(k>(user_params->HII_DIM - 1 - ((int)cell_distance))) {
delta_T_RSD_LOS[omp_get_thread_num()][k+(int)cell_distance - user_params->HII_DIM] += \
fraction_outside*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
else {
delta_T_RSD_LOS[omp_get_thread_num()][k+(int)cell_distance] += \
fraction_outside*box->brightness_temp[HII_R_INDEX(i,j,k)]/((float)astro_params->N_RSD_STEPS);
}
}
}
}
}
}
for(k=0;k<user_params->HII_DIM;k++) {
box->brightness_temp[HII_R_INDEX(i,j,k)] = delta_T_RSD_LOS[omp_get_thread_num()][k];
ave += delta_T_RSD_LOS[omp_get_thread_num()][k];
}
}
}
}
ave /= (float)HII_TOT_NUM_PIXELS;
}
else {
#pragma omp parallel shared(vel_gradient,box) private(i,j,k,dvdx) num_threads(user_params->N_THREADS)
{
#pragma omp for reduction(+:ave)
for (i=0; i<user_params->HII_DIM; i++){
for (j=0; j<user_params->HII_DIM; j++){
for (k=0; k<user_params->HII_DIM; k++){
dvdx = vel_gradient[HII_R_FFT_INDEX(i,j,k)];
// set maximum allowed gradient for this linear approximation
if (fabs(dvdx) > max_v_deriv){
if (dvdx < 0) dvdx = -max_v_deriv;
else dvdx = max_v_deriv;
// nonlin_ct++;
}
box->brightness_temp[HII_R_INDEX(i,j,k)] /= (dvdx/H + 1.0);
ave += box->brightness_temp[HII_R_INDEX(i,j,k)];
}
}
}
}
ave /= (HII_TOT_NUM_PIXELS+0.0);
}
}
LOG_DEBUG("Included velocities.");
if(isfinite(ave)==0) {
LOG_ERROR("Average brightness temperature (after including velocities) is infinite or NaN!");
// Throw(ParameterError);
Throw(InfinityorNaNError);
}
LOG_DEBUG("z = %.2f, ave Tb = %e", redshift, ave);
free(v);
free(vel_gradient);
free(x_pos);
free(x_pos_offset);
for(i=0;i<user_params->N_THREADS;i++) {
free(delta_T_RSD_LOS[i]);
}
free(delta_T_RSD_LOS);
fftwf_cleanup_threads();
fftwf_cleanup();
LOG_DEBUG("Cleaned up.");
} // End of try
Catch(status){
return(status);
}
return(0);
}
|
lsh_index.h | /***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
* Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
/***********************************************************************
* Author: Vincent Rabaud
*************************************************************************/
#ifndef FLANN_LSH_INDEX_H_
#define FLANN_LSH_INDEX_H_
#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
#include <map>
#include <vector>
#include "flann/general.h"
#include "flann/algorithms/nn_index.h"
#include "flann/util/matrix.h"
#include "flann/util/result_set.h"
#include "flann/util/heap.h"
#include "flann/util/lsh_table.h"
#include "flann/util/allocator.h"
#include "flann/util/random.h"
#include "flann/util/saving.h"
namespace flann
{
/** Parameter set selecting and configuring the LSH index algorithm. */
struct LshIndexParams : public IndexParams
{
    LshIndexParams(unsigned int table_number = 12, unsigned int key_size = 20, unsigned int multi_probe_level = 2)
    {
        // Tag this parameter set as belonging to the LSH index type.
        (*this)["algorithm"] = FLANN_INDEX_LSH;
        (*this)["table_number"] = table_number;           // how many hash tables to build
        (*this)["key_size"] = key_size;                   // bit length of the hash key
        (*this)["multi_probe_level"] = multi_probe_level; // probe depth (0 = standard LSH)
    }
};
/**
* Locality-sensitive hashing index
*
* Contains the tables and other information for indexing a set of points
* for nearest-neighbor matching.
*/
/**
 * Locality-sensitive hashing index.
 *
 * Holds `table_number_` hash tables plus the XOR masks used for
 * multi-probe LSH, and answers k-NN queries by probing neighboring
 * buckets of each table.
 */
template<typename Distance>
class LshIndex : public NNIndex<Distance>
{
public:
    typedef typename Distance::ElementType ElementType;
    typedef typename Distance::ResultType DistanceType;
    typedef NNIndex<Distance> BaseClass;

    /** Constructor
     * @param params parameters passed to the LSH algorithm
     * @param d the distance used
     */
    LshIndex(const IndexParams& params = LshIndexParams(), Distance d = Distance()) :
        BaseClass(params, d)
    {
        table_number_ = get_param<unsigned int>(index_params_,"table_number",12);
        key_size_ = get_param<unsigned int>(index_params_,"key_size",20);
        multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2);

        // Precompute the bucket-key XOR masks used by multi-probe search.
        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);
    }

    /** Constructor
     * @param input_data dataset with the input features
     * @param params parameters passed to the LSH algorithm
     * @param d the distance used
     */
    LshIndex(const Matrix<ElementType>& input_data, const IndexParams& params = LshIndexParams(), Distance d = Distance()) :
        BaseClass(params, d)
    {
        table_number_ = get_param<unsigned int>(index_params_,"table_number",12);
        key_size_ = get_param<unsigned int>(index_params_,"key_size",20);
        multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2);

        fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);

        setDataset(input_data);
    }

    /** Copy constructor: duplicates the hash tables and multi-probe setup. */
    LshIndex(const LshIndex& other) : BaseClass(other),
        tables_(other.tables_),
        table_number_(other.table_number_),
        key_size_(other.key_size_),
        multi_probe_level_(other.multi_probe_level_),
        xor_masks_(other.xor_masks_)
    {
    }

    /** Copy-and-swap assignment (parameter taken by value on purpose). */
    LshIndex& operator=(LshIndex other)
    {
        this->swap(other);
        return *this;
    }

    virtual ~LshIndex()
    {
        freeIndex();
    }

    /** Polymorphic copy used by the generic index machinery. */
    BaseClass* clone() const
    {
        return new LshIndex(*this);
    }

    using BaseClass::buildIndex;

    /** Adds points to the index.
     * If the dataset has grown past `size_at_build_ * rebuild_threshold`
     * the whole index is rebuilt; otherwise the new points are appended
     * to every existing hash table.
     */
    void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
    {
        assert(points.cols==veclen_);
        size_t old_size = size_;

        extendDataset(points);

        if (rebuild_threshold>1 && size_at_build_*rebuild_threshold<size_) {
            buildIndex();
        }
        else {
            for (unsigned int t = 0; t < table_number_; ++t) {
                lsh::LshTable<ElementType>& table = tables_[t];
                // FIX: the original inner loop declared a second `i` that
                // shadowed the outer loop counter; use distinct names for
                // the table index and the point index.
                for (size_t idx = old_size; idx < size_; ++idx) {
                    table.add(idx, points_[idx]);
                }
            }
        }
    }

    flann_algorithm_t getType() const
    {
        return FLANN_INDEX_LSH;
    }

    /** (De)serializes the LSH configuration and hash tables.
     * On load, the restored members are mirrored back into index_params_.
     */
    template<typename Archive>
    void serialize(Archive& ar)
    {
        ar.setObject(this);

        ar & *static_cast<NNIndex<Distance>*>(this);

        ar & table_number_;
        ar & key_size_;
        ar & multi_probe_level_;

        ar & xor_masks_;
        ar & tables_;

        if (Archive::is_loading::value) {
            index_params_["algorithm"] = getType();
            index_params_["table_number"] = table_number_;
            index_params_["key_size"] = key_size_;
            index_params_["multi_probe_level"] = multi_probe_level_;
        }
    }

#ifdef FLANN_SERIALIZATION_LZ4
    void saveIndex(FILE* stream)
    {
        serialization::SaveArchive sa(stream);
        sa & *this;
    }

    void loadIndex(FILE* stream)
    {
        serialization::LoadArchive la(stream);
        la & *this;
    }
#endif

    /**
     * Computes the index memory usage
     * Returns: memory used by the index
     */
    int usedMemory() const
    {
        return size_ * sizeof(int);
    }

    /**
     * \brief Perform k-nearest neighbor search
     * \param[in] queries The query points for which to find the nearest neighbors
     * \param[out] indices The indices of the nearest neighbors found
     * \param[out] dists Distances to the nearest neighbors found
     * \param[in] knn Number of nearest neighbors to return
     * \param[in] params Search parameters
     * \return total number of neighbors found over all queries
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  Matrix<size_t>& indices,
                  Matrix<DistanceType>& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        assert(queries.cols == veclen_);
        assert(indices.rows >= queries.rows);
        assert(dists.rows >= queries.rows);
        assert(indices.cols >= knn);
        assert(dists.cols >= knn);

        int count = 0;
        if (params.use_heap==FLANN_True) {
            // Heap-backed result set: deduplicates candidates found in
            // several tables/buckets.
#pragma omp parallel num_threads(params.cores)
            {
                KNNUniqueResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }

        return count;
    }

    /**
     * \brief Perform k-nearest neighbor search (vector-of-vectors output)
     * \param[in] queries The query points for which to find the nearest neighbors
     * \param[out] indices The indices of the nearest neighbors found
     * \param[out] dists Distances to the nearest neighbors found
     * \param[in] knn Number of nearest neighbors to return
     * \param[in] params Search parameters
     * \return total number of neighbors found over all queries
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  std::vector< std::vector<size_t> >& indices,
                  std::vector<std::vector<DistanceType> >& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        assert(queries.cols == veclen_);
        if (indices.size() < queries.rows ) indices.resize(queries.rows);
        if (dists.size() < queries.rows ) dists.resize(queries.rows);

        int count = 0;
        if (params.use_heap==FLANN_True) {
#pragma omp parallel num_threads(params.cores)
            {
                KNNUniqueResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }
        else {
#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet<DistanceType> resultSet(knn);
#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n > 0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }

        return count;
    }

    /**
     * Find set of nearest neighbors to vec. Their indices are stored inside
     * the result object.
     *
     * Params:
     *     result = the result object in which the indices of the nearest-neighbors are stored
     *     vec = the vector for which to search the nearest neighbors
     *     maxCheck = the maximum number of restarts (in a best-bin-first manner)
     */
    void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& /*searchParams*/) const
    {
        getNeighbors(vec, result);
    }

protected:

    /**
     * Builds the index: creates the hash tables and inserts every point.
     */
    void buildIndexImpl()
    {
        tables_.resize(table_number_);
        std::vector<std::pair<size_t,ElementType*> > features;
        features.reserve(points_.size());
        for (size_t i=0;i<points_.size();++i) {
            features.push_back(std::make_pair(i, points_[i]));
        }
        for (unsigned int i = 0; i < table_number_; ++i) {
            lsh::LshTable<ElementType>& table = tables_[i];
            table = lsh::LshTable<ElementType>(veclen_, key_size_);

            // Add the features to the table
            table.add(features);
        }
    }

    void freeIndex()
    {
        /* nothing to do here */
    }

private:
    /** Defines the comparator on score and index
     */
    typedef std::pair<float, unsigned int> ScoreIndexPair;
    struct SortScoreIndexPairOnSecond
    {
        // Orders by dataset index (the pair's second member), not by score.
        bool operator()(const ScoreIndexPair& left, const ScoreIndexPair& right) const
        {
            return left.second < right.second;
        }
    };

    /** Fills the different xor masks to use when getting the neighbors in multi-probe LSH
     * @param key the key we build neighbors from
     * @param lowest_index the lowest index of the bit set
     * @param level the multi-probe level we are at
     * @param xor_masks all the xor mask
     */
    void fill_xor_mask(lsh::BucketKey key, int lowest_index, unsigned int level,
                       std::vector<lsh::BucketKey>& xor_masks)
    {
        xor_masks.push_back(key);
        if (level == 0) return;
        for (int index = lowest_index - 1; index >= 0; --index) {
            // Create a new key by flipping one more bit below lowest_index.
            lsh::BucketKey new_key = key | (lsh::BucketKey(1) << index);
            fill_xor_mask(new_key, index, level - 1, xor_masks);
        }
    }

    /** Performs the approximate nearest-neighbor search.
     * @param vec the feature to analyze
     * @param do_radius flag indicating if we check the radius too
     * @param radius the radius if it is a radius search
     * @param do_k flag indicating if we limit the number of nn
     * @param k_nn the number of nearest neighbors
     * @param checked_average used for debugging
     *
     * NOTE(review): this overload is not reached from the public search path
     * (findNeighbors uses the ResultSet overload below), and the function-local
     * `static` heap makes it non-reentrant/not thread-safe. Kept for
     * interface compatibility; prefer the ResultSet-based overload.
     */
    void getNeighbors(const ElementType* vec, bool do_radius, float radius, bool do_k, unsigned int k_nn,
                      float& checked_average)
    {
        static std::vector<ScoreIndexPair> score_index_heap;

        if (do_k) {
            unsigned int worst_score = std::numeric_limits<unsigned int>::max();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
            for (; table != table_end; ++table) {
                size_t key = table->getKey(vec);
                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
                for (; xor_mask != xor_mask_end; ++xor_mask) {
                    size_t sub_key = key ^ (*xor_mask);
                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                    if (bucket == 0) continue;

                    // Go over each descriptor index
                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                    DistanceType hamming_distance;

                    // Process the rest of the candidates
                    for (; training_index < last_training_index; ++training_index) {
                        if (removed_ && removed_points_.test(*training_index)) continue;
                        // FIX: index points_ directly (consistent with the const
                        // overload below); the element itself is the feature.
                        hamming_distance = distance_(vec, points_[*training_index], veclen_);

                        if (hamming_distance < worst_score) {
                            // FIX: store the dereferenced feature index; the
                            // original pushed the iterator itself, which does
                            // not convert to ScoreIndexPair's unsigned int.
                            score_index_heap.push_back(ScoreIndexPair(hamming_distance, *training_index));
                            std::push_heap(score_index_heap.begin(), score_index_heap.end());

                            if (score_index_heap.size() > (unsigned int)k_nn) {
                                // Remove the highest distance value as we have too many elements
                                std::pop_heap(score_index_heap.begin(), score_index_heap.end());
                                score_index_heap.pop_back();
                                // Keep track of the worst score
                                worst_score = score_index_heap.front().first;
                            }
                        }
                    }
                }
            }
        }
        else {
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
            typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
            for (; table != table_end; ++table) {
                size_t key = table->getKey(vec);
                std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
                std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
                for (; xor_mask != xor_mask_end; ++xor_mask) {
                    size_t sub_key = key ^ (*xor_mask);
                    const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                    if (bucket == 0) continue;

                    // Go over each descriptor index
                    std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                    std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                    DistanceType hamming_distance;

                    // Process the rest of the candidates
                    for (; training_index < last_training_index; ++training_index) {
                        if (removed_ && removed_points_.test(*training_index)) continue;
                        // Compute the Hamming distance (see FIX notes above).
                        hamming_distance = distance_(vec, points_[*training_index], veclen_);
                        if (hamming_distance < radius) score_index_heap.push_back(ScoreIndexPair(hamming_distance, *training_index));
                    }
                }
            }
        }
    }

    /** Performs the approximate nearest-neighbor search.
     * This is a slower version than the above as it uses the ResultSet
     * @param vec the feature to analyze
     */
    void getNeighbors(const ElementType* vec, ResultSet<DistanceType>& result) const
    {
        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table = tables_.begin();
        typename std::vector<lsh::LshTable<ElementType> >::const_iterator table_end = tables_.end();
        for (; table != table_end; ++table) {
            size_t key = table->getKey(vec);
            std::vector<lsh::BucketKey>::const_iterator xor_mask = xor_masks_.begin();
            std::vector<lsh::BucketKey>::const_iterator xor_mask_end = xor_masks_.end();
            for (; xor_mask != xor_mask_end; ++xor_mask) {
                size_t sub_key = key ^ (*xor_mask);
                const lsh::Bucket* bucket = table->getBucketFromKey(sub_key);
                if (bucket == 0) continue;

                // Go over each descriptor index
                std::vector<lsh::FeatureIndex>::const_iterator training_index = bucket->begin();
                std::vector<lsh::FeatureIndex>::const_iterator last_training_index = bucket->end();
                DistanceType hamming_distance;

                // Process the rest of the candidates
                for (; training_index < last_training_index; ++training_index) {
                    if (removed_ && removed_points_.test(*training_index)) continue;
                    // Compute the Hamming distance
                    hamming_distance = distance_(vec, points_[*training_index], veclen_);
                    result.addPoint(hamming_distance, *training_index);
                }
            }
        }
    }

    void swap(LshIndex& other)
    {
        BaseClass::swap(other);
        std::swap(tables_, other.tables_);
        std::swap(size_at_build_, other.size_at_build_);
        std::swap(table_number_, other.table_number_);
        std::swap(key_size_, other.key_size_);
        std::swap(multi_probe_level_, other.multi_probe_level_);
        std::swap(xor_masks_, other.xor_masks_);
    }

    /** The different hash tables */
    std::vector<lsh::LshTable<ElementType> > tables_;

    /** table number */
    unsigned int table_number_;
    /** key size */
    unsigned int key_size_;
    /** How far should we look for neighbors in multi-probe LSH */
    unsigned int multi_probe_level_;

    /** The XOR masks to apply to a key to get the neighboring buckets */
    std::vector<lsh::BucketKey> xor_masks_;

    USING_BASECLASS_SYMBOLS
};
}
#endif //FLANN_LSH_INDEX_H_
|
ast-dump-openmp-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // NOTE(review): AST-dump FileCheck test — CHECK lines reference exact line/col numbers, so comments must stay trailing-only
#pragma omp for simd // directive with no clauses
for (int i = 0; i < x; i++) // single associated loop
; // empty (null-statement) body
}
void test_two(int x, int y) { // nested loops, no collapse clause
#pragma omp for simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++) // inner 'i' shadows the outer loop variable (intentional for the dump)
;
}
void test_three(int x, int y) { // same nest as test_two, with an explicit collapse(1) clause
#pragma omp for simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) { // collapse(2): both loops of the nest are covered by the clause
#pragma omp for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) { // triple nest with collapse(2): innermost loop is outside the clause
#pragma omp for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPForSimdDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPForSimdDirective {{.*}} <line:10:1, col:21>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPForSimdDirective {{.*}} <line:17:1, col:33>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:22, col:32>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:31> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:31> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPForSimdDirective {{.*}} <line:24:1, col:33>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:22, col:32>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:31> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:31> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPForSimdDirective {{.*}} <line:31:1, col:33>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:22, col:32>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:31> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:31> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
Par-15-ParConsecutiveNoWaitFor.c |
int main(int argc, char **argv) {
  int a[4] = {1,2,3,4};
  int b[4] = {0, 0, 0, 0};
  // Two consecutive worksharing loops inside one parallel region.  The first
  // loop carries "nowait", so a thread may enter the second loop before the
  // first loop has finished globally.  With the default static schedule and
  // identical iteration counts, iteration i of both loops is assigned to the
  // same thread, so b[j] still observes the updated a[j].
  // NOTE(review): the test's correctness relies on that static-distribution
  // guarantee — confirm this is the intended property under test.
#pragma omp parallel
  {
#pragma omp for nowait
    for (int i = 0; i < 4; ++i) {
      a[i] = 3*a[i];      // a becomes {3, 6, 9, 12}
    }
#pragma omp for
    for (int j = 0; j < 4; ++j) {
      b[j] = a[j];        // copy the (already tripled) values
    }
  }
  return 0;
}
|
GB_helper.c | //------------------------------------------------------------------------------
// GB_helper.c: helper functions for @GrB interface
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// TODO::: move these into the @GrB interface instead
// These functions are only used by the @GrB interface for
// SuiteSparse:GraphBLAS.
#include "GB_helper.h"
//------------------------------------------------------------------------------
// GB_NTHREADS: determine the number of threads to use
//------------------------------------------------------------------------------
#define GB_NTHREADS(work) \
int nthreads_max = GB_Global_nthreads_max_get ( ) ; \
double chunk = GB_Global_chunk_get ( ) ; \
int nthreads = GB_nthreads (work, chunk, nthreads_max) ;
//------------------------------------------------------------------------------
// GB_ALLOCATE_WORK: allocate per-thread workspace
//------------------------------------------------------------------------------
#define GB_ALLOCATE_WORK(work_type) \
size_t Work_size ; \
work_type *Work = GB_MALLOC_WORK (nthreads, work_type, &Work_size) ; \
if (Work == NULL) return (false) ;
//------------------------------------------------------------------------------
// GB_FREE_WORKSPACE: free per-thread workspace
//------------------------------------------------------------------------------
#define GB_FREE_WORKSPACE \
GB_FREE_WORK (&Work, Work_size) ;
//------------------------------------------------------------------------------
// GB_helper0: get the current wall-clock time from OpenMP
//------------------------------------------------------------------------------
double GB_helper0 (void)
{
    // Current wall-clock time as a double, in seconds.
    // NOTE(review): GB_OPENMP_GET_WTIME is defined elsewhere (GB_helper.h),
    // presumably omp_get_wtime() when OpenMP is enabled — confirm.
    return (GB_OPENMP_GET_WTIME) ;
}
//------------------------------------------------------------------------------
// GB_helper1: convert 0-based indices to 1-based for gbextracttuples
//------------------------------------------------------------------------------
void GB_helper1             // convert zero-based indices to one-based
(
    double *restrict I_double,      // output array, size nvals
    const GrB_Index *restrict I,    // input array, size nvals
    int64_t nvals                   // size of input and output arrays
)
{
    GB_NTHREADS (nvals) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < nvals ; p++)
    {
        // one-based output index, as a double
        I_double [p] = (double) (I [p] + 1) ;
    }
}
//------------------------------------------------------------------------------
// GB_helper1i: convert 0-based indices to 1-based for gbextracttuples
//------------------------------------------------------------------------------
void GB_helper1i            // convert zero-based indices to one-based
(
    int64_t *restrict I,    // input/output array, size nvals
    int64_t nvals           // size of input/output array
)
{
    GB_NTHREADS (nvals) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < nvals ; p++)
    {
        // shift each index up by one, in place
        I [p] += 1 ;
    }
}
//------------------------------------------------------------------------------
// GB_helper3: convert 1-based indices to 0-based for gb_mxarray_to_list
//------------------------------------------------------------------------------
bool GB_helper3             // return true if OK, false on error
(
    int64_t *restrict List,             // size len, output array
    const double *restrict List_double, // size len, input array
    int64_t len,
    int64_t *List_max                   // also compute the max entry in the list
)
{
    GB_NTHREADS (len) ;
    ASSERT (List != NULL) ;
    ASSERT (List_double != NULL) ;
    ASSERT (List_max != NULL) ;
    bool ok = true ;
    int64_t listmax = -1 ;
    // one int64_t slot per thread: partial max, or INT64_MIN on error
    GB_ALLOCATE_WORK (int64_t) ;
    // each thread converts its partition of List_double from 1-based double
    // indices to 0-based int64 indices, verifying each value is an exact
    // integer, and tracking the largest (1-based) value it has seen
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        bool my_ok = true ;
        int64_t k1, k2, my_listmax = -1 ;
        GB_PARTITION (k1, k2, len, tid, nthreads) ;
        for (int64_t k = k1 ; k < k2 ; k++)
        {
            double x = List_double [k] ;
            int64_t i = (int64_t) x ;
            // x must round-trip exactly through int64, else it's not integral
            my_ok = my_ok && (x == (double) i) ;
            my_listmax = GB_IMAX (my_listmax, i) ;
            List [k] = i - 1 ;
        }
        // rather than create a separate per-thread boolean workspace, just
        // use a sentinel value of INT64_MIN if non-integer indices appear
        // in List_double.
        Work [tid] = my_ok ? my_listmax : INT64_MIN ;
    }
    // wrapup: reduce the per-thread results into listmax and ok
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        listmax = GB_IMAX (listmax, Work [tid]) ;
        ok = ok && (Work [tid] != INT64_MIN) ;
    }
    GB_FREE_WORKSPACE ;
    (*List_max) = listmax ;
    return (ok) ;
}
//------------------------------------------------------------------------------
// GB_helper3i: convert 1-based indices to 0-based for gb_mxarray_to_list
//------------------------------------------------------------------------------
bool GB_helper3i            // return true if OK, false on error
(
    int64_t *restrict List,             // size len, output array
    const int64_t *restrict List_int64, // size len, input array
    int64_t len,
    int64_t *List_max                   // also compute the max entry in the list
)
{
    GB_NTHREADS (len) ;
    int64_t listmax = -1 ;
    // one int64_t slot per thread for its partial max
    GB_ALLOCATE_WORK (int64_t) ;
    // each thread converts its partition from 1-based to 0-based indices;
    // unlike GB_helper3, no integer-validity check is needed since the
    // input is already int64
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        int64_t k1, k2, my_listmax = -1 ;
        GB_PARTITION (k1, k2, len, tid, nthreads) ;
        for (int64_t k = k1 ; k < k2 ; k++)
        {
            int64_t i = List_int64 [k] ;
            my_listmax = GB_IMAX (my_listmax, i) ;
            List [k] = i - 1 ;
        }
        Work [tid] = my_listmax ;
    }
    // wrapup: reduce the per-thread maxima
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        listmax = GB_IMAX (listmax, Work [tid]) ;
    }
    GB_FREE_WORKSPACE ;
    (*List_max) = listmax ;
    return (true) ;     // this variant cannot fail
}
//------------------------------------------------------------------------------
// GB_helper4: find the max entry in an index list for gbbuild
//------------------------------------------------------------------------------
bool GB_helper4             // return true if OK, false on error
(
    const GrB_Index *restrict I,    // array of size len
    const int64_t len,
    GrB_Index *List_max             // find max (I) + 1
)
{
    GB_NTHREADS (len) ;
    GrB_Index listmax = 0 ;
    // one GrB_Index slot per thread for its partial max
    GB_ALLOCATE_WORK (GrB_Index) ;
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        int64_t k1, k2 ;
        GrB_Index my_listmax = 0 ;
        GB_PARTITION (k1, k2, len, tid, nthreads) ;
        for (int64_t k = k1 ; k < k2 ; k++)
        {
            my_listmax = GB_IMAX (my_listmax, I [k]) ;
        }
        Work [tid] = my_listmax ;
    }
    // wrapup: reduce the per-thread maxima
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        listmax = GB_IMAX (listmax, Work [tid]) ;
    }
    GB_FREE_WORKSPACE ;
    // report max(I)+1, or 0 for an empty list
    if (len > 0) listmax++ ;
    (*List_max) = listmax ;
    return (true) ;     // NOTE(review): this variant always succeeds
}
//------------------------------------------------------------------------------
// GB_helper5: construct pattern of S for gblogassign
//------------------------------------------------------------------------------
void GB_helper5             // construct pattern of S for gblogassign
(
    GrB_Index *restrict Si,         // array of size anz
    GrB_Index *restrict Sj,         // array of size anz
    const GrB_Index *restrict Mi,   // array of size mnz, M->i, may be NULL
    const GrB_Index *restrict Mj,   // array of size mnz
    const int64_t mvlen,            // M->vlen
    GrB_Index *restrict Ai,         // array of size anz, A->i, may be NULL
    const int64_t avlen,            // A->vlen — TODO confirm (original comment said M->vlen)
    const GrB_Index anz
)
{
    GB_NTHREADS (anz) ;
    ASSERT (Mj != NULL) ;
    ASSERT (Si != NULL) ;
    ASSERT (Sj != NULL) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // index of the p-th entry of A, then looked up through M's pattern
        int64_t iA = GBI (Ai, p, avlen) ;
        Si [p] = GBI (Mi, iA, mvlen) ;
        Sj [p] = Mj [iA] ;
    }
}
//------------------------------------------------------------------------------
// GB_helper7: Kx = uint64 (0:mnz-1), for gblogextract
//------------------------------------------------------------------------------
// TODO: use GrB_apply with a positional operator instead
void GB_helper7             // Kx = uint64 (0:mnz-1)
(
    uint64_t *restrict Kx,  // array of size mnz
    const GrB_Index mnz
)
{
    GB_NTHREADS (mnz) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < mnz ; p++)
    {
        // identity ramp: entry p gets the value p
        Kx [p] = p ;
    }
}
//------------------------------------------------------------------------------
// GB_helper8: expand a scalar into an array for gbbuild
//------------------------------------------------------------------------------
// TODO: use GrB_assign instead
void GB_helper8             // broadcast a scalar into an array, for gbbuild
(
    GB_void *C,             // output array of size nvals * s
    GB_void *A,             // input scalar of size s
    GrB_Index nvals,        // # of entries in C
    size_t s                // size of each scalar, in bytes
)
{
    GB_NTHREADS (nvals) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < nvals ; p++)
    {
        // C [p] = A [0], copied byte-for-byte
        memcpy (C + p * s, A, s) ;
    }
}
//------------------------------------------------------------------------------
// GB_helper10: compute norm (x-y,p) of two dense FP32 or FP64 vectors
//------------------------------------------------------------------------------
// p can be:
// 0 or 2: 2-norm, sqrt (sum ((x-y).^2))
// 1: 1-norm, sum (abs (x-y))
// INT64_MAX inf-norm, max (abs (x-y))
// INT64_MIN (-inf)-norm, min (abs (x-y))
// other: p-norm not yet computed
double GB_helper10          // norm (x-y,p), or -1 on error
(
    GB_void *x_arg,         // float or double, depending on type parameter
    bool x_iso,             // true if x is iso
    GB_void *y_arg,         // same type as x, treat as zero if NULL
    bool y_iso,             // true if y is iso
    GrB_Type type,          // GrB_FP32 or GrB_FP64
    int64_t p,              // 0, 1, 2, INT64_MIN, or INT64_MAX
    GrB_Index n             // # of entries in x (and y, if present)
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    if (!(type == GrB_FP32 || type == GrB_FP64))
    {
        // type of x and y must be GrB_FP32 or GrB_FP64
        return ((double) -1) ;
    }

    if (n == 0)
    {
        // norm of an empty vector is zero
        return ((double) 0) ;
    }

    //--------------------------------------------------------------------------
    // allocate workspace and determine # of threads to use
    //--------------------------------------------------------------------------

    GB_NTHREADS (n) ;
    GB_ALLOCATE_WORK (double) ;     // one partial result per thread

    // X(k)/Y(k): entry k of x/y, or entry 0 if the vector is iso-valued
    #define X(k) x [x_iso ? 0 : k]
    #define Y(k) y [y_iso ? 0 : k]

    //--------------------------------------------------------------------------
    // each thread computes its partial norm
    //--------------------------------------------------------------------------

    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (tid = 0 ; tid < nthreads ; tid++)
    {
        int64_t k1, k2 ;
        GB_PARTITION (k1, k2, n, tid, nthreads) ;

        if (type == GrB_FP32)
        {

            //------------------------------------------------------------------
            // FP32 case
            //------------------------------------------------------------------

            float my_s = 0 ;
            const float *x = (float *) x_arg ;
            const float *y = (float *) y_arg ;
            switch (p)
            {
                case 0:     // Frobenius norm
                case 2:     // 2-norm: sqrt of sum of (x-y).^2
                {
                    if (y == NULL)
                    {
                        // y absent: treat as zero, so the term is just x
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            float t = X (k) ;
                            my_s += (t*t) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            float t = (X (k) - Y (k)) ;
                            my_s += (t*t) ;
                        }
                    }
                }
                break ;

                case 1:     // 1-norm: sum (abs (x-y))
                {
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s += fabsf (X (k)) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s += fabsf (X (k) - Y (k)) ;
                        }
                    }
                }
                break ;

                case INT64_MAX:     // inf-norm: max (abs (x-y))
                {
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fmaxf (my_s, fabsf (X (k))) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fmaxf (my_s, fabsf (X (k) - Y (k))) ;
                        }
                    }
                }
                break ;

                case INT64_MIN:     // (-inf)-norm: min (abs (x-y))
                {
                    // start at +inf so the first entry always wins the min
                    my_s = INFINITY ;
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fminf (my_s, fabsf (X (k))) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fminf (my_s, fabsf (X (k) - Y (k))) ;
                        }
                    }
                }
                break ;

                default: ;  // p-norm not yet supported
            }
            Work [tid] = (double) my_s ;

        }
        else
        {

            //------------------------------------------------------------------
            // FP64 case
            //------------------------------------------------------------------

            double my_s = 0 ;
            const double *x = (double *) x_arg ;
            const double *y = (double *) y_arg ;
            switch (p)
            {
                case 0:     // Frobenius norm
                case 2:     // 2-norm: sqrt of sum of (x-y).^2
                {
                    if (y == NULL)
                    {
                        // y absent: treat as zero, so the term is just x
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            double t = X (k) ;
                            my_s += (t*t) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            double t = (X (k) - Y (k)) ;
                            my_s += (t*t) ;
                        }
                    }
                }
                break ;

                case 1:     // 1-norm: sum (abs (x-y))
                {
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s += fabs (X (k)) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s += fabs (X (k) - Y (k)) ;
                        }
                    }
                }
                break ;

                case INT64_MAX:     // inf-norm: max (abs (x-y))
                {
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fmax (my_s, fabs (X (k))) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fmax (my_s, fabs (X (k) - Y (k))) ;
                        }
                    }
                }
                break ;

                case INT64_MIN:     // (-inf)-norm: min (abs (x-y))
                {
                    // start at +inf so the first entry always wins the min
                    my_s = INFINITY ;
                    if (y == NULL)
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fmin (my_s, fabs (X (k))) ;
                        }
                    }
                    else
                    {
                        for (int64_t k = k1 ; k < k2 ; k++)
                        {
                            my_s = fmin (my_s, fabs (X (k) - Y (k))) ;
                        }
                    }
                }
                break ;

                default: ;  // p-norm not yet supported
            }
            Work [tid] = my_s ;
        }
    }

    //--------------------------------------------------------------------------
    // combine results of each thread
    //--------------------------------------------------------------------------

    double s = 0 ;
    switch (p)
    {
        case 0:     // Frobenius norm
        case 2:     // 2-norm: sqrt of sum of (x-y).^2
        {
            for (int64_t tid = 0 ; tid < nthreads ; tid++)
            {
                s += Work [tid] ;
            }
            s = sqrt (s) ;
        }
        break ;

        case 1:     // 1-norm: sum (abs (x-y))
        {
            for (int64_t tid = 0 ; tid < nthreads ; tid++)
            {
                s += Work [tid] ;
            }
        }
        break ;

        case INT64_MAX:     // inf-norm: max (abs (x-y))
        {
            for (int64_t tid = 0 ; tid < nthreads ; tid++)
            {
                s = fmax (s, Work [tid]) ;
            }
        }
        break ;

        case INT64_MIN:     // (-inf)-norm: min (abs (x-y))
        {
            // seed with the first partial result, then fold in the rest
            s = Work [0] ;
            for (int64_t tid = 1 ; tid < nthreads ; tid++)
            {
                s = fmin (s, Work [tid]) ;
            }
        }
        break ;

        default:    // p-norm not yet supported
            s = -1 ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    return (s) ;
}
|
GB_unaryop__lnot_uint32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_int16
// op(A') function: GB_tran__lnot_uint32_int16
// C type: uint32_t
// A type: int16_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint32_int16     // Cx = lnot (cast (Ax))
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,        // # of entries
    int nthreads        // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // per-entry: cast, then apply: Cx [p] = ! (((uint32_t) Ax [p]) != 0)
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint32_int16     // C = lnot (cast (A'))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is brought in by textual inclusion; it is
    // specialized by the GB_* macros defined earlier in this file
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp_task_priority2.c | // RUN: %libomp-compile && env OMP_MAX_TASK_PRIORITY='2' %libomp-run
// Test OMP 4.5 task priorities
// Higher priority task supposed to be executed before lower priority task.
#include <stdio.h>
#include <omp.h>
#include "omp_my_sleep.h"
// delay(n) - sleep n ms
#define delay(n) my_sleep(((double)n)/1000.0)
int main ( void ) {
  // "passed" is both the success flag and an atomic progress counter the
  // two threads use to synchronize task creation and completion.
  int passed;
  passed = (omp_get_max_task_priority() == 2);
  printf("Got %d max priority via env\n", omp_get_max_task_priority());
  if(!passed) {
    printf( "failed\n" );
    return 1;
  }
  // Region 1: the primary thread creates 4 tasks and executes them itself
  // (the worker thread only polls), so the runtime must dequeue them in
  // priority order: P2, P1, then the two priority-0 tasks.
  printf("parallel 1 spawns 4 tasks for primary thread to execute\n");
  #pragma omp parallel num_threads(2)
  {
    int th = omp_get_thread_num();
    if (th == 0) // primary thread
    {
      #pragma omp task priority(1)
      { // middle priority
        int val, t = omp_get_thread_num();
        #pragma omp atomic capture
        val = passed++;
        printf("P1: val = %d, thread gen %d, thread exe %d\n", val, th, t);
        delay(10); // sleep 10 ms
      }
      #pragma omp task priority(2)
      { // high priority
        int val, t = omp_get_thread_num();
        #pragma omp atomic capture
        val = passed++;
        printf("P2: val = %d, thread gen %d, thread exe %d\n", val, th, t);
        delay(20); // sleep 20 ms
      }
      #pragma omp task priority(0)
      { // low priority specified explicitly
        int val, t = omp_get_thread_num();
        #pragma omp atomic capture
        val = passed++;
        printf("P0exp: val = %d, thread gen %d, thread exe %d\n", val, th, t);
        delay(1); // sleep 1 ms
      }
      #pragma omp task
      { // low priority by default
        int val, t = omp_get_thread_num();
        #pragma omp atomic capture
        val = passed++;
        printf("P0imp: val = %d, thread gen %d, thread exe %d\n", val, th, t);
        delay(1); // sleep 1 ms
      }
    } else {
      // wait for the primary thread to finish all tasks
      // (passed reaches 1 initial + 4 task increments = 5)
      int wait = 0;
      do {
        delay(5);
        #pragma omp atomic read
        wait = passed;
      } while (wait < 5);
    }
  }
  // Region 2: the primary thread creates the tasks but then busy-waits, so
  // the worker thread executes them — again in priority order.
  printf("parallel 2 spawns 4 tasks for worker thread to execute\n");
  #pragma omp parallel num_threads(2)
  {
    int th = omp_get_thread_num();
    if (th == 0) // primary thread
    {
      #pragma omp task priority(1)
      { // middle priority
        int val, t = omp_get_thread_num();
        #pragma omp atomic capture
        val = passed++;
        printf("P1: val = %d, thread gen %d, thread exe %d\n", val, th, t);
        delay(10); // sleep 10 ms
      }
      #pragma omp task priority(2)
      { // high priority
        int val, t = omp_get_thread_num();
        #pragma omp atomic capture
        val = passed++;
        printf("P2: val = %d, thread gen %d, thread exe %d\n", val, th, t);
        delay(20); // sleep 20 ms
      }
      #pragma omp task priority(0)
      { // low priority specified explicitly
        int val, t = omp_get_thread_num();
        #pragma omp atomic capture
        val = passed++;
        printf("P0exp: val = %d, thread gen %d, thread exe %d\n", val, th, t);
        delay(1); // sleep 1 ms
      }
      #pragma omp task
      { // low priority by default
        int val, t = omp_get_thread_num();
        #pragma omp atomic capture
        val = passed++;
        printf("P0imp: val = %d, thread gen %d, thread exe %d\n", val, th, t);
        delay(1); // sleep 1 ms
      }
      // signal creation of all tasks: passed = 5 + 1 = 6
      #pragma omp atomic
      passed++;
      // wait for completion of all 4 tasks
      int wait = 0;
      do {
        delay(5);
        #pragma omp atomic read
        wait = passed;
      } while (wait < 10); // passed = 6 + 4 = 10
    } else {
      // wait for the primary thread to create all tasks
      int wait = 0;
      do {
        delay(5);
        #pragma omp atomic read
        wait = passed;
      } while (wait < 6);
      // go execute 4 tasks created by primary thread
    }
  }
  if (passed != 10) {
    printf("failed, passed = %d (should be 10)\n", passed);
    return 1;
  }
  printf("passed\n");
  return 0;
}
// CHECK: parallel 1
// CHECK-NEXT: P2
// CHECK-NEXT: P1
// CHECK-NEXT: P0
// CHECK-NEXT: P0
// CHECK-NEXT: parallel 2
// CHECK-NEXT: P2
// CHECK-NEXT: P1
// CHECK-NEXT: P0
// CHECK-NEXT: P0
// CHECK: passed
|
GB_unaryop__abs_int8_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int8_int16
// op(A') function: GB_tran__abs_int8_int16
// C type: int8_t
// A type: int16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int8_int16        // Cx = abs (cast (Ax))
(
    int8_t *Cx,         // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,        // # of entries
    int nthreads        // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // per-entry: cast, then apply: Cx [p] = GB_IABS ((int8_t) Ax [p])
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int8_int16        // C = abs (cast (A'))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is brought in by textual inclusion; it is
    // specialized by the GB_* macros defined earlier in this file
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
lu-dep-timed.c | /*
* Copyright (C) 2010 Computer Architecture and Parallel Systems Laboratory (CAPSL)
*
* Original author: Elkin E. Garcia
* E-Mail: egarcia@capsl.udel.edu
*
* License
* Redistribution of this code is allowed only after an explicit permission is
* given by the original author or CAPSL and this license should be included in
* all files, either existing or new ones. Modifying the code is allowed, but
* the original author and/or CAPSL must be notified about these modifications.
* The original author and/or CAPSL is also allowed to use these modifications
* and publicly report results that include them. Appropriate acknowledgments
* to everyone who made the modifications will be added in this case.
*
* Warranty
*
* THIS CODE IS PROVIDED ON AN "AS IS"
* THIS CODE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND,
* EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT
* THE COVERED CODE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR
* PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
* OF THE COVERED CODE IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN
* ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME
* THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
* OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY
* COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
*/
/*
Static blocked LU Decomposition
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>   /* gettimeofday(), struct timeval — used by GetTickCount */
#include <omp.h>
#include <math.h>
#define CHECK 1
void Print_Matrix (double *v, int M, int N);
void InitMatrix(double *A, int N);
void ProcessDiagonalBlock(double *A, int L1, int N);
void ProcessBlockOnRow(double *A, double *D, int L1, int L3, int N);
void ProcessBlockOnColumn(double *A, double *D, int L1, int L2, int N);
void ProcessInnerBlock(double *A, double *R, double *C, int L1, int L2, int L3, int N);
void stepLU(double *A, int Block, int offset, int N );
void InitMatrix2(double *A, int N);
void InitMatrix3(double *A, int N);
void stage1(double *A, int offset, int *sizedim, int *start, int N, int M);
void stage2(double *A, int offset, int *sizedim, int *start, int N, int M);
void stage3(double *A, int offset, int *sizedim, int *start, int N, int M);
void lu2 (double *A, int N);
int itr = 0;
double total = 0;
double task_total = 0;
double wait_total = 0;
int start_flag = 0;
/* Return the current wall-clock time in microseconds since the Unix epoch.
 * NOTE: despite the Windows-style name, the unit is microseconds, not ticks.
 * Fixes: the original relied on an implicit declaration of gettimeofday()
 * (missing <sys/time.h>), ignored its return value, and computed
 * tv_sec * 1000000 in signed time_t arithmetic, which can overflow on
 * platforms with 32-bit time_t.  Returns 0 if gettimeofday() fails. */
unsigned long GetTickCount()
{
    struct timeval tv;
    if (gettimeofday(&tv, NULL) != 0)
        return 0;   /* clock query failed (extremely unlikely) */
    return (unsigned long)tv.tv_sec * 1000000UL + (unsigned long)tv.tv_usec;
}
int main (int argc, char *argv[])
{
    // Blocked LU decomposition driver: argv[1] = N (matrix dimension),
    // argv[2] = M (number of blocks per dimension).
    double *A,*A2,*L,*U, temp2;
    int i,j,k;
    int temp=0;         // error count for the residual check
    int offset = 0;     // top-left corner of the current trailing submatrix
    double t1,t2,t3,t4;
    int N = 100;
    int Block = 1;      // NOTE(review): unused
    int M=1; //number of blocks per dimension
    if( argc > 1 )
    N = atoi(argv[1]);
    if( argc > 2 )
    M = atoi(argv[2]);
    A = (double *)malloc (N*N*sizeof(double));
    A2 = (double *)malloc (N*N*sizeof(double));
    L = (double *)malloc (N*N*sizeof(double));
    U = (double *)malloc (N*N*sizeof(double));
    if( A==NULL || A2==NULL || L==NULL || U==NULL ) {
    printf("Can't allocate memory\n");
    exit(1);
    }
    // NOTE(review): A and A2 are never initialized here even though
    // InitMatrix/InitMatrix2/InitMatrix3 are declared — the factorization
    // runs on indeterminate data and the CHECK phase divides by A2[i*N+j].
    // Confirm whether an InitMatrix call (with A2 as a copy of A) was
    // dropped.  Also: none of the malloc'd buffers are ever freed.
    int *sizedim;
    int *start;
    int R; //Remain
    sizedim = (int*)malloc(M*sizeof(int));
    start = (int*)malloc(M*sizeof(int));
    R = N;
    t1 = GetTickCount();
    #pragma omp parallel
    {
    #pragma omp master
    {
    // keep factoring while the trailing submatrix is larger than M x M
    while (N-offset>M){
    // split the remaining R rows/columns into M nearly-equal chunks;
    // the first R%M chunks get one extra element
    for (i=0;i<M;i++){
    if (i<R%M){
    sizedim[i]=R/M+1;
    start[i]=(R/M+1)*i;
    } else {
    sizedim[i]=R/M;
    start[i]=(R/M+1)*(R%M)+(R/M)*(i-R%M);
    }
    }
    stage1(A, offset, sizedim, start, N, M);    // factor diagonal block
    t3 = GetTickCount();
    stage2(A, offset, sizedim, start, N, M);    // row/column panels
    stage3(A, offset, sizedim, start, N, M);    // trailing-submatrix update
    t4 = GetTickCount();
    total += (t4-t3);
    offset+=sizedim[0];
    R=R-sizedim[0];
    }
    }
    }
    // factor the final (N-offset) x (N-offset) block sequentially
    ProcessDiagonalBlock(&A[offset*N+offset], N-offset, N);
    t2 = GetTickCount();
    printf("Time for LU-decomposition in secs: %f \n", (t2-t1)/1000000);
    printf("Time for the task region in secs: %f \n", (total)/1000000);
    printf("Time for inside tasks in secs: %f \n", (task_total)/1000000);
    printf("Time spent in taskwait in secs: %f \n", (wait_total)/1000000);
    #ifdef CHECK
    // Rebuild L (unit lower triangular) and U (upper triangular) from the
    // packed factors in A, then count entries where L*U deviates from A2
    // by more than 10%.
    // NOTE(review): L's strict upper triangle and U's strict lower triangle
    // are never zeroed (malloc'd memory), yet every L[i][k]*U[k][j] product
    // reads them — the buffers should be calloc'd or explicitly zeroed.
    for (i=0;i<N;i++)
    for (j=0;j<N;j++)
    if (i>j)
    L[i*N+j] = A[i*N+j];
    else
    U[i*N+j] = A[i*N+j];
    for (i=0;i<N;i++)
    L[i*N+i] = 1;
    for (i=0;i<N;i++)
    for (j=0;j<N;j++){
    temp2=0;
    for (k=0;k<N;k++)
    temp2+=L[i*N+k]*U[k*N+j];
    if ((A2[i*N+j]-temp2)/A2[i*N+j] >0.1 || (A2[i*N+j]-temp2)/A2[i*N+j] <-0.1)
    temp++;
    }
    printf("Errors = %d \n", temp);
    #endif
    return 0;
}
void stage1(double *A, int offset, int *sizedim, int *start, int N, int M)
{
    /* Stage 1: LU-factor the current diagonal block in place.
     * Only sizedim[0] (panel width) is needed here; start and M are kept
     * in the signature for symmetry with stage2/stage3. */
    (void) start;
    (void) M;
    ProcessDiagonalBlock(A + (offset * N + offset), sizedim[0], N);
}
void stage2(double *A, int offset, int *sizedim, int *start, int N, int M)
{
    /* Stage 2: for each off-diagonal panel i, update the column block below
     * and the row block to the right of the freshly factored diagonal block.
     * Each update is an OpenMP task whose depend(out:) publishes the block's
     * anchor element so stage3 tasks can depend on it. */
    int x=offset, y=offset;
    int i;
    int L1 = sizedim[0];   /* order of the diagonal block */
    int L2, L3;
    for (i=1;i<M;i++){
        L2 = sizedim[i];   /* rows in the i-th column block */
        L3 = sizedim[i];   /* columns in the i-th row block */
        #pragma omp task firstprivate(i, L1, L2, x, y, N) depend(out: A[(x+start[i])*N+y] )
        {
            double t1 = GetTickCount();
            ProcessBlockOnColumn(&A[(x+start[i])*N+y], &A[x*N+y], L1, L2, N);
            double t2 = GetTickCount();
            t2 = t2-t1;
            #pragma omp atomic
            task_total += t2;
        }
        /* BUG FIX: the row task reads L3, which was left shared and is
         * overwritten by later loop iterations before the task may run;
         * capture L3 firstprivate (the old list captured the unused L2). */
        #pragma omp task firstprivate(i, L1, L3, x, y, N) depend(out: A[x*N+(y+start[i])] )
        {
            double t1 = GetTickCount();
            ProcessBlockOnRow(&A[x*N+(y+start[i])], &A[x*N+y], L1, L3, N);
            double t2 = GetTickCount();
            t2 = t2-t1;
            #pragma omp atomic
            task_total += t2;
        }
    }
}
void stage3(double *A, int offset, int *sizedim, int *start, int N, int M)
{
    /* Stage 3: trailing-submatrix update.  Inner block (i,j) consumes the
     * column block of block-row i and the row block of block-column j that
     * stage2 produced. */
    int x=offset, y=offset;
    int i,j;
    int L1 = sizedim[0];
    int L2, L3;
    for (i=1;i<M;i++)
        for (j=1;j<M;j++){
            L2 = sizedim[i];   /* rows of the inner block */
            L3 = sizedim[j];   /* columns of the inner block */
            /* BUG FIX: the row-block dependence must name block-column j;
             * the old clause wrote start[i] in both addresses, so for j != i
             * the task did not wait for the row block A[x*N+(y+start[j])]
             * it actually reads. */
            #pragma omp task firstprivate(i,j,M,N,x,y,L1,L2,L3) depend(in: A[x*N+(y+start[j])], A[(x+start[i])*N+y] )
            {
                double t1 = GetTickCount();
                ProcessInnerBlock( &A[(x+start[i])*N+(y+start[j])],
                                   &A[ x *N+(y+start[j])],
                                   &A[(x+start[i])*N+ y ],
                                   L1, L2, L3, N);
                double t2 = GetTickCount();
                t2 = t2-t1;
                #pragma omp atomic
                task_total += t2;
            }
        }
    double w1 = GetTickCount();
    /* drain all stage2/stage3 tasks before the next panel is factored */
    #pragma omp taskwait
    double w2 = GetTickCount();
    wait_total += (w2-w1);
}
void ProcessDiagonalBlock(double *A, int L1, int N)
{
    /* In-place unblocked LU factorization (Doolittle, no pivoting) of the
     * L1 x L1 block at A, stored with row stride N.  On return the strict
     * lower triangle holds the multipliers (L) and the upper triangle
     * holds U. */
    int piv, row, col;
    for (piv = 0; piv < L1; piv++) {
        for (row = piv + 1; row < L1; row++) {
            A[row*N + piv] /= A[piv*N + piv];          /* L multiplier */
            for (col = piv + 1; col < L1; col++)
                A[row*N + col] -= A[row*N + piv] * A[piv*N + col];
        }
    }
}
void ProcessBlockOnColumn(double *A, double *D, int L1, int L2, int N)
{
    /* Update the L2 x L1 column block A (row stride N) using the factored
     * L1 x L1 diagonal block D: forward-eliminate each row of A against the
     * U factor stored in D's upper triangle, leaving L multipliers in A. */
    int col, row, k;
    for (col = 0; col < L1; col++) {
        for (row = 0; row < L2; row++) {
            A[row*N + col] /= D[col*N + col];
            for (k = col + 1; k < L1; k++)
                A[row*N + k] -= A[row*N + col] * D[col*N + k];
        }
    }
}
void ProcessBlockOnRow(double *A, double *D, int L1, int L3, int N)
{
    /* Update the L1 x L3 row block A (row stride N) by applying the
     * unit-lower-triangular multipliers stored in the strict lower triangle
     * of the factored L1 x L1 diagonal block D. */
    int piv, row, col;
    for (piv = 0; piv < L1; piv++)
        for (row = piv + 1; row < L1; row++)
            for (col = 0; col < L3; col++)
                A[row*N + col] -= D[row*N + piv] * A[piv*N + col];
}
void ProcessInnerBlock(double *A, double *R, double *C, int L1, int L2, int L3, int N)
{
    /* Trailing update A -= C * R for an L2 x L3 inner block (row stride N),
     * where R is the L1 x L3 row block and C the L2 x L1 column block
     * produced by the panel updates. */
    int k, row, col;
    for (k = 0; k < L1; k++)
        for (row = 0; row < L2; row++)
            for (col = 0; col < L3; col++)
                A[row*N + col] -= C[row*N + k] * R[k*N + col];
}
void Print_Matrix (double *v, int M, int N) {
    /* Dump an M x N matrix (row stride N) to stdout, one comma-separated
     * row per line, framed by blank lines. */
    int row, col;
    printf("\n");
    for (row = 0; row < M; row++) {
        for (col = 0; col < N; col++)
            printf("%.2f,", v[row*N + col]);
        printf("\n");
    }
    printf("\n");
}
void InitMatrix2(double *A, int N)
{
    /* Fill the N x N matrix with A[i][j] = min(i,j) + 1.
     * The original zeroed A and then ran the triple loop
     *   for k: for i>=k: for j>=k: A[i][j] += 1
     * which adds one for every k <= min(i,j) — i.e. min(i,j)+1 — at O(N^3)
     * cost.  The closed form below produces identical values in O(N^2).
     * long long indices avoid overflow of i*N+j for large N. */
    long long i, j;
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++)
            A[i*N + j] = (double)((i < j ? i : j) + 1);
}
void InitMatrix3(double *A, int N)
{
    /* Fill A = L*U for known triangular factors:
     *   L[i][j] = i-j+1 for i>=j (0 above),  U[i][j] = j-i+1 for i<=j (0 below),
     * giving a matrix whose exact LU factorization is known in advance. */
    long long i,j,k;
    double *L, *U;
    L = (double*) malloc(N*N*sizeof(double));
    U = (double*) malloc(N*N*sizeof(double));
    if (L == NULL || U == NULL) {     /* BUG FIX: mallocs were unchecked */
        printf("Can't allocate memory\n");
        free(L);                      /* free(NULL) is a no-op */
        free(U);
        exit(1);
    }
#pragma omp parallel for private(i,j)
    for (i=0;i<N;i++)
        for (j=0;j<N;j++){
            A[i*N+j]=0;
            if (i>=j)
                L[i*N+j] = i-j+1;
            else
                L[i*N+j] = 0;
            if (i<=j)
                U[i*N+j] = j-i+1;
            else
                U[i*N+j] = 0;
        }
#pragma omp parallel for private(i,j,k)
    for (i=0;i<N;i++)
        for (j=0;j<N;j++)
            for (k=0;k<N;k++)
                A[i*N+j]+=L[i*N+k]*U[k*N+j];
    /* BUG FIX: L and U were leaked on every call */
    free(L);
    free(U);
}
|
DRB108-atomic-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
/*
* Test if atomic can be recognized properly. No data races.
* */
int main (void)
{
  /* 100 parallel increments of a single counter; the reduction(+:a)
     clause gives each thread a private copy that is summed at the end,
     so there is no data race and the result is always 100. */
  int a=0;
#pragma omp parallel for reduction(+:a)
  for (int iter = 0; iter < 100; ++iter)
  {
    ++a;
  }
  printf ("a=%d\n",a);
  return 0;
}
|
ParallelOpenMP.h | #pragma once
#include <ATen/ATen.h>
#include <cstddef>
#include <exception>
#ifdef _OPENMP
#define INTRA_OP_PARALLEL
#include <omp.h>
#endif
namespace at {
template <class F>
inline void parallel_for(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const F& f) {
  // Invoke f(sub_begin, sub_end) over [begin, end), splitting the range
  // across OpenMP threads when a parallel region is worthwhile; without
  // OpenMP (or for trivially small ranges) f runs once, serially.
  // f must be safe to call concurrently on disjoint sub-ranges.
  TORCH_CHECK(grain_size >= 0);  // negative grain sizes are a caller error
  at::internal::lazy_init_num_threads();
  if (begin >= end) {  // empty range: nothing to do
    return;
  }
#ifdef _OPENMP
  // Exceptions must not escape an OpenMP region: the first thread to throw
  // wins err_flag and stores its exception; it is rethrown on the calling
  // thread after the region ends.
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;
  // Work around memory leak when using 1 thread in nested "omp parallel"
  // caused by some buggy OpenMP versions and the fact that omp_in_parallel()
  // returns false when omp_get_max_threads() == 1 inside nested "omp parallel"
  // See issue gh-32284
#pragma omp parallel if (omp_get_max_threads() > 1 && !omp_in_parallel() && ((end - begin) > grain_size))
  {
    // choose number of tasks based on grain size and number of threads
    // can't use num_threads clause due to bugs in GOMP's thread pool (See #32008)
    int64_t num_threads = omp_get_num_threads();
    if (grain_size > 0) {
      // cap the thread count so each chunk holds at least grain_size items
      num_threads = std::min(num_threads, divup((end - begin), grain_size));
    }
    int64_t tid = omp_get_thread_num();
    int64_t chunk_size = divup((end - begin), num_threads);
    int64_t begin_tid = begin + tid * chunk_size;
    if (begin_tid < end) {  // threads past the last chunk simply idle
      try {
        f(begin_tid, std::min(end, chunk_size + begin_tid));
      } catch (...) {
        if (!err_flag.test_and_set()) {  // keep only the first exception
          eptr = std::current_exception();
        }
      }
    }
  }
  if (eptr) {
    std::rethrow_exception(eptr);
  }
#else
  f(begin, end);  // no OpenMP available: plain serial call
#endif
}
template <class scalar_t, class F, class SF>
inline scalar_t parallel_reduce(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const scalar_t ident,
    const F& f,
    const SF& sf) {
  // Parallel reduction over [begin, end): f(sub_begin, sub_end, ident)
  // produces one partial result per grain_size-sized chunk, and sf folds
  // the partials (left to right, seeded with ident) into the final value.
  // NOTE(review): grain_size == 0 passes the check below but is then used
  // as the divisor in divup() — confirm callers always pass grain_size > 0.
  TORCH_CHECK(grain_size >= 0);
  at::internal::lazy_init_num_threads();
  if (begin >= end) {  // empty range: reduction of nothing is the identity
    return ident;
  } else if (in_parallel_region() || get_num_threads() == 1) {
    // already inside a parallel region (or single-threaded): run serially
    return f(begin, end, ident);
  } else {
    // one slot per chunk; each loop iteration writes only its own slot,
    // so no synchronization is needed on results_data
    const int64_t num_results = divup((end - begin), grain_size);
    std::vector<scalar_t> results(num_results);
    scalar_t* results_data = results.data();
    // first exception thrown inside the loop is captured and rethrown
    // after the parallel region (exceptions may not cross its boundary)
    std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
    std::exception_ptr eptr;
#pragma omp parallel for if ((end - begin) >= grain_size)
    for (int64_t id = 0; id < num_results; id++) {
      int64_t i = begin + id * grain_size;
      // last chunk may be shorter than grain_size
      try {
        results_data[id] = f(i, i + std::min(end - i, grain_size), ident);
      } catch (...) {
        if (!err_flag.test_and_set()) {  // keep only the first exception
          eptr = std::current_exception();
        }
      }
    }
    if (eptr) {
      std::rethrow_exception(eptr);
    }
    // sequential fold of the partial results preserves chunk order
    scalar_t result = ident;
    for (auto partial_result : results) {
      result = sf(result, partial_result);
    }
    return result;
  }
}
} // namespace at
|
resample.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS AAA M M PPPP L EEEEE %
% R R E SS A A MM MM P P L E %
% RRRR EEE SSS AAAAA M M M PPPP L EEE %
% R R E SS A A M M P L E %
% R R EEEEE SSSSS A A M M P LLLLL EEEEE %
% %
% %
% MagickCore Pixel Resampling Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% August 2007 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/color-private.h"
#include "magick/cache.h"
#include "magick/draw.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/resample.h"
#include "magick/resize.h"
#include "magick/resize-private.h"
#include "magick/resource_.h"
#include "magick/transform.h"
#include "magick/signature-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/option.h"
/*
EWA Resampling Options
*/
/* select ONE resampling method */
#define EWA 1 /* Normal EWA handling - raw or clamped */
/* if 0 then use "High Quality EWA" */
#define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */
#define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */
/* output debugging information */
#define DEBUG_ELLIPSE 0 /* output ellipse info for debug */
#define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */
#define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */
#if ! FILTER_DIRECT
#define WLUT_WIDTH 1024 /* size of the filter cache */
#endif
/*
Typedef declarations.
*/
/* Private state for EWA (elliptical weighted average) pixel resampling.
   Created by AcquireResampleFilter(), released by DestroyResampleFilter();
   one instance is reused across many ResamplePixelColor() calls. */
struct _ResampleFilter
{
  CacheView
    *view;              /* cache view through which all pixels are read */

  Image
    *image;             /* referenced source image (own reference held) */

  ExceptionInfo
    *exception;         /* sink for pixel-access errors */

  MagickBooleanType
    debug;              /* event logging enabled at acquire time? */

  /* Information about image being resampled */
  ssize_t
    image_area;         /* columns*rows of the source image */

  InterpolatePixelMethod
    interpolate;        /* fallback point-interpolation method */

  VirtualPixelMethod
    virtual_pixel;      /* how out-of-bounds pixels are synthesized */

  FilterTypes
    filter;             /* weighting filter selected for EWA */

  /* processing settings needed */
  MagickBooleanType
    limit_reached,      /* ellipse grew past sane limits; use averages */
    do_interpolate,     /* bypass EWA and point-interpolate instead */
    average_defined;    /* average_pixel has been computed (lazily) */

  MagickPixelPacket
    average_pixel;      /* cached whole-image average color */

  /* current elliptical area being resampled around center point */
  double
    A, B, C,            /* ellipse coefficients: A*u^2 + B*u*v + C*v^2 */
    Vlimit, Ulimit, Uwidth, slope;  /* scanline bounding parallelogram */

#if FILTER_LUT
  /* LUT of weights for filtered average in elliptical area */
  double
    filter_lut[WLUT_WIDTH];
#else
  /* Use a Direct call to the filter functions */
  ResizeFilter
    *filter_def;

  double
    F;                  /* ellipse inclusion threshold (support^2) */
#endif

  /* the practical working support of the filter */
  double
    support;

  size_t
    signature;          /* structure validity marker */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResampleFilter() initializes the information resample needs do to a
% scaled lookup of a color from an image, using area sampling.
%
% The algorithm is based on a Elliptical Weighted Average, where the pixels
% found in a large elliptical area is averaged together according to a
% weighting (filter) function. For more details see "Fundamentals of Texture
% Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17,
% 1989. Available for free from, http://www.cs.cmu.edu/~ph/
%
% As EWA resampling (or any sort of resampling) can require a lot of
% calculations to produce a distorted scaling of the source image for each
% output pixel, the ResampleFilter structure generated holds that information
% between individual image resampling.
%
% This function will make the appropriate AcquireVirtualCacheView() calls
% to view the image, calling functions do not need to open a cache view.
%
% Usage Example...
% resample_filter=AcquireResampleFilter(image,exception);
% SetResampleFilter(resample_filter, GaussianFilter, 1.0);
% for (y=0; y < (ssize_t) image->rows; y++) {
% for (x=0; x < (ssize_t) image->columns; x++) {
% u= ....; v= ....;
% ScaleResampleFilter(resample_filter, ... scaling vectors ...);
% (void) ResamplePixelColor(resample_filter,u,v,&pixel);
% ... assign resampled pixel value ...
% }
% }
% DestroyResampleFilter(resample_filter);
%
% The format of the AcquireResampleFilter method is:
%
% ResampleFilter *AcquireResampleFilter(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
  ExceptionInfo *exception)
{
  /* Allocate and initialize a ResampleFilter for EWA lookups on image.
     Fatal on allocation failure; otherwise returns a filter preset with the
     image's own filter, blur, interpolation and virtual-pixel settings. */
  register ResampleFilter
    *resample_filter;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  resample_filter=(ResampleFilter *) AcquireMagickMemory(
    sizeof(*resample_filter));
  if (resample_filter == (ResampleFilter *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(resample_filter,0,sizeof(*resample_filter));
  resample_filter->exception=exception;
  /* take our own image reference; released in DestroyResampleFilter() */
  resample_filter->image=ReferenceImage((Image *) image);
  resample_filter->view=AcquireVirtualCacheView(resample_filter->image,exception);
  resample_filter->debug=IsEventLogging();
  resample_filter->signature=MagickCoreSignature;
  resample_filter->image_area=(ssize_t) (image->columns*image->rows);
  /* whole-image average color is computed lazily on first need */
  resample_filter->average_defined = MagickFalse;
  /* initialise the resampling filter settings from the image defaults */
  SetResampleFilter(resample_filter, image->filter, image->blur);
  (void) SetResampleFilterInterpolateMethod(resample_filter,
    image->interpolate);
  (void) SetResampleFilterVirtualPixelMethod(resample_filter,
    GetImageVirtualPixelMethod(image));
  return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResampleFilter() finalizes and cleans up the resampling
% resample_filter as returned by AcquireResampleFilter(), freeing any memory
% or other information as needed.
%
% The format of the DestroyResampleFilter method is:
%
% ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
% A description of each parameter follows:
%
% o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
  ResampleFilter *resample_filter)
{
  /* Release the cache view, the image reference and the filter structure
     itself; always returns NULL for the caller to reassign. */
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->view=DestroyCacheView(resample_filter->view);
  resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
  resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
  /* invalidate the signature so stale pointers trip the asserts above */
  resample_filter->signature=(~MagickCoreSignature);
  resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
  return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResamplePixelColor() samples the pixel values surrounding the location
% given using an elliptical weighted average, at the scale previously
% calculated, and in the most efficent manner possible for the
% VirtualPixelMethod setting.
%
% The format of the ResamplePixelColor method is:
%
% MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
% const double u0,const double v0,MagickPixelPacket *pixel)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o u0,v0: A double representing the center of the area to resample,
% The distortion transformed transformed x,y coordinate.
%
% o pixel: the resampled pixel is returned here.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
  ResampleFilter *resample_filter,const double u0,const double v0,
  MagickPixelPacket *pixel)
{
  /* Compute the EWA-resampled color at source coordinate (u0,v0) using the
     ellipse previously set by ScaleResampleFilter(); falls back to point or
     average interpolation when the ellipse misses the image or scaling
     limits were reached.  Returns MagickFalse only on pixel-cache failure. */
  MagickBooleanType
    status;

  ssize_t u,v, v1, v2, uw, hit;
  double u1;
  double U,V,Q,DQ,DDQ;
  double divisor_c,divisor_m;
  register double weight;
  register const PixelPacket *pixels;
  register const IndexPacket *indexes;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  status=MagickTrue;
  /* GetMagickPixelPacket(resample_filter->image,pixel); */

  /* caller asked for plain point interpolation - no EWA area averaging */
  if ( resample_filter->do_interpolate ) {
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
    return(status);
  }

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif

  /*
    Does resample area Miss the image Proper?
    If and that area a simple solid color - then simply return that color!
    This saves a lot of calculation when resampling outside the bounds of
    the source image.

    However it probably should be expanded to image bounds plus the filters
    scaled support size.
  */
  hit = 0;
  switch ( resample_filter->virtual_pixel ) {
    case BackgroundVirtualPixelMethod:
    case ConstantVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case MaskVirtualPixelMethod:
      /* all out-of-bounds pixels share one color: any miss is a full miss */
      if ( resample_filter->limit_reached
           || u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           || v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;
      break;

    case UndefinedVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
      /* edge replication: only the four outside corners are solid color */
      if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 + resample_filter->Ulimit < 0.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           )
        hit++;
      break;
    case HorizontalTileVirtualPixelMethod:
      if ( v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;  /* outside the horizontally tiled images. */
      break;
    case VerticalTileVirtualPixelMethod:
      if ( u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           )
        hit++;  /* outside the vertically tiled images. */
      break;
    case DitherVirtualPixelMethod:
      /* dither extends the image by 32 pixels in each direction */
      if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 + resample_filter->Ulimit < -32.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           )
        hit++;
      break;
    case TileVirtualPixelMethod:
    case MirrorVirtualPixelMethod:
    case RandomVirtualPixelMethod:
    case HorizontalTileEdgeVirtualPixelMethod:
    case VerticalTileEdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
      /* resampling of area is always needed - no VP limits */
      break;
  }
  if ( hit ) {
    /* The area being resampled is simply a solid color
     * just return a single lookup color.
     *
     * Should this return the users requested interpolated color?
     */
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,IntegerInterpolatePixel,u0,v0,pixel,
      resample_filter->exception);
    return(status);
  }

  /*
    When Scaling limits reached, return an 'averaged' result.
  */
  if ( resample_filter->limit_reached ) {
    switch ( resample_filter->virtual_pixel ) {
      /* This is always handled by the above, so no need.
        case BackgroundVirtualPixelMethod:
        case ConstantVirtualPixelMethod:
        case TransparentVirtualPixelMethod:
        case GrayVirtualPixelMethod,
        case WhiteVirtualPixelMethod
        case MaskVirtualPixelMethod:
      */
      case UndefinedVirtualPixelMethod:
      case EdgeVirtualPixelMethod:
      case DitherVirtualPixelMethod:
      case HorizontalTileEdgeVirtualPixelMethod:
      case VerticalTileEdgeVirtualPixelMethod:
        /* We need an average edge pixel, from the correct edge!
           How should I calculate an average edge color?
           Just returning an averaged neighbourhood,
           works well in general, but falls down for TileEdge methods.
           This needs to be done properly!!!!!!
        */
        status=InterpolateMagickPixelPacket(resample_filter->image,
          resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
          resample_filter->exception);
        break;
      case HorizontalTileVirtualPixelMethod:
      case VerticalTileVirtualPixelMethod:
        /* just return the background pixel - Is there a better way? */
        status=InterpolateMagickPixelPacket(resample_filter->image,
          resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
          resample_filter->exception);
        break;
      case TileVirtualPixelMethod:
      case MirrorVirtualPixelMethod:
      case RandomVirtualPixelMethod:
      case CheckerTileVirtualPixelMethod:
      default:
        /* generate a average color of the WHOLE image */
        if ( resample_filter->average_defined == MagickFalse ) {
          Image
            *average_image;

          CacheView
            *average_view;

          GetMagickPixelPacket(resample_filter->image,(MagickPixelPacket *)
            &resample_filter->average_pixel);
          resample_filter->average_defined=MagickTrue;

          /* Try to get an averaged pixel color of whole image */
          average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,1.0,
            resample_filter->exception);
          if (average_image == (Image *) NULL)
            {
              *pixel=resample_filter->average_pixel; /* FAILED */
              break;
            }
          average_view=AcquireVirtualCacheView(average_image,
            &average_image->exception);
          pixels=(PixelPacket *)GetCacheViewVirtualPixels(average_view,0,0,1,1,
            resample_filter->exception);
          if (pixels == (const PixelPacket *) NULL) {
            average_view=DestroyCacheView(average_view);
            average_image=DestroyImage(average_image);
            *pixel=resample_filter->average_pixel; /* FAILED */
            break;
          }
          indexes=(IndexPacket *) GetCacheViewAuthenticIndexQueue(average_view);
          SetMagickPixelPacket(resample_filter->image,pixels,indexes,
            &(resample_filter->average_pixel));
          average_view=DestroyCacheView(average_view);
          average_image=DestroyImage(average_image);

          if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
            {
              /* CheckerTile is a alpha blend of the image's average pixel
                 color and the current background color */

              /* image's average pixel color */
              weight = QuantumScale*((MagickRealType)(QuantumRange-
                       resample_filter->average_pixel.opacity));
              resample_filter->average_pixel.red *= weight;
              resample_filter->average_pixel.green *= weight;
              resample_filter->average_pixel.blue *= weight;
              divisor_c = weight;

              /* background color */
              weight = QuantumScale*((MagickRealType)(QuantumRange-
                       resample_filter->image->background_color.opacity));
              resample_filter->average_pixel.red +=
                   weight*resample_filter->image->background_color.red;
              resample_filter->average_pixel.green +=
                   weight*resample_filter->image->background_color.green;
              resample_filter->average_pixel.blue +=
                   weight*resample_filter->image->background_color.blue;
              resample_filter->average_pixel.opacity +=
                   resample_filter->image->background_color.opacity;
              divisor_c += weight;

              /* alpha blend */
              resample_filter->average_pixel.red /= divisor_c;
              resample_filter->average_pixel.green /= divisor_c;
              resample_filter->average_pixel.blue /= divisor_c;
              resample_filter->average_pixel.opacity /= 2; /* 50% blend */
            }
        }
        *pixel=resample_filter->average_pixel;
        break;
    }
    return(status);
  }

  /*
    Initialize weighted average data collection
  */
  hit = 0;
  divisor_c = 0.0;
  divisor_m = 0.0;
  pixel->red = pixel->green = pixel->blue = 0.0;
  if (pixel->matte != MagickFalse) pixel->opacity = 0.0;
  if (pixel->colorspace == CMYKColorspace) pixel->index = 0.0;

  /*
    Determine the parellelogram bounding box fitted to the ellipse
    centered at u0,v0.  This area is bounding by the lines...
  */
  v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit);  /* range of scan lines */
  v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);

  /* scan line start and width accross the parallelogram */
  u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
  uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
  (void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif

  /*
    Do weighted resampling of all pixels,  within the scaled ellipse,
    bound by a Parellelogram fitted to the ellipse.
  */
  DDQ = 2*resample_filter->A;  /* constant second difference of Q per column */
  for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
    long uu = ceil(u1);   /* actual pixel location (for debug only) */
    (void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
    u = (ssize_t)ceil(u1);        /* first pixel in scanline */
    u1 += resample_filter->slope; /* start of next scan line */

    /* location of this first pixel, relative to u0,v0 */
    U = (double)u-u0;
    V = (double)v-v0;

    /* Q = ellipse quotent ( if Q<F then pixel is inside ellipse) */
    Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
    DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;

    /* get the scanline of pixels for this v */
    pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
      1,resample_filter->exception);
    if (pixels == (const PixelPacket *) NULL)
      return(MagickFalse);
    indexes=GetCacheViewVirtualIndexQueue(resample_filter->view);

    /* count up the weighted pixel colors */
    for( u=0; u<uw; u++ ) {
      weight = 0;
#if FILTER_LUT
      /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
      if ( Q < (double)WLUT_WIDTH ) {
        weight = resample_filter->filter_lut[(int)Q];
#else
      /* Note that the ellipse has been pre-scaled so F = support^2 */
      if ( Q < (double)resample_filter->F ) {
        weight = GetResizeFilterWeight(resample_filter->filter_def,
             sqrt(Q));    /* a SquareRoot!  Arrggghhhhh... */
#endif

        /* weight the colors, unscaled by alpha (premultiplied average) */
        if (pixel->matte != MagickFalse)
          pixel->opacity += weight*pixels->opacity;
        divisor_m += weight;

        if (pixel->matte != MagickFalse)
          weight *= QuantumScale*((MagickRealType)(QuantumRange-pixels->opacity));
        pixel->red   += weight*pixels->red;
        pixel->green += weight*pixels->green;
        pixel->blue  += weight*pixels->blue;
        if (pixel->colorspace == CMYKColorspace)
          pixel->index += weight*(*indexes);
        divisor_c += weight;

        hit++;
#if DEBUG_HIT_MISS
        /* mark the pixel according to hit/miss of the ellipse */
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
                     (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
                     (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      } else {
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
                     (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
                     (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      }
      uu++;
#else
      }
#endif
      /* incremental (forward-difference) evaluation of the quadratic Q */
      pixels++;
      indexes++;
      Q += DQ;
      DQ += DDQ;
    }
  }
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif

  /*
    Result sanity check -- this should NOT happen
  */
  if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
    /* not enough pixels, or bad weighting in resampling,
       resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
    pixel->opacity = pixel->red = pixel->green = pixel->blue = 0;
    pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
    status=InterpolateMagickPixelPacket(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
#endif
    return status;
  }

  /*
    Finialize results of resampling
  */
  divisor_m = 1.0/divisor_m;
  if (pixel->matte != MagickFalse)
    pixel->opacity = (MagickRealType) ClampToQuantum(divisor_m*pixel->opacity);
  divisor_c = 1.0/divisor_c;
  pixel->red   = (MagickRealType) ClampToQuantum(divisor_c*pixel->red);
  pixel->green = (MagickRealType) ClampToQuantum(divisor_c*pixel->green);
  pixel->blue  = (MagickRealType) ClampToQuantum(divisor_c*pixel->blue);
  if (pixel->colorspace == CMYKColorspace)
    pixel->index = (MagickRealType) ClampToQuantum(divisor_c*pixel->index);
  return(MagickTrue);
}
#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
- C l a m p U p A x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampUpAxes() function converts the input vectors into a major and
% minor axis unit vectors, and their magnitude. This allows us to
% ensure that the ellipse generated is never smaller than the unit
% circle and thus never too small for use in EWA resampling.
%
% This purely mathematical 'magic' was provided by Professor Nicolas
% Robidoux and his Masters student Chantal Racette.
%
% Reference: "We Recommend Singular Value Decomposition", David Austin
% http://www.ams.org/samplings/feature-column/fcarc-svd
%
% By generating major and minor axis vectors, we can actually use the
% ellipse in its "canonical form", by remapping the dx,dy of the
% sampled point into distances along the major and minor axis unit
% vectors.
%
% Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/
static inline void ClampUpAxes(const double dux,
const double dvx,
const double duy,
const double dvy,
double *major_mag,
double *minor_mag,
double *major_unit_x,
double *major_unit_y,
double *minor_unit_x,
double *minor_unit_y)
{
/*
* ClampUpAxes takes an input 2x2 matrix
*
* [ a b ] = [ dux duy ]
* [ c d ] = [ dvx dvy ]
*
* and computes from it the major and minor axis vectors [major_x,
* major_y] and [minor_x,minor_y] of the smallest ellipse containing
* both the unit disk and the ellipse which is the image of the unit
* disk by the linear transformation
*
* [ dux duy ] [S] = [s]
* [ dvx dvy ] [T] = [t]
*
* (The vector [S,T] is the difference between a position in output
* space and [X,Y]; the vector [s,t] is the difference between a
* position in input space and [x,y].)
*/
/*
* Output:
*
* major_mag is the half-length of the major axis of the "new"
* ellipse.
*
* minor_mag is the half-length of the minor axis of the "new"
* ellipse.
*
* major_unit_x is the x-coordinate of the major axis direction vector
* of both the "old" and "new" ellipses.
*
* major_unit_y is the y-coordinate of the major axis direction vector.
*
* minor_unit_x is the x-coordinate of the minor axis direction vector.
*
* minor_unit_y is the y-coordinate of the minor axis direction vector.
*
* Unit vectors are useful for computing projections, in particular,
* to compute the distance between a point in output space and the
* center of a unit disk in output space, using the position of the
* corresponding point [s,t] in input space. Following the clamping,
* the square of this distance is
*
* ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2
* +
* ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2
*
* If such distances will be computed for many [s,t]'s, it makes
* sense to actually compute the reciprocal of major_mag and
* minor_mag and multiply them by the above unit lengths.
*
* Now, if you want to modify the input pair of tangent vectors so
* that it defines the modified ellipse, all you have to do is set
*
* newdux = major_mag * major_unit_x
* newdvx = major_mag * major_unit_y
* newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y
* newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x
*
* and use these tangent vectors as if they were the original ones.
* Usually, this is a drastic change in the tangent vectors even if
* the singular values are not clamped; for example, the minor axis
* vector always points in a direction which is 90 degrees
* counterclockwise from the direction of the major axis vector.
*/
/*
* Discussion:
*
* GOAL: Fix things so that the pullback, in input space, of a disk
* of radius r in output space is an ellipse which contains, at
* least, a disc of radius r. (Make this hold for any r>0.)
*
* ESSENCE OF THE METHOD: Compute the product of the first two
* factors of an SVD of the linear transformation defining the
* ellipse and make sure that both its columns have norm at least 1.
* Because rotations and reflexions map disks to themselves, it is
* not necessary to compute the third (rightmost) factor of the SVD.
*
* DETAILS: Find the singular values and (unit) left singular
* vectors of Jinv, clampling up the singular values to 1, and
* multiply the unit left singular vectors by the new singular
* values in order to get the minor and major ellipse axis vectors.
*
* Image resampling context:
*
* The Jacobian matrix of the transformation at the output point
* under consideration is defined as follows:
*
* Consider the transformation (x,y) -> (X,Y) from input locations
* to output locations. (Anthony Thyssen, elsewhere in resample.c,
* uses the notation (u,v) -> (x,y).)
*
* The Jacobian matrix of the transformation at (x,y) is equal to
*
* J = [ A, B ] = [ dX/dx, dX/dy ]
* [ C, D ] [ dY/dx, dY/dy ]
*
* that is, the vector [A,C] is the tangent vector corresponding to
* input changes in the horizontal direction, and the vector [B,D]
* is the tangent vector corresponding to input changes in the
* vertical direction.
*
* In the context of resampling, it is natural to use the inverse
* Jacobian matrix Jinv because resampling is generally performed by
* pulling pixel locations in the output image back to locations in
* the input image. Jinv is
*
* Jinv = [ a, b ] = [ dx/dX, dx/dY ]
* [ c, d ] [ dy/dX, dy/dY ]
*
* Note: Jinv can be computed from J with the following matrix
* formula:
*
* Jinv = 1/(A*D-B*C) [ D, -B ]
* [ -C, A ]
*
* What we do is modify Jinv so that it generates an ellipse which
* is as close as possible to the original but which contains the
* unit disk. This can be accomplished as follows:
*
* Let
*
* Jinv = U Sigma V^T
*
* be an SVD decomposition of Jinv. (The SVD is not unique, but the
* final ellipse does not depend on the particular SVD.)
*
* We could clamp up the entries of the diagonal matrix Sigma so
* that they are at least 1, and then set
*
* Jinv = U newSigma V^T.
*
* However, we do not need to compute V for the following reason:
* V^T is an orthogonal matrix (that is, it represents a combination
* of rotations and reflexions) so that it maps the unit circle to
* itself. For this reason, the exact value of V does not affect the
* final ellipse, and we can choose V to be the identity
* matrix. This gives
*
* Jinv = U newSigma.
*
* In the end, we return the two diagonal entries of newSigma
* together with the two columns of U.
*/
/*
* ClampUpAxes was written by Nicolas Robidoux and Chantal Racette
* of Laurentian University with insightful suggestions from Anthony
* Thyssen and funding from the National Science and Engineering
* Research Council of Canada. It is distinguished from its
* predecessors by its efficient handling of degenerate cases.
*
* The idea of clamping up the EWA ellipse's major and minor axes so
* that the result contains the reconstruction kernel filter support
* is taken from Andreas Gustaffson's Masters thesis "Interactive
* Image Warping", Helsinki University of Technology, Faculty of
* Information Technology, 59 pages, 1993 (see Section 3.6).
*
* The use of the SVD to clamp up the singular values of the
* Jacobian matrix of the pullback transformation for EWA resampling
* is taken from the astrophysicist Craig DeForest. It is
* implemented in his PDL::Transform code (PDL = Perl Data
* Language).
*/
const double a = dux;
const double b = duy;
const double c = dvx;
const double d = dvy;
/*
* n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the
* squares of the singular values of Jinv.
*/
const double aa = a*a;
const double bb = b*b;
const double cc = c*c;
const double dd = d*d;
/*
* Eigenvectors of n are left singular vectors of Jinv.
*/
const double n11 = aa+bb;
const double n12 = a*c+b*d;
const double n21 = n12;
const double n22 = cc+dd;
const double det = a*d-b*c;
const double twice_det = det+det;
const double frobenius_squared = n11+n22;
const double discriminant =
(frobenius_squared+twice_det)*(frobenius_squared-twice_det);
/*
* In exact arithmetic, discriminant can't be negative. In floating
* point, it can, because of the bad conditioning of SVD
* decompositions done through the associated normal matrix.
*/
const double sqrt_discriminant =
sqrt(discriminant > 0.0 ? discriminant : 0.0);
/*
* s1 is the largest singular value of the inverse Jacobian
* matrix. In other words, its reciprocal is the smallest singular
* value of the Jacobian matrix itself.
* If s1 = 0, both singular values are 0, and any orthogonal pair of
* left and right factors produces a singular decomposition of Jinv.
*/
/*
* Initially, we only compute the squares of the singular values.
*/
const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
/*
* s2 the smallest singular value of the inverse Jacobian
* matrix. Its reciprocal is the largest singular value of the
* Jacobian matrix itself.
*/
const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
const double s1s1minusn11 = s1s1-n11;
const double s1s1minusn22 = s1s1-n22;
/*
* u1, the first column of the U factor of a singular decomposition
* of Jinv, is a (non-normalized) left singular vector corresponding
* to s1. It has entries u11 and u21. We compute u1 from the fact
* that it is an eigenvector of n corresponding to the eigenvalue
* s1^2.
*/
const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
/*
* The following selects the largest row of n-s1^2 I as the one
* which is used to find the eigenvector. If both s1^2-n11 and
* s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case,
* any vector is an eigenvector; in addition, norm below is equal to
* zero, and, in exact arithmetic, this is the only case in which
* norm = 0. So, setting u1 to the simple but arbitrary vector [1,0]
* if norm = 0 safely takes care of all cases.
*/
const double temp_u11 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 );
const double temp_u21 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 );
const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
/*
* Finalize the entries of first left singular vector (associated
* with the largest singular value).
*/
const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
/*
* Clamp the singular values up to 1.
*/
*major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
*minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
/*
* Return the unit major and minor axis direction vectors.
*/
*major_unit_x = u11;
*major_unit_y = u21;
*minor_unit_x = -u21;
*minor_unit_y = u11;
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleResampleFilter() does all the calculations needed to resample an image
% at a specific scale, defined by two scaling vectors. This not using
% a orthogonal scaling, but two distorted scaling vectors, to allow the
% generation of a angled ellipse.
%
% As only two derivative scaling vectors are used the center of the ellipse
% must be the center of the lookup. That is any curvature that the
% distortion may produce is discounted.
%
% The input vectors are produced by either finding the derivatives of the
% distortion function, or the partial derivatives from a distortion mapping.
% They do not need to be the orthogonal dx,dy scaling vectors, but can be
% calculated from other derivatives. For example you could use dr,da/r
% polar coordinate vector scaling vectors
%
% If u,v = DistortEquation(x,y) OR u = Fu(x,y); v = Fv(x,y)
% Then the scaling vectors are determined from the derivatives...
% du/dx, dv/dx and du/dy, dv/dy
% If the resulting scaling vectors are orthogonally aligned then...
% dv/dx = 0 and du/dy = 0
% producing an orthogonally aligned ellipse in source space for the area to
% be resampled.
%
% Note that scaling vectors are different to argument order. Argument order
% is the general order the derivatives are extracted from the distortion
% equations, and not the scaling vectors. As such the middle two values
% may be swapped from what you expect. Caution is advised.
%
% WARNING: It is assumed that any SetResampleFilter() method call will
% always be performed before the ScaleResampleFilter() method, so that the
% size of the ellipse will match the support for the resampling filter being
% used.
%
% The format of the ScaleResampleFilter method is:
%
% void ScaleResampleFilter(const ResampleFilter *resample_filter,
% const double dux,const double duy,const double dvx,const double dvy)
%
% A description of each parameter follows:
%
% o resample_filter: the resampling resample_filterrmation defining the
% image being resampled
%
% o dux,duy,dvx,dvy:
% The deritives or scaling vectors defining the EWA ellipse.
% NOTE: watch the order, which is based on the order deritives
% are usally determined from distortion equations (see above).
% The middle two values may need to be swapped if you are thinking
% in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
  const double dux,const double duy,const double dvx,const double dvy)
{
  /* Coefficients of the implicit ellipse A*u^2 + B*u*v + C*v^2 = F that
     bounds the source pixels to be sampled. */
  double A,B,C,F;
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  resample_filter->limit_reached = MagickFalse;
  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( resample_filter->filter == PointFilter )
    return; /* EWA turned off - nothing to do */
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "# -----\n" );
  (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n",
      dux, dvx, duy, dvy);
#endif
  /* Find Ellipse Coefficients such that
       A*u^2 + B*u*v + C*v^2 = F
     With u,v relative to point around which we are resampling.
     And the given scaling dx,dy vectors in u,v space
       du/dx,dv/dx and du/dy,dv/dy
  */
#if EWA
  /* Direct conversion of derivatives into elliptical coefficients
     However when magnifying images, the scaling vectors will be small
     resulting in an ellipse that is too small to sample properly.
     As such we need to clamp the major/minor axis to a minimum of 1.0
     to prevent it getting too small.
  */
#if EWA_CLAMP
  { double major_mag,
      minor_mag,
      major_x,
      major_y,
      minor_x,
      minor_y;
  /* ClampUpAxes guarantees the ellipse is at least the unit circle;
     rescale the unit axis vectors by the clamped magnitudes. */
  ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
              &major_x, &major_y, &minor_x, &minor_y);
  major_x *= major_mag;  major_y *= major_mag;
  minor_x *= minor_mag;  minor_y *= minor_mag;
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n",
      major_x, major_y, minor_x, minor_y);
#endif
  A = major_y*major_y+minor_y*minor_y;
  B = -2.0*(major_x*major_y+minor_x*minor_y);
  C = major_x*major_x+minor_x*minor_x;
  /* F is the squared area factor: product of the axis half-lengths, squared */
  F = major_mag*minor_mag;
  F *= F; /* square it */
  }
#else /* raw unclamped EWA */
  A = dvx*dvx+dvy*dvy;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy;
  F = dux*dvy-duy*dvx;
  F *= F; /* square it */
#endif /* EWA_CLAMP */
#else /* HQ_EWA */
  /*
    This Paul Heckbert's "Higher Quality EWA" formula, from page 60 in his
    thesis, which adds a unit circle to the elliptical area so as to do both
    Reconstruction and Prefiltering of the pixels in the resampling. It also
    means it is always likely to have at least 4 pixels within the area of the
    ellipse, for weighted averaging. No scaling will result with F == 4.0 and
    a circle of radius 2.0, and F smaller than this means magnification is
    being used.
    NOTE: This method produces a very blurry result at near unity scale while
    producing perfect results for strong minification and magnifications.
    However filter support is fixed to 2.0 (no good for Windowed Sinc filters)
  */
  A = dvx*dvx+dvy*dvy+1;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy+1;
  F = A*C - B*B/4;
#endif
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);
  /* Figure out the various information directly about the ellipse.
     This information currently not needed at this time, but may be
     needed later for better limit determination.
     It is also good to have as a record for future debugging
  */
  { double alpha, beta, gamma, Major, Minor;
    double Eccentricity, Ellipse_Area, Ellipse_Angle;
    alpha = A+C;
    beta  = A-C;
    gamma = sqrt(beta*beta + B*B );
    /* guard against division by (near) zero for a degenerate ellipse */
    if ( alpha - gamma <= MagickEpsilon )
      Major= MagickMaximumValue;
    else
      Major= sqrt(2*F/(alpha - gamma));
    Minor = sqrt(2*F/(alpha + gamma));
    (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );
    /* other information about ellipse include... */
    Eccentricity = Major/Minor;
    Ellipse_Area = MagickPI*Major*Minor;
    Ellipse_Angle = atan2(B, A-C);
    (void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n",
        (double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
  }
#endif
  /* If one or both of the scaling vectors is impossibly large
     (producing a very large raw F value), we may as well not bother
     doing any form of resampling since resampled area is very large.
     In this case some alternative means of pixel sampling, such as
     the average of the whole image is needed to get a reasonable
     result. Calculate only as needed.
  */
  if ( (4*A*C - B*B) > MagickMaximumValue ) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }
  /* Scale ellipse to match the filters support
     (that is, multiply F by the square of the support)
     Simpler to just multiply it by the support twice!
  */
  F *= resample_filter->support;
  F *= resample_filter->support;
  /* Orthogonal bounds of the ellipse */
  resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
  resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));
  /* Horizontally aligned parallelogram fitted to Ellipse */
  resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
  resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
      resample_filter->Ulimit, resample_filter->Vlimit,
      resample_filter->Uwidth, resample_filter->slope );
#endif
  /* Check the absolute area of the parallelogram involved.
   * This limit needs more work, as it is too slow for larger images
   * with tiled views of the horizon.
  */
  if ( (resample_filter->Uwidth * resample_filter->Vlimit)
       > (4.0*resample_filter->image_area)) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }
  /* Scale ellipse formula to directly index the Filter Lookup Table */
  { register double scale;
#if FILTER_LUT
    /* scale so that F = WLUT_WIDTH; -- hardcoded */
    scale = (double)WLUT_WIDTH/F;
#else
    /* scale so that F = resample_filter->F (support^2) */
    scale = resample_filter->F/F;
#endif
    resample_filter->A = A*scale;
    resample_filter->B = B*scale;
    resample_filter->C = C*scale;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilter() sets the resampling filter lookup table based on a
% specific filter. Note that the filter is used as a radial filter, not as a
% two-pass orthogonally aligned resampling filter.
%
% The format of the SetResampleFilter method is:
%
% void SetResampleFilter(ResampleFilter *resample_filter,
% const FilterTypes filter,const double blur)
%
% A description of each parameter follows:
%
% o resample_filter: resampling resample_filterrmation structure
%
% o filter: the resize filter for elliptical weighting LUT
%
% o blur: filter blur factor (radial scaling) for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
  const FilterTypes filter,const double blur)
{
  /* Temporary radial filter used to fill the weighting LUT (or, without
     FILTER_LUT, stored on the resample filter for direct evaluation). */
  ResizeFilter
    *resize_filter;
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  resample_filter->do_interpolate = MagickFalse;
  resample_filter->filter = filter;
  /* Default cylindrical filter is a Cubic Keys filter */
  if ( filter == UndefinedFilter )
    resample_filter->filter = RobidouxFilter;
  if ( resample_filter->filter == PointFilter ) {
    resample_filter->do_interpolate = MagickTrue;
    return; /* EWA turned off - nothing more to do */
  }
  /* Acquire the resize filter in "cylindrical" mode (MagickTrue). */
  resize_filter = AcquireResizeFilter(resample_filter->image,
    resample_filter->filter,blur,MagickTrue,resample_filter->exception);
  if (resize_filter == (ResizeFilter *) NULL) {
    /* could not create the filter: degrade gracefully to interpolation */
    (void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
      ModuleError, "UnableToSetFilteringValue",
      "Fall back to Interpolated 'Point' filter");
    resample_filter->filter = PointFilter;
    resample_filter->do_interpolate = MagickTrue;
    return; /* EWA turned off - nothing more to do */
  }
  /* Get the practical working support for the filter,
   * after any API call blur factors have been accounted for.
   */
#if EWA
  resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
  resample_filter->support = 2.0; /* fixed support size for HQ-EWA */
#endif
#if FILTER_LUT
  /* Fill the LUT with the weights from the selected filter function */
  { register int
      Q;
    double
      r_scale;
    /* Scale radius so the filter LUT covers the full support range;
       the LUT is indexed by squared radius, hence the sqrt below. */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    for(Q=0; Q<WLUT_WIDTH; Q++)
      resample_filter->filter_lut[Q] = (double)
        GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);
    /* finished with the resize filter */
    resize_filter = DestroyResizeFilter(resize_filter);
  }
#else
  /* save the filter and the scaled ellipse bounds needed for filter */
  resample_filter->filter_def = resize_filter;
  resample_filter->F = resample_filter->support*resample_filter->support;
#endif
  /*
    Adjust the scaling of the default unit circle
    This assumes that any real scaling changes will always
    take place AFTER the filter method has been initialized.
  */
  ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);
#if 0
  /*
    This is old code kept as a reference only. Basically it generates
    a Gaussian bell curve, with sigma = 0.5 if the support is 2.0
    Create Normal Gaussian 2D Filter Weighted Lookup Table.
    A normal EWA gaussian lookup would use exp(Q*ALPHA)
    where Q = distance squared from 0.0 (center) to 1.0 (edge)
    and ALPHA = -4.0*ln(2.0) ==> -2.77258872223978123767
    The table is of length 1024, and equates to support radius of 2.0
    thus needs to be scaled by ALPHA*4/1024 and any blur factor squared
    The it comes from reference code provided by Fred Weinhaus.
  */
  r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
  for(Q=0; Q<WLUT_WIDTH; Q++)
    resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
  resample_filter->support = WLUT_WIDTH;
#endif
#if FILTER_LUT
/* NOTE(review): 'omp single' presumably guards the verbose debug output
   when this is reached from a parallel region -- confirm with callers. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp single
#endif
  {
    if (IsMagickTrue(GetImageArtifact(resample_filter->image,
        "resample:verbose")) )
      {
        register int
          Q;
        double
          r_scale;
        /* Debug output of the filter weighting LUT
           Gnuplot the LUT data, the x scale index has been adjusted
           plot [0:2][-.2:1] "lut.dat" with lines
           The filter values should be normalized for comparison
        */
        printf("#\n");
        printf("# Resampling Filter LUT (%d values) for '%s' filter\n",
            WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions,
            resample_filter->filter) );
        printf("#\n");
        printf("# Note: values in table are using a squared radius lookup.\n");
        printf("# As such its distribution is not uniform.\n");
        printf("#\n");
        printf("# The X value is the support distance for the Y weight\n");
        printf("# so you can use gnuplot to plot this cylindrical filter\n");
        printf("# plot [0:2][-.2:1] \"lut.dat\" with lines\n");
        printf("#\n");
        /* Scale radius so the filter LUT covers the full support range */
        r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
        for(Q=0; Q<WLUT_WIDTH; Q++)
          printf("%8.*g %.*g\n",
              GetMagickPrecision(),sqrt((double)Q)*r_scale,
              GetMagickPrecision(),resample_filter->filter_lut[Q] );
        printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */
      }
    /* Output the above once only for each image, and each setting
    (void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
    */
  }
#endif /* FILTER_LUT */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterInterpolateMethod() sets the resample filter interpolation
% method.
%
% The format of the SetResampleFilterInterpolateMethod method is:
%
% MagickBooleanType SetResampleFilterInterpolateMethod(
% ResampleFilter *resample_filter,const InterpolatePixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
  ResampleFilter *resample_filter,const InterpolatePixelMethod method)
{
  /* Record the interpolation method on the resample filter.  Always
     succeeds and returns MagickTrue. */
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  /* trace-log the image filename when debugging is enabled */
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->interpolate=method;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
% associated with the specified resample filter.
%
% The format of the SetResampleFilterVirtualPixelMethod method is:
%
% MagickBooleanType SetResampleFilterVirtualPixelMethod(
% ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
  ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
  /* Record the virtual pixel method and, for any defined method, also
     push it down to the underlying cache view.  Always succeeds and
     returns MagickTrue. */
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  /* trace-log the image filename when debugging is enabled */
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->virtual_pixel=method;
  if (method != UndefinedVirtualPixelMethod)
    (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
  return(MagickTrue);
}
|
graph_io_block.h | // Copyright 2016, National University of Defense Technology
// Authors: Xuhao Chen <cxh@illinois.edu> and Pingfan Li <lipingfan@163.com>
#include <vector>
#include <set>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string.h>
#include <algorithm>
//#include <iomanip>
// Edge weights are stored as single-precision floats.
typedef float WeightT;

// A weighted out-edge: destination vertex id plus edge weight.
struct Edge {
  int dst;
  WeightT wt;
};

//struct Block {
//	int block_col;
//	int block_row;
//};

// Strict-weak ordering of edges by destination id, used with std::sort
// to order neighbor lists.  'inline' makes this header-defined function
// safe to include from multiple translation units (avoids duplicate
// symbol / ODR link errors); const references avoid copying an Edge per
// comparison.
inline bool compare_id(const Edge &a, const Edge &b) { return (a.dst < b.dst); }
void fill_data_block(int m, int &nnz, int *&row_offsets, int *&column_indices, WeightT *&weight, vector<vector<Edge> > vertices, int *&block, int *&block_row, int *& row_start, int &num_block_all, bool symmetrize, bool sorted, bool remove_selfloops, bool remove_redundents) {
int height=2;
int width=2;
printf("going to fill data block1\n");
//sort the neighbor list
if(sorted) {
printf("Sorting the neighbor lists...");
for(int i = 0; i < m; i++) {
std::sort(vertices[i].begin(), vertices[i].end(), compare_id);
}
printf(" Done\n");
}
//remove self loops
int num_selfloops = 0;
if(remove_selfloops) {
printf("Removing self loops...");
for(int i = 0; i < m; i++) {
for(unsigned j = 0; j < vertices[i].size(); j ++) {
if(i == vertices[i][j].dst) {
vertices[i].erase(vertices[i].begin()+j);
num_selfloops ++;
j --;
}
}
}
printf(" %d selfloops are removed\n", num_selfloops);
}
// remove redundent
int num_redundents = 0;
if(remove_redundents) {
printf("Removing redundent edges...");
for (int i = 0; i < m; i++) {
for (unsigned j = 1; j < vertices[i].size(); j ++) {
if (vertices[i][j].dst == vertices[i][j-1].dst) {
vertices[i].erase(vertices[i].begin()+j);
num_redundents ++;
j --;
}
}
}
printf(" %d redundent edges are removed\n", num_redundents);
}
/*
// print some neighbor lists
for (int i = 0; i < 3; i++) {
cout << "src " << i << ": ";
for (int j = 0; j < vertices[i].size(); j ++)
cout << vertices[i][j].dst << " ";
cout << endl;
}
*/
row_offsets = (int *)malloc((m + height) * sizeof(int));
int count = 0;
for (int i = 0; i < m; i++) {
row_offsets[i] = count;
count += vertices[i].size();
}
row_offsets[m] = count;
if (symmetrize) {
if(count != nnz) {
nnz = count;
}
} else {
if (count + num_selfloops + num_redundents != nnz)
printf("Error reading graph, number of edges in edge list %d != %d\n", count, nnz);
nnz = count;
}
printf("num_vertices %d num_edges %d\n", m, nnz);
/*
double avgdeg;
double variance = 0.0;
int maxdeg = 0;
int mindeg = m;
avgdeg = (double)nnz / m;
for (int i = 0; i < m; i++) {
int deg_i = row_offsets[i + 1] - row_offsets[i];
if (deg_i > maxdeg)
maxdeg = deg_i;
if (deg_i < mindeg)
mindeg = deg_i;
variance += (deg_i - avgdeg) * (deg_i - avgdeg) / m;
}
printf("min_degree %d max_degree %d avg_degree %.2f variance %.2f\n", mindeg, maxdeg, avgdeg, variance);
*/
column_indices = (int *)malloc(count * sizeof(int));
weight = (WeightT *)malloc(count * sizeof(WeightT));
vector<Edge>::iterator neighbor_list;
for (int i = 0, index = 0; i < m; i++) {
neighbor_list = vertices[i].begin();
while (neighbor_list != vertices[i].end()) {
column_indices[index] = (*neighbor_list).dst;
weight[index] = (*neighbor_list).wt;
index ++;
neighbor_list ++;
}
}
printf("going to fill data block2\n");
//zy second version, not test yet
int num_rows = m;
printf("Error 0 !!\n");
// int num_block[num_rows/height];
int *num_block = NULL;
num_block = (int *)malloc(num_rows/height * sizeof(int));
//printf("Error 1 !!\n");
//vector<int> block_column[num_rows/height];
vector<vector<int> > block_column(num_rows/height);
//block.resize(50);
vector<int> v_block;
vector<int> v_block_row;
vector<int> v_row_start;
//vector<Block> block_all;
//int num_block_all;
//printf("Error 2 !!\n");
num_block_all = 0;
v_row_start.push_back(num_block_all);
//printf("Error 3 !!\n");
for(int i = 0; i < num_rows/height; i ++){
num_block[i] = row_offsets[2*i+height] - row_offsets[2*i];
// printf("num_block_1: %d\n", num_block[i]);
for(int offset = row_offsets[2*i]; offset < row_offsets[2*i+height]; offset ++){
//printf("Error 4 !!\n");
block_column[i].push_back(column_indices[offset]/width);
//printf("Error 5 !!\n");
for(int offset_1 = row_offsets[2*i]; offset_1 < offset; offset_1++){
if((column_indices[offset]/width) == (column_indices[offset_1]/width)){
//printf("Error 6 !!\n");
block_column[i].pop_back();
num_block[i]--;
//printf("Error 7 !!\n");
break;
}
}
// printf("going to fill data block2.5\n");
}
//for(int j = 0; j < 26; j ++)
// printf("column: %d\n", column_indices[j]);
//for(int j = 0; j < 10; j ++)
// printf("block_column: %d\n", block_column[i][j]);
//printf("num_block: %d\n", num_block[i]);
//break;
// printf("num_block_2: %d\n", num_block[i]);
num_block_all += num_block[i];
//printf("Error 8 !!\n");
// printf("%d\n", block_column[i].size());
if(!block_column[i].empty()){
//printf("Error 8.5 !!\n");
v_block.insert(v_block.end(), block_column[i].begin(), block_column[i].end());
}
if(v_block.size() != num_block_all)printf("Error wrong block num\n");
//printf("Error 9 !!\n");
//block_all[i].block_col = block_column[i];
if(num_block[i] != 0){
for(int j = 0; j < num_block[i]; j ++)
v_block_row.push_back(i);
}
// for(int j = 0; j < num_block[i]; j ++)
v_row_start.push_back(num_block_all);
if(v_block.size() != v_block_row.size())printf("Error wrong block row size\n");
// block_all[i].block_row = i;
//Block tmp;
//tmp.block_col = block_column[i].block_col;
//tmp.block_row = i;
//block_all.insert(block_all.end, );
//block_all[i].block_col = block_column[i].block_col;
//block_all.block_row.push_back(i);
//block_all.push_back(tmp);
}
for(int j = 0; j < 20; j ++)
printf("block_column: %d\n", v_block[j]);
//column indices of the block (after /2)
block = (int *)malloc(v_block.size() * sizeof(int));
for(unsigned int j = 0; j < v_block.size(); j ++)
block[j] = v_block[j];
//row number of the block (after /2)
block_row = (int *)malloc(v_block_row.size() * sizeof(int));
for(unsigned int j = 0; j < v_block_row.size(); j ++)
block_row[j] = v_block_row[j];
row_start = (int *)malloc(v_row_start.size() * sizeof(int));
//row_start = &(v_row_start[0]);
for(unsigned int j = 0; j < v_row_start.size(); j ++)
row_start[j] = v_row_start[j];
printf("going to fill data block3\n");
printf("num_block_all_1: %d\n", num_block_all);
for(int j = 0; j < 4; j ++){
printf("block_row: %d \n", block_row[j]);
}
//for(int j = 0; j < 12; j ++){
// printf("column indices: %d \n", column_indices[j]);
//}
for(int j = 0; j < 40; j ++){
printf("block: %d \n", block[j]);
}
free(num_block);
//zy fist version, not efficient, not test right or wrong
/* vector<int> row_offsets_block;
vector<int> column_block;
row_offsets_block[0] = 0;
for (int i = 1; i < (m+1)/2; i++)
row_offsets_block[i] = row_offsets_block[i-1] + row_offsets[2*i] - row_offsets[2*(i-1)];
for (int i = 0; i < (m+1)/2; i++) {
num = row_offsets[2*i+1] - row_offsets[2*i];
if(num > 0){
for (int offset = row_offsets[2*i]; offset < row_offsets[2*i+1]-1; offset++){
if(column[offset]/2 == column[offset+1]/2){
row_offsets_block[i+1]--;
column_block.push_back(column[offset]/2);
offset ++;
}
else column_block.push_back(column[offset]/2);
}
for (int offset = row_offsets[2*i+1]; offset < row_offsets[2*i+2]-1; offset++){
if(column[offset]/2 == column[offset+1]/2){
row_offsets_block[i+1]--;
column_block.push_back(column[offset]/2);
offset ++;
}
else column_block.push_back(column[offset]/2);
for (int offset_1 = row_offsets[2*i]; offset_1 < row_offsets[2*i+1]-1; offset_1++){
if(column[offset]/2 == column[offset_1]/2){
row_offsets_block[i+1]--;
column_block.erase(column[offset]/2);
}
}
}
}
row_offsets_block.push_back(row_offsets[i]);
if((i > 0) && (row_offsets[i] == row_offsets[i-1]))
row_offsets_block.erase(row_offsets[i]);
if((i%2) != 0)
column_indices[m]
}
*/
/*
// print some neighbor lists
for (int i = 0; i < 6; i++) {
int row_begin = row_offsets[i];
int row_end = row_offsets[i + 1];
cout << "src " << i << ": ";
for (int j = row_begin; j < row_end; j ++)
cout << column_indices[j] << " ";
cout << endl;
}
//*/
//for (int i = 0; i < 10; i++) cout << weight[i] << ", ";
//cout << endl;
}
// Convert per-vertex adjacency lists into CSR arrays.
// Outputs (allocated here, owned by the caller): row_offsets (m+1 ints),
// column_indices and weight (one entry per surviving edge).
// nnz is an in/out parameter: on exit it holds the final edge count after
// optional self-loop / duplicate removal.
// NOTE(review): `vertices` is passed by value, so the whole adjacency
// structure is copied on every call -- a const reference would avoid that,
// but the signature is kept unchanged here.
void fill_data(int m, int &nnz, int *&row_offsets, int *&column_indices, WeightT *&weight, vector<vector<Edge> > vertices, bool symmetrize, bool sorted, bool remove_selfloops, bool remove_redundents) {
//sort each neighbor list by destination id (duplicate removal below relies on this)
if(sorted) {
printf("Sorting the neighbor lists...");
for(int i = 0; i < m; i++) {
std::sort(vertices[i].begin(), vertices[i].end(), compare_id);
}
printf(" Done\n");
}
//remove self loops (edges i -> i)
int num_selfloops = 0;
if(remove_selfloops) {
printf("Removing self loops...");
for(int i = 0; i < m; i++) {
for(unsigned j = 0; j < vertices[i].size(); j ++) {
if(i == vertices[i][j].dst) {
// erase shifts the tail left; j-- re-examines the new element at j
vertices[i].erase(vertices[i].begin()+j);
num_selfloops ++;
j --;
}
}
}
printf(" %d selfloops are removed\n", num_selfloops);
}
// remove redundant (duplicate) edges; assumes lists are sorted so
// duplicates are adjacent -- only correct when `sorted` was true
int num_redundents = 0;
if(remove_redundents) {
printf("Removing redundent edges...");
for (int i = 0; i < m; i++) {
for (unsigned j = 1; j < vertices[i].size(); j ++) {
if (vertices[i][j].dst == vertices[i][j-1].dst) {
vertices[i].erase(vertices[i].begin()+j);
num_redundents ++;
j --;
}
}
}
printf(" %d redundent edges are removed\n", num_redundents);
}
/*
// print some neighbor lists
for (int i = 0; i < 3; i++) {
cout << "src " << i << ": ";
for (int j = 0; j < vertices[i].size(); j ++)
cout << vertices[i][j].dst << " ";
cout << endl;
}
*/
// build the row pointer array: prefix sums of the per-vertex degrees
row_offsets = (int *)malloc((m + 1) * sizeof(int));
int count = 0;
for (int i = 0; i < m; i++) {
row_offsets[i] = count;
count += vertices[i].size();
}
row_offsets[m] = count;
// reconcile the edge count: when symmetrizing the input nnz is no longer
// meaningful; otherwise the only allowed discrepancy is removed edges
if (symmetrize) {
if(count != nnz) {
nnz = count;
}
} else {
if (count + num_selfloops + num_redundents != nnz)
printf("Error reading graph, number of edges in edge list %d != %d\n", count, nnz);
nnz = count;
}
printf("num_vertices %d num_edges %d\n", m, nnz);
/*
double avgdeg;
double variance = 0.0;
int maxdeg = 0;
int mindeg = m;
avgdeg = (double)nnz / m;
for (int i = 0; i < m; i++) {
int deg_i = row_offsets[i + 1] - row_offsets[i];
if (deg_i > maxdeg)
maxdeg = deg_i;
if (deg_i < mindeg)
mindeg = deg_i;
variance += (deg_i - avgdeg) * (deg_i - avgdeg) / m;
}
printf("min_degree %d max_degree %d avg_degree %.2f variance %.2f\n", mindeg, maxdeg, avgdeg, variance);
*/
// flatten the adjacency lists into the column/weight arrays
column_indices = (int *)malloc(count * sizeof(int));
weight = (WeightT *)malloc(count * sizeof(WeightT));
vector<Edge>::iterator neighbor_list;
for (int i = 0, index = 0; i < m; i++) {
neighbor_list = vertices[i].begin();
while (neighbor_list != vertices[i].end()) {
column_indices[index] = (*neighbor_list).dst;
weight[index] = (*neighbor_list).wt;
index ++;
neighbor_list ++;
}
}
/*
// print some neighbor lists
for (int i = 0; i < 6; i++) {
int row_begin = row_offsets[i];
int row_end = row_offsets[i + 1];
cout << "src " << i << ": ";
for (int j = row_begin; j < row_end; j ++)
cout << column_indices[j] << " ";
cout << endl;
}
//*/
//for (int i = 0; i < 10; i++) cout << weight[i] << ", ";
//cout << endl;
}
// Transfer an R-MAT generated DIMACS-style .gr graph to CSR format.
// File layout: leading 'c' comment lines, one problem line "p <fmt> <m> <nnz>",
// then nnz arc lines "a <src> <dst>" with 1-based vertex ids.
// Fixes: the input stream was never closed (resource leak), and the problem
// line was parsed with an unbounded %s into a 3-byte buffer (overflow on
// malformed input) -- now bounded with %2s.
void gr2csr(char *gr, int &m, int &nnz, int *&row_offsets, int *&column_indices, WeightT *&weight, bool symmetrize, bool transpose, bool sorted, bool remove_selfloops, bool remove_redundents) {
	printf("Reading RMAT (.gr) input file %s\n", gr);
	std::ifstream cfile;
	cfile.open(gr);
	std::string str;
	getline(cfile, str);
	char c;
	sscanf(str.c_str(), "%c", &c);
	// skip the leading comment lines (they start with 'c')
	while (c == 'c') {
		getline(cfile, str);
		sscanf(str.c_str(), "%c", &c);
	}
	char sp[3];
	// %2s bounds the format token to sp's capacity (2 chars + NUL)
	sscanf(str.c_str(), "%c %2s %d %d", &c, sp, &m, &nnz);
	vector<vector<Edge> > vertices;
	vector<Edge> neighbors;
	for (int i = 0; i < m; i++)
		vertices.push_back(neighbors);
	int src, dst;
	for (int i = 0; i < nnz; i++) {
		getline(cfile, str);
		sscanf(str.c_str(), "%c %d %d", &c, &src, &dst);
		if (c != 'a')
			printf("line %d\n", __LINE__);
		src--;  // convert 1-based ids to 0-based
		dst--;
		Edge e1, e2;
		if(symmetrize) {
			// also insert the reverse edge; transposing is then redundant
			e2.dst = src; e2.wt = 1;
			vertices[dst].push_back(e2);
			transpose = false;
		}
		if(!transpose) {
			e1.dst = dst; e1.wt = 1;
			vertices[src].push_back(e1);
		} else {
			e1.dst = src; e1.wt = 1;
			vertices[dst].push_back(e1);
		}
	}
	cfile.close();  // was missing: release the stream (siblings graph2csr/mtx2csr both close)
	fill_data(m, nnz, row_offsets, column_indices, weight, vertices, symmetrize, sorted, remove_selfloops, remove_redundents);
}
// transfer *.graph file to CSR format
void graph2csr(char *graph, int &m, int &nnz, int *&row_offsets, int *&column_indices, WeightT *&weight, bool symmetrize, bool transpose, bool sorted, bool remove_selfloops, bool remove_redundents) {
printf("Reading .graph input file %s\n", graph);
std::ifstream cfile;
cfile.open(graph);
std::string str;
getline(cfile, str);
sscanf(str.c_str(), "%d %d", &m, &nnz);
vector<vector<Edge> > vertices;
vector<Edge> neighbors;
for (int i = 0; i < m; i++)
vertices.push_back(neighbors);
int dst;
for (int src = 0; src < m; src ++) {
getline(cfile, str);
istringstream istr;
istr.str(str);
while(istr>>dst) {
dst --;
Edge e1, e2;
if(symmetrize) {
e2.dst = src; e2.wt = 1;
vertices[dst].push_back(e2);
transpose = false;
}
if(!transpose) {
e1.dst = dst; e1.wt = 1;
vertices[src].push_back(e1);
} else {
e1.dst = src; e1.wt = 1;
vertices[dst].push_back(e1);
}
}
istr.clear();
}
cfile.close();
fill_data(m, nnz, row_offsets, column_indices, weight, vertices, symmetrize, sorted, remove_selfloops, remove_redundents);
}
// transfer mtx graph to CSR format
// MatrixMarket body: optional '%' comment lines, one "rows cols nnz" size
// line, then one "src dst [weight]" entry per line (1-based indices).
// Also emits the block-decomposition arrays (block, block_row, row_start,
// num_block_all) via fill_data_block.
void mtx2csr(char *mtx, int &m, int &n, int &nnz, int *&row_offsets, int *&column_indices, WeightT *&weight, int *&block, int *&block_row, int *&row_start, int &num_block_all, bool symmetrize, bool transpose, bool sorted, bool remove_selfloops, bool remove_redundents) {
printf("Reading (.mtx) input file %s\n", mtx);
std::ifstream cfile;
cfile.open(mtx);
std::string str;
getline(cfile, str);
char c;
sscanf(str.c_str(), "%c", &c);
// skip the '%' header/comment lines
while (c == '%') {
getline(cfile, str);
sscanf(str.c_str(), "%c", &c);
}
sscanf(str.c_str(), "%d %d %d", &m, &n, &nnz);
if (m != n) {
printf("Warning, m(%d) != n(%d)\n", m, n);
}
vector<vector<Edge> > vertices;
vector<Edge> neighbors;
for (int i = 0; i < m; i ++)
vertices.push_back(neighbors);
int dst, src;
WeightT wt = 1.0f;
for (int i = 0; i < nnz; i ++) {
getline(cfile, str);
// weight column is optional; default to 1 when only two fields parse
int num = sscanf(str.c_str(), "%d %d %f", &src, &dst, &wt);
if (num == 2) wt = 1;
if (wt < 0) wt = -wt; // non-negtive weight
src--;
dst--;
Edge e1, e2;
// skip the mirror edge for diagonal entries to avoid duplicates
if(symmetrize && src != dst) {
e2.dst = src; e2.wt = wt;
vertices[dst].push_back(e2);
transpose = false;
}
if(!transpose) {
e1.dst = dst; e1.wt = wt;
vertices[src].push_back(e1);
} else {
e1.dst = src; e1.wt = wt;
vertices[dst].push_back(e1);
}
}
cfile.close();
printf("going to fill data block\n");
fill_data_block(m, nnz, row_offsets, column_indices, weight, vertices, block, block_row, row_start, num_block_all, symmetrize, sorted, remove_selfloops, remove_redundents);
printf("num_block_all: %d\n", num_block_all);
}
/*
void sort_neighbors(int m, int *row_offsets, int *&column_indices) {
vector<int> neighbors;
#pragma omp parallel for
for(int i = 0; i < m; i++) {
int row_begin = row_offsets[i];
int row_end = row_offsets[i + 1];
for (int offset = row_begin; offset < row_end; ++ offset) {
neighbors.push_back(column_indices[offset]);
}
std::sort(neighbors.begin(), neighbors.end());
int k = 0;
for (int offset = row_begin; offset < row_end; ++ offset) {
column_indices[offset] = neighbors[k++];
}
}
}
*/
// Dispatch on the input file extension, build the CSR representation, and
// derive per-vertex degrees. The block/block_row/row_start outputs are only
// produced by the .mtx path.
// Fixes: (1) the trailing debug loop dereferenced block_row even for
// .graph/.gr inputs, where it is never assigned (undefined behavior);
// (2) the degree parameter's declaration was garbled by an encoding error
// ("int *°ree" -- an HTML-entity mangling of "int *&degree").
void read_graph(int argc, char *argv[], int &m, int &n, int &nnz, int *&row_offsets, int *&column_indices, int *&degree, WeightT *&weight, int *&block, int *&block_row, int *&row_start, int &num_block_all, bool is_symmetrize=false, bool is_transpose=false, bool sorted=true, bool remove_selfloops=true, bool remove_redundents=true) {
	//if(is_symmetrize) printf("Requiring symmetric graphs for this algorithm\n");
	bool have_blocks = false;  // true only when the .mtx reader filled the block arrays
	if (strstr(argv[1], ".mtx")) {
		mtx2csr(argv[1], m, n, nnz, row_offsets, column_indices, weight, block, block_row, row_start, num_block_all, is_symmetrize, is_transpose, sorted, remove_selfloops, remove_redundents);
		have_blocks = true;
	}
	else if (strstr(argv[1], ".graph"))
		graph2csr(argv[1], m, nnz, row_offsets, column_indices, weight, is_symmetrize, is_transpose, sorted, remove_selfloops, remove_redundents);
	else if (strstr(argv[1], ".gr"))
		gr2csr(argv[1], m, nnz, row_offsets, column_indices, weight, is_symmetrize, is_transpose, sorted, remove_selfloops, remove_redundents);
	else { printf("Unrecognizable input file format\n"); exit(0); }
	printf("Calculating degree...");
	degree = (int *)malloc(m * sizeof(int));
	for (int i = 0; i < m; i++) {
		degree[i] = row_offsets[i + 1] - row_offsets[i];
	}
	printf(" Done\n");
	// Debug dump of the first few block rows -- guarded so it only runs when
	// the .mtx path actually produced block_row.
	if (have_blocks) {
		for (int j = 0; j < 4; j ++) {
			printf("block_row: %d \n", block_row[j]);
		}
	}
	// for(int j = 50; j < 55; j ++){
	//	printf("block: %d \n", block[j]);
	// }
}
|
ScanSky_OpenMP.c | /*
* Contar cuerpos celestes
*
* Asignatura Computación Paralela (Grado Ingeniería Informática)
* Código secuencial base
*
* @author Ana Moretón Fernández
* @version v1.2
*
* (c) 2017, Grupo Trasgo, Universidad de Valladolid
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <time.h>
#include "cputils.h"
#include <omp.h>
/* Substituir min por el operador */
#define min(x,y) ((x) < (y)? (x) : (y))
/**
 * Relabel cell (x,y): fold the labels of its 4-connected neighbours that
 * share the same input value ("same body") into the cell's own label,
 * keeping the minimum. Background cells (label -1) are left untouched.
 *
 * @param x,y          cell coordinates (must not be on the outer border)
 * @param columns      row stride of both matrices
 * @param matrixData   input image values
 * @param matrixResult current labels, updated in place
 * @return 1 when the cell's label changed, 0 otherwise
 */
int computation(int x, int y, int columns, int* matrixData, int *matrixResult/*, int *matrixResultCopy*/){
	const int here = x * columns + y;
	int label = matrixResult[here];

	/* Background cell: nothing to propagate. */
	if (label == -1)
		return 0;

	/* Take the minimum label over each neighbour with the same data value. */
	if (matrixData[here - columns] == matrixData[here] && matrixResult[here - columns] < label)
		label = matrixResult[here - columns];      /* above */
	if (matrixData[here + columns] == matrixData[here] && matrixResult[here + columns] < label)
		label = matrixResult[here + columns];      /* below */
	if (matrixData[here - 1] == matrixData[here] && matrixResult[here - 1] < label)
		label = matrixResult[here - 1];            /* left */
	if (matrixData[here + 1] == matrixData[here] && matrixResult[here + 1] < label)
		label = matrixResult[here + 1];            /* right */

	/* Unchanged label means this cell is stable for the current sweep. */
	if (label == matrixResult[here])
		return 0;

	matrixResult[here] = label;  /* shrink the label and report progress */
	return 1;
}
/**
 * Main: label connected "celestial bodies" in an image and count them.
 * Reads the image from the file given in argv[1], iteratively propagates
 * minimum labels (OpenMP-parallel sweeps) until a fixed point, then counts
 * one block per surviving root label.
 *
 * Fix: the bottom border row was filled with index (columns-1)*columns+i
 * instead of (rows-1)*columns+i -- out-of-bounds writes whenever the image
 * is not square (rows < columns) and a wrong border row otherwise.
 */
int main (int argc, char* argv[])
{
	/* 1. Read argument and declarations */
	if (argc < 2) {
		printf("Uso: %s <imagen_a_procesar>\n", argv[0]);
		return(EXIT_SUCCESS);
	}
	char* image_filename = argv[1];
	int rows=-1;
	int columns =-1;
	int *matrixData=NULL;        /* input image (with a 1-cell zero border) */
	int *matrixResult=NULL;      /* current labels */
	int *matrixResultCopy=NULL;  /* snapshot of labels from the previous sweep */
	int numBlocks=-1;

	/* 2. Read the input file and initialize data */
	/* 2.1 Open the file */
	FILE *f = cp_abrir_fichero(image_filename);
	if (f==NULL)
	{
		perror ("Error al abrir fichero.txt");
		return -1;
	}

	/* 2.2 Read the image dimensions */
	int i,j,valor;
	fscanf (f, "%d\n", &rows);
	fscanf (f, "%d\n", &columns);
	/* Add two extra rows and columns for the zero border */
	rows=rows+2;
	columns = columns+2;

	/* 2.3 Allocate the data matrix */
	matrixData= (int *)malloc( rows*(columns) * sizeof(int) );
	if ( (matrixData == NULL) ) {
		perror ("Error reservando memoria");
		return -1;
	}

	/* 2.4 Initialize the matrix to -1 (unset) */
	for(i=0;i< rows; i++){
		for(j=0;j< columns; j++){
			matrixData[i*(columns)+j]=-1;
		}
	}

	/* 2.5 Fill the borders with 0 (background) */
	for(i=1;i<rows-1;i++){
		matrixData[i*(columns)+0]=0;          /* left column */
		matrixData[i*(columns)+columns-1]=0;  /* right column */
	}
	for(i=1;i<columns-1;i++){
		matrixData[0*(columns)+i]=0;          /* top row */
		/* BUGFIX: bottom row lives at (rows-1), not (columns-1) */
		matrixData[(rows-1)*(columns)+i]=0;
	}

	/* 2.6 Fill the interior with the file data */
	for(i=1;i<rows-1;i++){
		for(j=1;j<columns-1;j++){
			fscanf (f, "%d\n", &matrixData[i*(columns)+j]);
		}
	}
	fclose(f);

#ifdef WRITE
	printf("Inicializacion \n");
	for(i=0;i<rows;i++){
		for(j=0;j<columns;j++){
			printf ("%d\t", matrixData[i*(columns)+j]);
		}
		printf("\n");
	}
#endif

	/* START OF TIMED REGION */
	double t_ini = cp_Wtime();

	/* 3. Initial labeling: each foreground cell starts as its own label */
	matrixResult= (int *)malloc( (rows)*(columns) * sizeof(int) );
	matrixResultCopy= (int *)malloc( (rows)*(columns) * sizeof(int) );
	if ( (matrixResult == NULL) || (matrixResultCopy == NULL) ) {
		perror ("Error reservando memoria");
		return -1;
	}
#pragma omp parallel for shared(matrixResult, matrixData) private(i, j)
	for(i=0;i< rows; i++){
		for(j=0;j< columns; j++){
			matrixResult[i*(columns)+j]=-1;
			/* background (0) cells are not labeled */
			if(matrixData[i*(columns)+j]!=0){
				matrixResult[i*(columns)+j]=i*(columns)+j;
			}
		}
	}

	/* 4. Computation: sweep until no label changes */
	int t=0;
	int flagCambio=1;  /* number of label changes in the last sweep */
	for(t=0; flagCambio!=0; t++){
		flagCambio=0;

		/* 4.2.1 Snapshot the labels.
		 * NOTE(review): computation() currently reads/writes matrixResult
		 * directly (the copy parameter is commented out), so concurrent
		 * sweeps may observe partially-updated labels -- the fixed point is
		 * still reached, but confirm this relaxation is intended. */
#pragma omp parallel for shared (matrixResult, matrixResultCopy) private(i,j)
		for(i=1;i<rows-1;i++){
			for(j=1;j<columns-1;j++){
				//if(matrixResult[i*(columns)+j]!=-1){
				matrixResultCopy[i*(columns)+j]=matrixResult[i*(columns)+j];
				//}
			}
		}

		/* 4.2.2 Propagate labels; count how many cells changed */
#pragma omp parallel for shared (matrixData, matrixResult/*, matrixResultCopy*/) private(i,j) reduction(+:flagCambio)
		for(i=1;i<rows-1;i++){
			for(j=1;j<columns-1;j++){
				flagCambio= flagCambio+ computation(i,j,columns, matrixData, matrixResult/*, matrixResultCopy*/);
			}
		}

#ifdef DEBUG
		printf("\nResultados iter %d: \n", t);
		for(i=0;i<rows;i++){
			for(j=0;j<columns;j++){
				printf ("%d\t", matrixResult[i*columns+j]);
			}
			printf("\n");
		}
#endif
	}

	/* 4.3 Count blocks: one per cell that kept its own index as label */
	numBlocks=0;
#pragma omp parallel for shared(matrixResult) private(i, j) reduction(+:numBlocks)
	for(i=1;i<rows-1;i++){
		for(j=1;j<columns-1;j++){
			if(matrixResult[i*columns+j] == i*columns+j)
				numBlocks++;
		}
	}

	/* END OF TIMED REGION */
	double t_fin = cp_Wtime();

	/* 5. Report results */
	double t_total = (double)(t_fin - t_ini);
	printf("Result: %d\n", numBlocks);
	printf("Time: %lf\n", t_total);
#ifdef WRITE
	printf("Resultado: \n");
	for(i=0;i<rows;i++){
		for(j=0;j<columns;j++){
			printf ("%d\t", matrixResult[i*columns+j]);
		}
		printf("\n");
	}
#endif

	/* 6. Release memory */
	free(matrixData);
	free(matrixResult);
	free(matrixResultCopy);
}
|
openmp-ex31.c | /* Of course, rand_r() is thread safe */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Thread-safe demo helper: draws one value from the caller's private
 * rand_r() stream (advancing *seed in place) and reports it. */
void safe_one (unsigned int *seed)
{
	printf("This function does something thread safe, like calculating %d from rand_r.\n",
	       rand_r(seed));
}
/* Thread-safe demo helper: draws its own random value, lets safe_one()
 * consume the next one from the same private stream, then reports the
 * first value. */
void safe_two (unsigned int *seed)
{
	int first = rand_r(seed);
	safe_one(seed);
	printf("This function calculates another random number %d\n", first);
}
/* Entry point: spawn an OpenMP team; each thread owns a private rand_r()
 * seed (its thread id), so the random draws in safe_two()/safe_one() share
 * no state across threads. */
int main(void)
{
#pragma omp parallel
{
/* per-thread PRNG state, seeded with the thread number for a
 * distinct, reproducible sequence per thread */
unsigned int seed = omp_get_thread_num();
safe_two(&seed);
}
return 0;
}
|
valid.res7.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_256_14_14_128_3_3.h"
#include "gen_ukr_A4B2gemm_1_256_14_14_128_3_3.h"
// Auto-generated ("push button") convolution micro-kernel driver for a
// 1x256x14x14 / 128x3x3 layer. Phase 1: each thread repacks its slice of
// oriB into the blocked layout of B via 8x8 AVX transposes. Phase 2 (after
// the barrier): a generated loop nest tiles the GEMM-formulated convolution
// and dispatches 6x2v / 4x2v scatter micro-kernels.
// NOTE(review): uNf/uNc/uNw/uNh and the tile sizes Tf2/Tc1/Txy3 are project
// constants from the included headers; the offset arithmetic below is only
// valid for this exact layer shape -- do not edit by hand.
void testrun(float* A ,float*B, float*C, float*oriB ){
int tid = omp_get_thread_num();
int Nx = 14;
int Ny = 14;
int Nh = 3;
// per-row element strides used by the scatter micro-kernels
long long Astrides[6] = {0,2,4,6,8,10};
int b1 = 0;
// Phase 1: pack B -- (tid%1)/(tid/1) partition the work across threads
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
// all threads must finish packing before the compute loops read B
#pragma omp barrier// begin push button generated block
for(int c5=0;c5<128+0;c5+=128)
{
for(int f5=0;f5<256+0;f5+=256)
{
for(int xy5=0;xy5<196+0;xy5+=196)
{
for(int c4=c5;c4<min(128, 128+c5);c4+=128)
{
for(int f4=f5;f4<min(256, 256+f5);f4+=Tf2)
{
for(int xy4=xy5;xy4<min(196, 196+xy5);xy4+=196)
{
for(int c3=c4;c3<min(128, 128+c4);c3+=Tc1)
{
for(int xy3=xy4;xy3<min(196, 196+xy4);xy3+=Txy3)
{
for(int f3=f4;f3<min(256, Tf2+f4);f3+=Tf2)
{
for(int xy2=xy3;xy2<min(196, Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(256, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(128, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(128, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(196, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(256, 16+f2);f1+=16)
{
// innermost tile: recover (x,y), channel and filter indices from
// the flattened loop counters, then compute the A/B/C base offsets
int ctile=min(Tc1, 128-c1);
int x1=xy1/14;
int y1=xy1%14/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*115200+c1_1*900+2*x1*30+2*y1*1+c1_2*1;
int offsetB=0+kf1_1*18432+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*50176+of1_1*196+x1*14+y1*1+of1_2*1;
// full 6-row tile fits in the current image row
if(14-y1>=6){
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
// tile wraps onto the next image row: temporarily bump the strides
// of the wrapped rows, run the kernel, then restore them
else if(14*14-xy1>=6){
for(int sti=14-y1;sti<6;sti+=1)
{
Astrides[sti]+=32;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=14-y1;sti<6;sti+=1)
{
Astrides[sti]-=32;
}
}
// tail of the image: fall back to the narrower 4-row kernel
else{
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
} |
mlp_openmp.c | /**
* @file app.c
* @brief Template for a Host Application Source File.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include "../../support/timer.h"
#include "../../support/common.h"
#include <sys/mman.h>
#include <omp.h>
T** A;
T* B;
T* B2;
T* B3;
T* C;
// Create input arrays: fill each layer's weight matrix A[l] and the input
// vector B with a sparse, deterministic pattern -- roughly 2 of every 100
// A entries and 2 of every 50 B entries are nonzero (alternating 0/1),
// everything else is zero.
static void init_data(T** A, T* B, unsigned int m_size, unsigned int n_size){
	for (unsigned int layer = 0; layer < NUM_LAYERS; layer++) {
		for (unsigned int idx = 0; idx < m_size * n_size; idx++) {
			// only indices 98,99 mod 100 get a (layer+idx)-parity value
			A[layer][idx] = (idx % 100 < 98) ? 0 : (layer + idx) % 2;
		}
	}
	for (unsigned int idx = 0; idx < n_size; idx++) {
		// only indices 48,49 mod 50 get an idx-parity value
		B[idx] = (idx % 50 < 48) ? 0 : idx % 2;
	}
}
// Compute output in the host
// CPU forward pass over NUM_LAYERS layers using t OpenMP threads.
// Each layer: C = relu(A[nl] * B), then B <- C feeds the next layer.
// NOTE(review): the C -> B copy only covers n_size elements; callers pass
// matching sizes, but confirm n_size == m_size is not assumed elsewhere.
static void mlp_host(T* C, T** A, T* B, unsigned int m_size, unsigned int n_size, int t) {
for (unsigned int nl = 0; nl < NUM_LAYERS; nl++){
for (unsigned int m = 0; m < m_size; m++){
C[m] = 0;
}
omp_set_num_threads(t);
// rows of C are independent, so the outer loop parallelizes cleanly
#pragma omp parallel for
for (unsigned int m = 0; m < m_size; m++){
for (unsigned int n = 0; n < n_size; n++){
C[m] += A[nl][m * n_size + n] * B[n];
}
// ReLU activation (max macro from common.h)
C[m] = max(0, C[m]);
}
// the layer output becomes the next layer's input
for (unsigned int n = 0; n < n_size; n++){
B[n] = C[n];
}
}
}
// Compute output in the hwacha
// Same forward pass as mlp_host, but each dot product is delegated to the
// Hwacha vector unit via vec_vvmlp_asm, which returns partial products in
// `result` (the returned `size` is the number of partials to accumulate --
// presumably the vector length; confirm against the asm implementation).
static void mlp_hwacha(T* C, T** A, T* B, unsigned int m_size, unsigned int n_size) {
for (unsigned int nl = 0; nl < NUM_LAYERS; nl++){
for (unsigned int m = 0; m < m_size; m++){
C[m] = 0;
}
for (unsigned int m = 0; m < m_size; m++) {
// scratch buffer for the vector unit's partial results
T * result = (T *)calloc(n_size, sizeof(T));
int size = vec_vvmlp_asm(n_size, result, &A[nl][m * n_size], B);
T count_hwacha = 0;
for (int j = 0; j < size; j++) {
count_hwacha += result[j];
}
// ReLU activation (max macro from common.h)
C[m] = max(0, count_hwacha);
free(result);
}
// vectorized B <- C copy feeds the next layer
vec_vvcopy_asm(n_size, B, C);
}
}
// Sum the first n_size elements of the global output vector B.
// NOTE(review): the m_size parameter is unused and the loop variable is
// (confusingly) named m while bounded by n_size -- confirm the intended
// bound before relying on this for non-square sizes.
static uint64_t mlp_host_sum(uint64_t n_size, uint64_t m_size) {
uint64_t sum = 0;
for (uint64_t m = 0; m < n_size; m++){
sum += B[m];
}
return sum;
}
// Params ---------------------------------------------------------------------
// Command-line configuration parsed by input_params().
typedef struct Params {
char* dpu_type;     // DPU target type string (default "fsim")
int nr_of_ranks;    // number of DPU ranks; must be > 0
int input_size_n;   // input dimension n (default 1<<9)
int input_size_m;   // input dimension m (default 1<<9)
int n_warmup;       // warm-up repetitions before timing
int n_reps;         // timed repetitions
}Params;
/* Print command-line help to stderr.
 * NOTE(review): input_params() also has cases for -n and -m (input
 * dimensions) that are not listed here -- consider documenting them once
 * the option parsing accepts them. */
void usage() {
fprintf(stderr,
"\nUsage: ./program [options]"
"\n"
"\nGeneral options:"
"\n -h help"
"\n -d <D> DPU type (default=fsim)"
"\n -r <R> # of ranks (default=2)"
"\n"
"\nBenchmark-specific options:"
"\n -i <I> input size (default=8M elements)"
"\n");
}
/* Parse command-line options into a Params struct.
 * Defaults: fsim DPU, 1 rank, 512x512 input, 2 warm-ups, 3 reps.
 * Fix: the getopt option string was "hd:r:i:", so the existing switch
 * cases for 'n' and 'm' were unreachable and -n/-m were always rejected;
 * "n:" and "m:" are now included ( -i is still accepted syntactically and
 * still falls through to the unrecognized-option branch, as before). */
struct Params input_params(int argc, char **argv) {
	struct Params p;
	p.dpu_type = "fsim";
	p.nr_of_ranks = 1;
	p.input_size_n = 1 << 9;
	p.input_size_m = 1 << 9;
	p.n_warmup = 2;
	p.n_reps = 3;

	int opt;
	while((opt = getopt(argc, argv, "hd:r:i:n:m:")) >= 0) {
		switch(opt) {
		case 'h':printf("\n");
			usage();
			exit(0);
			break;
		case 'd': p.dpu_type = optarg; break;
		case 'r': p.nr_of_ranks = atoi(optarg); break;
		case 'n': p.input_size_n = atoi(optarg); break;
		case 'm': p.input_size_m = atoi(optarg); break;
		default:
			fprintf(stderr, "\nUnrecognized option!\n");
			usage();
			exit(0);
		}
	}
	assert(p.nr_of_ranks > 0 && "Invalid # of ranks!");
	return p;
}
/**
 * @brief Main of the Host Application.
 *
 * Runs the same MLP forward pass three times -- single-threaded CPU,
 * Hwacha vector unit, and 4-thread CPU -- on identically initialized
 * inputs (B, B2, B3), then compares the three output vectors and reports
 * timings.
 */
int main(int argc, char **argv) {
// pin all pages to avoid page faults perturbing the timing runs
if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
perror("mlockall failed:");
return 0;
}
struct Params p = input_params(argc, argv);
// NOTE(review): the parsed sizes in p are ignored; the benchmark uses the
// hard-coded dimensions below -- confirm whether p should drive them.
uint64_t n_size = 8192;
uint64_t m_size = 20480;
//uint64_t n_size = 20000;
//uint64_t m_size = 20000;
Timer timer;
A = malloc(NUM_LAYERS * sizeof(T*));
for(int l = 0; l < NUM_LAYERS; l++)
A[l] = malloc(n_size*m_size*sizeof(unsigned int));
B = malloc(m_size*sizeof(unsigned int));
C = malloc(m_size*sizeof(unsigned int));
// Create an input file with arbitrary data.
init_data(A, B, m_size, n_size);
start(&timer, 0, 0);
// NOTE(review): arguments are (n_size, m_size) while mlp_host's parameters
// are named (m_size, n_size) -- apparently intentional, but confirm.
mlp_host(C, A, B, n_size, m_size, 1);
stop(&timer, 0);
// NOTE(review): freeing A here without freeing each A[l] leaks the
// per-layer buffers (same below before the third run).
free(A);
free(C);
A = malloc(NUM_LAYERS * sizeof(T*));
for(int l = 0; l < NUM_LAYERS; l++)
A[l] = malloc(n_size*m_size*sizeof(unsigned int));
B2 = malloc(m_size*sizeof(unsigned int));
C = malloc(m_size*sizeof(unsigned int));
// Create an input file with arbitrary data.
init_data(A, B2, m_size, n_size);
start(&timer, 1, 0);
mlp_hwacha(C, A, B2, n_size, m_size);
stop(&timer, 1);
free(A);
free(C);
A = malloc(NUM_LAYERS * sizeof(T*));
for(int l = 0; l < NUM_LAYERS; l++)
A[l] = malloc(n_size*m_size*sizeof(unsigned int));
B3 = malloc(m_size*sizeof(unsigned int));
C = malloc(m_size*sizeof(unsigned int));
// Create an input file with arbitrary data.
init_data(A, B3, m_size, n_size);
start(&timer, 2, 0);
mlp_host(C, A, B3, n_size, m_size, 4);
stop(&timer, 2);
// compare the three result vectors element-wise; stop at first mismatch
bool correct = true;
bool hwacha_wrong = false;
for (int i = 0; i < n_size; i++) {
if (B[i] != B2[i] || B[i] != B3[i]) {
correct = false;
if (B[i] != B2[i]) hwacha_wrong = true;
break;
}
}
printf("******************************\n");
if (correct) {
printf("Hwacha works correctly.\n");
} else if (hwacha_wrong) {
printf("\n*** Hwacha outputs wrong result! ***\n");
printf("<<< Results >>>\n");
for (int i = 0; i < n_size; i++) {
printf("%d : %d %d\n", i, B[i], B2[i]);
}
printf("\n");
} else {
printf("Hwacha works well, but multi-threading doesn't.\n");
}
printf("******************************\n");
printf("CPU ");
print(&timer, 0, 1);
printf("\n");
printf("Hwacha ");
print(&timer, 1, 1);
printf("\n");
printf("4 threads ");
print(&timer, 2, 1);
printf("\n");
for(int l = 0; l < NUM_LAYERS; l++)
free(A[l]);
free(A);
free(B);
free(B2);
free(B3);
free(C);
return 0;
}
|
GB_unaryop__identity_uint64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint64_int64
// op(A') function: GB_tran__identity_uint64_int64
// C type: uint64_t
// A type: int64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
// Element-wise identity with int64_t -> uint64_t cast; iterations are
// independent, so the loop parallelizes with a static OpenMP schedule.
// Auto-generated -- do not edit by hand.
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_uint64_int64
(
uint64_t *Cx, // Cx and Ax may be aliased
int64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// loop index declared outside the pragma; signed 64-bit as OpenMP requires
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
// The actual work is in the shared template GB_unaryop_transpose.c, which
// expands using the GB_* macros defined above. Auto-generated -- do not edit.
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_uint64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test_nvector_openmpdev.c | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the testing routine to check the OpenMP 4.5 NVECTOR
* module implementation.
* -----------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <sundials/sundials_types.h>
#include <nvector/nvector_openmpdev.h>
#include <sundials/sundials_math.h>
#include "test_nvector.h"
#include <omp.h>
/* OpenMPDEV vector specific tests */
int Test_N_VMake_OpenMPDEV(N_Vector X, sunindextype length, int myid);
/* ----------------------------------------------------------------------
 * Main NVector Testing Routine
 * Usage: <prog> <vector length> <print timing (0|1)>
 * Runs the standard, fused, vector-array and local-reduction test suites
 * against the OpenMP-device N_Vector implementation and returns the
 * number of failed tests.
 * --------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
int fails = 0; /* counter for test failures */
int retval; /* function return value */
sunindextype length; /* vector length */
N_Vector U, V, W, X, Y, Z; /* test vectors */
int print_timing; /* turn timing on/off */
/* check input and set vector length */
if (argc < 3){
printf("ERROR: TWO (2) Inputs required: vector length and print timing \n");
return(-1);
}
length = (sunindextype) atol(argv[1]);
if (length <= 0) {
printf("ERROR: length of vector must be a positive integer \n");
return(-1);
}
print_timing = atoi(argv[2]);
SetTiming(print_timing, 0);
printf("Testing the OpenMP DEV N_Vector \n");
printf("Vector length %ld \n", (long int) length);
/* report the OpenMP device configuration being exercised */
printf("\n omp_get_default_device = %d \n", omp_get_default_device());
printf("\n omp_get_num_devices = %d \n", omp_get_num_devices());
printf("\n omp_get_initial_device = %d \n", omp_get_initial_device());
printf("\n omp_is_initial_device = %d \n", omp_is_initial_device());
/* Create new vectors */
W = N_VNewEmpty_OpenMPDEV(length);
if (W == NULL) {
printf("FAIL: Unable to create a new empty vector \n\n");
return(1);
}
X = N_VNew_OpenMPDEV(length);
if (X == NULL) {
N_VDestroy(W);
printf("FAIL: Unable to create a new vector \n\n");
return(1);
}
/* Check vector ID */
fails += Test_N_VGetVectorID(X, SUNDIALS_NVEC_OPENMPDEV, 0);
/* Check vector length */
fails += Test_N_VGetLength(X, 0);
/* Check vector communicator */
fails += Test_N_VGetCommunicator(X, NULL, 0);
/* Test clone functions */
fails += Test_N_VCloneEmpty(X, 0);
fails += Test_N_VClone(X, length, 0);
fails += Test_N_VCloneEmptyVectorArray(5, X, 0);
fails += Test_N_VCloneVectorArray(5, X, length, 0);
/* Clone additional vectors for testing */
Y = N_VClone(X);
if (Y == NULL) {
N_VDestroy(W);
N_VDestroy(X);
printf("FAIL: Unable to create a new vector \n\n");
return(1);
}
Z = N_VClone(X);
if (Z == NULL) {
N_VDestroy(W);
N_VDestroy(X);
N_VDestroy(Y);
printf("FAIL: Unable to create a new vector \n\n");
return(1);
}
/* Standard vector operation tests */
printf("\nTesting standard vector operations:\n\n");
fails += Test_N_VConst(X, length, 0);
fails += Test_N_VLinearSum(X, Y, Z, length, 0);
fails += Test_N_VProd(X, Y, Z, length, 0);
fails += Test_N_VDiv(X, Y, Z, length, 0);
fails += Test_N_VScale(X, Z, length, 0);
fails += Test_N_VAbs(X, Z, length, 0);
fails += Test_N_VInv(X, Z, length, 0);
fails += Test_N_VAddConst(X, Z, length, 0);
fails += Test_N_VDotProd(X, Y, length, 0);
fails += Test_N_VMaxNorm(X, length, 0);
fails += Test_N_VWrmsNorm(X, Y, length, 0);
fails += Test_N_VWrmsNormMask(X, Y, Z, length, 0);
fails += Test_N_VMin(X, length, 0);
fails += Test_N_VWL2Norm(X, Y, length, 0);
fails += Test_N_VL1Norm(X, length, 0);
fails += Test_N_VCompare(X, Z, length, 0);
fails += Test_N_VInvTest(X, Z, length, 0);
fails += Test_N_VConstrMask(X, Y, Z, length, 0);
fails += Test_N_VMinQuotient(X, Y, length, 0);
/* Fused and vector array operations tests (disabled) */
printf("\nTesting fused and vector array operations (disabled):\n\n");
/* create vector and disable all fused and vector array operations */
U = N_VNew_OpenMPDEV(length);
retval = N_VEnableFusedOps_OpenMPDEV(U, SUNFALSE);
if (U == NULL || retval != 0) {
N_VDestroy(W);
N_VDestroy(X);
N_VDestroy(Y);
N_VDestroy(Z);
printf("FAIL: Unable to create a new vector \n\n");
return(1);
}
/* fused operations */
fails += Test_N_VLinearCombination(U, length, 0);
fails += Test_N_VScaleAddMulti(U, length, 0);
fails += Test_N_VDotProdMulti(U, length, 0);
/* vector array operations */
fails += Test_N_VLinearSumVectorArray(U, length, 0);
fails += Test_N_VScaleVectorArray(U, length, 0);
fails += Test_N_VConstVectorArray(U, length, 0);
fails += Test_N_VWrmsNormVectorArray(U, length, 0);
fails += Test_N_VWrmsNormMaskVectorArray(U, length, 0);
fails += Test_N_VScaleAddMultiVectorArray(U, length, 0);
fails += Test_N_VLinearCombinationVectorArray(U, length, 0);
/* Fused and vector array operations tests (enabled) */
printf("\nTesting fused and vector array operations (enabled):\n\n");
/* create vector and enable all fused and vector array operations */
V = N_VNew_OpenMPDEV(length);
retval = N_VEnableFusedOps_OpenMPDEV(V, SUNTRUE);
if (V == NULL || retval != 0) {
N_VDestroy(W);
N_VDestroy(X);
N_VDestroy(Y);
N_VDestroy(Z);
N_VDestroy(U);
printf("FAIL: Unable to create a new vector \n\n");
return(1);
}
/* fused operations */
fails += Test_N_VLinearCombination(V, length, 0);
fails += Test_N_VScaleAddMulti(V, length, 0);
fails += Test_N_VDotProdMulti(V, length, 0);
/* vector array operations */
fails += Test_N_VLinearSumVectorArray(V, length, 0);
fails += Test_N_VScaleVectorArray(V, length, 0);
fails += Test_N_VConstVectorArray(V, length, 0);
fails += Test_N_VWrmsNormVectorArray(V, length, 0);
fails += Test_N_VWrmsNormMaskVectorArray(V, length, 0);
fails += Test_N_VScaleAddMultiVectorArray(V, length, 0);
fails += Test_N_VLinearCombinationVectorArray(V, length, 0);
/* local reduction operations */
printf("\nTesting local reduction operations:\n\n");
fails += Test_N_VDotProdLocal(X, Y, length, 0);
fails += Test_N_VMaxNormLocal(X, length, 0);
fails += Test_N_VMinLocal(X, length, 0);
fails += Test_N_VL1NormLocal(X, length, 0);
fails += Test_N_VWSqrSumLocal(X, Y, length, 0);
fails += Test_N_VWSqrSumMaskLocal(X, Y, Z, length, 0);
fails += Test_N_VInvTestLocal(X, Z, length, 0);
fails += Test_N_VConstrMaskLocal(X, Y, Z, length, 0);
fails += Test_N_VMinQuotientLocal(X, Y, length, 0);
/* Free vectors */
N_VDestroy(U);
N_VDestroy(V);
N_VDestroy(W);
N_VDestroy(X);
N_VDestroy(Y);
N_VDestroy(Z);
/* Print result */
if (fails) {
printf("FAIL: NVector module failed %i tests \n\n", fails);
} else {
printf("SUCCESS: NVector module passed all tests \n\n");
}
return(fails);
}
/* ----------------------------------------------------------------------
* OpenMPDEV specific tests
* --------------------------------------------------------------------*/
/* --------------------------------------------------------------------
* Test for the CUDA N_Vector N_VMake_OpenMPDEV function. Requires N_VConst
* to check data.
*/
/* Tests N_VMake_OpenMPDEV: wrapping existing host/device arrays must yield a
 * working vector (Case 1), and passing NULL data must fail (Case 2).
 * Returns 0 on success, nonzero on failure. */
int Test_N_VMake_OpenMPDEV(N_Vector X, sunindextype length, int myid)
{
  int failure = 0;
  realtype *h_data, *d_data;
  N_Vector Y;

  /* Fill X with a known constant so the wrapped vector can be verified. */
  N_VConst(NEG_HALF, X);
  N_VCopyFromDevice_OpenMPDEV(X);

  h_data = N_VGetHostArrayPointer_OpenMPDEV(X);
  d_data = N_VGetDeviceArrayPointer_OpenMPDEV(X);

  /* Case 1: h_data and d_data are not null */
  Y = N_VMake_OpenMPDEV(length, h_data, d_data);

  if (Y == NULL) {
    printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid);
    printf("    Vector is NULL \n \n");
    return(1);
  }

  if (N_VGetHostArrayPointer_OpenMPDEV(Y) == NULL) {
    printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid);
    printf("    Vector host data == NULL \n \n");
    N_VDestroy(Y);
    return(1);
  }

  if (N_VGetDeviceArrayPointer_OpenMPDEV(Y) == NULL) {
    printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid);
    /* was "data -= NULL" -- typo in the original diagnostic */
    printf("    Vector device data == NULL \n \n");
    N_VDestroy(Y);
    return(1);
  }

  /* Y shares X's storage, so it must contain NEG_HALF everywhere. */
  failure += check_ans(NEG_HALF, Y, length);

  if (failure) {
    printf(">>> FAILED test -- N_VMake_OpenMPDEV Case 1, Proc %d \n", myid);
    printf("    Failed N_VConst check \n \n");
    N_VDestroy(Y);
    return(1);
  }

  if (myid == 0) {
    printf("PASSED test -- N_VMake_OpenMPDEV Case 1 \n");
  }

  N_VDestroy(Y);

  /* Case 2: data is null -- construction must fail */
  Y = N_VMake_OpenMPDEV(length, NULL, NULL);
  if (Y != NULL) {
    printf(">>> FAILED test -- N_VMake_OpenMPDEV Case 2, Proc %d \n", myid);
    printf("    Vector is not NULL \n \n");
    N_VDestroy(Y);
    return(1);
  }

  if (myid == 0) {
    printf("PASSED test -- N_VMake_OpenMPDEV Case 2 \n");
  }

  /* Y is NULL here; the original called N_VDestroy(Y) on it (dead code). */
  return(failure);
}
/* ----------------------------------------------------------------------
* Implementation specific utility functions for vector tests
* --------------------------------------------------------------------*/
/* Compares every element of X (after syncing device data to the host)
 * against the expected value `ans`.  Returns 0 if all match, 1 otherwise. */
int check_ans(realtype ans, N_Vector X, sunindextype local_length)
{
  sunindextype i = 0;
  int mismatches = 0;
  realtype *data;

  N_VCopyFromDevice_OpenMPDEV(X);
  data = N_VGetHostArrayPointer_OpenMPDEV(X);

  /* count elements that differ from the expected answer */
  while (i < local_length) {
    mismatches += FNEQ(data[i], ans);
    ++i;
  }

  return (mismatches > ZERO) ? (1) : (0);
}
/* Returns SUNTRUE iff X has a non-NULL host data array. */
booleantype has_data(N_Vector X)
{
  return (N_VGetHostArrayPointer_OpenMPDEV(X) != NULL) ? SUNTRUE : SUNFALSE;
}
/* Convenience wrapper: set the single element i of X to val (on the device)
 * by delegating to the range version with a one-element range. */
void set_element(N_Vector X, sunindextype i, realtype val)
{
  set_element_range(X, i, i, val);
}
/* Sets elements [is, ie] (inclusive) of X's device array to val, running the
 * assignment loop on the offload device. */
void set_element_range(N_Vector X, sunindextype is, sunindextype ie,
                       realtype val)
{
  realtype *xdev;
  int dev;
  sunindextype i;

  xdev = N_VGetDeviceArrayPointer_OpenMPDEV(X);
  dev  = omp_get_default_device();

  /* set elements [is,ie] of the data array.
   * NOTE: an OpenMP loop construct must be associated directly with the
   * for statement; the original wrapped the loop in a compound block
   * ("{ for ... }"), which is not a canonical loop form and does not
   * compile under OpenMP. */
#pragma omp target map(to:is,ie,val) is_device_ptr(xdev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = is; i <= ie; i++) xdev[i] = val;
}
/* Reads element i of X: syncs the device data to the host first, then
 * indexes the host array. */
realtype get_element(N_Vector X, sunindextype i)
{
  N_VCopyFromDevice_OpenMPDEV(X);
  return N_VGetHostArrayPointer_OpenMPDEV(X)[i];
}
/* Serial build: the "maximum over all ranks" of a timing is simply the
 * caller's own time, returned unchanged. */
double max_time(N_Vector X, double time)
{
  return time;
}
/* Host-only build: there is no device queue to flush, so this is a no-op. */
void sync_device()
{
}
|
GB_unop__identity_uint16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_int16)
// op(A') function: GB (_unop_tran__identity_uint16_int16)
// C type: uint16_t
// A type: int16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Applies the identity operator elementwise: Cx[p] = (uint16_t) Ax[p].
 * Auto-generated kernel -- code intentionally left untouched. */
GrB_Info GB (_unop_apply__identity_uint16_int16)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // OpenMP thread count for the loops below
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every slot 0..anz-1 holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip slots whose bitmap flag marks them as empty
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = identity((uint16_t) A'): transpose, typecast, and apply the operator.
 * The actual work is in the shared template GB_unop_transpose.c, driven by
 * the GB_* macros defined above.  Auto-generated kernel. */
GrB_Info GB (_unop_tran__identity_uint16_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces
    const int64_t *restrict A_slice,    // partition of A across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__identity_int16_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int16_int8
// op(A') function: GB_tran__identity_int16_int8
// C type: int16_t
// A type: int8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Applies the identity operator elementwise: Cx[p] = (int16_t) Ax[p],
 * via the GB_CAST_OP macro defined above.  Auto-generated kernel. */
GrB_Info GB_unop__identity_int16_int8
(
    int16_t *Cx,        // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // OpenMP thread count
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = identity((int16_t) A'): transpose, typecast, and apply the operator.
 * Work is done by the shared template GB_unaryop_transpose.c (phase 2),
 * parameterized by the GB_* macros above.  Auto-generated kernel. */
GrB_Info GB_tran__identity_int16_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice, // partition of A across threads
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
return.c | // return.c
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <omp.h>
#include "fractal.h"
#include "init.h"
//#include "iteration.h"
#include "return.h"
#include "MT.h"
#define LOOP 50000000
#define LIMIT 10000000
#define STEPS 1000000
/*
 * Random-walk return-time experiment on a fractal lattice.
 *
 * Runs LOOP independent random walks starting at the lattice origin and
 * tallies, in return_times[k], how many walks are back at the origin after
 * exactly k+1 counted moves (a move counts only when it lands on a fractal
 * cell).  Prints the return fraction at logarithmically spaced step counts
 * and writes wall-clock timing to a log file.
 *
 * FRACTAL_stage_num is currently unused; kept for interface compatibility.
 */
void FRACTAL_return (int FRACTAL_stage_num, int num, FRACTAL fractal) {
  const int origin=(num/3)-1;        /* start coordinate (both axes) */
  int m, n;
  int diff_x=origin, diff_y=origin;  /* walker position */
  int return_times[STEPS];
  int r, iteration, max=10;
  FILE *time_log;
  time_t time_s, time_g;
  time(&time_s);
  init_genrand((unsigned)time(NULL)); /* seed the Mersenne Twister */
  for (n=0; n<STEPS; n++) return_times[n] = 0;
  //#pragma omp parallel for private(n)
  for (m=0; m<LOOP; m++) {
    diff_x = origin;
    diff_y = origin;
    iteration = 0;
    // random walk: a step is taken (and counted) only when the target cell
    // is inside the lattice and belongs to the fractal (init[][] != 0)
    for (n=0; n<LIMIT; n++) {
      r = genrand_int31();
      switch (r%4) {
        case 0:
          // right_0
          if (diff_x!=(num-1) && fractal.init[diff_y][diff_x+1]!=0) {
            diff_x += 1; iteration++;
            if (diff_x==origin && diff_y==origin) return_times[iteration-1]++;
          }
          break;
        case 1:
          // up_1
          if (diff_y!=0 && fractal.init[diff_y-1][diff_x]!=0) {
            diff_y -= 1; iteration++;
            if (diff_x==origin && diff_y==origin) return_times[iteration-1]++;
          }
          break;
        case 2:
          // left_2
          if (diff_x!=0 && fractal.init[diff_y][diff_x-1]!=0) {
            diff_x -= 1; iteration++;
            if (diff_x==origin && diff_y==origin) return_times[iteration-1]++;
          }
          break;
        case 3:
          // down_3
          if (diff_y!=(num-1) && fractal.init[diff_y+1][diff_x]!=0) {
            diff_y += 1; iteration++;
            if (diff_x==origin && diff_y==origin) return_times[iteration-1]++;
          }
          break;
      }
      if (iteration==STEPS) break;  /* counted-move budget exhausted */
    }
  }
  /* print at step counts 10, 50, 250, ... (x5 each time) */
  for (n=0; n<STEPS; n++) {
    if ((n+1)%max == 0) {
      printf("%10d %15.10f %% %10d\n", n+1, 100*(double)(return_times[n])/LOOP, return_times[n]);
      max *= 5;
    }
  }
  // time
  time(&time_g);
  time_log = fopen(".//bin//debug//time.log", "w");
  if (time_log == NULL) {
    /* was unchecked: fprintf on a NULL stream is undefined behavior */
    perror("FRACTAL_return: cannot open .//bin//debug//time.log");
    return;
  }
  fprintf(time_log, "start: %.24s\n", ctime(&time_s));
  fprintf(time_log, "goal : %.24s\n", ctime(&time_g));
  fprintf(time_log, "diff : %.6f\n" , difftime(time_g, time_s));
  fclose(time_log);
  return;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <assert.h>
#include "runner.h"
typedef struct scalar_ctx_t {
double *a;
double *b;
size_t len;
double res;
} scalar_ctx_t;
/* Returns a pseudo-random double uniformly distributed in [-1, 1]. */
double random_double()
{
    double unit = (double)rand() / RAND_MAX;  /* in [0, 1] */
    return unit * 2.0 - 1.0;                  /* rescale to [-1, 1] */
}
/* Benchmark fixture constructor: allocates the two input vectors of length
 * ctx->len and fills them with random values in [-1, 1].
 * Ownership: the arrays are released by dtor(). */
void ctor(void *context)
{
    scalar_ctx_t *ctx = context;

    /* calloc zero-initializes and guards the n*size multiplication */
    ctx->a = calloc(ctx->len, sizeof(double));
    assert(ctx->a);
    ctx->b = calloc(ctx->len, sizeof(double));
    assert(ctx->b);

    srand(time(NULL));
    /* size_t index: the non-standard `uint` could truncate against the
     * size_t length on very large inputs */
    for (size_t i = 0; i < ctx->len; i++) {
        ctx->a[i] = random_double();
        ctx->b[i] = random_double();
    }
}
/* Benchmark fixture destructor: releases the vectors allocated by ctor().
 * free(NULL) is a safe no-op, so no guards are needed. */
void dtor(void *context)
{
    scalar_ctx_t *ctx = context;
    free(ctx->b);
    free(ctx->a);
}
/* Baseline: single-threaded dot product of ctx->a and ctx->b.
 * Stores the result in ctx->res. */
void scalar_serial(void *context)
{
    scalar_ctx_t *ctx = context;
    double sum = 0;
    /* size_t matches ctx->len; the original non-standard `uint` index is
     * narrower than size_t and could loop forever for huge lengths */
    for (size_t i = 0; i < ctx->len; i++) {
        sum += ctx->a[i] * ctx->b[i];
    }
    printf("sum = %lf\n", sum);
    ctx->res = sum;
}
/* Dot product where each partial product is added to the shared accumulator
 * under an OpenMP lock (mutex benchmark).  Stores the result in ctx->res. */
void scalar_mutex(void *context)
{
    scalar_ctx_t *ctx = context;
    omp_lock_t lock;
    omp_init_lock(&lock);
    double sum = 0;
    #pragma omp parallel for
    /* size_t index instead of the non-standard, narrower `uint` */
    for (size_t i = 0; i < ctx->len; i++) {
        double t = ctx->a[i] * ctx->b[i];
        omp_set_lock(&lock);   /* serialize the shared-sum update */
        sum += t;
        omp_unset_lock(&lock);
    }
    omp_destroy_lock(&lock);
    printf("sum = %lf\n", sum);
    ctx->res = sum;
}
/* Dot product where the shared accumulator is updated inside an OpenMP
 * critical section (critical-section benchmark).  Stores result in ctx->res.
 *
 * Fix: the original combined reduction(+ : sum) with the critical section.
 * With the reduction clause each thread updates its own private copy of sum,
 * so the critical section protected nothing and the benchmark effectively
 * measured reduction.  The reduction clause is removed so `sum` is shared
 * and the critical section does the synchronization the name implies. */
void scalar_critical(void *context)
{
    scalar_ctx_t *ctx = context;
    double sum = 0;
    #pragma omp parallel for
    for (size_t i = 0; i < ctx->len; i++) {
        double t = ctx->a[i] * ctx->b[i];
        #pragma omp critical
        {
            sum += t;
        }
    }
    printf("sum = %lf\n", sum);
    ctx->res = sum;
}
/* Dot product where the shared accumulator is updated with an OpenMP atomic
 * (atomic benchmark).  Stores the result in ctx->res. */
void scalar_atomic(void *context)
{
    scalar_ctx_t *ctx = context;
    double sum = 0;
    #pragma omp parallel for
    /* size_t index instead of the non-standard, narrower `uint` */
    for (size_t i = 0; i < ctx->len; i++) {
        #pragma omp atomic
        sum += ctx->a[i] * ctx->b[i];
    }
    printf("sum = %lf\n", sum);
    ctx->res = sum;
}
/* Dot product using OpenMP's built-in reduction clause (reduction benchmark).
 * Stores the result in ctx->res. */
void scalar_reduction(void *context)
{
    scalar_ctx_t *ctx = context;
    double sum = 0;
    #pragma omp parallel for reduction(+ : sum)
    /* size_t index instead of the non-standard, narrower `uint` */
    for (size_t i = 0; i < ctx->len; i++) {
        sum += ctx->a[i] * ctx->b[i];
    }
    printf("sum = %lf\n", sum);
    ctx->res = sum;
}
/* Recursive divide-and-conquer dot product of a[0..len) and b[0..len).
 * Splits the range in half and sums the two halves; base cases are the
 * empty range (0) and a single pair (a[0]*b[0]). */
double dot_product(double *a, double *b, int len)
{
    if (len == 0) {
        return 0;
    }
    if (len == 1) {
        return a[0] * b[0];
    }
    int mid = len / 2;
    double left  = dot_product(a, b, mid);
    double right = dot_product(a + mid, b + mid, len - mid);
    return left + right;
}
void scalar_recursive(void *context)
{
scalar_ctx_t *ctx = context;
double sum = dot_product(ctx->a, ctx->b, ctx->len);
printf("sum = %lf\n", sum);
}
double dot_product_omp(double *a, double *b, int len)
{
if (len < 1000) {
double sum = 0;
for (int i = 0; i < len;i++) {
sum += a[i] * b[i];
}
return sum;
}
int half = len / 2;
double x, y;
#pragma omp parallel
{
#pragma omp single nowait
{
#pragma omp task
x = dot_product_omp(a, b, half);
#pragma omp task
y = dot_product_omp(a + half, b + half, len - half);
}
}
return x + y;
}
void scalar_recursive_omp(void *context)
{
scalar_ctx_t *ctx = context;
double sum = dot_product_omp(ctx->a, ctx->b, ctx->len);
printf("sum = %lf\n", sum);
}
/* Entry point: runs every dot-product variant over a random vector pair.
 * Usage: <prog> <vector length> <thread count> */
int main(int argc, char **argv)
{
    /* the original dereferenced argv[1]/argv[2] without checking argc */
    if (argc < 3) {
        fprintf(stderr, "usage: %s <length> <nthreads>\n", argv[0]);
        return 1;
    }

    size_t n = (size_t) atoi(argv[1]);
    int nthreads = atoi(argv[2]);
    omp_set_num_threads(nthreads);

    scalar_ctx_t ctx = {
        .len = n,
    };

    runner_pre(ctor, &ctx);
    runner_run(scalar_serial, &ctx, "serial");
    runner_run(scalar_mutex, &ctx, "mutex");
    runner_run(scalar_critical, &ctx, "critical");
    runner_run(scalar_atomic, &ctx, "atomic");
    runner_run(scalar_reduction, &ctx, "reduction");
    runner_run(scalar_recursive, &ctx, "recursive");
    runner_run(scalar_recursive_omp, &ctx, "recursive_omp");
    runner_post(dtor, &ctx);
    return 0;
}
|
smacof_helper.c | /******************************************************************************
* Copyright (c) 2015 - 2016 Philipp Schubert. *
* All rights reserved. This program and the accompanying materials are made *
* available under the terms of LICENSE.txt. *
* *
* Contributors: *
* Philipp Schubert *
*****************************************************************************/
/**
* @file smacof_helper.c
* @brief Implementiert die Funktionsprototypen aus smacof_helper.h der für den
* SMACOF Algorithmus benötigten Funktionen.
*
* Diese Datei enthält die Implementationen der Funktionsprototypen der
* benötigten Funktionen, zur Formulierung des SMACOF Algorithmus.
*
* @author Philipp D. Schubert
* @bug Keine Bugs bekannt.
*/
#include "smacof_helper.h"
#include "cuda_disfuncs.cuh"
#include "cuda_linalg.cuh"
#include "cuda_smacof_helper.cuh"
#include "disfuncs.h"
#include "linalg.h"
#include "m.h"
#include "tm.h"
#include "utils.h"
#include <math.h>
#include <omp.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/*
 * One Guttman-transform step of SMACOF: computes the updated configuration
 * X from the current configuration Z, the dissimilarities delta, optional
 * weights w, and (when weighted) the pseudo-inverse pinv of V.
 * Returns a newly allocated matrix; caller owns and must free it.
 */
m_t *guttmanTransformation(const tm_t *delta, const m_t *z, const tm_t *w,
                           const tm_t *pinv) {
  m_t *update;
  const real_t norm = 1.0 / delta->size;
  // OPENMP C version of guttman-transformation
  tm_t *dz = calcDeltaMatrix(z, EUCLIDEAN, 2);   // pairwise distances of Z
  tm_t *bz = computeBofZ(delta, dz, w);          // B(Z) matrix
  m_t *bztimesz = TMmultM(bz, z);                // B(Z) * Z
  if (w == NULL) {
    // formula in 'linear' math: X_update = n^-1 * B(Z) * Z
    update = Mmultscalar(bztimesz, norm);
  } else {
    // weighted case: X_update = V+ * B(Z) * Z
    update = TMmultM(pinv, bztimesz);
  }
  // cleanup temporary stuff
  freeTM(dz);
  dz = NULL;
  freeTM(bz);
  bz = NULL;
  freeM(bztimesz);
  bztimesz = NULL;
  return update;
}
/*
 * Allocation-free variant of the Guttman transform: reuses the caller's
 * buffers (x for the result, bz as scratch, dz precomputed distances of z).
 * Returns x, which holds the updated configuration.  In the weighted branch
 * z is used as scratch and then copied back into x.
 */
m_t *guttmanTransformation_nomem(const tm_t *delta, m_t *z, m_t *x,
                                 const tm_t *w, const tm_t *pinv,
                                 const tm_t *dz, tm_t *bz) {
  const real_t norm = 1.0 / delta->size;
  // OPENMP C version of guttman-transformation
  computeBofZ_nomem(delta, dz, w, bz);  // bz := B(Z)
  TMmultM_nomem(bz, z, x);              // x := B(Z) * Z
  if (w == NULL) {
    // formula in 'linear' math: X_update = n^-1 * B(Z) * Z
    Mmultscalar_nomem(x, norm);
    return x;
  } else {
    // formula in 'linear' math: X_update = V+ * B(Z) * Z
    TMmultM_nomem(pinv, x, z);          // z used as scratch output
    memcpy(x->elems, z->elems, z->num_elems * sizeof(real_t));
    return x;
  }
}
/*
 * Builds the SMACOF V matrix from the weight matrix w (both stored as
 * packed lower triangles): off-diagonal v_ij = -w_ij, and each diagonal
 * entry is set so that every row sums to zero.
 * Returns a newly allocated triangular matrix; caller frees it.
 */
tm_t *computeV(const tm_t *w) {
  tm_t *v = initTM(w->size);
  unsigned int i, j;
  // negate all packed entries of w
  #pragma omp parallel for shared(w, v) private(i) schedule(static)
  for (i = 0; i < w->num_elems; ++i) {
    v->elems[i] = -w->elems[i];
  }
  real_t sum = 0;
  // row sums over the packed triangle; (i,j) with j>i is read from row j.
  // Only the own row's diagonal is read/written per iteration, so rows are
  // independent and the parallel loop is race-free.
  #pragma omp parallel for shared(v) private(i, j, sum) schedule(dynamic)
  for (i = 0; i < v->size; ++i) {
    sum = 0;
    for (j = 0; j < v->size; ++j) {
      sum += (j <= i) ? v->elems[(i * (i + 1)) / 2 + j]
                      : v->elems[(j * (j + 1)) / 2 + i];
    }
    // NOTE(review): the j == i term adds the pre-update diagonal value,
    // presumably zero for a weight matrix -- confirm w's diagonal is 0.
    v->elems[(i * (i + 1)) / 2 + i] = -sum;
  }
  return v;
}
/*
 * Computes the (optionally weighted) raw stress
 *   sigma = sum_{i<j} w_ij * (delta_ij - d_ij(X))^2
 * where d(X) are the pairwise Euclidean distances of configuration x.
 * Allocates and frees the distance matrix internally.
 */
real_t computesigma(const m_t *x, const tm_t *delta, const tm_t *w) {
  real_t sigma = 0;
  unsigned int i;
  tm_t *dx = calcDeltaMatrix(x, EUCLIDEAN, 2);
  CERROR(dx->size != delta->size, "bad dimension to compute sigma");
  if (w == NULL) {
    // NOTE(review): powf computes in float even if real_t is double --
    // a plain squaring would avoid any precision loss; confirm real_t.
    #pragma omp parallel for shared(delta,dx) private(i) reduction(+:sigma) schedule(static)
    for (i = 0; i < delta->num_elems; ++i) {
      sigma += powf(delta->elems[i] - dx->elems[i], 2);
    }
  } // if we have weights, we have to use a more advanced formula
  else {
    #pragma omp parallel for shared(delta,dx,w) private(i) reduction(+:sigma) schedule(static)
    for (i = 0; i < delta->num_elems; ++i) {
      sigma += w->elems[i] * powf(delta->elems[i] - dx->elems[i], 2);
    }
  }
  // cleanup temporary stuff
  freeTM(dx);
  dx = NULL;
  return sigma;
}
/*
 * Allocation-free stress computation: identical to computesigma() but takes
 * the precomputed distance matrix dx instead of the configuration, so no
 * temporary matrix is allocated.
 */
real_t computesigma_nomem(const tm_t *dx, const tm_t *delta, const tm_t *w) {
  real_t sigma = 0;
  unsigned int i;
  CERROR(dx->size != delta->size, "bad dimension to compute sigma");
  if (w == NULL) {
    #pragma omp parallel for shared(delta,dx) private(i) reduction(+:sigma) schedule(static)
    for (i = 0; i < delta->num_elems; ++i) {
      sigma += powf(delta->elems[i] - dx->elems[i], 2);
    }
  } // if we have weights, we have to use a more advanced formula
  else {
    #pragma omp parallel for shared(delta,dx,w) private(i) reduction(+:sigma) schedule(static)
    for (i = 0; i < delta->num_elems; ++i) {
      sigma += w->elems[i] * powf(delta->elems[i] - dx->elems[i], 2);
    }
  }
  return sigma;
}
/*
 * Builds the SMACOF B(Z) matrix (packed lower triangle) from the
 * dissimilarities delta, the current pairwise distances dz, and optional
 * weights w: off-diagonal b_ij = -(w_ij*)delta_ij/d_ij (0 where d_ij = 0),
 * and each diagonal entry makes its row sum to zero.
 * Returns a newly allocated triangular matrix; caller frees it.
 */
tm_t *computeBofZ(const tm_t *delta, const tm_t *dz, const tm_t *w) {
  tm_t *b = initTM(delta->size);
  unsigned int i, j;
  if (w == NULL) {
    #pragma omp parallel for shared(b, delta, dz, w) private(i, j) schedule(dynamic)
    for (i = 0; i < b->size; ++i) {
      for (j = 0; j < i; ++j) {
        b->elems[(i * (i + 1) / 2) + j] =
            (dz->elems[(i * (i + 1) / 2) + j] != 0)
                ? -(delta->elems[(i * (i + 1) / 2) + j] /
                    dz->elems[(i * (i + 1) / 2) + j])
                : 0.0;
      }
    }
  } else {
    #pragma omp parallel for shared(b, delta, dz, w) private(i, j) schedule(dynamic)
    for (i = 0; i < b->size; ++i) {
      for (j = 0; j < i; ++j) {
        b->elems[(i * (i + 1) / 2) + j] =
            (dz->elems[(i * (i + 1) / 2) + j] != 0)
                ? -((w->elems[(i * (i + 1) / 2) + j] *
                     delta->elems[(i * (i + 1) / 2) + j]) /
                    dz->elems[(i * (i + 1) / 2) + j])
                : 0.0;
      }
    }
  }
  // Caution: diagonal entries have to be calculated at last!
  real_t sumcol;
  #pragma omp parallel for shared(b) private(i, j, sumcol) schedule(dynamic)
  for (i = 0; i < b->size; ++i) {
    sumcol = 0;
    for (j = 0; j < b->size; ++j) {
      if (j == i) {
        /* Fix: skip the diagonal term.  The original added the (not yet
           computed) diagonal entry to the sum, silently relying on initTM
           zero-initialization; computeBofZ_nomem already skips it, so this
           makes the two implementations consistent. */
        continue;
      } else if (j < i) {
        sumcol += b->elems[(i * (i + 1) / 2) + j];
      } else {
        sumcol += b->elems[(j * (j + 1) / 2) + i];
      }
    }
    b->elems[(i * (i + 1) / 2) + i] = -sumcol;
  }
  return b;
}
/*
 * Allocation-free variant of computeBofZ(): writes B(Z) into the caller's
 * buffer b (which may hold stale data from a previous iteration -- hence
 * the explicit diagonal skip below).
 */
void computeBofZ_nomem(const tm_t *delta, const tm_t *dz, const tm_t *w,
                       tm_t *b) {
  unsigned int i, j;
  if (w == NULL) {
    #pragma omp parallel for shared(b, delta, dz, w) private(i, j) schedule(dynamic)
    for (i = 0; i < b->size; ++i) {
      for (j = 0; j < i; ++j) {
        // b_ij = -delta_ij / d_ij, or 0 when the distance is 0
        b->elems[(i * (i + 1) / 2) + j] =
            (dz->elems[(i * (i + 1) / 2) + j] != 0)
                ? -(delta->elems[(i * (i + 1) / 2) + j] /
                    dz->elems[(i * (i + 1) / 2) + j])
                : 0.0;
      }
    }
  } else {
    #pragma omp parallel for shared(b, delta, dz, w) private(i, j) schedule(dynamic)
    for (i = 0; i < b->size; ++i) {
      for (j = 0; j < i; ++j) {
        // weighted: b_ij = -(w_ij * delta_ij) / d_ij, or 0 when d_ij = 0
        b->elems[(i * (i + 1) / 2) + j] =
            (dz->elems[(i * (i + 1) / 2) + j] != 0)
                ? -((w->elems[(i * (i + 1) / 2) + j] *
                     delta->elems[(i * (i + 1) / 2) + j]) /
                    dz->elems[(i * (i + 1) / 2) + j])
                : 0.0;
      }
    }
  }
  // Caution: diagonal entries have to be calculated at last!
  real_t sumcol;
  #pragma omp parallel for shared(b) private(i, j, sumcol) schedule(dynamic)
  for (i = 0; i < b->size; ++i) {
    sumcol = 0;
    for (j = 0; j < b->size; ++j) {
      if (i == j) {
        // skip the diagonal: b is reused, so it may hold a stale value
      } else if (j < i) {
        sumcol += b->elems[(i * (i + 1) / 2) + j];
      } else {
        sumcol += b->elems[(j * (j + 1) / 2) + i];
      }
    }
    b->elems[(i * (i + 1) / 2) + i] = -sumcol;
  }
}
/*
 * Allocates a rows x cols matrix and fills it with pseudo-random integers
 * drawn uniformly from [min, max].  Caller owns the returned matrix.
 * Seeding (when SEEDED) is done here, serially -- C's rand() must never be
 * driven from multiple threads.
 */
m_t *generateRandM(const unsigned int rows, const unsigned int cols,
                   const unsigned int min, const unsigned int max) {
  m_t *mrand = initM(rows, cols);
#if SEEDED == 1
  srand(time(NULL));
#endif
  const unsigned int span = max - min + 1;
  unsigned int i = 0;
  while (i < mrand->num_elems) {
    /* classic bucket trick: maps rand() into [0, span) without modulo bias */
    mrand->elems[i] = min + rand() / (RAND_MAX / span + 1);
    ++i;
  }
  return mrand;
}
/*
 * Converts a square full matrix into the packed lower-triangular storage
 * used for weight matrices.  Only the strict lower triangle is copied;
 * diagonal entries keep whatever initTM() produced.
 * NOTE(review): presumably initTM zero-initializes and the weight diagonal
 * is meant to be 0 -- confirm against initTM's implementation.
 * Returns a newly allocated triangular matrix; caller frees it.
 */
tm_t *convertMtoTM(const m_t *tmp_w) {
  CERROR(tmp_w->rows != tmp_w->cols,
         "weight matrix of type m has wrong dimensions");
  tm_t *w = initTM(tmp_w->rows);
  unsigned int i, j;
  #pragma omp parallel for shared(w, tmp_w) private(i, j) schedule(dynamic)
  for (i = 0; i < tmp_w->rows; ++i) {
    for (j = 0; j < i; ++j) {
      w->elems[(i * (i + 1)) / 2 + j] = tmp_w->elems[i * tmp_w->cols + j];
    }
  }
  return w;
}
tm_t *get_test_delta() {
tm_t *test_delta = initTM(4);
test_delta->elems[0] = 0;
test_delta->elems[1] = 5;
test_delta->elems[2] = 0;
test_delta->elems[3] = 3;
test_delta->elems[4] = 2;
test_delta->elems[5] = 0;
test_delta->elems[6] = 4;
test_delta->elems[7] = 2;
test_delta->elems[8] = 1;
test_delta->elems[9] = 0;
return test_delta;
}
m_t *get_test_x() {
m_t *test_x = initM(4, 2);
test_x->elems[0] = -0.266;
test_x->elems[1] = -0.539;
test_x->elems[2] = 0.451;
test_x->elems[3] = 0.252;
test_x->elems[4] = 0.016;
test_x->elems[5] = -0.238;
test_x->elems[6] = -0.200;
test_x->elems[7] = 0.524;
return test_x;
}
|
colormap.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR M M AAA PPPP %
% C O O L O O R R MM MM A A P P %
% C O O L O O RRRR M M M AAAAA PPPP %
% C O O L O O R R M M A A P %
% CCCC OOO LLLLL OOO R R M M A A P %
% %
% %
% MagickCore Colormap Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% We use linked-lists because splay-trees do not currently support duplicate
% key / value pairs (.e.g X11 green compliance and SVG green compliance).
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/client.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/xml-tree.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageColormap() allocates an image colormap and initializes
% it to a linear gray colorspace. If the image already has a colormap,
% it is replaced. AcquireImageColormap() returns MagickTrue if successful,
% otherwise MagickFalse if there is not enough memory.
%
% The format of the AcquireImageColormap method is:
%
% MagickBooleanType AcquireImageColormap(Image *image,const size_t colors,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colors: the number of colors in the image colormap.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Allocates (or resizes) the image's colormap to `colors` entries and
 * initializes it to a linear gray ramp with opaque alpha.  Returns
 * MagickTrue on success, MagickFalse (with an exception) on allocation
 * failure. */
MagickExport MagickBooleanType AcquireImageColormap(Image *image,
  const size_t colors,ExceptionInfo *exception)
{
  register ssize_t
    i;

  /*
    Allocate image colormap.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* guarantee at least one entry even when colors == 0 */
  image->colors=MagickMax(colors,1);
  if (image->colormap == (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors+1,
      sizeof(*image->colormap));
  else
    image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap,
      image->colors+1,sizeof(*image->colormap));
  if (image->colormap == (PixelInfo *) NULL)
    {
      /* allocation failed: demote to DirectClass and raise the exception */
      image->colors=0;
      image->storage_class=DirectClass;
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    double
      pixel;

    /* linear gray ramp: entry i gets intensity i/(colors-1) of QuantumRange.
       NOTE(review): uses the `colors` parameter, not image->colors -- for
       colors == 0 the size_t subtraction wraps; confirm callers never pass 0. */
    pixel=(double) (i*(QuantumRange/MagickMax(colors-1,1)));
    GetPixelInfo(image,image->colormap+i);
    image->colormap[i].alpha_trait=BlendPixelTrait;
    image->colormap[i].red=pixel;
    image->colormap[i].green=pixel;
    image->colormap[i].blue=pixel;
    image->colormap[i].alpha=OpaqueAlpha;
  }
  return(SetImageStorageClass(image,PseudoClass,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C y c l e C o l o r m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CycleColormap() displaces an image's colormap by a given number of
% positions. If you cycle the colormap a number of times you can produce
% a psychedelic effect.
%
% WARNING: this assumes an image's colormap is in a well-known and defined
% order. Currently ImageMagick has no way of setting that order.
%
% The format of the CycleColormapImage method is:
%
% MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o displace: displace the colormap this amount.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Rotates every pixel's colormap index by `displace` positions (modulo the
 * colormap size).  Returns MagickTrue on success. */
MagickExport MagickBooleanType CycleColormapImage(Image *image,
  const ssize_t displace,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* cycling only makes sense for palette images */
  if (image->storage_class == DirectClass)
    (void) SetImageType(image,PaletteType,exception);
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    ssize_t
      index;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Fix: image->colors is size_t, so the original expression performed
         an unsigned modulo -- a negative displace wrapped to a huge value
         and the `index < 0` fixup below was dead code.  Casting colors to
         ssize_t keeps the arithmetic signed. */
      index=((ssize_t) GetPixelIndex(image,q)+displace) %
        (ssize_t) image->colors;
      if (index < 0)
        index+=(ssize_t) image->colors;
      SetPixelIndex(image,(Quantum) index,q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S o r t C o l o r m a p B y I n t e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SortColormapByIntensity() sorts the colormap of a PseudoClass image by
% decreasing color intensity.
%
% The format of the SortColormapByIntensity method is:
%
% MagickBooleanType SortColormapByIntensity(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: A pointer to an Image structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/* qsort comparator: orders PixelInfo entries by decreasing intensity.
 * Fix: the original cast each intensity to int before subtracting, which
 * truncated fractional (HDRI) differences to zero and risked signed
 * overflow in the subtraction; comparing as doubles and returning the
 * sign avoids both. */
static int IntensityCompare(const void *x,const void *y)
{
  const PixelInfo
    *color_1,
    *color_2;

  double
    intensity;

  color_1=(const PixelInfo *) x;
  color_2=(const PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_2)-
    GetPixelInfoIntensity((const Image *) NULL,color_1);
  if (intensity < 0.0)
    return(-1);
  if (intensity > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/* Sorts a PseudoClass image's colormap by decreasing intensity and remaps
 * every pixel index to the colormap's new order.  A no-op (MagickTrue) for
 * non-palette images. */
MagickExport MagickBooleanType SortColormapByIntensity(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  unsigned short
    *pixels;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->storage_class != PseudoClass)
    return(MagickTrue);
  /*
    Allocate memory for pixel indexes.
  */
  pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors,
    sizeof(*pixels));
  if (pixels == (unsigned short *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Assign index values to colormap entries.
    The alpha field temporarily stores each entry's original position so it
    survives the qsort below.
    NOTE(review): shared(status) here is vestigial -- status is not touched
    in this loop.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  /*
    Sort image colormap by decreasing color popularity.
  */
  qsort((void *) image->colormap,(size_t) image->colors,
    sizeof(*image->colormap),IntensityCompare);
  /*
    Update image colormap indexes to sorted colormap order.
    pixels[old index] = new index, built from the stashed alpha values.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status)
#endif
  for (i=0; i < (ssize_t) image->colors; i++)
    pixels[(ssize_t) image->colormap[i].alpha]=(unsigned short) i;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* rewrite each pixel's index through the old->new mapping */
      index=(Quantum) pixels[(ssize_t) GetPixelIndex(image,q)];
      SetPixelIndex(image,index,q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (status == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  pixels=(unsigned short *) RelinquishMagickMemory(pixels);
  return(status);
}
|
app_main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "bmp_interface.h"
#include <omp.h>
extern int __htc_get_unit_count();
extern int global_radius;
// Offloads to the target device and has each OpenMP team record its own
// team number into `a`.  num_teams(8) requests up to 8 teams, so indices
// 0..omp_get_num_teams()-1 of `a` are written (the caller's PASS check
// assumes exactly 8 teams are created — NOTE(review): num_teams is an
// upper bound in OpenMP; confirm the runtime always launches 8 here).
// `a` must have room for at least 8 bytes — TODO confirm at call sites.
void foo(uint8_t *a) {
#pragma omp target teams num_teams(8)
{
// The teams construct's block runs once per team (its initial thread),
// so each element of `a` is written by exactly one team: no data race.
a[omp_get_team_num()] = omp_get_team_num();
}
printf("end of function foo\n");
}
// Test driver: runs two target-offloaded `teams distribute parallel for`
// loops that fill/accumulate into a device-allocated buffer, then checks
// the byte sums against hard-coded expected values (13977, then +28 after
// foo() writes team ids 0..7).  The expected sums encode the exact
// team/thread decomposition (num_teams/num_threads/schedule clauses), so
// any change to those clauses invalidates the PASS/FAIL checks.
int app_main(int argc, char **argv) {
uint32_t bufsize = 1000;
uint32_t bufsize2 = 1000;
// Allocate target temp buffer.
extern void *stencil_cp_alloc(size_t);
uint8_t *unew = (uint8_t *)stencil_cp_alloc(bufsize * sizeof(uint8_t));
printf("unit count is %d\n", __htc_get_unit_count());
int i;
int k;
// NOTE(review): firstprivate(k) copies k while it is still uninitialized
// (indeterminate value).  Harmless here only because every iteration
// assigns k before reading it — consider initializing k at declaration.
#pragma omp target
#pragma omp teams distribute parallel for num_threads(17) firstprivate(k) schedule(static,33) num_teams(5)
for (i = 0; i < bufsize; i++) {
// bufsize - bufsize2 == 0, so k reduces to the team number; the
// `+ k - k` term below exercises firstprivate handling without
// changing the stored value.
k = (int)omp_get_team_num() + bufsize - bufsize2;
// printf("first target team %d thread %d i is %d\n", k,
// (int)omp_get_thread_num(), i);
unew[i] = (omp_get_team_num()+1) * omp_get_thread_num() + k - k;
}
#pragma omp target
#pragma omp teams distribute parallel for num_threads(7) firstprivate(k) schedule(static,33) num_teams(8)
for (i = 0; i < bufsize; i++) {
k = (int)omp_get_team_num();
// printf("second target team %d thread %d i is %d\n", k,
// (int)omp_get_thread_num(), i);
unew[i] += (omp_get_team_num()+1) * omp_get_thread_num();
}
// Host-side checksum of the device-written buffer.  13977 is the value
// produced by the two schedules above (uint8_t wraparound included).
int sum = 0;
for (i = 0; i < bufsize; i++) {
sum += unew[i];
}
printf("sum is %d %s\n", sum, (sum == 13977) ? "PASSED" : "FAILED");
// foo() overwrites unew[0..7] with team ids 0..7 (sums to 28).
foo(unew);
for (i = 0; i < 8; i++) {
sum += unew[i];
}
printf("sum is %d %s\n", sum, (sum == (13977+28)) ? "PASSED" : "FAILED");
return 0;
}
|
GB_sort.c | //------------------------------------------------------------------------------
// GB_sort: sort all vectors in a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB_sort.h"
#include "GB_werk.h"
#include "GB_transpose.h"
#include "GB_ek_slice.h"
// macros:
// GB_SORT (func) defined as GB_sort_func_TYPE_ascend or _descend,
// GB_msort_ISO_ascend or _descend,
// or GB_msort_func_UDT
// GB_TYPE bool, int8_, ... or GB_void for UDT
// GB_ADDR(A,p) A+p for builtin, A + p * GB_SIZE otherwise
// GB_SIZE size of each entry: sizeof (GB_TYPE) for built-in
// GB_GET(x,X,i) x = (op->xtype) X [i]
// GB_COPY(A,i,C,k) A [i] = C [k]
// GB_SWAP(A,i,k) swap A [i] and A [k]
// GB_LT compare two entries, x < y
//------------------------------------------------------------------------------
// macros for all built-in types
//------------------------------------------------------------------------------
#define GB_SORT_UDT 0
#define GB_ADDR(A,i) ((A) + (i))
#define GB_GET(x,A,i) GB_TYPE x = A [i]
#define GB_COPY(A,i,B,j) A [i] = B [j]
#define GB_SIZE sizeof (GB_TYPE)
#define GB_SWAP(A,i,j) { GB_TYPE t = A [i] ; A [i] = A [j] ; A [j] = t ; }
//------------------------------------------------------------------------------
// ascending sort for built-in types
//------------------------------------------------------------------------------
#define GB_LT(less,a,i,b,j) \
less = (((a) < (b)) ? true : (((a) == (b)) ? ((i) < (j)) : false))
#define GB_TYPE bool
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_BOOL)
#include "GB_sort_template.c"
#define GB_TYPE int8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT8)
#include "GB_sort_template.c"
#define GB_TYPE int16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT16)
#include "GB_sort_template.c"
#define GB_TYPE int32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT32)
#include "GB_sort_template.c"
#define GB_TYPE int64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_INT64)
#include "GB_sort_template.c"
#define GB_TYPE uint8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT8)
#include "GB_sort_template.c"
#define GB_TYPE uint16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT16)
#include "GB_sort_template.c"
#define GB_TYPE uint32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT32)
#include "GB_sort_template.c"
#define GB_TYPE uint64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_UINT64)
#include "GB_sort_template.c"
#define GB_TYPE float
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_FP32)
#include "GB_sort_template.c"
#define GB_TYPE double
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _ascend_FP64)
#include "GB_sort_template.c"
//------------------------------------------------------------------------------
// descending sort for built-in types
//------------------------------------------------------------------------------
#undef GB_LT
#define GB_LT(less,a,i,b,j) \
less = (((a) > (b)) ? true : (((a) == (b)) ? ((i) < (j)) : false))
#define GB_TYPE bool
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_BOOL)
#include "GB_sort_template.c"
#define GB_TYPE int8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT8)
#include "GB_sort_template.c"
#define GB_TYPE int16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT16)
#include "GB_sort_template.c"
#define GB_TYPE int32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT32)
#include "GB_sort_template.c"
#define GB_TYPE int64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_INT64)
#include "GB_sort_template.c"
#define GB_TYPE uint8_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT8)
#include "GB_sort_template.c"
#define GB_TYPE uint16_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT16)
#include "GB_sort_template.c"
#define GB_TYPE uint32_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT32)
#include "GB_sort_template.c"
#define GB_TYPE uint64_t
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_UINT64)
#include "GB_sort_template.c"
#define GB_TYPE float
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_FP32)
#include "GB_sort_template.c"
#define GB_TYPE double
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _descend_FP64)
#include "GB_sort_template.c"
//------------------------------------------------------------------------------
// macros for user-defined types and when typecasting is performed
//------------------------------------------------------------------------------
#undef GB_ADDR
#undef GB_GET
#undef GB_COPY
#undef GB_SIZE
#undef GB_SWAP
#undef GB_LT
#define GB_ADDR(A,i) ((A) + (i) * csize)
#define GB_GET(x,A,i) GB_void x [GB_VLA(xsize)] ; \
fcast (x, GB_ADDR (A, i), csize)
#define GB_COPY(A,i,B,j) memcpy (GB_ADDR (A, i), GB_ADDR (B, j), csize)
#define GB_SIZE csize
#define GB_TYPE GB_void
#define GB_SWAP(A,i,j) \
{ \
GB_void t [GB_VLA(csize)] ; /* declare the scalar t */ \
memcpy (t, GB_ADDR (A, i), csize) ; /* t = A [i] */ \
GB_COPY (A, i, A, j) ; /* A [i] = A [j] */ \
memcpy (GB_ADDR (A, j), t, csize) ; /* A [j] = t */ \
}
#define GB_LT(less,a,i,b,j) \
{ \
flt (&less, a, b) ; /* less = (a < b) */ \
if (!less) \
{ \
/* check for equality and tie-break on index */ \
bool more ; \
flt (&more, b, a) ; /* more = (b < a) */ \
less = (more) ? false : ((i) < (j)) ; \
} \
}
#undef GB_SORT_UDT
#define GB_SORT_UDT 1
#define GB_SORT(func) GB_EVAL3 (GB(sort_), func, _UDT)
#include "GB_sort_template.c"
//------------------------------------------------------------------------------
// GB_sort
//------------------------------------------------------------------------------
#undef GB_FREE_WORKSPACE
#define GB_FREE_WORKSPACE \
{ \
GB_WERK_POP (C_ek_slicing, int64_t) ; \
GB_phbix_free (T) ; \
}
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_FREE_WORKSPACE ; \
GB_phbix_free (C) ; \
GB_phbix_free (P) ; \
}
//------------------------------------------------------------------------------
// GB_sort: sort the entries within each row (A_transpose false) or each
// column (A_transpose true) of A, using the binary op as the comparator.
// On output, C holds the sorted values and/or P holds the permutation
// (original indices, as values).  At least one of C or P must be non-NULL.
// op must return bool with x and y of the same type; A and C types must
// match exactly; P must be INT64 or UINT64.  When C == A the sort is done
// in place.  Errors: GrB_NULL_POINTER, GrB_DOMAIN_MISMATCH,
// GrB_DIMENSION_MISMATCH, GrB_OUT_OF_MEMORY.
//------------------------------------------------------------------------------
GrB_Info GB_sort
(
    // output:
    GrB_Matrix C,               // matrix with sorted vectors on output
    GrB_Matrix P,               // matrix with permutations on output
    // input:
    GrB_BinaryOp op,            // comparator for the sort
    GrB_Matrix A,               // matrix to sort
    const bool A_transpose,     // false: sort each row, true: sort each column
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT_MATRIX_OK (A, "A for GB_sort", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for GB_sort", GB0) ;

    // T is a temporary matrix with a static (stack-allocated) header; it is
    // only used when C is NULL and the caller wants just the permutation P.
    GrB_Matrix T = NULL ;
    struct GB_Matrix_opaque T_header ;
    GB_WERK_DECLARE (C_ek_slicing, int64_t) ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    bool C_is_NULL = (C == NULL) ;
    if (C_is_NULL && P == NULL)
    {
        // either C, or P, or both must be present
        return (GrB_NULL_POINTER) ;
    }

    GrB_Type atype = A->type ;
    GrB_Type ctype = (C_is_NULL) ? atype : C->type ;
    GrB_Type ptype = (P == NULL) ? GrB_INT64 : P->type ;
    if (op->ztype != GrB_BOOL || op->xtype != op->ytype || atype != ctype
        || !(ptype == GrB_INT64 || ptype == GrB_UINT64)
        || !GB_Type_compatible (atype, op->xtype))
    {
        // op must return bool, and its inputs x and y must have the same type;
        // the types of A and C must match exactly; P must be INT64 or UINT64;
        // A and C must be typecasted to the input type of the op.
        return (GrB_DOMAIN_MISMATCH) ;
    }

    int64_t anrows = GB_NROWS (A) ;
    int64_t ancols = GB_NCOLS (A) ;
    if ((C != NULL && (GB_NROWS (C) != anrows || GB_NCOLS (C) != ancols)) ||
        (P != NULL && (GB_NROWS (P) != anrows || GB_NCOLS (P) != ancols)))
    {
        // C and P must have the same dimensions as A
        return (GrB_DIMENSION_MISMATCH) ;
    }

    bool A_iso = A->iso ;
    bool sort_in_place = (A == C) ;

    // free any prior content of C and P
    GB_phbix_free (P) ;
    if (!sort_in_place)
    {
        GB_phbix_free (C) ;
    }

    //--------------------------------------------------------------------------
    // make a copy of A, unless it is aliased with C
    //--------------------------------------------------------------------------

    if (C_is_NULL)
    {
        // C is a temporary matrix, which is freed when done
        T = GB_clear_static_header (&T_header) ;
        C = T ;
    }

    // The sort templates operate on the vectors of C, so C must be oriented
    // so that each vector to sort is one sparse vector: CSC when sorting
    // columns (A_transpose), CSR when sorting rows.
    if (A_transpose)
    {
        // ensure C is in sparse or hypersparse CSC format
        if (A->is_csc)
        {
            // A is already CSC
            if (!sort_in_place)
            {
                // A = C
                GB_OK (GB_dup_worker (&C, A_iso, A, true, atype, Context)) ;
            }
        }
        else
        {
            // A is CSR but C must be CSC
            if (sort_in_place)
            {
                // A = A'
                GB_OK (GB_transpose_in_place (A, true, Context)) ;
            }
            else
            {
                // C = A'
                GB_OK (GB_transpose_cast (C, atype, true, A, false, Context)) ;
            }
        }
    }
    else
    {
        // ensure C is in sparse or hypersparse CSR format
        if (!A->is_csc)
        {
            // A is already CSR
            if (!sort_in_place)
            {
                // A = C
                GB_OK (GB_dup_worker (&C, A_iso, A, true, atype, Context)) ;
            }
        }
        else
        {
            // A is CSC but C must be CSR
            if (sort_in_place)
            {
                // A = A'
                GB_OK (GB_transpose_in_place (A, false, Context)) ;
            }
            else
            {
                // C = A'
                GB_OK (GB_transpose_cast (C, atype, false, A, false, Context)) ;
            }
        }
    }

    // ensure C is sparse or hypersparse CSC
    if (GB_IS_BITMAP (C) || GB_IS_FULL (C))
    {
        GB_OK (GB_convert_any_to_sparse (C, Context)) ;
    }

    //--------------------------------------------------------------------------
    // sort C in place
    //--------------------------------------------------------------------------

    GB_Opcode opcode = op->opcode ;
    GB_Type_code acode = atype->code ;
    if ((op->xtype == atype) && (op->ytype == atype) &&
        (opcode == GB_LT_binop_code || opcode == GB_GT_binop_code) &&
        (acode < GB_UDT_code))
    {

        //----------------------------------------------------------------------
        // no typecasting, using built-in < or > operators, builtin types
        //----------------------------------------------------------------------

        // dispatch to the type-specialized sort workers generated from
        // GB_sort_template.c (one per builtin type and direction)
        if (opcode == GB_LT_binop_code)
        {
            // ascending sort
            switch (acode)
            {
                case GB_BOOL_code :
                    GB_OK (GB(sort_matrix_ascend_BOOL  )(C, Context)) ; break ;
                case GB_INT8_code :
                    GB_OK (GB(sort_matrix_ascend_INT8  )(C, Context)) ; break ;
                case GB_INT16_code :
                    GB_OK (GB(sort_matrix_ascend_INT16 )(C, Context)) ; break ;
                case GB_INT32_code :
                    GB_OK (GB(sort_matrix_ascend_INT32 )(C, Context)) ; break ;
                case GB_INT64_code :
                    GB_OK (GB(sort_matrix_ascend_INT64 )(C, Context)) ; break ;
                case GB_UINT8_code :
                    GB_OK (GB(sort_matrix_ascend_UINT8 )(C, Context)) ; break ;
                case GB_UINT16_code :
                    GB_OK (GB(sort_matrix_ascend_UINT16)(C, Context)) ; break ;
                case GB_UINT32_code :
                    GB_OK (GB(sort_matrix_ascend_UINT32)(C, Context)) ; break ;
                case GB_UINT64_code :
                    GB_OK (GB(sort_matrix_ascend_UINT64)(C, Context)) ; break ;
                case GB_FP32_code :
                    GB_OK (GB(sort_matrix_ascend_FP32  )(C, Context)) ; break ;
                case GB_FP64_code :
                    GB_OK (GB(sort_matrix_ascend_FP64  )(C, Context)) ; break ;
                default:;
            }
        }
        else // opcode == GB_GT_binop_code
        {
            // descending sort
            switch (acode)
            {
                case GB_BOOL_code :
                    GB_OK (GB(sort_matrix_descend_BOOL  )(C, Context)) ; break ;
                case GB_INT8_code :
                    GB_OK (GB(sort_matrix_descend_INT8  )(C, Context)) ; break ;
                case GB_INT16_code :
                    GB_OK (GB(sort_matrix_descend_INT16 )(C, Context)) ; break ;
                case GB_INT32_code :
                    GB_OK (GB(sort_matrix_descend_INT32 )(C, Context)) ; break ;
                case GB_INT64_code :
                    GB_OK (GB(sort_matrix_descend_INT64 )(C, Context)) ; break ;
                case GB_UINT8_code :
                    GB_OK (GB(sort_matrix_descend_UINT8 )(C, Context)) ; break ;
                case GB_UINT16_code :
                    GB_OK (GB(sort_matrix_descend_UINT16)(C, Context)) ; break ;
                case GB_UINT32_code :
                    GB_OK (GB(sort_matrix_descend_UINT32)(C, Context)) ; break ;
                case GB_UINT64_code :
                    GB_OK (GB(sort_matrix_descend_UINT64)(C, Context)) ; break ;
                case GB_FP32_code :
                    GB_OK (GB(sort_matrix_descend_FP32  )(C, Context)) ; break ;
                case GB_FP64_code :
                    GB_OK (GB(sort_matrix_descend_FP64  )(C, Context)) ; break ;
                default:;
            }
        }
    }
    else
    {

        //----------------------------------------------------------------------
        // typecasting, user-defined types, or unconventional operators
        //----------------------------------------------------------------------

        GB_OK (GB (sort_matrix_UDT) (C, op, Context)) ;
    }

    //--------------------------------------------------------------------------
    // construct the final indices
    //--------------------------------------------------------------------------

    // After sorting, C->i holds the original row indices in sorted-value
    // order.  The final pattern of each vector must be 0..(vector length-1).
    int64_t cnz = GB_nnz (C) ;
    int64_t cnvec = C->nvec ;
    int64_t *restrict Ti = NULL ;

    if (P == NULL)
    {
        // P is not constructed; use C->i to construct the new indices
        Ti = C->i ;
    }
    else
    {
        // allocate P->i and use it to construct the new indices
        P->i = GB_MALLOC (cnz, int64_t, &(P->i_size)) ;
        if (P->i == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        Ti = P->i ;
    }

    // fill Ti with 0..len-1 for each vector of C, in parallel over the
    // tasks produced by GB_SLICE_MATRIX (kfirst_Cslice / klast_Cslice /
    // pstart_Cslice come from that macro)
    int C_nthreads, C_ntasks ;
    GB_SLICE_MATRIX (C, 1, chunk) ;
    int64_t *restrict Cp = C->p ;
    const int64_t cvlen = C->vlen ;
    int tid ;
    #pragma omp parallel for num_threads(C_nthreads) schedule(static,1)
    for (tid = 0 ; tid < C_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Cslice [tid] ;
        int64_t klast = klast_Cslice [tid] ;
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            const int64_t pC0 = Cp [k] ;
            int64_t pC_start, pC_end ;
            GB_get_pA (&pC_start, &pC_end, tid, k,
                kfirst, klast, pstart_Cslice, Cp, cvlen) ;
            for (int64_t pC = pC_start ; pC < pC_end ; pC++)
            {
                Ti [pC] = pC - pC0 ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // construct P
    //--------------------------------------------------------------------------

    bool C_is_hyper = GB_IS_HYPERSPARSE (C) ;
    if (P != NULL)
    {
        P->is_csc = C->is_csc ;
        P->nvec = C->nvec ;
        P->nvec_nonempty = C->nvec_nonempty ;
        P->iso = false ;
        P->vlen = C->vlen ;
        P->vdim = C->vdim ;
        if (C_is_NULL)
        {
            // the values of C are not needed. The indices of C become the
            // values of P, Cp becomes Pp, and Ch (if present) becomes Ph.
            // Ownership of these arrays transfers from C to P, so the C
            // pointers are set to NULL to prevent a double free.
            P->x = C->i ; C->i = NULL ; P->x_size = C->i_size ;
            P->p = C->p ; C->p = NULL ; P->p_size = C->p_size ;
            P->h = C->h ; C->h = NULL ; P->h_size = C->h_size ;
            P->plen = C->plen ;
        }
        else
        {
            // C is required on output. The indices of C are copied and
            // become the values of P. Cp is copied to Pp, and Ch (if present)
            // is copied to Ph.
            P->plen = cnvec ;
            P->x = GB_MALLOC (cnz, int64_t, &(P->x_size)) ;
            P->p = GB_MALLOC (cnvec+1, int64_t, &(P->p_size)) ;
            P->h = NULL ;
            if (C_is_hyper)
            {
                P->h = GB_MALLOC (cnvec, int64_t, &(P->h_size)) ;
            }
            if (P->x == NULL || P->p == NULL || (C_is_hyper && P->h == NULL))
            {
                // out of memory
                GB_FREE_ALL ;
                return (GrB_OUT_OF_MEMORY) ;
            }
            // copy from C to P
            GB_memcpy (P->x, C->i, cnz * sizeof (int64_t), nthreads_max) ;
            GB_memcpy (P->p, C->p, (cnvec+1) * sizeof (int64_t), nthreads_max) ;
            if (C_is_hyper)
            {
                GB_memcpy (P->h, C->h, cnvec * sizeof (int64_t), nthreads_max) ;
            }
        }
        P->magic = GB_MAGIC ;
    }

    //--------------------------------------------------------------------------
    // finalize the pattern of C
    //--------------------------------------------------------------------------

    if (!C_is_NULL && P != NULL)
    {
        // copy P->i into C->i
        GB_memcpy (C->i, P->i, cnz * sizeof (int64_t), nthreads_max) ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    if (!C_is_NULL) { ASSERT_MATRIX_OK (C, "C output of GB_sort", GB0) ; }
    if (P != NULL) { ASSERT_MATRIX_OK (P, "P output of GB_sort", GB0) ; }
    return (GrB_SUCCESS) ;
}
|
concurrent_unordered_map.cuh.h | /*
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CONCURRENT_UNORDERED_MAP_CUH
#define CONCURRENT_UNORDERED_MAP_CUH
#include <thrust/pair.h>
#include <cassert>
#include <iostream>
#include <iterator>
#include <type_traits>
#include "hash_functions.cuh"
#include "managed.cuh"
#include "managed_allocator.cuh"
// TODO: replace this with CUDA_TRY and propagate the error
#ifndef CUDA_RT_CALL
#define CUDA_RT_CALL(call) \
{ \
cudaError_t cudaStatus = call; \
if (cudaSuccess != cudaStatus) { \
fprintf(stderr, \
"ERROR: CUDA RT call \"%s\" in line %d of file %s failed with " \
"%s (%d).\n", \
#call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), \
cudaStatus); \
exit(1); \
} \
}
#endif
// TODO: can we do this more efficiently?
// Emulates an 8-bit atomicCAS using a CAS on the aligned 32-bit word that
// contains the target byte (CUDA has no native byte-wide atomicCAS).
//
// BUG FIX: the previous version compared the *entire* 32-bit word against a
// zero-padded comparand and returned the low byte of the old word.  That
// made the CAS fail spuriously whenever any neighboring byte in the word
// was nonzero, and returned the wrong "old" value for addresses not at a
// word boundary.  The loop below masks out the neighboring bytes so only
// the target byte participates in the compare/exchange, and retries when a
// concurrent update to a neighboring byte causes the word-level CAS to fail.
//
// Returns the byte previously stored at `address` (standard CAS contract).
__inline__ __device__ int8_t atomicCAS(int8_t* address, int8_t compare,
                                       int8_t val) {
  // Word-aligned base address and bit offset of the target byte within it.
  uint32_t* base_address = (uint32_t*)((char*)address - ((size_t)address & 3));
  const uint32_t shift = ((size_t)address & 3) * 8;
  const uint32_t mask = 0xFFu << shift;

  uint32_t old_word = *base_address;
  uint32_t assumed;
  do {
    assumed = old_word;
    if ((int8_t)(uint8_t)((assumed & mask) >> shift) != compare) {
      // Target byte no longer matches `compare`: the CAS must fail;
      // fall through and report the current byte as the old value.
      break;
    }
    const uint32_t new_word =
        (assumed & ~mask) | (((uint32_t)(uint8_t)val << shift) & mask);
    old_word = atomicCAS((unsigned int*)base_address, assumed, new_word);
    // Retry only if another thread changed the word (possibly just a
    // neighboring byte) between our read and the CAS.
  } while (assumed != old_word);

  return (int8_t)(uint8_t)((old_word & mask) >> shift);
}
// TODO: can we do this more efficiently?
// Emulates a 16-bit atomicCAS using a CAS on the aligned 32-bit word that
// contains the target halfword (assumes `address` is 2-byte aligned, so
// `(size_t)address & 2` selects the low or high half of the word).
//
// BUG FIX: as with the int8_t overload, the previous version compared the
// whole 32-bit word against a zero-padded comparand (spurious CAS failure
// when the other halfword was nonzero) and returned the low halfword of
// the old word regardless of `address`.  This version masks the compare
// and exchange to the target halfword and retries on interference.
//
// Returns the halfword previously stored at `address`.
__inline__ __device__ int16_t atomicCAS(int16_t* address, int16_t compare,
                                        int16_t val) {
  uint32_t* base_address = (uint32_t*)((char*)address - ((size_t)address & 2));
  const uint32_t shift = ((size_t)address & 2) * 8;  // 0 or 16
  const uint32_t mask = 0xFFFFu << shift;

  uint32_t old_word = *base_address;
  uint32_t assumed;
  do {
    assumed = old_word;
    if ((int16_t)(uint16_t)((assumed & mask) >> shift) != compare) {
      break;  // target halfword differs: CAS fails, report current value
    }
    const uint32_t new_word =
        (assumed & ~mask) | (((uint32_t)(uint16_t)val << shift) & mask);
    old_word = atomicCAS((unsigned int*)base_address, assumed, new_word);
  } while (assumed != old_word);

  return (int16_t)(uint16_t)((old_word & mask) >> shift);
}
// 64-bit signed atomicCAS: forwards to the native unsigned long long
// overload (same width on CUDA targets); value-preserving casts only.
__inline__ __device__ int64_t atomicCAS(int64_t* address, int64_t compare,
                                        int64_t val) {
  return (int64_t)atomicCAS((unsigned long long*)address,
                            (unsigned long long)compare,
                            (unsigned long long)val);
}
// 64-bit unsigned atomicCAS: forwards to the native unsigned long long
// overload (uint64_t and unsigned long long are the same width on CUDA).
__inline__ __device__ uint64_t atomicCAS(uint64_t* address, uint64_t compare,
                                         uint64_t val) {
  return (uint64_t)atomicCAS((unsigned long long*)address,
                             (unsigned long long)compare,
                             (unsigned long long)val);
}
// long long atomicCAS: forwards to the native unsigned long long overload
// (same width; casts reinterpret the bit pattern, which is what CAS needs).
__inline__ __device__ long long int atomicCAS(long long int* address,
                                              long long int compare,
                                              long long int val) {
  return (long long int)atomicCAS((unsigned long long*)address,
                                  (unsigned long long)compare,
                                  (unsigned long long)val);
}
// double atomicCAS: round-trips the 64-bit bit pattern through the native
// unsigned long long CAS using CUDA's bit-reinterpret intrinsics.
// NOTE(review): the compare is bitwise, so -0.0 != +0.0 and NaN patterns
// compare by representation, not by floating-point equality.
__inline__ __device__ double atomicCAS(double* address, double compare,
                                       double val) {
  return __longlong_as_double(atomicCAS((unsigned long long int*)address,
                                        __double_as_longlong(compare),
                                        __double_as_longlong(val)));
}
// float atomicCAS: round-trips the 32-bit bit pattern through the native
// int CAS using CUDA's bit-reinterpret intrinsics (bitwise compare, as
// with the double overload above).
__inline__ __device__ float atomicCAS(float* address, float compare,
                                      float val) {
  return __int_as_float(
      atomicCAS((int*)address, __float_as_int(compare), __float_as_int(val)));
}
// 64-bit signed atomicAdd: forwards to the native unsigned long long
// overload; two's-complement wraparound gives the correct signed sum.
__inline__ __device__ int64_t atomicAdd(int64_t* address, int64_t val) {
  return (int64_t)atomicAdd((unsigned long long*)address,
                            (unsigned long long)val);
}
// 64-bit unsigned atomicAdd: forwards to the native unsigned long long
// overload (same width on CUDA targets).
__inline__ __device__ uint64_t atomicAdd(uint64_t* address, uint64_t val) {
  return (uint64_t)atomicAdd((unsigned long long*)address,
                             (unsigned long long)val);
}
// Loads a key/value pair with the widest vector load whose size exactly
// matches sizeof(pair_type) (16, 8, 4, or 2 bytes), falling back to a plain
// load otherwise.  The union performs the type pun; the sizeof comparisons
// are compile-time constants, so the untaken branches are eliminated.
// NOTE(review): requires `ptr` to be aligned for the chosen vector type —
// guaranteed here because the hash-table allocation is value_type-aligned.
template <typename pair_type>
__forceinline__ __device__ pair_type
load_pair_vectorized(const pair_type* __restrict__ const ptr) {
  if (sizeof(uint4) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint4 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0, 0, 0};
    converter.vec_val = *reinterpret_cast<const uint4*>(ptr);
    return converter.pair_val;
  } else if (sizeof(uint2) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint2 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0};
    converter.vec_val = *reinterpret_cast<const uint2*>(ptr);
    return converter.pair_val;
  } else if (sizeof(int) == sizeof(pair_type)) {
    union pair_type2vec_type {
      int vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.vec_val = *reinterpret_cast<const int*>(ptr);
    return converter.pair_val;
  } else if (sizeof(short) == sizeof(pair_type)) {
    union pair_type2vec_type {
      short vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.vec_val = *reinterpret_cast<const short*>(ptr);
    return converter.pair_val;
  } else {
    // Pair size has no matching vector width: ordinary member-wise load.
    return *ptr;
  }
}
// Stores a key/value pair with the widest vector store whose size exactly
// matches sizeof(pair_type) — the mirror of load_pair_vectorized above.
// A single vector store writes the whole pair at once, which init_hashtbl
// relies on to publish each <key, value> slot in one memory transaction.
// NOTE(review): same alignment requirement on `ptr` as the load variant.
template <typename pair_type>
__forceinline__ __device__ void store_pair_vectorized(
    pair_type* __restrict__ const ptr, const pair_type val) {
  if (sizeof(uint4) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint4 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0, 0, 0};
    converter.pair_val = val;
    *reinterpret_cast<uint4*>(ptr) = converter.vec_val;
  } else if (sizeof(uint2) == sizeof(pair_type)) {
    union pair_type2vec_type {
      uint2 vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0, 0};
    converter.pair_val = val;
    *reinterpret_cast<uint2*>(ptr) = converter.vec_val;
  } else if (sizeof(int) == sizeof(pair_type)) {
    union pair_type2vec_type {
      int vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.pair_val = val;
    *reinterpret_cast<int*>(ptr) = converter.vec_val;
  } else if (sizeof(short) == sizeof(pair_type)) {
    union pair_type2vec_type {
      short vec_val;
      pair_type pair_val;
    };
    pair_type2vec_type converter = {0};
    converter.pair_val = val;
    *reinterpret_cast<short*>(ptr) = converter.vec_val;
  } else {
    // Pair size has no matching vector width: ordinary member-wise store.
    *ptr = val;
  }
}
// Kernel: initializes every slot of the hash table to the sentinel pair
// <key_val, elem_val> (i.e. <unused_key, unused_value>).  One thread per
// slot; launched by the concurrent_unordered_map constructor.
template <typename value_type, typename size_type, typename key_type,
          typename elem_type>
__global__ void init_hashtbl(  // Init every entry of the table with
                               // <unused_key, unused_value> pair
    value_type* __restrict__ const hashtbl_values, const size_type n,
    const key_type key_val, const elem_type elem_val) {
  const size_type idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) {  // guard: grid may be larger than n
    store_pair_vectorized(
        hashtbl_values + idx,
        thrust::make_pair(
            key_val, elem_val));  // Simply store every element a <K, V> pair
  }
}
// Default key-equality functor (host/device replacement for std::equal_to,
// which is not usable in __device__ code).  The nested type aliases mirror
// the std::binary_function protocol expected by callers.
template <typename T>
struct equal_to {
  using result_type = bool;
  using first_argument_type = T;
  using second_argument_type = T;
  __forceinline__ __host__ __device__ constexpr bool operator()(
      const first_argument_type& lhs, const second_argument_type& rhs) const {
    return lhs == rhs;
  }
};
// Iterator adapter that wraps [m_begin, m_end) and wraps around to m_begin
// when incremented past the last element — used to probe the hash table
// circularly starting from an arbitrary bucket.
//
// BUG FIX: both postfix operator++ overloads previously returned a
// *reference* to the local `old`, a dangling reference (undefined behavior
// as soon as the caller reads it).  Postfix increment must return the
// pre-increment iterator by value, which is what they do now.
template <typename Iterator>
class cycle_iterator_adapter {
 public:
  using value_type = typename std::iterator_traits<Iterator>::value_type;
  using difference_type =
      typename std::iterator_traits<Iterator>::difference_type;
  using pointer = typename std::iterator_traits<Iterator>::pointer;
  using reference = typename std::iterator_traits<Iterator>::reference;
  using iterator_type = Iterator;

  cycle_iterator_adapter() = delete;

  // begin/end delimit the cycle; current is the starting position.
  __host__ __device__ explicit cycle_iterator_adapter(
      const iterator_type& begin, const iterator_type& end,
      const iterator_type& current)
      : m_begin(begin), m_end(end), m_current(current) {}

  // Prefix increment: advance, wrapping from the last element to m_begin.
  __host__ __device__ cycle_iterator_adapter& operator++() {
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return *this;
  }

  // NOTE(review): a const prefix increment that mutates m_current cannot
  // compile if ever instantiated; kept only for interface compatibility.
  __host__ __device__ const cycle_iterator_adapter& operator++() const {
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return *this;
  }

  // Postfix increment: returns the pre-increment value BY VALUE
  // (was: dangling reference to a local).
  __host__ __device__ cycle_iterator_adapter operator++(int) {
    cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return old;
  }

  // Const postfix increment, also returning by value.  NOTE(review): same
  // instantiation problem as the const prefix overload above.
  __host__ __device__ const cycle_iterator_adapter operator++(int) const {
    cycle_iterator_adapter<iterator_type> old(m_begin, m_end, m_current);
    if (m_end == (m_current + 1))
      m_current = m_begin;
    else
      ++m_current;
    return old;
  }

  // Two adapters are equal only if position AND range coincide.
  __host__ __device__ bool equal(
      const cycle_iterator_adapter<iterator_type>& other) const {
    return m_current == other.m_current && m_begin == other.m_begin &&
           m_end == other.m_end;
  }

  __host__ __device__ reference& operator*() { return *m_current; }
  __host__ __device__ const reference& operator*() const { return *m_current; }
  __host__ __device__ const pointer operator->() const {
    return m_current.operator->();
  }
  __host__ __device__ pointer operator->() { return m_current; }

  // Raw access to the wrapped iterator.
  __host__ __device__ iterator_type getter() const { return m_current; }

 private:
  iterator_type m_current;
  iterator_type m_begin;
  iterator_type m_end;
};
// Equality for cycle_iterator_adapter: delegates to the member equal(),
// which compares position and range.
template <class T>
__host__ __device__ bool operator==(const cycle_iterator_adapter<T>& lhs,
                                    const cycle_iterator_adapter<T>& rhs) {
  return lhs.equal(rhs);
}
// Inequality for cycle_iterator_adapter: negation of the member equal().
template <class T>
__host__ __device__ bool operator!=(const cycle_iterator_adapter<T>& lhs,
                                    const cycle_iterator_adapter<T>& rhs) {
  return !lhs.equal(rhs);
}
/**
* Does support concurrent insert, but not concurrent insert and probing.
*
* TODO:
* - add constructor that takes pointer to hash_table to avoid allocations
* - extend interface to accept streams
*/
template <typename Key, typename Element, Key unused_key,
typename Hasher = default_hash<Key>,
typename Equality = equal_to<Key>,
typename Allocator = managed_allocator<thrust::pair<Key, Element>>,
bool count_collisions = false>
class concurrent_unordered_map : public managed {
public:
using size_type = size_t;
using hasher = Hasher;
using key_equal = Equality;
using allocator_type = Allocator;
using key_type = Key;
using value_type = thrust::pair<Key, Element>;
using mapped_type = Element;
using iterator = cycle_iterator_adapter<value_type*>;
using const_iterator = const cycle_iterator_adapter<value_type*>;
private:
union pair2longlong {
unsigned long long int longlong;
value_type pair;
};
public:
concurrent_unordered_map(const concurrent_unordered_map&) = delete;
concurrent_unordered_map& operator=(const concurrent_unordered_map&) = delete;
explicit concurrent_unordered_map(size_type n,
const mapped_type unused_element,
const Hasher& hf = hasher(),
const Equality& eql = key_equal(),
const allocator_type& a = allocator_type())
: m_hf(hf),
m_equal(eql),
m_allocator(a),
m_hashtbl_size(n),
m_hashtbl_capacity(n),
m_collisions(0),
m_unused_element(
unused_element) { // allocate the raw data of hash table:
// m_hashtbl_values,pre-alloc it on current GPU if UM.
m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity);
constexpr int block_size = 128;
{
cudaPointerAttributes hashtbl_values_ptr_attributes;
cudaError_t status = cudaPointerGetAttributes(
&hashtbl_values_ptr_attributes, m_hashtbl_values);
#if CUDART_VERSION >= 10000
if (cudaSuccess == status &&
hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged)
#else
if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged)
#endif
{
int dev_id = 0;
CUDA_RT_CALL(cudaGetDevice(&dev_id));
CUDA_RT_CALL(cudaMemPrefetchAsync(
m_hashtbl_values, m_hashtbl_size * sizeof(value_type), dev_id, 0));
}
}
// Initialize kernel, set all entry to unused <K,V>
init_hashtbl<<<((m_hashtbl_size - 1) / block_size) + 1, block_size>>>(
m_hashtbl_values, m_hashtbl_size, unused_key, m_unused_element);
// CUDA_RT_CALL( cudaGetLastError() );
CUDA_RT_CALL(cudaStreamSynchronize(0));
CUDA_RT_CALL(cudaGetLastError());
}
~concurrent_unordered_map() {
m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity);
}
__host__ __device__ iterator begin() {
return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
m_hashtbl_values);
}
__host__ __device__ const_iterator begin() const {
return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
m_hashtbl_values);
}
__host__ __device__ iterator end() {
return iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
m_hashtbl_values + m_hashtbl_size);
}
__host__ __device__ const_iterator end() const {
return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
m_hashtbl_values + m_hashtbl_size);
}
__host__ __device__ size_type size() const { return m_hashtbl_size; }
__host__ __device__ value_type* data() const { return m_hashtbl_values; }
__forceinline__ static constexpr __host__ __device__ key_type
get_unused_key() {
return unused_key;
}
// Generic update of a hash table value for any aggregator
template <typename aggregation_type>
__forceinline__ __device__ void update_existing_value(
mapped_type& existing_value, value_type const& insert_pair,
aggregation_type) {
// update without CAS
existing_value = insert_pair.second;
}
__forceinline__ __device__ void accum_existing_value_atomic(
mapped_type& existing_value, value_type const& accum_pair) {
// update with CAS
// existing_value = insert_pair.second;
int num_element =
sizeof(existing_value.data) / sizeof(*(existing_value.data));
const mapped_type& accumulator = accum_pair.second;
for (int i = 0; i < num_element; i++) {
atomicAdd(existing_value.data + i, accumulator.data[i]);
}
// atomicAdd(&existing_value, double val)
}
// TODO Overload atomicAdd for 1 byte and 2 byte types, until then, overload
// specifically for the
// types where atomicAdd already has an overload. Otherwise the generic
// update_existing_value will
// be used. Specialization for COUNT aggregator
/*
__forceinline__ __host__ __device__
void update_existing_value(mapped_type & existing_value, value_type const &
insert_pair,
count_op<int32_t> op)
{
atomicAdd(&existing_value, static_cast<mapped_type>(1));
}
// Specialization for COUNT aggregator
__forceinline__ __host__ __device__
void update_existing_value(mapped_type & existing_value, value_type const &
insert_pair,
count_op<int64_t> op)
{
atomicAdd(&existing_value, static_cast<mapped_type>(1));
}
// Specialization for COUNT aggregator
__forceinline__ __host__ __device__
void update_existing_value(mapped_type & existing_value, value_type const &
insert_pair,
count_op<float> op)
{
atomicAdd(&existing_value, static_cast<mapped_type>(1));
}
// Specialization for COUNT aggregator
__forceinline__ __host__ __device__
void update_existing_value(mapped_type & existing_value, value_type const &
insert_pair,
count_op<double> op)
{
atomicAdd(&existing_value, static_cast<mapped_type>(1));
}
*/
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Inserts a new (key, value) pair. If the key already exists in
the map
an aggregation operation is performed with the new value and
existing value.
E.g., if the aggregation operation is 'max', then the maximum is
computed
between the new value and existing value and the result is
stored in the map.
*
* @Param[in] x The new (key, value) pair to insert
* @Param[in] op The aggregation operation to perform
* @Param[in] keys_equal An optional functor for comparing two keys
* @Param[in] precomputed_hash Indicates if a precomputed hash value is being
passed in to use
* to determine the write location of the new key
* @Param[in] precomputed_hash_value The precomputed hash value
* @tparam aggregation_type A functor for a binary operation that performs the
aggregation
* @tparam comparison_type A functor for comparing two keys
*
* @Returns An iterator to the newly inserted key,value pair
*/
/* ----------------------------------------------------------------------------*/
template <typename aggregation_type, class comparison_type = key_equal,
          typename hash_value_type = typename Hasher::result_type>
__forceinline__ __device__ iterator insert(
    const value_type& x, aggregation_type op,
    comparison_type keys_equal = key_equal(), bool precomputed_hash = false,
    hash_value_type precomputed_hash_value = 0) {
  const size_type hashtbl_size = m_hashtbl_size;
  value_type* hashtbl_values = m_hashtbl_values;
  hash_value_type hash_value{0};
  // If a precomputed hash value has been passed in, then use it to determine
  // the write location of the new key
  if (true == precomputed_hash) {
    hash_value = precomputed_hash_value;
  }
  // Otherwise, compute the hash value from the new key
  else {
    hash_value = m_hf(x.first);
  }
  size_type current_index = hash_value % hashtbl_size;
  value_type* current_hash_bucket = &(hashtbl_values[current_index]);
  const key_type insert_key = x.first;
  size_type counter = 0;
  while (true) {
    // Table full: every bucket was probed without finding a usable slot.
    if (counter++ >= hashtbl_size) {
      return end();
    }
    key_type& existing_key = current_hash_bucket->first;
    mapped_type& existing_value = current_hash_bucket->second;
    // Try and set the existing_key for the current hash bucket to insert_key
    const key_type old_key = atomicCAS(&existing_key, unused_key, insert_key);
    // If old_key == unused_key, the current hash bucket was empty
    // and existing_key was updated to insert_key by the atomicCAS.
    // If old_key == insert_key, this key has already been inserted.
    // In either case, perform the atomic aggregation of existing_value and
    // insert_value. Because the hash table is initialized with the identity
    // value of the aggregation operation, it is safe to perform the operation
    // when the existing_value still has its initial value.
    // TODO: Use template specialization to make use of native atomic
    // functions
    // TODO: How to handle data types less than 32 bits?
    if (keys_equal(unused_key, old_key) || keys_equal(insert_key, old_key)) {
      update_existing_value(existing_value, x, op);
      // BUGFIX: exit here so the returned iterator refers to the bucket that
      // was just updated. The previous code advanced current_hash_bucket once
      // more before leaving the loop, so callers received an iterator to the
      // NEXT bucket instead of the inserted (key, value) pair, contradicting
      // the documented contract above and the behavior of get_insert().
      break;
    }
    // Occupied by a different key: linear-probe to the next bucket.
    current_index = (current_index + 1) % hashtbl_size;
    current_hash_bucket = &(hashtbl_values[current_index]);
  }
  return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size,
                  current_hash_bucket);
}
/* This function is not currently implemented
__forceinline__
__host__ __device__ iterator insert(const value_type& x)
{
const size_type hashtbl_size = m_hashtbl_size;
value_type* hashtbl_values = m_hashtbl_values;
const size_type key_hash = m_hf( x.first );
size_type hash_tbl_idx = key_hash%hashtbl_size;
value_type* it = 0;
while (0 == it) {
value_type* tmp_it = hashtbl_values + hash_tbl_idx;
#ifdef __CUDA_ARCH__
if ( std::numeric_limits<key_type>::is_integer &&
std::numeric_limits<mapped_type>::is_integer && sizeof(unsigned long long int)
== sizeof(value_type)
)
{
pair2longlong converter = {0ull};
converter.pair = thrust::make_pair( unused_key, m_unused_element
);
const unsigned long long int unused = converter.longlong;
converter.pair = x;
const unsigned long long int value = converter.longlong;
const unsigned long long int old_val = atomicCAS(
reinterpret_cast<unsigned long long
int*>(tmp_it), unused, value ); if ( old_val == unused ) { it = tmp_it;
}
else if ( count_collisions )
{
atomicAdd( &m_collisions, 1 );
}
} else {
const key_type old_key = atomicCAS( &(tmp_it->first), unused_key,
x.first );
if ( m_equal( unused_key, old_key ) ) {
(m_hashtbl_values+hash_tbl_idx)->second = x.second;
it = tmp_it;
}
else if ( count_collisions )
{
atomicAdd( &m_collisions, 1 );
}
}
#else
#pragma omp critical
{
if ( m_equal( unused_key, tmp_it->first ) ) {
hashtbl_values[hash_tbl_idx] = thrust::make_pair( x.first,
x.second );
it = tmp_it;
}
}
#endif
hash_tbl_idx = (hash_tbl_idx+1)%hashtbl_size;
}
return iterator( m_hashtbl_values,m_hashtbl_values+hashtbl_size,it);
}
*/
// Looks up key `k` with linear probing. Returns a const_iterator to the
// matching bucket; returns end() when an unused slot is reached first (the
// key is absent) or the whole table has been scanned.
__forceinline__ __host__ __device__ const_iterator
find(const key_type& k) const {
  size_type probe_idx = m_hf(k) % m_hashtbl_size;
  value_type* hit = 0;
  for (size_type attempts = 0; 0 == hit; ++attempts) {
    value_type* slot = m_hashtbl_values + probe_idx;
    const key_type slot_key = slot->first;
    if (m_equal(k, slot_key)) {
      // Key found in this bucket.
      hit = slot;
    } else if (m_equal(unused_key, slot_key) || attempts > m_hashtbl_size) {
      // Empty slot (key absent) or table exhausted: report one-past-end.
      hit = m_hashtbl_values + m_hashtbl_size;
    } else {
      // Occupied by another key: continue the linear probe.
      probe_idx = (probe_idx + 1) % m_hashtbl_size;
    }
  }
  return const_iterator(m_hashtbl_values, m_hashtbl_values + m_hashtbl_size,
                        hit);
}
// Finds the bucket for key `k`, inserting the key first when it is absent.
// On first insertion the mapped value is taken from atomicAdd(value_counter,1),
// i.e. values are dense, monotonically assigned ids. Returns an iterator to
// the key's bucket, or end() when the table is full.
// NOTE(review): the spin-wait below assumes m_unused_element is never a valid
// stored value and that the inserting thread's write becomes visible — confirm.
template <typename aggregation_type, typename counter_type,
          class comparison_type = key_equal,
          typename hash_value_type = typename Hasher::result_type>
__forceinline__ __device__ iterator get_insert(
    const key_type& k, aggregation_type op, counter_type* value_counter,
    comparison_type keys_equal = key_equal(), bool precomputed_hash = false,
    hash_value_type precomputed_hash_value = 0) {
  const size_type hashtbl_size = m_hashtbl_size;
  value_type* hashtbl_values = m_hashtbl_values;
  hash_value_type hash_value{0};
  // If a precomputed hash value has been passed in, then use it to determine
  // the write location of the new key
  if (true == precomputed_hash) {
    hash_value = precomputed_hash_value;
  }
  // Otherwise, compute the hash value from the new key
  else {
    hash_value = m_hf(k);
  }
  size_type current_index = hash_value % hashtbl_size;
  value_type* current_hash_bucket = &(hashtbl_values[current_index]);
  const key_type insert_key = k;
  bool insert_success = false;
  size_type counter = 0;
  while (false == insert_success) {
    // Situation #5: No slot: All slot in the hashtable is occupied by other
    // key, both get and
    // insert fail. Return empty iterator
    if (counter++ >= hashtbl_size) {
      return end();
    }
    key_type& existing_key = current_hash_bucket->first;
    // volatile: the value may be published concurrently by another thread;
    // re-read it on every iteration of the spin-wait below.
    volatile mapped_type& existing_value = current_hash_bucket->second;
    // Try and set the existing_key for the current hash bucket to insert_key
    const key_type old_key = atomicCAS(&existing_key, unused_key, insert_key);
    // If old_key == unused_key, the current hash bucket was empty
    // and existing_key was updated to insert_key by the atomicCAS.
    // If old_key == insert_key, this key has already been inserted.
    // In either case, perform the atomic aggregation of existing_value and
    // insert_value
    // Because the hash table is initialized with the identity value of the
    // aggregation
    // operation, it is safe to perform the operation when the existing_value
    // still
    // has its initial value
    // TODO: Use template specialization to make use of native atomic
    // functions
    // TODO: How to handle data types less than 32 bits?
    // Situation #1: Empty slot: this key never exist in the table, ready to
    // insert.
    if (keys_equal(unused_key, old_key)) {
      // update_existing_value(existing_value, x, op);
      existing_value = (mapped_type)(atomicAdd(value_counter, 1));
      break;
    } // Situation #2+#3: Target slot: This slot is the slot for this key
    else if (keys_equal(insert_key, old_key)) {
      while (existing_value == m_unused_element) {
        // Situation #2: This slot is inserting by another CUDA thread and the
        // value is not yet
        // ready, just wait
      }
      // Situation #3: This slot is already ready, get successfully and return
      // (iterator of) the
      // value
      break;
    }
    // Situation #4: Wrong slot: This slot is occupied by other key, get fail,
    // do nothing and
    // linear probing to next slot.
    current_index = (current_index + 1) % hashtbl_size;
    current_hash_bucket = &(hashtbl_values[current_index]);
  }
  return iterator(m_hashtbl_values, m_hashtbl_values + hashtbl_size,
                  current_hash_bucket);
}
int assign_async(const concurrent_unordered_map& other,
gpuStream_t stream = 0) {
m_collisions = other.m_collisions;
if (other.m_hashtbl_size <= m_hashtbl_capacity) {
m_hashtbl_size = other.m_hashtbl_size;
} else {
m_allocator.deallocate(m_hashtbl_values, m_hashtbl_capacity);
m_hashtbl_capacity = other.m_hashtbl_size;
m_hashtbl_size = other.m_hashtbl_size;
m_hashtbl_values = m_allocator.allocate(m_hashtbl_capacity);
}
CUDA_RT_CALL(cudaMemcpyAsync(m_hashtbl_values, other.m_hashtbl_values,
m_hashtbl_size * sizeof(value_type),
cudaMemcpyDefault, stream));
return 0;
}
// Re-initializes every bucket to (unused_key, m_unused_element) by launching
// the init_hashtbl kernel on `stream`; also resets the collision counter when
// collision counting is compiled in.
void clear_async(gpuStream_t stream = 0) {
  constexpr int block_size = 128;
  const int grid_size = static_cast<int>((m_hashtbl_size - 1) / block_size) + 1;
  init_hashtbl<<<grid_size, block_size, 0, stream>>>(
      m_hashtbl_values, m_hashtbl_size, unused_key, m_unused_element);
  if (count_collisions) {
    m_collisions = 0;
  }
}
// Returns the number of recorded insert collisions (only updated when
// count_collisions is enabled; see clear_async()).
unsigned long long get_num_collisions() const { return m_collisions; }
void print() {
for (size_type i = 0; i < 10; ++i) {
std::cout << i << ": " << m_hashtbl_values[i].first << ","
<< m_hashtbl_values[i].second << std::endl;
}
}
// Prefetches the hash table storage (when it is CUDA managed memory) and this
// object itself to device `dev_id` on `stream`. Always returns 0.
int prefetch(const int dev_id, gpuStream_t stream = 0) {
  cudaPointerAttributes hashtbl_values_ptr_attributes;
  cudaError_t status = cudaPointerGetAttributes(
      &hashtbl_values_ptr_attributes, m_hashtbl_values);
// CUDA 10+ renamed the managed-memory attribute: `type` replaces `isManaged`.
#if CUDART_VERSION >= 10000
  if (cudaSuccess == status &&
      hashtbl_values_ptr_attributes.type == cudaMemoryTypeManaged)
#else
  if (cudaSuccess == status && hashtbl_values_ptr_attributes.isManaged)
#endif
  {
    // Only managed allocations may be prefetched; plain device memory is
    // already resident and pageable host memory is not eligible.
    CUDA_RT_CALL(cudaMemPrefetchAsync(m_hashtbl_values,
                                      m_hashtbl_size * sizeof(value_type),
                                      dev_id, stream));
  }
  // NOTE(review): prefetching `this` assumes the map object itself lives in
  // managed memory — confirm with the allocation site of the map.
  CUDA_RT_CALL(cudaMemPrefetchAsync(this, sizeof(*this), dev_id, stream));
  return 0;
}
// Atomically accumulates x.second into the value already stored for key
// x.first (via accum_existing_value_atomic). Returns an iterator to the
// key's bucket, or end() when the key is not present — in which case nothing
// is accumulated. The keys_equal / precomputed-hash parameters are accepted
// for interface symmetry with insert()/get_insert().
template <class comparison_type = key_equal,
          typename hash_value_type = typename Hasher::result_type>
__forceinline__ __device__ const_iterator
accum(const value_type& x, comparison_type keys_equal = key_equal(),
      bool precomputed_hash = false,
      hash_value_type precomputed_hash_value = 0) {
  auto pos = find(x.first);
  if (!(pos == end())) {
    value_type* bucket = pos.getter();
    accum_existing_value_atomic(bucket->second, x);
  }
  return pos;
}
private:
const hasher m_hf;                   // hash functor for keys
const key_equal m_equal;             // key equality predicate
const mapped_type m_unused_element;  // sentinel value marking "no value yet"
allocator_type m_allocator;          // allocator for the bucket array
size_type m_hashtbl_size;            // number of buckets currently in use
size_type m_hashtbl_capacity;        // number of buckets allocated
value_type* m_hashtbl_values;        // bucket array of (key, value) pairs
unsigned long long m_collisions;     // collision count (see count_collisions)
};
#endif // CONCURRENT_UNORDERED_MAP_CUH
|
otfft_avxdit16omp.h | // Copyright (c) 2015, OK おじさん(岡久卓也)
// Copyright (c) 2015, OK Ojisan(Takuya OKAHISA)
// Copyright (c) 2017 to the present, DEWETRON GmbH
// OTFFT Implementation Version 9.5
// based on Stockham FFT algorithm
// from OK Ojisan(Takuya OKAHISA), source: http://www.moon.sannet.ne.jp/okahisa/stockham/stockham.html
#pragma once
#include "otfft_misc.h"
namespace OTFFT_NAMESPACE {
namespace OTFFT_AVXDIT16omp { /////////////////////////////////////////////////
using namespace OTFFT;
using namespace OTFFT_MISC;
///////////////////////////////////////////////////////////////////////////////
// Forward Butterfly Operation
///////////////////////////////////////////////////////////////////////////////
// One radix-16 forward DIT (decimation-in-time) butterfly stage of the
// Stockham FFT: reads 16 strided inputs from y, applies twiddle factors from
// W, and writes the 16 butterfly outputs to x. n is the transform length of
// this stage, s the stride. Processes two complex values per iteration using
// 256-bit (ymm) arithmetic; iterations are shared across the enclosing OpenMP
// parallel region.
template <int n, int s> struct fwdcore
{
static const int N = n*s;
// Output offsets: N0..Nf are the 16 equally spaced destinations, N/16 apart.
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#pragma omp for schedule(static)
for (int i = 0; i < N/32; i++) {
// Decompose the flat index into butterfly group p and in-group position q
// (q advances by 2 because each ymm holds two complex values).
const int p = i / (s/2);
const int q = i % (s/2) * 2;
const int sp = s*p;
const int s16p = 16*sp;
complex_vector xq_sp = x + q + sp;
complex_vector yq_s16p = y + q + s16p;
// Load the 16 inputs; twiddles w1p..wfp are powers of W[sp], built by
// multiplication to avoid 15 table lookups.
const ymm y0 = getpz2(yq_s16p+s*0x0);
const ymm w1p = duppz3(W[sp]);
const ymm y1 = mulpz2(w1p, getpz2(yq_s16p+s*0x1));
const ymm w2p = mulpz2(w1p, w1p);
const ymm y2 = mulpz2(w2p, getpz2(yq_s16p+s*0x2));
const ymm w3p = mulpz2(w1p, w2p);
const ymm y3 = mulpz2(w3p, getpz2(yq_s16p+s*0x3));
const ymm w4p = mulpz2(w2p, w2p);
const ymm y4 = mulpz2(w4p, getpz2(yq_s16p+s*0x4));
const ymm w5p = mulpz2(w2p, w3p);
const ymm y5 = mulpz2(w5p, getpz2(yq_s16p+s*0x5));
const ymm w6p = mulpz2(w3p, w3p);
const ymm y6 = mulpz2(w6p, getpz2(yq_s16p+s*0x6));
const ymm w7p = mulpz2(w3p, w4p);
const ymm y7 = mulpz2(w7p, getpz2(yq_s16p+s*0x7));
const ymm w8p = mulpz2(w4p, w4p);
const ymm y8 = mulpz2(w8p, getpz2(yq_s16p+s*0x8));
const ymm w9p = mulpz2(w4p, w5p);
const ymm y9 = mulpz2(w9p, getpz2(yq_s16p+s*0x9));
const ymm wap = mulpz2(w5p, w5p);
const ymm ya = mulpz2(wap, getpz2(yq_s16p+s*0xa));
const ymm wbp = mulpz2(w5p, w6p);
const ymm yb = mulpz2(wbp, getpz2(yq_s16p+s*0xb));
const ymm wcp = mulpz2(w6p, w6p);
const ymm yc = mulpz2(wcp, getpz2(yq_s16p+s*0xc));
const ymm wdp = mulpz2(w6p, w7p);
const ymm yd = mulpz2(wdp, getpz2(yq_s16p+s*0xd));
const ymm wep = mulpz2(w7p, w7p);
const ymm ye = mulpz2(wep, getpz2(yq_s16p+s*0xe));
const ymm wfp = mulpz2(w7p, w8p);
const ymm yf = mulpz2(wfp, getpz2(yq_s16p+s*0xf));
// First butterfly level: sums/differences of inputs 8 apart.
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
// jxpz2 multiplies by the imaginary unit j.
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
// Second butterfly level.
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
// Third butterfly level with fixed 8th/16th-root rotations (w8/v8/h1/h3/hd/hf).
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
// Final combine: write the 16 outputs, N/16 apart.
setpz2(xq_sp+N0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq_sp+N1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq_sp+N2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq_sp+N3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq_sp+N4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq_sp+N5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq_sp+N6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq_sp+N7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq_sp+N8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq_sp+N9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq_sp+Na, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq_sp+Nb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq_sp+Nc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq_sp+Nd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq_sp+Ne, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq_sp+Nf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
// Stride-1 specialization of the radix-16 forward butterfly. With s == 1 the
// 16 inputs of one butterfly are contiguous, so pairs of butterflies are
// loaded as interleaved ymm registers and de-interleaved with catlo/cathi.
// Twiddles come directly from the table W (two per load), two butterflies per
// iteration. `nowait` lets threads proceed without a barrier after the loop.
template <int N> struct fwdcore<N,1>
{
// Output offsets: 16 equally spaced destinations, N/16 apart.
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#pragma omp for schedule(static) nowait
for (int p = 0; p < N1; p += 2) {
complex_vector x_p = x + p;
complex_vector y_16p = y + 16*p;
// Twiddles w1p..wfp built from the two table entries at W+p.
const ymm w1p = getpz2(W+p);
const ymm w2p = mulpz2(w1p, w1p);
const ymm w3p = mulpz2(w1p, w2p);
const ymm w4p = mulpz2(w2p, w2p);
const ymm w5p = mulpz2(w2p, w3p);
const ymm w6p = mulpz2(w3p, w3p);
const ymm w7p = mulpz2(w3p, w4p);
const ymm w8p = mulpz2(w4p, w4p);
const ymm w9p = mulpz2(w4p, w5p);
const ymm wap = mulpz2(w5p, w5p);
const ymm wbp = mulpz2(w5p, w6p);
const ymm wcp = mulpz2(w6p, w6p);
const ymm wdp = mulpz2(w6p, w7p);
const ymm wep = mulpz2(w7p, w7p);
const ymm wfp = mulpz2(w7p, w8p);
// Load two adjacent butterflies (lowercase = first, uppercase = second).
const ymm ab = getpz2(y_16p+0x00);
const ymm cd = getpz2(y_16p+0x02);
const ymm ef = getpz2(y_16p+0x04);
const ymm gh = getpz2(y_16p+0x06);
const ymm ij = getpz2(y_16p+0x08);
const ymm kl = getpz2(y_16p+0x0a);
const ymm mn = getpz2(y_16p+0x0c);
const ymm op = getpz2(y_16p+0x0e);
const ymm AB = getpz2(y_16p+0x10);
const ymm CD = getpz2(y_16p+0x12);
const ymm EF = getpz2(y_16p+0x14);
const ymm GH = getpz2(y_16p+0x16);
const ymm IJ = getpz2(y_16p+0x18);
const ymm KL = getpz2(y_16p+0x1a);
const ymm MN = getpz2(y_16p+0x1c);
const ymm OP = getpz2(y_16p+0x1e);
// De-interleave (catlo/cathi) and apply twiddles.
const ymm y0 = catlo(ab, AB);
const ymm y1 = mulpz2(w1p, cathi(ab, AB));
const ymm y2 = mulpz2(w2p, catlo(cd, CD));
const ymm y3 = mulpz2(w3p, cathi(cd, CD));
const ymm y4 = mulpz2(w4p, catlo(ef, EF));
const ymm y5 = mulpz2(w5p, cathi(ef, EF));
const ymm y6 = mulpz2(w6p, catlo(gh, GH));
const ymm y7 = mulpz2(w7p, cathi(gh, GH));
const ymm y8 = mulpz2(w8p, catlo(ij, IJ));
const ymm y9 = mulpz2(w9p, cathi(ij, IJ));
const ymm ya = mulpz2(wap, catlo(kl, KL));
const ymm yb = mulpz2(wbp, cathi(kl, KL));
const ymm yc = mulpz2(wcp, catlo(mn, MN));
const ymm yd = mulpz2(wdp, cathi(mn, MN));
const ymm ye = mulpz2(wep, catlo(op, OP));
const ymm yf = mulpz2(wfp, cathi(op, OP));
// Radix-16 butterfly (identical structure to the generic fwdcore).
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
// Write the 16 outputs, N/16 apart.
setpz2(x_p+N0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(x_p+N1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(x_p+N2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(x_p+N3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(x_p+N4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(x_p+N5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(x_p+N6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(x_p+N7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(x_p+N8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(x_p+N9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(x_p+Na, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(x_p+Nb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(x_p+Nc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(x_p+Nd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(x_p+Ne, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(x_p+Nf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
template <int n, int s, bool eo, int mode> struct fwdend;
//-----------------------------------------------------------------------------
// Final radix-16 forward stage for n == 16 with stride s. Reads from y or x
// depending on the even/odd pass flag `eo`, applies the scaling selected by
// `mode` (via scalepz2), performs one twiddle-free radix-16 butterfly, and
// writes results back to x. Two complex values per iteration (ymm).
template <int s, bool eo, int mode> struct fwdend<16,s,eo,mode>
{
static const int N = 16*s;
void operator()(complex_vector x, complex_vector y) const noexcept
{
// eo selects which buffer currently holds the data for this pass.
complex_vector z = eo ? y : x;
#pragma omp for schedule(static)
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector zq = z + q;
// Load and scale the 16 strided inputs.
const ymm z0 = scalepz2<N,mode>(getpz2(zq+s*0x0));
const ymm z1 = scalepz2<N,mode>(getpz2(zq+s*0x1));
const ymm z2 = scalepz2<N,mode>(getpz2(zq+s*0x2));
const ymm z3 = scalepz2<N,mode>(getpz2(zq+s*0x3));
const ymm z4 = scalepz2<N,mode>(getpz2(zq+s*0x4));
const ymm z5 = scalepz2<N,mode>(getpz2(zq+s*0x5));
const ymm z6 = scalepz2<N,mode>(getpz2(zq+s*0x6));
const ymm z7 = scalepz2<N,mode>(getpz2(zq+s*0x7));
const ymm z8 = scalepz2<N,mode>(getpz2(zq+s*0x8));
const ymm z9 = scalepz2<N,mode>(getpz2(zq+s*0x9));
const ymm za = scalepz2<N,mode>(getpz2(zq+s*0xa));
const ymm zb = scalepz2<N,mode>(getpz2(zq+s*0xb));
const ymm zc = scalepz2<N,mode>(getpz2(zq+s*0xc));
const ymm zd = scalepz2<N,mode>(getpz2(zq+s*0xd));
const ymm ze = scalepz2<N,mode>(getpz2(zq+s*0xe));
const ymm zf = scalepz2<N,mode>(getpz2(zq+s*0xf));
// Radix-16 butterfly (no twiddles in the final stage).
const ymm a08 = addpz2(z0, z8); const ymm s08 = subpz2(z0, z8);
const ymm a4c = addpz2(z4, zc); const ymm s4c = subpz2(z4, zc);
const ymm a2a = addpz2(z2, za); const ymm s2a = subpz2(z2, za);
const ymm a6e = addpz2(z6, ze); const ymm s6e = subpz2(z6, ze);
const ymm a19 = addpz2(z1, z9); const ymm s19 = subpz2(z1, z9);
const ymm a5d = addpz2(z5, zd); const ymm s5d = subpz2(z5, zd);
const ymm a3b = addpz2(z3, zb); const ymm s3b = subpz2(z3, zb);
const ymm a7f = addpz2(z7, zf); const ymm s7f = subpz2(z7, zf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
// Write the 16 outputs back to x at stride s.
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
// Final stage for a complete 16-point transform (n == 16, s == 1): a single
// scalar (xmm, one complex value) radix-16 butterfly. Runs on one thread of
// the OpenMP team (#pragma omp single); zeroupper() clears the upper ymm
// halves before the SSE-width code to avoid AVX/SSE transition penalties.
template <bool eo, int mode> struct fwdend<16,1,eo,mode>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
#pragma omp single
{
zeroupper();
// eo selects which buffer currently holds the data for this pass.
complex_vector z = eo ? y : x;
// Load and scale the 16 inputs.
const xmm z0 = scalepz<16,mode>(getpz(z[0x0]));
const xmm z1 = scalepz<16,mode>(getpz(z[0x1]));
const xmm z2 = scalepz<16,mode>(getpz(z[0x2]));
const xmm z3 = scalepz<16,mode>(getpz(z[0x3]));
const xmm z4 = scalepz<16,mode>(getpz(z[0x4]));
const xmm z5 = scalepz<16,mode>(getpz(z[0x5]));
const xmm z6 = scalepz<16,mode>(getpz(z[0x6]));
const xmm z7 = scalepz<16,mode>(getpz(z[0x7]));
const xmm z8 = scalepz<16,mode>(getpz(z[0x8]));
const xmm z9 = scalepz<16,mode>(getpz(z[0x9]));
const xmm za = scalepz<16,mode>(getpz(z[0xa]));
const xmm zb = scalepz<16,mode>(getpz(z[0xb]));
const xmm zc = scalepz<16,mode>(getpz(z[0xc]));
const xmm zd = scalepz<16,mode>(getpz(z[0xd]));
const xmm ze = scalepz<16,mode>(getpz(z[0xe]));
const xmm zf = scalepz<16,mode>(getpz(z[0xf]));
// Radix-16 butterfly (scalar version of fwdend<16,s>).
const xmm a08 = addpz(z0, z8); const xmm s08 = subpz(z0, z8);
const xmm a4c = addpz(z4, zc); const xmm s4c = subpz(z4, zc);
const xmm a2a = addpz(z2, za); const xmm s2a = subpz(z2, za);
const xmm a6e = addpz(z6, ze); const xmm s6e = subpz(z6, ze);
const xmm a19 = addpz(z1, z9); const xmm s19 = subpz(z1, z9);
const xmm a5d = addpz(z5, zd); const xmm s5d = subpz(z5, zd);
const xmm a3b = addpz(z3, zb); const xmm s3b = subpz(z3, zb);
const xmm a7f = addpz(z7, zf); const xmm s7f = subpz(z7, zf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
// Write the 16 outputs to x.
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x2], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x3], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x4], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x6], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x7], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0xa], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xb], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xc], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xe], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xf], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
// Forward FFT
///////////////////////////////////////////////////////////////////////////////
// Recursive forward-FFT driver (radix-16 decimation-in-time, OpenMP
// variant). The n/16-point sub-transforms are evaluated first with the
// two buffers exchanged (ping-pong scheme tracked by the eo flag), after
// which fwdcore merges them with the radix-16 butterfly stage.
template <int n, int s, bool eo, int mode> struct fwdfft
{
inline void operator()(
complex_vector xs, complex_vector ys, const_complex_vector ws) const noexcept
{
// Sub-transforms run with the buffer roles swapped ...
fwdfft<n/16,16*s,!eo,mode>()(ys, xs, ws);
// ... then the radix-16 core stage combines the results into xs.
fwdcore<n,s>()(xs, ys, ws);
}
};
// Recursion terminator for n == 16: the remaining work is exactly one
// final radix-16 stage, so the twiddle-table argument is unused here.
template <int s, bool eo, int mode> struct fwdfft<16,s,eo,mode>
{
inline void operator()(
complex_vector xs, complex_vector ys, const_complex_vector) const noexcept
{
fwdend<16,s,eo,mode>()(xs, ys);
}
};
// Recursion terminator for n == 8: delegates to the radix-8 OpenMP
// implementation's end stage; no twiddle table is needed.
template <int s, bool eo, int mode> struct fwdfft<8,s,eo,mode>
{
inline void operator()(
complex_vector xs, complex_vector ys, const_complex_vector) const noexcept
{
OTFFT_AVXDIT8omp::fwdend<8,s,eo,mode>()(xs, ys);
}
};
// Recursion terminator for n == 4: delegates to the radix-4 OpenMP
// implementation's end stage; no twiddle table is needed.
template <int s, bool eo, int mode> struct fwdfft<4,s,eo,mode>
{
inline void operator()(
complex_vector xs, complex_vector ys, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::fwdend<4,s,eo,mode>()(xs, ys);
}
};
// Recursion terminator for n == 2: the radix-4 module also provides the
// 2-point end stage, so this delegates there as well.
template <int s, bool eo, int mode> struct fwdfft<2,s,eo,mode>
{
inline void operator()(
complex_vector xs, complex_vector ys, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::fwdend<2,s,eo,mode>()(xs, ys);
}
};
///////////////////////////////////////////////////////////////////////////////
// Inverse Butterfly Operation
///////////////////////////////////////////////////////////////////////////////
// Inverse radix-16 butterfly stage for general stride s (s >= 2; the
// s == 1 case has its own specialization below). Gathers 16 twiddled
// inputs from y, runs a 16-point inverse DIT butterfly in AVX registers
// (each ymm holds two adjacent complex values), and scatters the results
// to x at offsets N0..Nf.
template <int n, int s> struct invcore
{
// Total transform length covered by this stage.
static const int N = n*s;
// Output offsets: results land N/16 complex elements apart.
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
// x: output buffer, y: input buffer, W: twiddle-factor table.
// Only an "omp for" appears here, so this must be invoked from inside
// an enclosing "omp parallel" region (see fwd/inv at the bottom).
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
// Each iteration handles one pair of adjacent complex values, hence
// N/32 iterations (N/16 butterflies, two lanes per ymm).
#pragma omp for schedule(static)
for (int i = 0; i < N/32; i++) {
// Split the flat index into butterfly number p and lane offset q.
const int p = i / (s/2);
const int q = i % (s/2) * 2;
const int sp = s*p;
const int s16p = 16*sp;
complex_vector xq_sp = x + q + sp;
complex_vector yq_s16p = y + q + s16p;
// Load the 16 inputs, applying twiddle factor w^k to input k.
// W[N-sp] indexes the table from the top — for a forward twiddle
// table this yields the conjugate factor needed by the inverse
// transform (cf. the explicit cnjpz2() in the s == 1 variant).
// Higher powers w2p..wfp are built by successive multiplication.
const ymm y0 = getpz2(yq_s16p+s*0x0);
const ymm w1p = duppz3(W[N-sp]);
const ymm y1 = mulpz2(w1p, getpz2(yq_s16p+s*0x1));
const ymm w2p = mulpz2(w1p, w1p);
const ymm y2 = mulpz2(w2p, getpz2(yq_s16p+s*0x2));
const ymm w3p = mulpz2(w1p, w2p);
const ymm y3 = mulpz2(w3p, getpz2(yq_s16p+s*0x3));
const ymm w4p = mulpz2(w2p, w2p);
const ymm y4 = mulpz2(w4p, getpz2(yq_s16p+s*0x4));
const ymm w5p = mulpz2(w2p, w3p);
const ymm y5 = mulpz2(w5p, getpz2(yq_s16p+s*0x5));
const ymm w6p = mulpz2(w3p, w3p);
const ymm y6 = mulpz2(w6p, getpz2(yq_s16p+s*0x6));
const ymm w7p = mulpz2(w3p, w4p);
const ymm y7 = mulpz2(w7p, getpz2(yq_s16p+s*0x7));
const ymm w8p = mulpz2(w4p, w4p);
const ymm y8 = mulpz2(w8p, getpz2(yq_s16p+s*0x8));
const ymm w9p = mulpz2(w4p, w5p);
const ymm y9 = mulpz2(w9p, getpz2(yq_s16p+s*0x9));
const ymm wap = mulpz2(w5p, w5p);
const ymm ya = mulpz2(wap, getpz2(yq_s16p+s*0xa));
const ymm wbp = mulpz2(w5p, w6p);
const ymm yb = mulpz2(wbp, getpz2(yq_s16p+s*0xb));
const ymm wcp = mulpz2(w6p, w6p);
const ymm yc = mulpz2(wcp, getpz2(yq_s16p+s*0xc));
const ymm wdp = mulpz2(w6p, w7p);
const ymm yd = mulpz2(wdp, getpz2(yq_s16p+s*0xd));
const ymm wep = mulpz2(w7p, w7p);
const ymm ye = mulpz2(wep, getpz2(yq_s16p+s*0xe));
const ymm wfp = mulpz2(w7p, w8p);
const ymm yf = mulpz2(wfp, getpz2(yq_s16p+s*0xf));
// Stage 1: sums/differences of inputs eight apart (aXY = yX + yY,
// sXY = yX - yY).
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
// Stage 2: combine terms four apart. The fixed rotation helpers
// (jxpz2, w8xpz2, v8xpz2, h?xpz2) apply constant unit-magnitude
// factors; see their definitions earlier in this file.
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
// Stage 3: combine the even-index group (0,2,4,...) ...
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
// ... and the odd-index group (1,3,5,...).
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
// Final stage: scatter the 16 outputs. Note the output ordering is
// the mirror of the forward stage's (inverse-transform order).
setpz2(xq_sp+N0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq_sp+N1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq_sp+N2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq_sp+N3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq_sp+N4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq_sp+N5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq_sp+N6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq_sp+N7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq_sp+N8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq_sp+N9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq_sp+Na, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq_sp+Nb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq_sp+Nc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq_sp+Nd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq_sp+Ne, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq_sp+Nf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
// Inverse radix-16 butterfly stage specialized for stride s == 1.
// Two consecutive 16-point input groups are loaded and interleaved
// (catlo/cathi) so each ymm register holds matching elements of both
// groups, letting two butterflies run per loop iteration.
template <int N> struct invcore<N,1>
{
// Output offsets: results land N/16 complex elements apart.
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
// x: output, y: input, W: twiddle table. Must run inside an enclosing
// "omp parallel" region. The "nowait" drops the implicit barrier at
// loop end — NOTE(review): this relies on a later synchronization
// point before x is consumed; confirm against the callers.
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#pragma omp for schedule(static) nowait
for (int p = 0; p < N1; p += 2) {
complex_vector x_p = x + p;
complex_vector y_16p = y + 16*p;
// cnjpz2: conjugated twiddle factor — this is the inverse
// transform. Higher powers are built by repeated multiplication.
const ymm w1p = cnjpz2(getpz2(W+p));
const ymm w2p = mulpz2(w1p, w1p);
const ymm w3p = mulpz2(w1p, w2p);
const ymm w4p = mulpz2(w2p, w2p);
const ymm w5p = mulpz2(w2p, w3p);
const ymm w6p = mulpz2(w3p, w3p);
const ymm w7p = mulpz2(w3p, w4p);
const ymm w8p = mulpz2(w4p, w4p);
const ymm w9p = mulpz2(w4p, w5p);
const ymm wap = mulpz2(w5p, w5p);
const ymm wbp = mulpz2(w5p, w6p);
const ymm wcp = mulpz2(w6p, w6p);
const ymm wdp = mulpz2(w6p, w7p);
const ymm wep = mulpz2(w7p, w7p);
const ymm wfp = mulpz2(w7p, w8p);
// Load group p (lowercase) and group p+1 (uppercase) ...
const ymm ab = getpz2(y_16p+0x00);
const ymm cd = getpz2(y_16p+0x02);
const ymm ef = getpz2(y_16p+0x04);
const ymm gh = getpz2(y_16p+0x06);
const ymm ij = getpz2(y_16p+0x08);
const ymm kl = getpz2(y_16p+0x0a);
const ymm mn = getpz2(y_16p+0x0c);
const ymm op = getpz2(y_16p+0x0e);
const ymm AB = getpz2(y_16p+0x10);
const ymm CD = getpz2(y_16p+0x12);
const ymm EF = getpz2(y_16p+0x14);
const ymm GH = getpz2(y_16p+0x16);
const ymm IJ = getpz2(y_16p+0x18);
const ymm KL = getpz2(y_16p+0x1a);
const ymm MN = getpz2(y_16p+0x1c);
const ymm OP = getpz2(y_16p+0x1e);
// ... then interleave so lane 0 carries group p and lane 1 carries
// group p+1, and apply the twiddles.
const ymm y0 = catlo(ab, AB);
const ymm y1 = mulpz2(w1p, cathi(ab, AB));
const ymm y2 = mulpz2(w2p, catlo(cd, CD));
const ymm y3 = mulpz2(w3p, cathi(cd, CD));
const ymm y4 = mulpz2(w4p, catlo(ef, EF));
const ymm y5 = mulpz2(w5p, cathi(ef, EF));
const ymm y6 = mulpz2(w6p, catlo(gh, GH));
const ymm y7 = mulpz2(w7p, cathi(gh, GH));
const ymm y8 = mulpz2(w8p, catlo(ij, IJ));
const ymm y9 = mulpz2(w9p, cathi(ij, IJ));
const ymm ya = mulpz2(wap, catlo(kl, KL));
const ymm yb = mulpz2(wbp, cathi(kl, KL));
const ymm yc = mulpz2(wcp, catlo(mn, MN));
const ymm yd = mulpz2(wdp, cathi(mn, MN));
const ymm ye = mulpz2(wep, catlo(op, OP));
const ymm yf = mulpz2(wfp, cathi(op, OP));
// 16-point inverse butterfly — identical network to the general
// invcore above; see the comments there.
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
// Scatter the 16 results at stride N1 (inverse output ordering).
setpz2(x_p+N0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(x_p+N1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(x_p+N2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(x_p+N3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(x_p+N4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(x_p+N5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(x_p+N6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(x_p+N7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(x_p+N8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(x_p+N9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(x_p+Na, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(x_p+Nb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(x_p+Nc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(x_p+Nd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(x_p+Ne, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(x_p+Nf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
// Primary template for the final (smallest) inverse stage; only the
// radix-16 specializations below are defined in this translation unit.
template <int n, int s, bool eo, int mode> struct invend;
//-----------------------------------------------------------------------------
// Final inverse radix-16 stage for stride s >= 2. Applies the
// mode-dependent scaling (scalepz2) while loading, runs the twiddle-free
// 16-point inverse butterfly, and always writes the result to x.
template <int s, bool eo, int mode> struct invend<16,s,eo,mode>
{
static const int N = 16*s;
// x: primary buffer (always receives the output), y: work buffer.
// Must run inside an enclosing "omp parallel" region.
void operator()(complex_vector x, complex_vector y) const noexcept
{
// eo tracks which buffer holds the current data (ping-pong scheme).
complex_vector z = eo ? y : x;
#pragma omp for schedule(static)
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector zq = z + q;
// Load and scale the 16 strided inputs. No twiddle factors are
// applied in the end stage.
const ymm z0 = scalepz2<N,mode>(getpz2(zq+s*0x0));
const ymm z1 = scalepz2<N,mode>(getpz2(zq+s*0x1));
const ymm z2 = scalepz2<N,mode>(getpz2(zq+s*0x2));
const ymm z3 = scalepz2<N,mode>(getpz2(zq+s*0x3));
const ymm z4 = scalepz2<N,mode>(getpz2(zq+s*0x4));
const ymm z5 = scalepz2<N,mode>(getpz2(zq+s*0x5));
const ymm z6 = scalepz2<N,mode>(getpz2(zq+s*0x6));
const ymm z7 = scalepz2<N,mode>(getpz2(zq+s*0x7));
const ymm z8 = scalepz2<N,mode>(getpz2(zq+s*0x8));
const ymm z9 = scalepz2<N,mode>(getpz2(zq+s*0x9));
const ymm za = scalepz2<N,mode>(getpz2(zq+s*0xa));
const ymm zb = scalepz2<N,mode>(getpz2(zq+s*0xb));
const ymm zc = scalepz2<N,mode>(getpz2(zq+s*0xc));
const ymm zd = scalepz2<N,mode>(getpz2(zq+s*0xd));
const ymm ze = scalepz2<N,mode>(getpz2(zq+s*0xe));
const ymm zf = scalepz2<N,mode>(getpz2(zq+s*0xf));
// 16-point inverse butterfly — same network as invcore above;
// see the comments there.
const ymm a08 = addpz2(z0, z8); const ymm s08 = subpz2(z0, z8);
const ymm a4c = addpz2(z4, zc); const ymm s4c = subpz2(z4, zc);
const ymm a2a = addpz2(z2, za); const ymm s2a = subpz2(z2, za);
const ymm a6e = addpz2(z6, ze); const ymm s6e = subpz2(z6, ze);
const ymm a19 = addpz2(z1, z9); const ymm s19 = subpz2(z1, z9);
const ymm a5d = addpz2(z5, zd); const ymm s5d = subpz2(z5, zd);
const ymm a3b = addpz2(z3, zb); const ymm s3b = subpz2(z3, zb);
const ymm a7f = addpz2(z7, zf); const ymm s7f = subpz2(z7, zf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
// Store the 16 results back to x at stride s (inverse ordering).
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
// Final inverse stage for a bare 16-point transform (s == 1): a scalar
// SSE (xmm) path executed by one thread via "omp single". zeroupper()
// clears the upper ymm halves before switching to 128-bit code.
template <bool eo, int mode> struct invend<16,1,eo,mode>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
#pragma omp single
{
zeroupper();
// eo selects which buffer currently holds the data.
complex_vector z = eo ? y : x;
// Load and scale all 16 points (no twiddles in the end stage).
const xmm z0 = scalepz<16,mode>(getpz(z[0x0]));
const xmm z1 = scalepz<16,mode>(getpz(z[0x1]));
const xmm z2 = scalepz<16,mode>(getpz(z[0x2]));
const xmm z3 = scalepz<16,mode>(getpz(z[0x3]));
const xmm z4 = scalepz<16,mode>(getpz(z[0x4]));
const xmm z5 = scalepz<16,mode>(getpz(z[0x5]));
const xmm z6 = scalepz<16,mode>(getpz(z[0x6]));
const xmm z7 = scalepz<16,mode>(getpz(z[0x7]));
const xmm z8 = scalepz<16,mode>(getpz(z[0x8]));
const xmm z9 = scalepz<16,mode>(getpz(z[0x9]));
const xmm za = scalepz<16,mode>(getpz(z[0xa]));
const xmm zb = scalepz<16,mode>(getpz(z[0xb]));
const xmm zc = scalepz<16,mode>(getpz(z[0xc]));
const xmm zd = scalepz<16,mode>(getpz(z[0xd]));
const xmm ze = scalepz<16,mode>(getpz(z[0xe]));
const xmm zf = scalepz<16,mode>(getpz(z[0xf]));
// 16-point inverse butterfly — scalar (xmm) version of the same
// network used in invcore; see the comments there.
const xmm a08 = addpz(z0, z8); const xmm s08 = subpz(z0, z8);
const xmm a4c = addpz(z4, zc); const xmm s4c = subpz(z4, zc);
const xmm a2a = addpz(z2, za); const xmm s2a = subpz(z2, za);
const xmm a6e = addpz(z6, ze); const xmm s6e = subpz(z6, ze);
const xmm a19 = addpz(z1, z9); const xmm s19 = subpz(z1, z9);
const xmm a5d = addpz(z5, zd); const xmm s5d = subpz(z5, zd);
const xmm a3b = addpz(z3, zb); const xmm s3b = subpz(z3, zb);
const xmm a7f = addpz(z7, zf); const xmm s7f = subpz(z7, zf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
// Store all 16 results to x (inverse output ordering).
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x2], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x3], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x4], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x6], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x7], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0xa], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xb], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xc], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xe], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xf], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
// Inverse FFT
///////////////////////////////////////////////////////////////////////////////
// Recursive inverse-FFT driver: the n/16-point sub-transforms are run
// first with the two buffers exchanged (ping-pong tracked by eo), after
// which invcore merges them with the radix-16 inverse butterfly stage.
template <int n, int s, bool eo, int mode> struct invfft
{
inline void operator()(
complex_vector xs, complex_vector ys, const_complex_vector ws) const noexcept
{
// Sub-transforms run with the buffer roles swapped ...
invfft<n/16,16*s,!eo,mode>()(ys, xs, ws);
// ... then the radix-16 core stage combines the results into xs.
invcore<n,s>()(xs, ys, ws);
}
};
// Recursion terminator for n == 16: the remaining work is exactly one
// final radix-16 inverse stage; the twiddle-table argument is unused.
template <int s, bool eo, int mode> struct invfft<16,s,eo,mode>
{
inline void operator()(
complex_vector xs, complex_vector ys, const_complex_vector) const noexcept
{
invend<16,s,eo,mode>()(xs, ys);
}
};
// Recursion terminator for n == 8: delegates to the radix-8 OpenMP
// implementation's inverse end stage; no twiddle table is needed.
template <int s, bool eo, int mode> struct invfft<8,s,eo,mode>
{
inline void operator()(
complex_vector xs, complex_vector ys, const_complex_vector) const noexcept
{
OTFFT_AVXDIT8omp::invend<8,s,eo,mode>()(xs, ys);
}
};
// Recursion terminator for n == 4: delegates to the radix-4 OpenMP
// implementation's inverse end stage; no twiddle table is needed.
template <int s, bool eo, int mode> struct invfft<4,s,eo,mode>
{
inline void operator()(
complex_vector xs, complex_vector ys, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::invend<4,s,eo,mode>()(xs, ys);
}
};
// Recursion terminator for n == 2: the radix-4 module also provides the
// 2-point inverse end stage, so this delegates there as well.
template <int s, bool eo, int mode> struct invfft<2,s,eo,mode>
{
inline void operator()(
complex_vector xs, complex_vector ys, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::invend<2,s,eo,mode>()(xs, ys);
}
};
///////////////////////////////////////////////////////////////////////////////
// Power of 2 FFT Routine
///////////////////////////////////////////////////////////////////////////////
// Forward FFT entry point using the scale_length normalization mode (the
// actual scaling is applied by scalepz/scalepz2 inside the end stages).
// log_N: log2 of the transform length (0..24 supported).
// x: in/out data, y: work buffer of the same size, W: twiddle table.
// The "omp parallel" region wraps the whole switch: every thread enters
// the selected fwdfft instantiation, and the "omp for"/"omp single"
// constructs inside the stages share the work. firstprivate gives each
// thread its own copy of the three pointers.
inline void fwd(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_length;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: fwdfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: fwdfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: fwdfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: fwdfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: fwdfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: fwdfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: fwdfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: fwdfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: fwdfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: fwdfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: fwdfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: fwdfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: fwdfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: fwdfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: fwdfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: fwdfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: fwdfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: fwdfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: fwdfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: fwdfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: fwdfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: fwdfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: fwdfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: fwdfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
// Forward FFT entry point using the scale_1 normalization mode
// (unscaled by its name — the scaling decision is made by
// scalepz/scalepz2 in the end stages). Otherwise identical to fwd();
// see the comments there for the parallelization scheme.
inline void fwd0(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_1;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: fwdfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: fwdfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: fwdfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: fwdfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: fwdfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: fwdfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: fwdfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: fwdfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: fwdfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: fwdfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: fwdfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: fwdfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: fwdfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: fwdfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: fwdfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: fwdfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: fwdfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: fwdfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: fwdfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: fwdfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: fwdfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: fwdfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: fwdfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: fwdfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
// Forward FFT entry point using the scale_unitary normalization mode
// (presumably 1/sqrt(N) — confirm against scalepz2's definition).
// Otherwise identical to fwd(); see the comments there for the
// parallelization scheme.
inline void fwdu(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_unitary;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: fwdfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: fwdfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: fwdfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: fwdfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: fwdfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: fwdfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: fwdfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: fwdfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: fwdfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: fwdfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: fwdfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: fwdfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: fwdfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: fwdfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: fwdfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: fwdfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: fwdfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: fwdfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: fwdfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: fwdfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: fwdfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: fwdfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: fwdfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: fwdfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
// fwdn: "normalized" forward FFT; a plain alias for fwd().
// NOTE(review): fwd() is defined above this chunk -- presumably it uses the
// scale_length mode (1/N scaling), mirroring invn() below; confirm there.
inline void fwdn(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
fwd(log_N, x, y, W);
}
///////////////////////////////////////////////////////////////////////////////
// inv: inverse FFT of length 2^log_N with no output scaling (mode = scale_1).
// Same compile-time dispatch pattern as the forward variants: the switch
// selects a fully specialized invfft instantiation; log_N outside 0..24 is
// a no-op.  The omp parallel region spawns the team for the (presumed)
// work-sharing constructs inside invfft.
inline void inv(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_1;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: invfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: invfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: invfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: invfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: invfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: invfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: invfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: invfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: invfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: invfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: invfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: invfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: invfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: invfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: invfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: invfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: invfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: invfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: invfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: invfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: invfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: invfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: invfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: invfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
// inv0: inverse FFT without scaling.  inv() also uses mode scale_1, so this
// is a plain alias kept for naming symmetry with fwd0/fwdu/fwdn.
inline void inv0(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
inv(log_N, x, y, W);
}
// invu: inverse FFT of length 2^log_N with mode = scale_unitary (presumably
// 1/sqrt(N) scaling -- the mode's effect is defined inside invfft; confirm
// there).  Dispatch scheme identical to inv(); log_N outside 0..24 is a
// no-op.
inline void invu(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_unitary;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: invfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: invfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: invfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: invfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: invfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: invfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: invfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: invfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: invfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: invfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: invfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: invfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: invfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: invfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: invfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: invfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: invfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: invfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: invfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: invfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: invfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: invfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: invfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: invfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
// invn: inverse FFT of length 2^log_N with mode = scale_length (presumably
// 1/N scaling, the conventional normalization for an inverse transform --
// the mode's effect is defined inside invfft; confirm there).  Dispatch
// scheme identical to inv(); log_N outside 0..24 is a no-op.
inline void invn(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_length;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: invfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: invfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: invfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: invfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: invfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: invfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: invfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: invfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: invfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: invfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: invfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: invfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: invfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: invfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: invfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: invfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: invfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: invfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: invfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: invfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: invfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: invfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: invfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: invfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
} /////////////////////////////////////////////////////////////////////////////
}
|
phonon.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <dynmat.h>
#include <phonon.h>
#include <lapack_wrapper.h>
static size_t collect_undone_grid_points(size_t *undone,
char *phonon_done,
const size_t num_grid_points,
const size_t *grid_points);
static void get_undone_phonons(double *frequencies,
lapack_complex_double *eigenvectors,
const size_t *undone_grid_points,
const size_t num_undone_grid_points,
PHPYCONST int (*grid_address)[3],
const int mesh[3],
const double *fc2,
PHPYCONST double(*svecs_fc2)[27][3],
const int *multi_fc2,
const size_t num_patom,
const size_t num_satom,
const double *masses_fc2,
const int *p2s_fc2,
const int *s2p_fc2,
const double unit_conversion_factor,
PHPYCONST double (*born)[3][3],
PHPYCONST double dielectric[3][3],
PHPYCONST double reciprocal_lattice[3][3],
const double *q_direction,
const double nac_factor,
const char uplo);
static void get_gonze_undone_phonons(double *frequencies,
lapack_complex_double *eigenvectors,
const size_t *undone_grid_points,
const size_t num_undone_grid_points,
PHPYCONST int (*grid_address)[3],
const int mesh[3],
const double *fc2,
PHPYCONST double(*svecs_fc2)[27][3],
const int *multi_fc2,
PHPYCONST double (*positions)[3],
const size_t num_patom,
const size_t num_satom,
const double *masses_fc2,
const int *p2s_fc2,
const int *s2p_fc2,
const double unit_conversion_factor,
PHPYCONST double (*born)[3][3],
PHPYCONST double dielectric[3][3],
PHPYCONST double reciprocal_lattice[3][3],
const double *q_direction,
const double nac_factor,
const double *dd_q0,
PHPYCONST double(*G_list)[3],
const size_t num_G_points,
const double lambda,
const char uplo);
static int get_phonons(lapack_complex_double *eigvecs,
double *freqs,
const double q[3],
const double *fc2,
const double *masses,
const int *p2s,
const int *s2p,
const int *multi,
const size_t num_patom,
const size_t num_satom,
PHPYCONST double(*svecs)[27][3],
const int is_nac,
PHPYCONST double (*born)[3][3],
PHPYCONST double dielectric[3][3],
PHPYCONST double reciprocal_lattice[3][3],
const double *q_direction,
const double nac_factor,
const double unit_conversion_factor,
const char uplo);
static int get_gonze_phonons(lapack_complex_double *eigvecs,
double *freqs,
const double q[3],
const double *fc2,
const double *masses,
const int *p2s,
const int *s2p,
const int *multi,
PHPYCONST double (*positions)[3],
const size_t num_patom,
const size_t num_satom,
PHPYCONST double(*svecs)[27][3],
const int is_nac,
PHPYCONST double (*born)[3][3],
PHPYCONST double dielectric[3][3],
PHPYCONST double reciprocal_lattice[3][3],
const double *q_direction,
const double nac_factor,
const double *dd_q0,
PHPYCONST double(*G_list)[3],
const size_t num_G_points,
const double lambda,
const double unit_conversion_factor,
const char uplo);
static void
get_dynamical_matrix(lapack_complex_double *dynmat,
const double q[3],
const double *fc2,
const double *masses,
const int *p2s,
const int *s2p,
const int *multi,
const size_t num_patom,
const size_t num_satom,
PHPYCONST double(*svecs)[27][3],
const int is_nac,
PHPYCONST double (*born)[3][3], /* Wang NAC unless NULL */
PHPYCONST double dielectric[3][3],
PHPYCONST double reciprocal_lattice[3][3],
const double *q_direction,
const double nac_factor);
static void get_charge_sum(double (*charge_sum)[3][3],
const size_t num_patom,
const size_t num_satom,
const double q[3],
PHPYCONST double (*born)[3][3],
PHPYCONST double dielectric[3][3],
PHPYCONST double reciprocal_lattice[3][3],
const double *q_direction,
const double nac_factor);
static int needs_nac(PHPYCONST double (*born)[3][3],
PHPYCONST int (*grid_address)[3],
const size_t gp,
const double *q_direction);
/* Public entry point: compute phonon frequencies and eigenvectors at the
 * requested grid points (Wang-type NAC when born is given).
 * Grid points already marked in phonon_done are skipped; the remaining ones
 * are marked done and diagonalized by get_undone_phonons().
 * q_direction must be a pointer (NULL when no direction is specified). */
void
phn_get_phonons_at_gridpoints(double *frequencies,
                              lapack_complex_double *eigenvectors,
                              char *phonon_done,
                              const size_t num_phonons,
                              const size_t *grid_points,
                              const size_t num_grid_points,
                              PHPYCONST int (*grid_address)[3],
                              const int mesh[3],
                              const double *fc2,
                              PHPYCONST double(*svecs_fc2)[27][3],
                              const int *multi_fc2,
                              const size_t num_patom,
                              const size_t num_satom,
                              const double *masses_fc2,
                              const int *p2s_fc2,
                              const int *s2p_fc2,
                              const double unit_conversion_factor,
                              PHPYCONST double (*born)[3][3],
                              PHPYCONST double dielectric[3][3],
                              PHPYCONST double reciprocal_lattice[3][3],
                              const double *q_direction, /* must be pointer */
                              const double nac_factor,
                              const char uplo)
{
  size_t num_undone;
  size_t *undone;

  undone = (size_t*)malloc(sizeof(size_t) * num_phonons);
  if (undone == NULL) {
    /* Allocation failed: phonon_done is left untouched so the caller can
     * retry; nothing is computed. */
    return;
  }
  num_undone = collect_undone_grid_points(undone,
                                          phonon_done,
                                          num_grid_points,
                                          grid_points);
  get_undone_phonons(frequencies,
                     eigenvectors,
                     undone,
                     num_undone,
                     grid_address,
                     mesh,
                     fc2,
                     svecs_fc2,
                     multi_fc2,
                     num_patom,
                     num_satom,
                     masses_fc2,
                     p2s_fc2,
                     s2p_fc2,
                     unit_conversion_factor,
                     born,
                     dielectric,
                     reciprocal_lattice,
                     q_direction,
                     nac_factor,
                     uplo);
  free(undone);
  undone = NULL;
}
/* Public entry point: compute phonons at the given grid points using the
 * Gonze-Lee dipole-dipole (Ewald) treatment of the non-analytical term.
 * Grid points already marked in phonon_done are skipped; the rest are
 * marked done and diagonalized by get_gonze_undone_phonons(). */
void
phn_get_gonze_phonons_at_gridpoints(double *frequencies,
                                    lapack_complex_double *eigenvectors,
                                    char *phonon_done,
                                    const size_t num_phonons,
                                    const size_t *grid_points,
                                    const size_t num_grid_points,
                                    PHPYCONST int (*grid_address)[3],
                                    const int mesh[3],
                                    const double *fc2,
                                    PHPYCONST double(*svecs_fc2)[27][3],
                                    const int *multi_fc2,
                                    PHPYCONST double (*positions)[3],
                                    const size_t num_patom,
                                    const size_t num_satom,
                                    const double *masses_fc2,
                                    const int *p2s_fc2,
                                    const int *s2p_fc2,
                                    const double unit_conversion_factor,
                                    PHPYCONST double (*born)[3][3],
                                    PHPYCONST double dielectric[3][3],
                                    PHPYCONST double reciprocal_lattice[3][3],
                                    const double *q_direction, /* pointer */
                                    const double nac_factor,
                                    const double *dd_q0,
                                    PHPYCONST double(*G_list)[3],
                                    const size_t num_G_points,
                                    const double lambda,
                                    const char uplo)
{
  size_t num_undone;
  size_t *undone;

  undone = (size_t*)malloc(sizeof(size_t) * num_phonons);
  if (undone == NULL) {
    /* Allocation failed: phonon_done is left untouched so the caller can
     * retry; nothing is computed. */
    return;
  }
  num_undone = collect_undone_grid_points(undone,
                                          phonon_done,
                                          num_grid_points,
                                          grid_points);
  get_gonze_undone_phonons(frequencies,
                           eigenvectors,
                           undone,
                           num_undone,
                           grid_address,
                           mesh,
                           fc2,
                           svecs_fc2,
                           multi_fc2,
                           positions,
                           num_patom,
                           num_satom,
                           masses_fc2,
                           p2s_fc2,
                           s2p_fc2,
                           unit_conversion_factor,
                           born,
                           dielectric,
                           reciprocal_lattice,
                           q_direction,
                           nac_factor,
                           dd_q0,
                           G_list,
                           num_G_points,
                           lambda,
                           uplo);
  free(undone);
  undone = NULL;
}
/* Gather into undone[] every grid point from grid_points that has not yet
 * been processed, marking each collected point in phonon_done so that
 * duplicates are taken only once.  Returns the number collected. */
static size_t collect_undone_grid_points(size_t *undone,
                                         char *phonon_done,
                                         const size_t num_grid_points,
                                         const size_t *grid_points)
{
  size_t idx, count;

  count = 0;
  for (idx = 0; idx < num_grid_points; idx++) {
    const size_t gp = grid_points[idx];
    if (phonon_done[gp]) {
      continue;  /* already computed (or already queued in this call) */
    }
    phonon_done[gp] = 1;
    undone[count++] = gp;
  }
  return count;
}
/* Diagonalize the dynamical matrix (Wang-type NAC when applicable) for each
grid point in undone_grid_points.  Results land at fixed offsets per grid
point: eigenvectors at num_band*num_band*gp, frequencies at num_band*gp,
where num_band = 3 * num_patom.  The loop is parallelized over grid points
unless the build links a multithreaded BLAS. */
static void get_undone_phonons(double *frequencies,
lapack_complex_double *eigenvectors,
const size_t *undone_grid_points,
const size_t num_undone_grid_points,
PHPYCONST int (*grid_address)[3],
const int mesh[3],
const double *fc2,
PHPYCONST double(*svecs_fc2)[27][3],
const int *multi_fc2,
const size_t num_patom,
const size_t num_satom,
const double *masses_fc2,
const int *p2s_fc2,
const int *s2p_fc2,
const double unit_conversion_factor,
PHPYCONST double (*born)[3][3],
PHPYCONST double dielectric[3][3],
PHPYCONST double reciprocal_lattice[3][3],
const double *q_direction,
const double nac_factor,
const char uplo)
{
size_t i, j, gp, num_band;
int is_nac;
double q[3];
num_band = num_patom * 3;
/* To avoid multithreaded BLAS in OpenMP loop */
#ifndef MULTITHREADED_BLAS
#pragma omp parallel for private(j, q, gp, is_nac)
#endif
for (i = 0; i < num_undone_grid_points; i++) {
gp = undone_grid_points[i];
/* Reduced q-vector of this grid point: integer address / mesh. */
for (j = 0; j < 3; j++) {
q[j] = ((double)grid_address[gp][j]) / mesh[j];
}
/* NAC applies unless at Gamma with no q_direction (see needs_nac). */
is_nac = needs_nac(born, grid_address, gp, q_direction);
get_phonons(eigenvectors + num_band * num_band * gp,
frequencies + num_band * gp,
q,
fc2,
masses_fc2,
p2s_fc2,
s2p_fc2,
multi_fc2,
num_patom,
num_satom,
svecs_fc2,
is_nac,
born,
dielectric,
reciprocal_lattice,
q_direction,
nac_factor,
unit_conversion_factor,
uplo);
}
}
/* Diagonalize the dynamical matrix with the Gonze-Lee dipole-dipole NAC
 * term for each grid point in undone_grid_points.  Storage layout matches
 * get_undone_phonons(): eigenvectors at num_band*num_band*gp, frequencies
 * at num_band*gp, num_band = 3 * num_patom.
 *
 * Fix: `is_nac` was missing from the OpenMP private clause, so all threads
 * raced on a single shared copy (the sibling get_undone_phonons correctly
 * lists it).  It is now thread-private. */
static void get_gonze_undone_phonons(double *frequencies,
                                     lapack_complex_double *eigenvectors,
                                     const size_t *undone_grid_points,
                                     const size_t num_undone_grid_points,
                                     PHPYCONST int (*grid_address)[3],
                                     const int mesh[3],
                                     const double *fc2,
                                     PHPYCONST double(*svecs_fc2)[27][3],
                                     const int *multi_fc2,
                                     PHPYCONST double (*positions)[3],
                                     const size_t num_patom,
                                     const size_t num_satom,
                                     const double *masses_fc2,
                                     const int *p2s_fc2,
                                     const int *s2p_fc2,
                                     const double unit_conversion_factor,
                                     PHPYCONST double (*born)[3][3],
                                     PHPYCONST double dielectric[3][3],
                                     PHPYCONST double reciprocal_lattice[3][3],
                                     const double *q_direction,
                                     const double nac_factor,
                                     const double *dd_q0,
                                     PHPYCONST double(*G_list)[3],
                                     const size_t num_G_points,
                                     const double lambda,
                                     const char uplo)
{
  size_t i, j, gp, num_band;
  int is_nac;
  double q[3];

  num_band = num_patom * 3;

/* To avoid multithreaded BLAS in OpenMP loop */
#ifndef MULTITHREADED_BLAS
#pragma omp parallel for private(j, q, gp, is_nac)
#endif
  for (i = 0; i < num_undone_grid_points; i++) {
    gp = undone_grid_points[i];
    /* Reduced q-vector of this grid point: integer address / mesh. */
    for (j = 0; j < 3; j++) {
      q[j] = ((double)grid_address[gp][j]) / mesh[j];
    }
    is_nac = needs_nac(born, grid_address, gp, q_direction);
    get_gonze_phonons(eigenvectors + num_band * num_band * gp,
                      frequencies + num_band * gp,
                      q,
                      fc2,
                      masses_fc2,
                      p2s_fc2,
                      s2p_fc2,
                      multi_fc2,
                      positions,
                      num_patom,
                      num_satom,
                      svecs_fc2,
                      is_nac,
                      born,
                      dielectric,
                      reciprocal_lattice,
                      q_direction,
                      nac_factor,
                      dd_q0,
                      G_list,
                      num_G_points,
                      lambda,
                      unit_conversion_factor,
                      uplo);
  }
}
/* Build and diagonalize the dynamical matrix at one q-point.
eigvecs is used as scratch for the dynamical matrix, then overwritten by the
eigenvectors from phonopy_zheev.  freqs receives the signed square roots of
the eigenvalues times unit_conversion_factor: negative eigenvalues (imaginary
modes) are reported as negative frequencies.  Returns the LAPACK info code
from phonopy_zheev (0 on success). */
static int get_phonons(lapack_complex_double *eigvecs,
double *freqs,
const double q[3],
const double *fc2,
const double *masses,
const int *p2s,
const int *s2p,
const int *multi,
const size_t num_patom,
const size_t num_satom,
PHPYCONST double(*svecs)[27][3],
const int is_nac,
PHPYCONST double (*born)[3][3],
PHPYCONST double dielectric[3][3],
PHPYCONST double reciprocal_lattice[3][3],
const double *q_direction,
const double nac_factor,
const double unit_conversion_factor,
const char uplo)
{
size_t i, num_band;
int info;
num_band = num_patom * 3;
/* Store dynamical matrix in eigvecs array. */
get_dynamical_matrix(eigvecs,
q,
fc2,
masses,
p2s,
s2p,
multi,
num_patom,
num_satom,
svecs,
is_nac,
born,
dielectric,
reciprocal_lattice,
q_direction,
nac_factor);
/* Store eigenvalues in freqs array. */
/* Eigenvectors are overwritten on eigvecs array. */
info = phonopy_zheev(freqs, eigvecs, num_band, uplo);
/* Sqrt of eigenvalues are re-stored in freqs array.*/
/* (x>0)-(x<0) is sign(x), so imaginary modes come out negative. */
for (i = 0; i < num_band; i++) {
freqs[i] = sqrt(fabs(freqs[i])) *
((freqs[i] > 0) - (freqs[i] < 0)) * unit_conversion_factor;
}
return info;
}
/* Build and diagonalize the dynamical matrix at one q-point with the
Gonze-Lee dipole-dipole correction: D(q) is assembled without NAC, the
dipole-dipole matrix dd is computed (Ewald sum over G_list with damping
lambda), mass-weighted, and added to D(q) before diagonalization.
freqs receives signed square roots of the eigenvalues times
unit_conversion_factor; returns the LAPACK info code from phonopy_zheev.
NOTE(review): the malloc results for dd and q_dir_cart are not checked; on
allocation failure dym_get_dipole_dipole would be handed a NULL buffer --
consider adding checks. */
static int get_gonze_phonons(lapack_complex_double *eigvecs,
double *freqs,
const double q[3],
const double *fc2,
const double *masses,
const int *p2s,
const int *s2p,
const int *multi,
PHPYCONST double (*positions)[3],
const size_t num_patom,
const size_t num_satom,
PHPYCONST double(*svecs)[27][3],
const int is_nac,
PHPYCONST double (*born)[3][3],
PHPYCONST double dielectric[3][3],
PHPYCONST double reciprocal_lattice[3][3],
const double *q_direction,
const double nac_factor,
const double *dd_q0,
PHPYCONST double(*G_list)[3],
const size_t num_G_points,
const double lambda,
const double unit_conversion_factor,
const char uplo)
{
size_t i, j, k, l, adrs, num_band;
int info;
double mm;
double q_cart[3];
double *q_dir_cart;
lapack_complex_double *dd;
dd = NULL;
q_dir_cart = NULL;
num_band = num_patom * 3;
/* Dynamical matrix without NAC (charge_sum = NULL), stored in eigvecs. */
dym_get_dynamical_matrix_at_q((double*)eigvecs,
num_patom,
num_satom,
fc2,
q,
svecs,
multi,
masses,
s2p,
p2s,
NULL,
0);
dd = (lapack_complex_double*)
malloc(sizeof(lapack_complex_double) * num_band * num_band);
/* Cartesian q = reciprocal_lattice . q (reduced). */
for (i = 0; i < 3; i++) {
q_cart[i] = 0;
for (j = 0; j < 3; j++) {
q_cart[i] += reciprocal_lattice[i][j] * q[j];
}
}
/* Optional approach direction at Gamma, also converted to Cartesian. */
if (q_direction) {
q_dir_cart = (double*)malloc(sizeof(double) * 3);
for (i = 0; i < 3; i++) {
q_dir_cart[i] = 0;
for (j = 0; j < 3; j++) {
q_dir_cart[i] += reciprocal_lattice[i][j] * q_direction[j];
}
}
}
dym_get_dipole_dipole((double*)dd,
dd_q0,
G_list,
num_G_points,
num_patom,
q_cart,
q_dir_cart,
born,
dielectric,
positions,
nac_factor,
lambda,
1e-5);
if (q_direction) {
free(q_dir_cart);
q_dir_cart = NULL;
}
/* Add the mass-weighted dipole-dipole term: D += dd / sqrt(m_i m_j). */
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
mm = sqrt(masses[i] * masses[j]);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
eigvecs[adrs] = lapack_make_complex_double(
lapack_complex_double_real(eigvecs[adrs]) +
lapack_complex_double_real(dd[adrs]) / mm,
lapack_complex_double_imag(eigvecs[adrs]) +
lapack_complex_double_imag(dd[adrs]) / mm);
}
}
}
}
free(dd);
dd = NULL;
/* Store eigenvalues in freqs array. */
/* Eigenvectors are overwritten on eigvecs array. */
info = phonopy_zheev(freqs, eigvecs, num_band, uplo);
/* Sqrt of eigenvalues are re-stored in freqs array.*/
/* (x>0)-(x<0) is sign(x), so imaginary modes come out negative. */
for (i = 0; i < num_band; i++) {
freqs[i] = sqrt(fabs(freqs[i])) *
((freqs[i] > 0) - (freqs[i] < 0)) * unit_conversion_factor;
}
return info;
}
/* Assemble the dynamical matrix at q into dynmat.  When is_nac is nonzero
the Wang-type non-analytical correction is applied via a charge_sum table
(born must then be non-NULL).
NOTE(review): the charge_sum malloc result is not checked before use --
consider adding a check. */
static void
get_dynamical_matrix(lapack_complex_double *dynmat,
const double q[3],
const double *fc2,
const double *masses,
const int *p2s,
const int *s2p,
const int *multi,
const size_t num_patom,
const size_t num_satom,
PHPYCONST double(*svecs)[27][3],
const int is_nac,
PHPYCONST double (*born)[3][3], /* Wang NAC unless NULL */
PHPYCONST double dielectric[3][3],
PHPYCONST double reciprocal_lattice[3][3],
const double *q_direction,
const double nac_factor)
{
double (*charge_sum)[3][3];
charge_sum = NULL;
if (is_nac) {
charge_sum = (double(*)[3][3])
malloc(sizeof(double[3][3]) * num_patom * num_patom * 9);
get_charge_sum(charge_sum,
num_patom,
num_satom,
q,
born,
dielectric,
reciprocal_lattice,
q_direction,
nac_factor);
}
/* charge_sum == NULL means no NAC inside dym_get_dynamical_matrix_at_q. */
dym_get_dynamical_matrix_at_q((double*)dynmat,
num_patom,
num_satom,
fc2,
q,
svecs,
multi,
masses,
s2p,
p2s,
charge_sum,
0);
if (is_nac) {
free(charge_sum);
charge_sum = NULL;
}
}
/* Fill charge_sum with the Wang-type NAC contribution for wave vector q.
 * The q used is q_direction (approach direction at Gamma) when given,
 * otherwise q itself; it is first converted to Cartesian coordinates, then
 * the prefactor nac_factor / (q.eps.q) / N (N = num_satom / num_patom,
 * the number of primitive cells in the supercell) is handed to
 * dym_get_charge_sum together with the Born charges. */
static void get_charge_sum(double (*charge_sum)[3][3],
                           const size_t num_patom,
                           const size_t num_satom,
                           const double q[3],
                           PHPYCONST double (*born)[3][3],
                           PHPYCONST double dielectric[3][3],
                           PHPYCONST double reciprocal_lattice[3][3],
                           const double *q_direction,
                           const double nac_factor)
{
  size_t row, col;
  double factor, eps_q;
  double q_cart[3];
  const double *q_red;

  /* Choose the reduced vector and convert it to Cartesian coordinates. */
  q_red = q_direction ? q_direction : q;
  for (row = 0; row < 3; row++) {
    q_cart[row] = 0.0;
    for (col = 0; col < 3; col++) {
      q_cart[row] += reciprocal_lattice[row][col] * q_red[col];
    }
  }

  /* eps_q = q_cart . dielectric . q_cart */
  eps_q = 0.0;
  for (row = 0; row < 3; row++) {
    double acc = 0.0;
    for (col = 0; col < 3; col++) {
      acc += dielectric[row][col] * q_cart[col];
    }
    eps_q += acc * q_cart[row];
  }

  factor = nac_factor / eps_q / num_satom * num_patom;
  dym_get_charge_sum(charge_sum,
                     num_patom,
                     factor,
                     q_cart,
                     born);
}
/* Decide whether the non-analytical correction applies at grid point gp:
 * it does whenever Born charges are given, except exactly at Gamma
 * (grid address 0,0,0) with no approach direction supplied. */
static int needs_nac(PHPYCONST double (*born)[3][3],
                     PHPYCONST int (*grid_address)[3],
                     const size_t gp,
                     const double *q_direction)
{
  int at_gamma;

  if (!born) {
    return 0;
  }
  at_gamma = (grid_address[gp][0] == 0 &&
              grid_address[gp][1] == 0 &&
              grid_address[gp][2] == 0);
  return !(at_gamma && q_direction == NULL);
}
|
GB_unop__identity_int32_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_fp64)
// op(A') function: GB (_unop_tran__identity_int32_fp64)
// C type: int32_t
// A type: double
// cast: int32_t cij = GB_cast_to_int32_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = GB_cast_to_int32_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY operator entrywise: Cx [p] = (int32_t) Ax [p], with the
// double -> int32_t cast performed by GB_cast_to_int32_t.  Two layouts:
// full/sparse (Ab == NULL, every entry present) and bitmap (Ab [p] selects
// which entries exist).  Work is split statically over nthreads OpenMP
// threads.  Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB (_unop_apply__identity_int32_fp64)
(
int32_t *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: all anz entries are present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
int32_t z = GB_cast_to_int32_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose-and-apply kernel: C = identity (cast (A')).  The loop structure
// is supplied by the shared template GB_unop_transpose.c, which expands
// using the GB_* macros defined earlier in this file (GB_CAST_OP etc.).
// Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB (_unop_tran__identity_int32_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
MatrixInverseModel.h | /*
* Copyright 2016 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use Data file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _MATRIXINVERSEMODEL_
#define _MATRIXINVERSEMODEL_
#include <iomanip>
#include "Model/Model.h"
DEFINE_int32(n_power_iterations, 10, "Number of power iterations to run to calculate lambda.");
class MatrixInverseModel : public Model {
private:
int n_coords;
double lambda;
std::vector<double> B;
void Initialize(const std::string &input_line) {
// Input line should have a single number containing
// number of coordinates (# of rows/columns in square matrix).
std::stringstream input(input_line);
input >> n_coords;
_n_params = n_coords;
_n_coords = 1;
// Allocate memory.
_loss = tasvir::Array<double>::Allocate("loss", FLAGS_wid, FLAGS_n_threads, 1);
_data =
tasvir::Array<double>::Allocate("model", FLAGS_wid, FLAGS_n_threads, NumParameters() * NumCoordinates());
// Set elements in model to be a random number in range.
if (FLAGS_wid == 0) {
tasvir_log(&Data(0, true), sizeof(double) * NumParameters() * NumCoordinates());
for (int i = 0; i < NumParameters(); i++) {
Data(i, true) = rand() % FLAGS_random_range;
}
}
}
void MatrixVectorMultiply(const std::vector<Datapoint *> &datapoints, std::vector<double> &input_vector,
std::vector<double> &output_vector) {
// Write to temporary vector to allow for input_vector
// and output_vector referencing the same vector.
std::vector<double> temp_vector;
for (const auto &datapoint : datapoints) {
double cross_product = 0;
// Each datapoint is like a sparse row in the sparse matrix.
for (int i = 0; i < datapoint->GetWeights().size(); i++) {
int index = datapoint->GetCoordinates()[i];
double weight = datapoint->GetWeights()[i];
cross_product += input_vector[index] * weight;
}
temp_vector.push_back(cross_product);
}
// Copy over.
std::copy(temp_vector.begin(), temp_vector.end(), output_vector.begin());
// Do some basic error checking of vector lengths.
if (temp_vector.size() != output_vector.size() || temp_vector.size() != n_coords) {
std::cerr << "MatrixInverseModel: Wrong size after matrix vector multiply." << std::endl;
std::cerr << output_vector.size() << " " << temp_vector.size() << " " << n_coords << std::endl;
exit(0);
}
}
void Normalize(std::vector<double> &vec) {
double norm = 0;
for (int i = 0; i < vec.size(); i++) {
norm += vec[i] * vec[i];
}
norm = sqrt(norm);
for (int i = 0; i < vec.size(); i++) {
vec[i] /= norm;
}
}
std::vector<Datapoint *> TransposeSparseMatrix(const std::vector<Datapoint *> &d) {
std::vector<Datapoint *> r;
for (int i = 0; i < d.size(); i++) {
r.push_back(new MatrixInverseDatapoint(std::to_string(i), i));
}
for (int row = 0; row < d.size(); row++) {
for (int i = 0; i < d[row]->GetWeights().size(); i++) {
int column_index = d[row]->GetCoordinates()[i];
double weight = d[row]->GetWeights()[i];
const_cast<std::vector<int> &>(r[column_index]->GetCoordinates()).push_back(row);
const_cast<std::vector<double> &>(r[column_index]->GetWeights()).push_back(weight);
}
}
return r;
}
public:
MatrixInverseModel(const std::string &input_line) { Initialize(input_line); }
~MatrixInverseModel() {}
void SetUp(const std::vector<Datapoint *> &datapoints) override {
// Normalize the rows formed by the datapoint.
for (int dp = 0; dp < datapoints.size(); dp++) {
double sum_sqr = 0;
for (const auto &w : datapoints[dp]->GetWeights()) {
sum_sqr += w * w;
}
double norm_factor = sqrt(sum_sqr);
for (auto &w : const_cast<std::vector<double> &>(datapoints[dp]->GetWeights())) {
w /= norm_factor;
}
for (auto &m_w : ((MatrixInverseDatapoint *)datapoints[dp])->coordinate_weight_map) {
m_w.second /= norm_factor;
}
}
// Let B be norm(model^2 * random_vector).
B.resize(n_coords);
std::vector<double> random_vector;
for (int i = 0; i < n_coords; i++) {
random_vector.push_back(rand() % FLAGS_random_range);
}
MatrixVectorMultiply(datapoints, random_vector, B);
MatrixVectorMultiply(datapoints, B, B);
Normalize(B);
// Calculate lambda via power iteration.
std::vector<Datapoint *> transpose = TransposeSparseMatrix(datapoints);
std::vector<double> x_k, x_k_prime;
for (int i = 0; i < n_coords; i++) {
x_k.push_back(rand() % FLAGS_random_range);
x_k_prime.push_back(0);
}
for (int i = 0; i < FLAGS_n_power_iterations; i++) {
MatrixVectorMultiply(datapoints, x_k, x_k);
MatrixVectorMultiply(transpose, x_k, x_k);
Normalize(x_k);
}
MatrixVectorMultiply(datapoints, x_k, x_k_prime);
MatrixVectorMultiply(transpose, x_k_prime, x_k_prime);
lambda = 0;
for (int i = 0; i < n_coords; i++) {
lambda += x_k_prime[i] * 1.1 * x_k[i];
}
// Free memory of transpose sparse matrix.
for_each(transpose.begin(), transpose.end(), std::default_delete<Datapoint>());
}
double ComputeLoss(const std::vector<Datapoint *> &datapoints) override {
/* sample
_loss->Barrier();
size_t nr_datapoints = datapoints.size();
size_t batch_size = nr_datapoints / FLAGS_n_threads + (nr_datapoints % FLAGS_n_threads > 0);
size_t index_lo = FLAGS_wid * batch_size;
size_t index_hi = std::min(nr_datapoints, index_lo + batch_size);
tasvir_log(&_loss->DataWorker()[0], sizeof(double));
_loss->DataWorker()[0] = 0;
for (size_t i = index_lo; i < index_hi; i++) {
const auto &labels = datapoints[i]->GetWeights();
const auto &c = datapoints[i]->GetCoordinates();
const auto &weight = labels[0];
double cross_product = 0;
for (size_t j = 0; j < NumCoordinates(); j++)
cross_product += (Data(c[0], j, true) + Data(c[1], j, true)) * (2 * Data(c[1], j, true));
_loss->DataWorker()[0] += weight * (log(weight) - cross_product - C) * (log(weight) - cross_product - C);
}
_loss->Barrier();
_loss->ReduceAdd();
_loss->Barrier();
return _loss->DataParent()[0] / nr_datapoints;
*/
double loss = 0, sum_sqr = 0, second = 0;
for (int i = 0; i < n_coords; i++) {
second += Data(i, true) * B[i];
sum_sqr += Data(i, true) * Data(i, true);
}
//#pragma omp parallel for num_threads(FLAGS_n_threads) reduction(+ : loss)
for (const auto &datapoint : datapoints) {
double ai_t_x = 0;
double first = sum_sqr / (double)n_coords * lambda;
for (int j = 0; j < datapoint->GetWeights().size(); j++) {
int index = datapoint->GetCoordinates()[j];
double weight = datapoint->GetWeights()[j];
ai_t_x += Data(index, true) * weight;
}
first -= ai_t_x * ai_t_x;
loss += first / 2 - second / (double)n_coords;
}
return loss + 2;
}
void PrecomputeCoefficients(const Datapoint &datapoint, Gradient &g, Model &local_model) override {
if (g.coeffs.size() != n_coords)
g.coeffs.resize(n_coords);
const auto &weights = datapoint.GetWeights();
const auto &coordinates = datapoint.GetCoordinates();
double product = 0;
for (int i = 0; i < coordinates.size(); i++) {
product += local_model.Data(coordinates[i], false) * weights[i];
}
for (int i = 0; i < coordinates.size(); i++) {
g.coeffs[coordinates[i]] = product * weights[i];
}
}
    // Per-coordinate regularization scalar: the global lambda (estimated via
    // power iteration in Initialize) spread uniformly over all coordinates.
    void Lambda(int coordinate, double &out, Model &local_model) override { out = lambda / (double)n_coords; }
    // Gradient-independent (constant) term of this coordinate's update: the
    // precomputed B vector scaled by 1/n_coords.
    void Kappa(int coordinate, std::vector<double> &out, Model &local_model) override {
        out[0] = B[coordinate] / (double)n_coords;
    }
    // Gradient-dependent term of this coordinate's update: the coefficient
    // filled in by PrecomputeCoefficients for the current datapoint.
    void H_bar(int coordinate, std::vector<double> &out, Gradient &g, Model &local_model) override {
        out[0] = g.coeffs[coordinate];
    }
};
#endif
|
GB_unop__abs_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_uint16_uint16)
// op(A') function: GB (_unop_tran__abs_uint16_uint16)
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies the ABS unary operator entry-wise: Cx [p] = abs (Ax [p]).  For an
// unsigned type this is the identity, so the body is a plain copy.  The file
// is auto-generated; only comments are added here.
GrB_Info GB (_unop_apply__abs_uint16_uint16)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;      // abs is the identity for uint16_t
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ; // entry p not present in the bitmap
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply ABS.  The actual work
// is performed by the shared template in GB_unop_transpose.c, specialized via
// the GB_* macros defined above in this (auto-generated) file.
GrB_Info GB (_unop_tran__abs_uint16_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
shmem_ctx.c | /*
* Copyright (c) 2017 Intel Corporation. All rights reserved.
* This software is available to you under the BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* This test is derived from an example provided in the OpenSHMEM 1.4
* specification. Additional copyrights may apply.
*
*/
#include <stdio.h>
#include <shmem.h>
long pwrk[SHMEM_REDUCE_MIN_WRKDATA_SIZE];
long psync[SHMEM_REDUCE_SYNC_SIZE];
long task_cntr = 0; /* Next task counter */
long tasks_done = 0; /* Tasks done by this PE */
long total_done = 0; /* Total tasks done by all PEs */
// Hybrid OpenSHMEM + OpenMP work-stealing example (from the OpenSHMEM 1.4
// spec): each OpenMP thread creates a private SHMEM context and claims tasks
// via remote atomic fetch-and-increment, starting on the local PE and then
// helping the other PEs in round-robin order.  Returns nonzero if the global
// count of completed tasks is wrong.
int main(void) {
    int tl, i, ret;
    long ntasks = 1024; /* Total tasks per PE */
    // Initialize the reduction sync array before shmem_init_thread.
    for (i = 0; i < SHMEM_REDUCE_SYNC_SIZE; i++)
        psync[i] = SHMEM_SYNC_VALUE;
    ret = shmem_init_thread(SHMEM_THREAD_MULTIPLE, &tl);
    // THREAD_MULTIPLE is required because every OpenMP thread makes SHMEM calls.
    if (tl != SHMEM_THREAD_MULTIPLE || ret != 0) {
        printf("Init failed (requested thread level %d, got %d, ret %d)\n",
               SHMEM_THREAD_MULTIPLE, tl, ret);
        if (ret == 0) {
            shmem_global_exit(1);
        } else {
            return ret;
        }
    }
    int me = shmem_my_pe();
    int npes = shmem_n_pes();
    // tasks_done is a file-scope long; OpenMP sums the per-thread copies back
    // into it at the end of the parallel region.
    #pragma omp parallel reduction (+:tasks_done)
    {
        shmem_ctx_t ctx;
        int task_pe = me, pes_done = 0;
        // A private context lets each thread's atomics proceed independently;
        // fall back to the default context if creation fails.
        int ret = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
        if (ret != 0) {
            printf("%d: Warning, unable to create context (%d)\n", me, ret);
            ctx = SHMEM_CTX_DEFAULT;
        }
        /* Process tasks on all PEs, starting with the local PE. After
         * all tasks on a PE are completed, help the next PE. */
        while (pes_done < npes) {
            // Atomically claim the next task index on task_pe's counter.
            long task = shmem_ctx_long_atomic_fetch_inc(ctx, &task_cntr, task_pe);
            while (task < ntasks) {
                /* Perform task (task_pe, task) */
                tasks_done++;
                task = shmem_ctx_long_atomic_fetch_inc(ctx, &task_cntr, task_pe);
            }
            pes_done++;
            task_pe = (task_pe + 1) % npes;
        }
        if (ctx != SHMEM_CTX_DEFAULT) shmem_ctx_destroy(ctx);
    }
    // Sum tasks_done across all PEs; every task is counted exactly once, so
    // the global total must be ntasks * npes.
    shmem_long_sum_to_all(&total_done, &tasks_done, 1, 0, 0, npes, pwrk, psync);
    int result = (total_done != ntasks * npes);
    if (me == 0 && result)
        printf("Error: total_done is %ld, expected %ld\n", total_done, ntasks * npes);
    shmem_finalize();
    return result;
}
|
serialized.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
#include "callback.h"
// OMPT lit test: nested serialized (num_threads(1)) parallel regions plus an
// explicit task; print_ids/print_fuzzy_address emit the events that the
// FileCheck CHECK lines below verify.  The CHECK comments are part of the
// test's expected output and must not be altered.
int main()
{
  // print_frame(0);
#pragma omp parallel num_threads(1)
  {
    // print_frame(1);
    print_ids(0);
    print_ids(1);
    // print_frame(0);
#pragma omp parallel num_threads(1)
    {
      // print_frame(1);
      print_ids(0);
      print_ids(1);
      print_ids(2);
      // print_frame(0);
#pragma omp task
      {
        // print_frame(1);
        print_ids(0);
        print_ids(1);
        print_ids(2);
        print_ids(3);
      }
    }
    print_fuzzy_address(1);
  }
  print_fuzzy_address(2);
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_end'
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // make sure initial data pointers are null
  // CHECK-NOT: 0: parallel_data initially not null
  // CHECK-NOT: 0: task_data initially not null
  // CHECK-NOT: 0: thread_data initially not null
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=1, codeptr_ra=[[OUTER_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=1, codeptr_ra=[[INNER_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[NESTED_IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id=[[EXPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[NESTED_IMPLICIT_TASK_ID]], second_task_id=[[EXPLICIT_TASK_ID]], prior_task_status=ompt_task_switch=7
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[EXPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[EXPLICIT_TASK_ID]], second_task_id=[[NESTED_IMPLICIT_TASK_ID]], prior_task_status=ompt_task_complete=1
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[EXPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=0, task_id=[[NESTED_IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]], codeptr_ra=[[INNER_RETURN_ADDRESS]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[INNER_RETURN_ADDRESS]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]], codeptr_ra=[[OUTER_RETURN_ADDRESS]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[OUTER_RETURN_ADDRESS]]
  return 0;
}
|
GB_unop__acosh_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__acosh_fp32_fp32)
// op(A') function: GB (_unop_tran__acosh_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = acoshf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = acoshf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = acoshf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOSH || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies the ACOSH unary operator entry-wise: Cx [p] = acoshf (Ax [p]).
// The file is auto-generated; only comments are added here.
GrB_Info GB (_unop_apply__acosh_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = acoshf (z) ;   // NaN for aij < 1, per C99 acoshf
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ; // entry p not present in the bitmap
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = acoshf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply ACOSH.  The work is
// done by the shared template in GB_unop_transpose.c, specialized via the
// GB_* macros defined above in this (auto-generated) file.
GrB_Info GB (_unop_tran__acosh_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__lnot_fp32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_int16
// op(A') function: GB_tran__lnot_fp32_int16
// C type: float
// A type: int16_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Applies the logical-NOT operator entry-wise with typecast:
// Cx [p] = (float) !(Ax [p] != 0).  Auto-generated file; comments only.
GrB_Info GB_unop__lnot_fp32_int16
(
    float *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // expands to: cast aij to float, then Cx [p] = !(x != 0)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply logical NOT.  Phase 2 of
// the two-phase transpose template; the work is done by
// GB_unaryop_transpose.c specialized via the GB_* macros above.
GrB_Info GB_tran__lnot_fp32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Validation.h | //
// Created by kilian on 02/05/17.
//
#ifndef STERMPARSER_VALIDATION_H
#define STERMPARSER_VALIDATION_H
#include <iostream>
#include "LatentAnnotation.h"
#include "TraceManager.h"
#include "../util.h"
#include "HypergraphRanker.h"
namespace Trainer {
    // Abstract interface for scoring a latent annotation (an annotated
    // grammar) against held-out data.  Higher validation_score is better;
    // minimum_score is the worst possible value (used as the initial best).
    class ValidationLA {
    public:
        // Score latentAnnotation on the validation data; higher is better.
        virtual double validation_score(const LatentAnnotation &latentAnnotation) = 0;
        // Lower bound of the score range (e.g. -inf for log-likelihood).
        virtual double minimum_score() = 0;
        // Human-readable name of the scored quantity (for logging).
        virtual std::string quantity() const = 0;
        // Optional hook to release cached per-trace resources.
        virtual void clean_up() {};
        // Number of validation items that could not be parsed/ranked
        // during the most recent validation_score call.
        virtual unsigned getParseFailures() const = 0;
    };
    // Validation by held-out (log-)likelihood: the score of an annotation is
    // its log-likelihood on the validation corpus, with minimum -infinity.
    class ValidationLikelihoodLA : public ValidationLA {
    public:
        virtual double validation_score(const LatentAnnotation &latentAnnotation) {
            return log_likelihood(latentAnnotation);
        }
        // Log-likelihood of the validation data under latentAnnotation.
        virtual double log_likelihood(const LatentAnnotation &latentAnnotation) = 0;
        virtual std::string quantity() const {
            return "likelihood";
        }
        virtual double minimum_score() {
            return minus_infinity;
        }
    };
template<typename Nonterminal, typename TraceID>
class SimpleLikelihoodLA : public ValidationLikelihoodLA {
protected:
template<typename T1, typename T2>
using MAPTYPE = typename std::unordered_map<T1, T2>;
using TraceIterator = ConstManagerIterator<Trace<Nonterminal, TraceID>>;
const TraceManagerPtr<Nonterminal, TraceID> traceManager;
std::shared_ptr<const GrammarInfo2> grammarInfo;
std::shared_ptr<StorageManager> storageManager;
unsigned parseFailures;
private:
const unsigned threads;
protected:
const bool debug;
std::vector<MAPTYPE<Element<Node<Nonterminal>>, WeightVector>> tracesInsideWeights;
public:
SimpleLikelihoodLA(
TraceManagerPtr<Nonterminal, TraceID> traceManager
, std::shared_ptr<const GrammarInfo2> grammarInfo
, std::shared_ptr<StorageManager> storageManager
, unsigned threads = 1
, bool debug = false
)
: traceManager(traceManager), grammarInfo(grammarInfo), storageManager(storageManager),
threads(threads), debug(debug) {};
virtual double log_likelihood(const LatentAnnotation &latentAnnotation) {
if (traceManager->cend() != traceManager->cbegin() + traceManager->size()) {
std::cerr << "end - begin " << traceManager->cend() - traceManager->cbegin() << std::endl;
std::cerr << "size: " << traceManager->size();
std::abort();
}
if (tracesInsideWeights.size() < traceManager->size()) {
tracesInsideWeights.resize(traceManager->size());
}
return log_likelihood_la(latentAnnotation);
}
void clean_up() {
storageManager->free_weight_maps(tracesInsideWeights);
}
private:
inline double log_likelihood_la(
const LatentAnnotation &latentAnnotation
) {
double logLikelihood{0.0};
unsigned failures{0};
#ifdef _OPENMP
omp_set_num_threads(threads);
#endif
// #pragma omp declare reduction (+ : omp_out += omp_in ) initializer (omp_priv = omp_orig)
#pragma omp parallel for schedule(dynamic, 10) reduction (+:logLikelihood, failures)
for (TraceIterator traceIterator = traceManager->cbegin();
traceIterator < traceManager->cend(); ++traceIterator) {
const auto &trace = *traceIterator;
if (trace->get_hypergraph()->size() == 0)
continue;
if (traceManager->cbegin() + tracesInsideWeights.size() <= traceIterator) {
std::cerr << "tried to access non-existent inside or outside weight map" << std::endl;
std::cerr << "it - begin " << traceIterator - traceManager->cbegin() << std::endl;
std::cerr << "in size: " << tracesInsideWeights.size() << std::endl;
abort();
}
// create inside weight for each node if necessary
if (tracesInsideWeights[traceIterator - traceManager->cbegin()].size() !=
trace->get_hypergraph()->size()) {
tracesInsideWeights[traceIterator - traceManager->cbegin()].clear();
for (const auto &node : *(trace->get_hypergraph())) {
tracesInsideWeights[traceIterator - traceManager->cbegin()].emplace(
node
, storageManager->create_weight_vector<WeightVector>(latentAnnotation.nonterminalSplits[node->get_label_id()]));
}
}
MAPTYPE<Element<Node<Nonterminal>>, int> insideLogScales;
for (const auto &node : *(trace->get_hypergraph())) {
insideLogScales[node] = 0;
}
trace->inside_weights_la(
latentAnnotation
, tracesInsideWeights[traceIterator - traceManager->cbegin()]
, insideLogScales
);
Eigen::Tensor<double, 1> traceRootProbabilities{
compute_trace_root_probabilities(traceIterator, latentAnnotation)};
Eigen::Tensor<double, 0> traceRootProbability = traceRootProbabilities.sum();
if (not std::isnan(traceRootProbability(0))
and not std::isinf(traceRootProbability(0))
and traceRootProbability(0) > 0) {
logLikelihood += log(traceRootProbability(0)) * trace->get_frequency();
} else {
++failures;
//std::cerr << "trace Root Probability " << traceRootProbability(0) << std::endl;
// sentences with 0 probability are simply ignored, cf.
// https://github.com/slavpetrov/berkeleyparser/blob/release-1.0/src/edu/berkeley/nlp/PCFGLA/GrammarTrainer.java?ts=2#L558
// alternatively one could add a positive constant to each probability
// to get a useful validation measure
// logLikelihood += minus_infinity;
continue;
}
if (debug)
std::cerr << "instance root probability: " << std::endl << traceRootProbabilities << std::endl;
}
parseFailures = failures;
return logLikelihood;
}
unsigned getParseFailures() const {
return parseFailures;
}
protected:
Eigen::Tensor<double, 1> compute_trace_root_probabilities(
TraceIterator traceIterator
, const LatentAnnotation &latentAnnotation
) {
const auto &rootInsideWeight
= tracesInsideWeights[traceIterator - traceManager->cbegin()].at(traceIterator->get_goal());
const auto &rootOutsideWeight = latentAnnotation.rootWeights;
return Eigen::Tensor<double, 1> {rootOutsideWeight * rootInsideWeight};
}
};
template <typename Nonterminal, typename TraceID>
class CandidateScoreValidator : public ValidationLA {
private:
std::shared_ptr<const GrammarInfo2> grammarInfo;
std::shared_ptr<StorageManager> storageManager;
const std::string _quantity;
std::vector<HypergraphRanker<Nonterminal, TraceID>> traces;
std::vector<std::vector<double>> scores;
std::vector<double> maxScores;
double globalMaxScore {0};
const double minimumScore;
unsigned parseFailures;
public:
CandidateScoreValidator(
std::shared_ptr<const GrammarInfo2> grammarInfo
, std::shared_ptr<StorageManager> storageManager
, std::string _quantity = "score"
, double minimumScore = minus_infinity
) : grammarInfo(grammarInfo)
, storageManager(storageManager)
, _quantity(_quantity)
, minimumScore(minimumScore) {};
void add_scored_candidates(TraceManagerPtr<Nonterminal, TraceID> traces, std::vector<double> scores, double maxScore) {
this->traces.emplace_back(traces, grammarInfo, storageManager);
this->scores.push_back(scores);
this->maxScores.push_back(maxScore);
globalMaxScore += maxScore;
}
virtual double validation_score(const LatentAnnotation & latentAnnotation) {
double globalScore {0.0};
unsigned failures {0};
for (size_t entry {0}; entry < scores.size(); ++entry) {
auto ranking = traces[entry].rank(latentAnnotation);
if (ranking.size() > 0) {
size_t selected {ranking[0].first};
globalScore += scores[entry][selected];
} else {
++failures;
}
}
parseFailures = failures;
if (globalMaxScore > 0.0) {
return globalScore / globalMaxScore;
} else {
assert (globalScore == 0.0);
return 0.0;
}
}
virtual double minimum_score() {
return minimumScore;
}
virtual std::string quantity() const {
return _quantity;
}
unsigned getParseFailures() const {
return parseFailures;
}
};
}
#endif //STERMPARSER_VALIDATION_H
|
parallelEdgeListLoad.h | #pragma once
#include<omp.h>
#include<iostream>
#include<unordered_map>
#include<unordered_set>
#include<string>
#include<vector>
#include<queue>
#include<limits>
#include<fstream>
#include<stack>
#include<sstream>
#include<cmath>
#include<cstring>
#include<exception>
#include<list>
// needed for posix io
#include<cstdio>
#include <sys/types.h>
#include <sys/stat.h>
#include"cmdln.h"
#include"util.h"
#include<omp.h>
using namespace std;
// Loads a binary edge list (two 4-byte node ids + one 4-byte float weight per
// edge) from graphPath into result.  Each OpenMP thread reads a contiguous
// slice of the file and splices its local edges into result under a critical
// section.  If subset is non-empty, only edges touching subset are kept
// (induced-neighborhood loading).  Throws runtime_error on type-size
// mismatch or on any file/IO error.
void fastLoadEdgeList(const string& graphPath, // file path
                      list<edge>& result, //pass by ref result
                      const unordered_set<nodeIdx>& subset = unordered_set<nodeIdx>()) // if inducing net
{
  bool loadEverything = subset.size() == 0;
  const string hwErrMsg =
      "ERROR: the system does not have a 4 byte uint and a 4 byte float.";
  const string fileErrMsg =
      "ERROR: the supplied graph files is invalid.";
  if(NUM_BYTE_PER_EDGE != 2 * sizeof(nodeIdx) + sizeof(float)){
    throw runtime_error(hwErrMsg);
  }
  // Determine the file size; fail loudly if the file is missing/unreadable
  // (the original read a garbage st_size on stat failure).
  struct stat st;
  if(stat(graphPath.c_str(), &st) != 0){
    throw runtime_error(fileErrMsg);
  }
  size_t totalFileSize = st.st_size;
  if(totalFileSize % NUM_BYTE_PER_EDGE){
    throw runtime_error(fileErrMsg);
  }
  size_t totalEdgeCount = totalFileSize / NUM_BYTE_PER_EDGE;
  // Throwing from inside a parallel region would terminate the program, so
  // record IO failures in a shared flag and throw after the region ends.
  int ioError = 0;
#pragma omp parallel
  {
    size_t tid = omp_get_thread_num();
    size_t totalThreadNum = omp_get_num_threads();
    size_t edgesPerThread = totalEdgeCount / totalThreadNum;
    size_t startEdgeNum = tid * edgesPerThread;
    size_t endEdgeNum = startEdgeNum + edgesPerThread;
    if(tid == totalThreadNum-1){
      // last thread also takes the remainder edges
      endEdgeNum = totalEdgeCount;
    }
    list<edge> localEdges;
    FILE* file = fopen(graphPath.c_str(), "rb");
    if(file == NULL){
#pragma omp atomic write
      ioError = 1;
    } else {
      fseek(file, startEdgeNum * NUM_BYTE_PER_EDGE, SEEK_SET);
      charBuff buff[3];
      //load all the edges this thread is responsible for
      for(size_t i = startEdgeNum; i < endEdgeNum; ++i){
        if(fread(buff, sizeof(charBuff), 3, file) != 3){
          // short read: file changed or IO error; abandon this slice
#pragma omp atomic write
          ioError = 1;
          break;
        }
        edge e(buff[0].i, buff[1].i, buff[2].f);
        if(loadEverything ||
           subset.find(e.a) != subset.end() ||
           subset.find(e.b) != subset.end())
        {
          localEdges.push_back(e);
        }
      }
      fclose(file);
      if(localEdges.size() > 0){
        //concatinate local vecs to the result
#pragma omp critical
        result.splice(result.end(), localEdges);
      }
    }
  }
  if(ioError){
    throw runtime_error(fileErrMsg);
  }
}
// Loads the edges incident to the current node set from path, appends them to
// edges, then grows allNodes with the endpoints of every edge seen so far.
// The static counter tracks how many expansion rounds have run; its value
// (the neighborhood order just loaded) is returned.
size_t loadAnotherOrderNeighbors(const string& path,
                                 list<edge>& edges,
                                 unordered_set<nodeIdx>& allNodes){
  static size_t order = 0;
  ++order;
  fastLoadEdgeList(path, edges, allNodes);
  for(auto it = edges.begin(); it != edges.end(); ++it){
    allNodes.insert(it->a);
    allNodes.insert(it->b);
  }
  return order;
}
|
sincos2.c | #include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))
static PyObject *sincos2(PyObject *self, PyObject *args, PyObject *keywds);
/*
 * sincos2(rampparams, x, etc=None) -> ndarray
 *
 * Evaluates a two-harmonic sine/cosine model at each time/phase point in x.
 * rampparams holds, in order: c1a,c1o,c2a,c2o,s1a,s1o,s2a,s2o (amplitudes and
 * offsets), p (period), c (vertical offset), midpt, t14, t12 (eclipse timing).
 * During eclipse the curve is flattened by evaluating the model at midpt.
 * assumes rampparams and x are 1-D float64 arrays -- TODO confirm callers
 * always pass np.float64 (the IND macro reads raw doubles via strides).
 */
static PyObject *sincos2(PyObject *self, PyObject *args, PyObject *keywds)
{
  PyObject *etc = NULL;   /* fix: optional "O" leaves this unset if omitted */
  PyArrayObject *x,*y, *rampparams;
  double c1a,c1o,c2a,c2o,s1a,s1o,s2a,s2o,p,c,midpt,t14,t12,pi,mod;
  npy_intp i;             /* fix: match dims[0] type; int could overflow */
  npy_intp dims[1];

  static char *kwlist[] = {"rampparams","x","etc",NULL};
  if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O",kwlist,&rampparams,&x,&etc))
  {
    return NULL;
  }

  c1a = IND(rampparams,0);
  c1o = IND(rampparams,1);
  c2a = IND(rampparams,2);
  c2o = IND(rampparams,3);
  s1a = IND(rampparams,4);
  s1o = IND(rampparams,5);
  s2a = IND(rampparams,6);
  s2o = IND(rampparams,7);
  p = IND(rampparams,8);
  c = IND(rampparams,9);
  midpt = IND(rampparams,10);
  t14 = IND(rampparams,11);
  t12 = IND(rampparams,12);

  pi = 3.141592653589793;

  dims[0] = x->dimensions[0];
  y = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);
  if(y == NULL)
  {
    /* fix: allocation failure previously dereferenced NULL below;
     * the exception is already set by NumPy */
    return NULL;
  }

  #pragma omp parallel for private(mod)
  for(i=0;i<dims[0];i++)
  {
    //modulus works different in c when values are negative
    //mod = fmod(IND(x,i)-midpt,p);
    mod = (IND(x,i)-midpt) - p*floor((IND(x,i)-midpt)/p);
    if ((mod >= (p-(t14-t12)/2.)) || (mod <= ((t14-t12)/2.)))
    {
      //Flatten sin/cos during eclipse
      IND(y,i) = c1a*cos(2*pi*(midpt-c1o)/p) + c2a*cos(4*pi*(midpt-c2o)/p) + s1a*sin(2*pi*(midpt-s1o)/p) + s2a*sin(4*pi*(midpt-s2o)/p) + c;
      //printf("%i, ", i);
    }
    else
    {
      //Standard calculation
      IND(y,i) = c1a*cos(2*pi*(IND(x,i)-c1o)/p) + c2a*cos(4*pi*(IND(x,i)-c2o)/p) + s1a*sin(2*pi*(IND(x,i)-s1o)/p) + s2a*sin(4*pi*(IND(x,i)-s2o)/p) + c;
    }
  }
  //printf("\n");
  return PyArray_Return(y);
}
static char module_docstring[] = "\
NAME:\n\
SINCOS2\n\
\n\
PURPOSE:\n\
This function creates a model that fits a sinusoid.\n\
\n\
CATEGORY:\n\
Astronomy.\n\
\n\
CALLING SEQUENCE:\n\
\n\
Result = SINCOS2([c1a,c1o,c2a,c2o,s1a,s1o,s2a,s2o,p,c,midpt,t14,t12],x)\n\
\n\
INPUTS:\n\
c#a/s#a : amplitude\n\
c#o/s#o : phase/time offset\n\
p : period\n\
c : vertical offset\n\
x : Array of time/phase points\n\
\n\
OUTPUTS:\n\
This function returns an array of y values\n\
\n\
PROCEDURE:\n\
\n\
EXAMPLE:\n\
\n\
\n\
\n\
MODIFICATION HISTORY:\n\
Written by: Kevin Stevenson, UCF\n\n\
kevin218@knights.ucf.edu\n\
2013-11-11 Original creation \n\
2015-03-17 Converted to C\n\
\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\
";
/* Method table: exposes sincos2 (accepting keyword arguments); NULL-terminated. */
static PyMethodDef module_methods[] = {
  {"sincos2",(PyCFunction)sincos2,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};
/* Module initialization, dual Python 2/3: Python 3 uses PyModule_Create with
 * a PyModuleDef; Python 2 uses Py_InitModule3.  import_array() must run in
 * both paths before any NumPy C-API call. */
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_sincos2(void)
#else
initsincos2(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
  PyObject *module;
  static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT,
    "sincos2",             /* m_name */
    module_docstring,      /* m_doc */
    -1,                    /* m_size */
    module_methods,        /* m_methods */
    NULL,                  /* m_reload */
    NULL,                  /* m_traverse */
    NULL,                  /* m_clear */
    NULL,                  /* m_free */
  };
#endif

#if PY_MAJOR_VERSION >= 3
  module = PyModule_Create(&moduledef);
  if (!module)
    return NULL;
  /* Load `numpy` functionality. */
  import_array();
  return module;
#else
  PyObject *m = Py_InitModule3("sincos2", module_methods, module_docstring);
  if (m == NULL)
    return;
  /* Load `numpy` functionality. */
  import_array();
#endif
}
|
nodal_two_step_v_p_strategy.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: June 2018 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
#define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity.h"
#include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <fstream>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class NodalTwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategy);
/// Counted pointer of NodalTwoStepVPStrategy
//typedef boost::shared_ptr< NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
/// Base solving strategy this class derives from.
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
/// Node type (default is: Node<3>)
typedef Node<3> NodeType;
/// Geometry type (using with given NodeType)
typedef Geometry<NodeType> GeometryType;
typedef std::size_t SizeType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
/// Pointer to an auxiliary (momentum / continuity) strategy.
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType;
/// Container of weak pointers to neighbour nodes (NEIGHBOUR_NODES value type).
typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType;
///@}
///@name Life Cycle
///@{
/// Constructor from a solver-settings object.
/// Registers the model part in the base strategy and delegates the whole
/// set-up (tolerances, auxiliary strategies, ...) to InitializeStrategy().
NodalTwoStepVPStrategy(ModelPart &rModelPart, SolverSettingsType &rSolverConfig)
    : BaseType(rModelPart)
{
    InitializeStrategy(rSolverConfig);
}
/// Constructor from explicit linear solvers.
/// Builds the two auxiliary Gauss-Seidel strategies (momentum / velocity and
/// continuity / pressure) used by the fractional-step scheme.
/// @param rModelPart            model part holding nodes/elements to solve.
/// @param pVelocityLinearSolver linear solver for the momentum system.
/// @param pPressureLinearSolver linear solver for the continuity system.
/// @param ReformDofSet          rebuild the DofSet each step (remeshing).
/// @param VelTol / PresTol      nonlinear convergence tolerances.
/// @param MaxPressureIterations max predictor-corrector iterations.
/// @param TimeOrder             BDF order (1 or 2) for time integration.
/// @param DomainSize            spatial dimension (2 or 3).
NodalTwoStepVPStrategy(ModelPart &rModelPart,
/*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
double VelTol = 0.0001,
double PresTol = 0.0001,
int MaxPressureIterations = 1, // Only for predictor-corrector
unsigned int TimeOrder = 2,
unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input?
mVelocityTolerance(VelTol),
mPressureTolerance(PresTol),
mMaxPressureIter(MaxPressureIterations),
mDomainSize(DomainSize),
mTimeOrder(TimeOrder),
mReformDofSet(ReformDofSet)
{
KRATOS_TRY;
BaseType::SetEchoLevel(1);
// Check that input parameters are reasonable and sufficient.
this->Check();
bool CalculateNormDxFlag = true;
bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly.
// Additional Typedefs
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
//initializing fractional velocity solution step
typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
typename SchemeType::Pointer pScheme;
// A static scheme is enough: the time discretization is handled through the
// nodally-assembled BDF coefficients, not by the scheme itself.
typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
/* typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new IncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ()); */
pScheme.swap(Temp);
//CONSTRUCTION OF VELOCITY
BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
/* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */
this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());
vel_build->SetCalculateReactionsFlag(false);
/* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */
/* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver)); */
// Continuity (pressure) system: nodal elimination builder-and-solver.
BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuity<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));
/* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver)); */
this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());
pressure_build->SetCalculateReactionsFlag(false);
KRATOS_CATCH("");
}
/// Destructor: nothing to release beyond the automatically-managed members.
virtual ~NodalTwoStepVPStrategy() = default;
/// Verifies that the strategy can run: delegates to the base strategy check,
/// verifies DELTA_TIME is registered, then runs Check() on every element.
/// @return 0 on success, the first non-zero element/base error code otherwise.
int Check() override
{
KRATOS_TRY;
// Check elements and conditions in the model part
int ierr = BaseType::Check();
if (ierr != 0)
return ierr;
// A zero variable key means the application registration never ran.
if (DELTA_TIME.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", "");
ModelPart &rModelPart = BaseType::GetModelPart();
// const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
// for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl)
// {
//     ierr = itEl->Check(rCurrentProcessInfo);
//     if (ierr != 0)
//       break;
// }
// Stop at the first element that reports a problem.
const auto &r_current_process_info = rModelPart.GetProcessInfo();
for (const auto &r_element : rModelPart.Elements())
{
ierr = r_element.Check(r_current_process_info);
if (ierr != 0)
{
break;
}
}
// NOTE(review): conditions are currently not checked (loop commented out below).
// for (ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond)
// {
//     ierr = itCond->Check(rCurrentProcessInfo);
//     if (ierr != 0)
//       break;
// }
return ierr;
KRATOS_CATCH("");
}
/**
 * Computes the BDF time-integration weights and stores them in
 * BDF_COEFFICIENTS on the process info.
 * mTimeOrder == 2: variable-step BDF2 (3 weights); mTimeOrder == 1:
 * backward Euler (2 weights). Any other order leaves the process info
 * untouched.
 */
void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
{
    KRATOS_TRY;
    if (mTimeOrder == 2)
    {
        // Variable-step BDF2: weights depend on the ratio of the last two steps.
        const double dt = rCurrentProcessInfo[DELTA_TIME];
        const double previous_dt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
        const double rho = previous_dt / dt;
        const double time_coeff = 1.0 / (dt * rho * rho + dt * rho);
        Vector &r_bdf_coeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        r_bdf_coeffs.resize(3, false);
        r_bdf_coeffs[0] = time_coeff * (rho * rho + 2.0 * rho);        // step n+1 (3/(2*dt) for constant dt)
        r_bdf_coeffs[1] = -time_coeff * (rho * rho + 2.0 * rho + 1.0); // step n   (-4/(2*dt) for constant dt)
        r_bdf_coeffs[2] = time_coeff;                                  // step n-1 (1/(2*dt) for constant dt)
    }
    else if (mTimeOrder == 1)
    {
        // Backward Euler.
        const double dt = rCurrentProcessInfo[DELTA_TIME];
        const double time_coeff = 1.0 / dt;
        Vector &r_bdf_coeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        r_bdf_coeffs.resize(2, false);
        r_bdf_coeffs[0] = time_coeff;  // step n+1 (1/dt)
        r_bdf_coeffs[1] = -time_coeff; // step n  (-1/dt)
    }
    KRATOS_CATCH("");
}
/// Runs the coupled velocity-pressure iteration for the current time step.
/// Alternates momentum and continuity solves (with topology/strain updates
/// between them) until both converge (and at least 2 iterations ran) or the
/// iteration budget is exhausted.
/// @return true when both momentum and continuity converged.
bool SolveSolutionStep() override
{
// Initialize BDF2 coefficients
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
bool converged = false;
// bool momentumAlreadyConverged=false;
// bool continuityAlreadyConverged=false;
unsigned int maxNonLinearIterations = mMaxPressureIter;
KRATOS_INFO("\n Solve with nodally_integrated_two_step_vp strategy at t=") << currentTime << "s" << std::endl;
// Allow more iterations when the step size just changed or at start-up,
// when the solution is farther from equilibrium.
if (timeIntervalChanged == true && currentTime > 10 * timeInterval)
{
maxNonLinearIterations *= 2;
}
if (currentTime < 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
maxNonLinearIterations *= 3;
}
if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
maxNonLinearIterations *= 2;
}
bool momentumConverged = true;
bool continuityConverged = false;
bool fixedTimeStep = false;
double pressureNorm = 0;
double velocityNorm = 0;
/* boost::timer solve_step_time; */
this->InitializeSolutionStep();
for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
{
if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "----- > iteration: " << it << std::endl;
// First pass only: build nodal volumes and per-element initialization.
if (it == 0)
{
this->ComputeNodalVolume();
this->InitializeNonLinearIterations();
}
this->CalcNodalStrainsAndStresses();
// Momentum (velocity) solve; fixedTimeStep may be raised by the solver.
momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep, velocityNorm);
// Refresh mesh topology and nodal quantities before the continuity solve.
this->UpdateTopology(rModelPart, BaseType::GetEchoLevel());
this->ComputeNodalVolume();
this->InitializeNonLinearIterations();
this->CalcNodalStrains();
if (fixedTimeStep == false)
{
continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations, pressureNorm);
}
// if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){
// std::ofstream myfile;
// myfile.open ("momentumConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
// momentumAlreadyConverged=true;
// }
// if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){
// std::ofstream myfile;
// myfile.open ("continuityConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
// continuityAlreadyConverged=true;
// }
// On the final (or converged) iteration, update the accelerations.
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 1))
{
//this->ComputeErrorL2NormCaseImposedG();
//this->ComputeErrorL2NormCasePoiseuille();
this->CalculateAccelerations();
// std::ofstream myfile;
// myfile.open ("maxConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
}
// Convergence requires both fields converged and at least 2 iterations done.
if ((continuityConverged && momentumConverged) && it > 1)
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
converged = true;
std::cout << "nodal V-P strategy converged in " << it + 1 << " iterations." << std::endl;
break;
}
}
// NOTE(review): with "&&" this warning is skipped when only one of the two
// fields failed to converge — presumably "||" was intended; confirm before changing.
if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Convergence tolerance not reached." << std::endl;
if (mReformDofSet)
this->Clear();
/* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */
return converged;
}
/// End-of-step hook: intentionally a no-op (the stress/strain update call
/// below was deliberately disabled).
void FinalizeSolutionStep() override
{
/* this->UpdateStressStrain(); */
}
void Initialize() override
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size();
unsigned int sizeSDFNeigh = neighbourNodes * dimension;
if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS))
{
Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
if (rNodalStress.size() != sizeStrains)
{
rNodalStress.resize(sizeStrains, false);
}
noalias(rNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rNodalStress.size() != sizeStrains)
{
rNodalStress.resize(sizeStrains, false);
}
noalias(rNodalStress) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
}
else
{
std::cout << "THIS node does not have NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS))
{
Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
if (rNodalSFDneighbours.size() != sizeSDFNeigh)
{
rNodalSFDneighbours.resize(sizeSDFNeigh, false);
}
noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
else
{
std::cout << "THIS node does not have NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE))
{
Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
if (rSpatialDefRate.size() != sizeStrains)
{
rSpatialDefRate.resize(sizeStrains, false);
}
noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
}
else
{
std::cout << "THIS node does not have NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD))
{
Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
if (rFgrad.size1() != dimension)
{
rFgrad.resize(dimension, dimension, false);
}
noalias(rFgrad) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl;
}
if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (rFgradVel.size1() != dimension)
{
rFgradVel.resize(dimension, dimension, false);
}
noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
}
else
{
std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl;
}
this->AssignFluidMaterialToEachNode(itNode);
}
// }
}
/**
 * Stores the per-node material coefficients used by the nodal constitutive
 * evaluation: DEVIATORIC_COEFFICIENT = dynamic viscosity, and
 * VOLUMETRIC_COEFFICIENT = dt*K - 2*mu/3 (first Lame-like parameter built
 * from the time-scaled bulk modulus).
 */
void AssignFluidMaterialToEachNode(ModelPart::NodeIterator itNode)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    const double mu = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
    const double scaled_bulk = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
    itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) = scaled_bulk - 2.0 * mu / 3.0;
    itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT) = mu;
}
/**
 * Accumulates the lumped nodal volume NODAL_VOLUME: each element's measure
 * (triangle area / 3 in 2D, tetrahedron volume / 4 in 3D) is added equally
 * to all of its nodes. Assumes NODAL_VOLUME was reset beforehand.
 *
 * Bug fix: the original combined OpenMPUtils::CreatePartition with a
 * commented-out "#pragma omp parallel", so OpenMPUtils::ThisThread() was
 * always 0 and only the first thread's chunk of elements was visited in an
 * OpenMP build. The loop now covers the whole element container (serially).
 */
void ComputeNodalVolume()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ElementsArrayType &pElements = rModelPart.Elements();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
for (typename ElementsArrayType::iterator itElem = pElements.begin(); itElem != pElements.end(); itElem++) //MSI: To be parallelized
{
Element::GeometryType &geometry = itElem->GetGeometry();
// Elemental measure distributed equally over the element's nodes.
double elementalVolume = 0;
if (dimension == 2)
{
elementalVolume = geometry.Area() / 3.0; // linear triangle: 3 nodes
}
else if (dimension == 3)
{
elementalVolume = geometry.Volume() * 0.25; // linear tetrahedron: 4 nodes
}
unsigned int numNodes = geometry.size();
for (unsigned int i = 0; i < numNodes; i++)
{
double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME);
nodalVolume += elementalVolume;
}
}
}
/// Start-of-step hook: resets remeshing-sensitive nodal variables and
/// rebuilds the neighbour-ordering vectors for all nodes.
void InitializeSolutionStep() override
{
this->FillNodalSFDVector();
}
/**
 * Visits every node of the model part: resets the nodal variables that the
 * remeshing step may have invalidated, then rebuilds the ordered neighbour
 * list NODAL_SFD_NEIGHBOURS_ORDER (node id followed by its neighbours' ids).
 */
void FillNodalSFDVector()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); ++itNode)
    {
        InitializeNodalVariablesForRemeshedDomain(itNode);
        // Assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER.
        SetNeighboursOrderToNode(itNode);
    }
}
/**
 * Fills NODAL_SFD_NEIGHBOURS_ORDER for the given node: slot 0 holds the
 * node's own id (the node counts as its own neighbour), followed by the ids
 * of its NEIGHBOUR_NODES in container order.
 */
void SetNeighboursOrderToNode(ModelPart::NodeIterator itNode)
{
    NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
    const unsigned int n_entries = neighb_nodes.size() + 1; // +1: the node itself
    Vector &rNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
    if (rNodeOrderedNeighbours.size() != n_entries)
        rNodeOrderedNeighbours.resize(n_entries, false);
    noalias(rNodeOrderedNeighbours) = ZeroVector(n_entries);
    rNodeOrderedNeighbours[0] = itNode->Id();
    for (unsigned int k = 1; k < n_entries; k++)
    {
        rNodeOrderedNeighbours[k] = neighb_nodes[k - 1].Id();
    }
}
/// Resets (resizes and zeroes) all remeshing-sensitive nodal variables of a
/// single node, guarding every access with SolutionStepsDataHas(). Sizes:
/// strain/stress vectors use the Voigt size (3 in 2D, 6 in 3D); the SFD
/// neighbour vector uses (n_neighbours + 1) * dimension (node itself included).
void InitializeNodalVariablesForRemeshedDomain(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
unsigned int sizeStrains = 3 * (dimension - 1);
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
unsigned int neighbourNodes = neighb_nodes.size() + 1;
unsigned int sizeSDFNeigh = neighbourNodes * dimension;
if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS))
{
Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
if (rNodalStress.size() != sizeStrains)
rNodalStress.resize(sizeStrains, false);
noalias(rNodalStress) = ZeroVector(sizeStrains);
}
if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS))
{
Vector &rNodalDevStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
if (rNodalDevStress.size() != sizeStrains)
rNodalDevStress.resize(sizeStrains, false);
noalias(rNodalDevStress) = ZeroVector(sizeStrains);
}
if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS_ORDER))
{
Vector &rNodalSFDneighboursOrder = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
if (rNodalSFDneighboursOrder.size() != neighbourNodes)
rNodalSFDneighboursOrder.resize(neighbourNodes, false);
noalias(rNodalSFDneighboursOrder) = ZeroVector(neighbourNodes);
}
if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS))
{
Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
if (rNodalSFDneighbours.size() != sizeSDFNeigh)
rNodalSFDneighbours.resize(sizeSDFNeigh, false);
noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
}
if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE))
{
Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
if (rSpatialDefRate.size() != sizeStrains)
rSpatialDefRate.resize(sizeStrains, false);
noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
}
if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD))
{
Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
if (rFgrad.size1() != dimension)
rFgrad.resize(dimension, dimension, false);
noalias(rFgrad) = ZeroMatrix(dimension, dimension);
}
if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL))
{
Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
if (rFgradVel.size1() != dimension)
rFgradVel.resize(dimension, dimension, false);
noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
}
// Scalar accumulators: reset to zero where present.
if (itNode->SolutionStepsDataHas(NODAL_VOLUME))
{
itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
}
if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE))
{
itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
}
if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA))
{
itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
}
if (itNode->SolutionStepsDataHas(NODAL_VOLUMETRIC_DEF_RATE))
{
itNode->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0;
}
if (itNode->SolutionStepsDataHas(NODAL_EQUIVALENT_STRAIN_RATE))
{
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;
}
}
/**
 * Calls InitializeNonLinearIteration() on every element of the model part
 * (nodal data assembly done at element level before each nonlinear pass).
 *
 * Bug fix: the original combined OpenMPUtils::CreatePartition with a
 * commented-out "#pragma omp parallel", so OpenMPUtils::ThisThread() was
 * always 0 and only the first thread's chunk of elements was initialized in
 * an OpenMP build. The loop now covers the whole element container (serially).
 */
void InitializeNonLinearIterations()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ElementsArrayType &pElements = rModelPart.Elements();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
for (typename ElementsArrayType::iterator itElem = pElements.begin(); itElem != pElements.end(); itElem++) //MSI: To be parallelized
{
itElem->InitializeNonLinearIteration(rCurrentProcessInfo);
}
}
/**
 * For every node with a positive NODAL_VOLUME, computes and stores the nodal
 * deformation gradient (theta = 0.5, midpoint weighting) and the resulting
 * nodal strains/stresses; nodes with zero volume (freshly remeshed, isolated)
 * get their nodal variables reset instead.
 *
 * Bug fix: the original obtained [NodesBegin, NodesEnd) from
 * OpenMPUtils::PartitionedIterators while the enclosing "#pragma omp parallel"
 * was commented out, so in an OpenMP build only thread 0's partition of the
 * nodes was processed. The loop now covers the whole node container
 * (serially), consistent with FillNodalSFDVector().
 */
void CalcNodalStrainsAndStresses()
{
ModelPart &rModelPart = BaseType::GetModelPart();
for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); ++itNode)
{
double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
double theta = 0.5;
if (nodalVolume > 0)
{
this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
this->CalcNodalStrainsAndStressesForNode(itNode);
}
else
{ // if nodalVolume==0
InitializeNodalVariablesForRemeshedDomain(itNode);
}
}
}
/// Computes the nodal strain rate and the total/deviatoric Cauchy stresses
/// for a single node from its stored deformation gradients:
///   L = dF/dt * F^-1 (spatial velocity gradient), symmetrized into the
///   Voigt-ordered NODAL_SPATIAL_DEF_RATE; stresses follow a linear
///   viscous law, with Papanastasiou regularization when YIELD_SHEAR > 0.
/// Writes NODAL_SPATIAL_DEF_RATE, NODAL_EQUIVALENT_STRAIN_RATE,
/// NODAL_VOLUMETRIC_DEF_RATE, NODAL_CAUCHY_STRESS and
/// NODAL_DEVIATORIC_CAUCHY_STRESS.
void CalcNodalStrainsAndStressesForNode(ModelPart::NodeIterator itNode)
{
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
// Material coefficients set by AssignFluidMaterialToEachNode(); the
// deviatoric coefficient is a local copy so the yield-stress correction
// below does not overwrite the stored nodal value.
double currFirstLame = itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT);
double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
Matrix Fgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
double detFgrad = 1.0;
Matrix InvFgrad = ZeroMatrix(dimension, dimension);
Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
if (dimension == 2)
{
MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
}
else if (dimension == 3)
{
MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
}
//it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
SpatialVelocityGrad = prod(FgradVel, InvFgrad);
if (dimension == 2)
{
// Voigt 2D: [exx, eyy, exy]
auto &r_stain_tensor2D = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
r_stain_tensor2D[0] = SpatialVelocityGrad(0, 0);
r_stain_tensor2D[1] = SpatialVelocityGrad(1, 1);
r_stain_tensor2D[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
// Equivalent strain rate: sqrt(2 e:e) with the shear term doubled (Voigt).
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]));
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
// Papanastasiou-regularized Bingham viscosity.
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
// sigma = lambda*tr(e)*I + 2*mu*e; deviatoric part subtracts tr(e)/3.
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
auto &r_stress_tensor2D = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0);
r_stress_tensor2D[0] = nodalSigmaTot_xx;
r_stress_tensor2D[1] = nodalSigmaTot_yy;
r_stress_tensor2D[2] = nodalSigmaTot_xy;
auto &r_dev_stress_tensor2D = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
r_dev_stress_tensor2D[0] = nodalSigmaDev_xx;
r_dev_stress_tensor2D[1] = nodalSigmaDev_yy;
r_dev_stress_tensor2D[2] = nodalSigmaDev_xy;
}
else if (dimension == 3)
{
// Voigt 3D: [exx, eyy, ezz, exy, exz, eyz]
auto &r_stain_tensor3D = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
r_stain_tensor3D[0] = SpatialVelocityGrad(0, 0);
r_stain_tensor3D[1] = SpatialVelocityGrad(1, 1);
r_stain_tensor3D[2] = SpatialVelocityGrad(2, 2);
r_stain_tensor3D[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
r_stain_tensor3D[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
r_stain_tensor3D[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] +
4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]);
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];
auto &r_stress_tensor3D = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0);
r_stress_tensor3D[0] = nodalSigmaTot_xx;
r_stress_tensor3D[1] = nodalSigmaTot_yy;
r_stress_tensor3D[2] = nodalSigmaTot_zz;
r_stress_tensor3D[3] = nodalSigmaTot_xy;
r_stress_tensor3D[4] = nodalSigmaTot_xz;
r_stress_tensor3D[5] = nodalSigmaTot_yz;
auto &r_dev_stress_tensor3D = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
r_dev_stress_tensor3D[0] = nodalSigmaDev_xx;
r_dev_stress_tensor3D[1] = nodalSigmaDev_yy;
r_dev_stress_tensor3D[2] = nodalSigmaDev_zz;
r_dev_stress_tensor3D[3] = nodalSigmaDev_xy;
r_dev_stress_tensor3D[4] = nodalSigmaDev_xz;
r_dev_stress_tensor3D[5] = nodalSigmaDev_yz;
}
}
void CalcNodalStrainsForNode(ModelPart::NodeIterator itNode)
{
    // Computes the nodal spatial velocity gradient L = Fdot * F^-1 from the
    // stored nodal deformation gradient (F) and its rate (Fdot), then stores:
    //  - the symmetric part of L in Voigt notation in NODAL_SPATIAL_DEF_RATE,
    //  - the equivalent strain rate in NODAL_EQUIVALENT_STRAIN_RATE,
    //  - the volumetric strain rate (trace) in NODAL_VOLUMETRIC_DEF_RATE.
    // The repeated NODAL_SPATIAL_DEF_RATE lookups of the original version are
    // hoisted into a single reference (same pattern already used elsewhere in
    // this class), which avoids ~20 redundant database accesses per node.
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();

    double detFgrad = 1.0;
    Matrix nodalFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);

    // Invert the deformation gradient (2D or 3D specialization).
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(nodalFgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(nodalFgrad, InvFgrad, detFgrad);
    }

    // Spatial velocity gradient tensor: L_ij = dF_ik * invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);

    Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
    if (dimension == 2)
    {
        // Voigt order (2D): [d_xx, d_yy, d_xy]
        rSpatialDefRate[0] = SpatialVelocityGrad(0, 0);
        rSpatialDefRate[1] = SpatialVelocityGrad(1, 1);
        rSpatialDefRate[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        // Equivalent strain rate: sqrt(2 d_ii^2 + 4 d_ij^2) (shear doubled).
        itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
            sqrt(2.0 * rSpatialDefRate[0] * rSpatialDefRate[0] +
                 2.0 * rSpatialDefRate[1] * rSpatialDefRate[1] +
                 4.0 * rSpatialDefRate[2] * rSpatialDefRate[2]);
        // Volumetric deformation rate = trace of the rate-of-deformation tensor.
        itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = rSpatialDefRate[0] + rSpatialDefRate[1];
    }
    else if (dimension == 3)
    {
        // Voigt order (3D): [d_xx, d_yy, d_zz, d_xy, d_xz, d_yz]
        rSpatialDefRate[0] = SpatialVelocityGrad(0, 0);
        rSpatialDefRate[1] = SpatialVelocityGrad(1, 1);
        rSpatialDefRate[2] = SpatialVelocityGrad(2, 2);
        rSpatialDefRate[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        rSpatialDefRate[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        rSpatialDefRate[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
            sqrt(2.0 * rSpatialDefRate[0] * rSpatialDefRate[0] +
                 2.0 * rSpatialDefRate[1] * rSpatialDefRate[1] +
                 2.0 * rSpatialDefRate[2] * rSpatialDefRate[2] +
                 4.0 * rSpatialDefRate[3] * rSpatialDefRate[3] +
                 4.0 * rSpatialDefRate[4] * rSpatialDefRate[4] +
                 4.0 * rSpatialDefRate[5] * rSpatialDefRate[5]);
        itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) =
            rSpatialDefRate[0] + rSpatialDefRate[1] + rSpatialDefRate[2];
    }
}
void CalcNodalStrains()
{
/* std::cout << "Calc Nodal Strains " << std::endl; */
ModelPart &rModelPart = BaseType::GetModelPart();
// #pragma omp parallel
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
double theta = 1.0;
if (nodalVolume > 0)
{
this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
this->CalcNodalStrainsForNode(itNode);
}
else
{ // if nodalVolume==0
InitializeNodalVariablesForRemeshedDomain(itNode);
}
}
// }
/* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}
// Assembles the nodal deformation gradient F and its time derivative Fdot
// by summing shape-function-derivative (SFD) contributions of the node
// itself and of its neighbours, then stores them in
// NODAL_DEFORMATION_GRAD / NODAL_DEFORMATION_GRAD_VEL.
// @param itNode node whose gradients are assembled
// @param theta  velocity interpolation factor: v = theta*v_n+1 + (1-theta)*v_n
void ComputeAndStoreNodalDeformationGradient(ModelPart::NodeIterator itNode, double theta)
{
    KRATOS_TRY;
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // NODAL_SFD_NEIGHBOURS_ORDER holds this node's id first, then neighbour ids;
    // NODAL_SFD_NEIGHBOURS holds the SFD components (dimension entries per node).
    Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
    Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
    /* unsigned int idThisNode=nodalSFDneighboursId[0]; */
    const unsigned int neighSize = nodalSFDneighboursId.size();
    Matrix Fgrad = ZeroMatrix(dimension, dimension);
    Matrix FgradVel = ZeroMatrix(dimension, dimension);
    NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
    if (dimension == 2)
    {
        // --- contribution of the node itself (first SFD pair) ---
        double dNdXi = rNodalSFDneigh[0];
        double dNdYi = rNodalSFDneigh[1];
        Fgrad(0, 0) += dNdXi * itNode->X();
        Fgrad(0, 1) += dNdYi * itNode->X();
        Fgrad(1, 0) += dNdXi * itNode->Y();
        Fgrad(1, 1) += dNdYi * itNode->Y();
        // theta-interpolated velocity between current (0) and previous (1) step
        double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
        double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
        FgradVel(0, 0) += dNdXi * VelocityX;
        FgradVel(0, 1) += dNdYi * VelocityX;
        FgradVel(1, 0) += dNdXi * VelocityY;
        FgradVel(1, 1) += dNdYi * VelocityY;
        // --- contributions of the neighbour nodes ---
        unsigned int firstRow = 2;
        if (neighSize > 0)
        {
            for (unsigned int i = 0; i < neighSize - 1; i++) //neigh_nodes has one cell less than nodalSFDneighboursId becuase this has also the considered node ID at the beginning
            {
                dNdXi = rNodalSFDneigh[firstRow];
                dNdYi = rNodalSFDneigh[firstRow + 1];
                // Consistency check: neighbour ordering must match the stored order.
                unsigned int neigh_nodes_id = neighb_nodes[i].Id();
                unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
                if (neigh_nodes_id != other_neigh_nodes_id)
                {
                    std::cout << "node (x,y)=(" << itNode->X() << "," << itNode->Y() << ") with neigh_nodes_id " << neigh_nodes_id << " different than other_neigh_nodes_id " << other_neigh_nodes_id << std::endl;
                }
                Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
                Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
                Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
                Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
                VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
                VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
                FgradVel(0, 0) += dNdXi * VelocityX;
                FgradVel(0, 1) += dNdYi * VelocityX;
                FgradVel(1, 0) += dNdXi * VelocityY;
                FgradVel(1, 1) += dNdYi * VelocityY;
                firstRow += 2; // advance to next (dNdX, dNdY) pair
            }
        }
    }
    else
    {
        // 3D case: same assembly with three SFD components per node.
        double dNdXi = rNodalSFDneigh[0];
        double dNdYi = rNodalSFDneigh[1];
        double dNdZi = rNodalSFDneigh[2];
        double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
        double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
        double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
        Fgrad(0, 0) += dNdXi * itNode->X();
        Fgrad(0, 1) += dNdYi * itNode->X();
        Fgrad(0, 2) += dNdZi * itNode->X();
        Fgrad(1, 0) += dNdXi * itNode->Y();
        Fgrad(1, 1) += dNdYi * itNode->Y();
        Fgrad(1, 2) += dNdZi * itNode->Y();
        Fgrad(2, 0) += dNdXi * itNode->Z();
        Fgrad(2, 1) += dNdYi * itNode->Z();
        Fgrad(2, 2) += dNdZi * itNode->Z();
        FgradVel(0, 0) += dNdXi * VelocityX;
        FgradVel(0, 1) += dNdYi * VelocityX;
        FgradVel(0, 2) += dNdZi * VelocityX;
        FgradVel(1, 0) += dNdXi * VelocityY;
        FgradVel(1, 1) += dNdYi * VelocityY;
        FgradVel(1, 2) += dNdZi * VelocityY;
        FgradVel(2, 0) += dNdXi * VelocityZ;
        FgradVel(2, 1) += dNdYi * VelocityZ;
        FgradVel(2, 2) += dNdZi * VelocityZ;
        unsigned int firstRow = 3;
        if (neighSize > 0)
        {
            for (unsigned int i = 0; i < neighSize - 1; i++)
            {
                dNdXi = rNodalSFDneigh[firstRow];
                dNdYi = rNodalSFDneigh[firstRow + 1];
                dNdZi = rNodalSFDneigh[firstRow + 2];
                VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
                VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
                VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);
                Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
                Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
                Fgrad(0, 2) += dNdZi * neighb_nodes[i].X();
                Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
                Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
                Fgrad(1, 2) += dNdZi * neighb_nodes[i].Y();
                Fgrad(2, 0) += dNdXi * neighb_nodes[i].Z();
                Fgrad(2, 1) += dNdYi * neighb_nodes[i].Z();
                Fgrad(2, 2) += dNdZi * neighb_nodes[i].Z();
                FgradVel(0, 0) += dNdXi * VelocityX;
                FgradVel(0, 1) += dNdYi * VelocityX;
                FgradVel(0, 2) += dNdZi * VelocityX;
                FgradVel(1, 0) += dNdXi * VelocityY;
                FgradVel(1, 1) += dNdYi * VelocityY;
                FgradVel(1, 2) += dNdZi * VelocityY;
                FgradVel(2, 0) += dNdXi * VelocityZ;
                FgradVel(2, 1) += dNdYi * VelocityZ;
                FgradVel(2, 2) += dNdZi * VelocityZ;
                firstRow += 3; // advance to next (dNdX, dNdY, dNdZ) triple
            }
        }
    }
    // Store the assembled gradients on the node.
    itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD) = Fgrad;
    itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL) = FgradVel;
    KRATOS_CATCH("");
}
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
    KRATOS_TRY;
    // Advance nodal displacements (also resetting nodal variables), move the
    // mesh accordingly, then refresh the weighted boundary normals.
    this->CalculateDisplacementsAndResetNodalVariables();
    BaseType::MoveMesh();
    BoundaryNormalsCalculationUtilities normalsUtility;
    normalsUtility.CalculateWeightedBoundaryNormals(rModelPart, echoLevel);
    KRATOS_CATCH("");
}
void CalculatePressureVelocity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
}
}
}
void CalculatePressureAcceleration()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
}
}
}
void CalculateAccelerations()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity);
}
else if ((i)->Is(RIGID))
{
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
(i)->FastGetSolutionStepValue(NODAL_VOLUME) = 0.0;
(i)->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0.0;
(i)->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;
(i)->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0.0;
(i)->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
}
}
// Acceleration update consistent with the trapezoidal-rule displacement
// integration used in CalculateDisplacements:
//   a_{n+1} = 2 (v_{n+1} - v_n) / dt - a_n
// Writes the result into CurrentAcceleration (noalias: no temporary).
inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration,
                                const array_1d<double, 3> &CurrentVelocity,
                                array_1d<double, 3> &PreviousAcceleration,
                                const array_1d<double, 3> &PreviousVelocity)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double Dt = rCurrentProcessInfo[DELTA_TIME];
    noalias(CurrentAcceleration) = 2.0 * (CurrentVelocity - PreviousVelocity) / Dt - PreviousAcceleration;
}
void CalculateDisplacements()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
/* if( i->IsFixed(DISPLACEMENT_X) == false ) */
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
/* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
/* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
}
}
// Trapezoidal displacement update (as in CalculateDisplacements) combined
// with a reset of all per-node assembly variables, preparing the nodal data
// for the next assembly pass after the mesh moves.
void CalculateDisplacementsAndResetNodalVariables()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // Voigt size: 3 components in 2D, 6 in 3D.
    unsigned int sizeStrains = 3 * (dimension - 1);
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
    for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i)
    {
        array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
        array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
        array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
        array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
        // d_{n+1} = d_n + 0.5 * dt * (v_{n+1} + v_n)
        CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
        CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
        if (dimension == 3)
        {
            CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
        }
        ///// reset Nodal variables //////
        Vector &rNodalSFDneighbours = i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
        // Keep the current SFD vector size; only its contents are zeroed below.
        unsigned int sizeSDFNeigh = rNodalSFDneighbours.size();
        i->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
        i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
        i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
        i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0;
        i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;
        noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
        Vector &rSpatialDefRate = i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
        noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
        Matrix &rFgrad = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
        noalias(rFgrad) = ZeroMatrix(dimension, dimension);
        Matrix &rFgradVel = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
        noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
    }
}
// Derivative chain in required order: nodal accelerations first, then the
// pressure time derivative, then the pressure second derivative, since each
// step consumes the previous one's output.
void UpdatePressureAccelerations()
{
    this->CalculateAccelerations();
    this->CalculatePressureVelocity();
    this->CalculatePressureAcceleration();
}
// Releases the internal data of both sub-strategies (system matrices etc.).
void Clear() override
{
    mpMomentumStrategy->Clear();
    mpPressureStrategy->Clear();
}
///@}
///@name Access
///@{
void SetEchoLevel(int Level) override
{
    // Propagate verbosity; the sub-strategies run one level quieter
    // (clamped at 0).
    BaseType::SetEchoLevel(Level);
    int StrategyLevel = 0;
    if (Level > 0)
    {
        StrategyLevel = Level - 1;
    }
    mpMomentumStrategy->SetEchoLevel(StrategyLevel);
    mpPressureStrategy->SetEchoLevel(StrategyLevel);
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Turn back information as a string.
std::string Info() const override
{
    // Construct the identifier directly; no stringstream needed.
    return std::string("NodalTwoStepVPStrategy");
}
/// Print information about this object.
/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
    rOStream << "NodalTwoStepVPStrategy";
}
/// Print object's data.
/// Print object's data. (Intentionally empty: no data to report.)
void PrintData(std::ostream &rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Calculate the coefficients for time iteration.
/**
* @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME variables.
*/
/**
 * Assembles and solves one nonlinear iteration of the momentum equations.
 * Removed the large blocks of dead commented-out debug-file code from the
 * original; logic is unchanged.
 * @param it            current nonlinear iteration index (0-based)
 * @param maxIt         maximum number of nonlinear iterations
 * @param fixedTimeStep out: set true when the error check forces a fixed/reduced step
 * @param velocityNorm  in/out: reference velocity norm, computed here when it == 0
 * @return always false: the in-loop convergence check is disabled
 *         (ConvergedMomentum is never set), callers rely on maxIt / fixedTimeStep.
 */
bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep, double &velocityNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    int Rank = rModelPart.GetCommunicator().MyPID();
    bool ConvergedMomentum = false;
    double NormDv = 0;
    fixedTimeStep = false;
    // Build momentum system and solve for fractional step velocity increment.
    rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);
    if (it == 0)
    {
        mpMomentumStrategy->InitializeSolutionStep();
    }
    NormDv = mpMomentumStrategy->Solve();
    if (BaseType::GetEchoLevel() > 1 && Rank == 0)
        std::cout << "-------------- s o l v e d ! ------------------" << std::endl;
    if (it == 0)
    {
        // Reference norm for the relative error, taken at the first iteration.
        velocityNorm = this->ComputeVelocityNorm();
    }
    double DvErrorNorm = NormDv / velocityNorm;
    // Only start checking momentum convergence after this many iterations.
    const unsigned int iterationForCheck = 3;
    KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Velocity error: " << DvErrorNorm << std::endl;
    // Check convergence
    if (it == maxIt - 1)
    {
        KRATOS_INFO("Iteration") << it << " Final Velocity error: " << DvErrorNorm << std::endl;
        fixedTimeStep = this->FixTimeStepMomentum(DvErrorNorm);
    }
    else if (it > iterationForCheck)
    {
        fixedTimeStep = this->CheckMomentumConvergence(DvErrorNorm);
    }
    if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;
    return ConvergedMomentum;
}
/**
 * Solves one iteration of the continuity (pressure) equation.
 * Removed the large blocks of dead commented-out debug-file code from the
 * original; logic is unchanged.
 * @param it    current nonlinear iteration index (0-based)
 * @param maxIt maximum number of nonlinear iterations
 * @param NormP in/out: reference pressure norm, computed here when it == 0
 * @return result of FixTimeStepContinuity on the last iteration, false otherwise
 *         (the in-loop convergence check is disabled).
 */
bool SolveContinuityIteration(unsigned int it, unsigned int maxIt, double &NormP)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    int Rank = rModelPart.GetCommunicator().MyPID();
    bool ConvergedContinuity = false;
    double NormDp = 0;
    // 2. Pressure solution
    rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);
    if (it == 0)
    {
        mpPressureStrategy->InitializeSolutionStep();
    }
    NormDp = mpPressureStrategy->Solve();
    if (BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "The norm of pressure is: " << NormDp << std::endl;
    if (it == 0)
    {
        // Reference norm for the relative error, taken at the first iteration.
        NormP = this->ComputePressureNorm();
    }
    double DpErrorNorm = NormDp / (NormP);
    // Check convergence
    if (it == maxIt - 1)
    {
        KRATOS_INFO("Iteration") << it << " Final Pressure error: " << DpErrorNorm << std::endl;
        ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm);
    }
    else
    {
        KRATOS_INFO("Iteration") << it << " Pressure error: " << DpErrorNorm << std::endl;
    }
    if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
        std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;
    return ConvergedContinuity;
}
/**
 * Checks the relative velocity-increment error against mVelocityTolerance.
 * Fixes: removes the dead local NormVelNode (computed but never read) and
 * collapses the trailing if/else into a direct return.
 * @param NormDv      norm of the velocity increment from the momentum solve
 * @param errorNormDv out: NormDv divided by the global velocity norm
 * @return true when the relative error is below mVelocityTolerance
 */
bool CheckVelocityConvergence(const double NormDv, double &errorNormDv)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormV = 0.00;
    errorNormDv = 0;
#pragma omp parallel reduction(+ : NormV)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
            for (unsigned int d = 0; d < 3; ++d)
            {
                NormV += Vel[d] * Vel[d];
            }
        }
    }
    // Accumulate across MPI ranks before taking the square root.
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
    NormV = sqrt(NormV);
    if (NormV == 0.0)
        NormV = 1.00; // avoid division by zero for a fully quiescent field
    errorNormDv = NormDv / NormV;
    if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
    {
        std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
        std::cout << "The norm of velocity is: " << NormV << std::endl;
        std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl;
    }
    return errorNormDv < mVelocityTolerance;
}
/**
 * Computes the global L2 norm of the nodal velocity field (MPI-summed).
 * Fix: removes the dead local NormVelNode (computed but never read).
 * @return the norm, or 1.0 when the field is identically zero (safe divisor).
 */
double ComputeVelocityNorm()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormV = 0.00;
#pragma omp parallel reduction(+ : NormV)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
            for (unsigned int d = 0; d < 3; ++d)
            {
                NormV += Vel[d] * Vel[d];
            }
        }
    }
    // Accumulate across MPI ranks before taking the square root.
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
    NormV = sqrt(NormV);
    if (NormV == 0.0)
        NormV = 1.00; // safe divisor for relative error computations
    return NormV;
}
double ComputePressureNorm()
{
ModelPart &rModelPart = BaseType::GetModelPart();
double NormP = 0.00;
#pragma omp parallel reduction(+ \
: NormP)
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const double Pr = itNode->FastGetSolutionStepValue(PRESSURE);
NormP += Pr * Pr;
}
}
BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
NormP = sqrt(NormP);
if (NormP == 0.0)
NormP = 1.00;
return NormP;
}
/**
 * Computes L2 error norms of velocity, pressure and deviatoric stress against
 * a closed-form reference solution and appends each norm (time \t value) to
 * its own text file.
 * Fix: the original opened an `#pragma omp parallel` region while accumulating
 * into seven shared doubles with `+=` and no reduction clause — a data race.
 * The sums are now declared in a `reduction(+ : ...)` clause, matching the
 * pattern already used by ComputeVelocityNorm/ComputePressureNorm. The seven
 * duplicated file-append stanzas are factored into a local lambda.
 */
void ComputeErrorL2NormCaseImposedG()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    double sumErrorL2Velocity = 0;
    double sumErrorL2VelocityX = 0;
    double sumErrorL2VelocityY = 0;
    double sumErrorL2Pressure = 0;
    double sumErrorL2TauXX = 0;
    double sumErrorL2TauYY = 0;
    double sumErrorL2TauXY = 0;
#pragma omp parallel reduction(+ : sumErrorL2Velocity, sumErrorL2VelocityX, sumErrorL2VelocityY, sumErrorL2Pressure, sumErrorL2TauXX, sumErrorL2TauYY, sumErrorL2TauXY)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const double posX = itNode->X();
            const double posY = itNode->Y();
            const double nodalArea = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
            const double velX = itNode->FastGetSolutionStepValue(VELOCITY_X);
            const double velY = itNode->FastGetSolutionStepValue(VELOCITY_Y);
            const double pressure = itNode->FastGetSolutionStepValue(PRESSURE);
            const double tauXX = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0];
            const double tauYY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1];
            const double tauXY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2];
            // Closed-form reference fields evaluated at the node position.
            double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
            double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
            double expectedPressure = -posX * (1.0 - posX);
            double expectedTauXX = 2.0 * (-4.0 * (1 - posX) * posX * (-1.0 + 2.0 * posX) * posY * (1.0 - 3.0 * posY + 2.0 * pow(posY, 2)));
            double expectedTauYY = 2.0 * (4.0 * posX * (1.0 - 3.0 * posX + 2.0 * pow(posX, 2)) * (1 - posY) * posY * (-1.0 + 2.0 * posY));
            double expectedTauXY = (2.0 * (1.0 - 6.0 * posY + 6.0 * pow(posY, 2)) * (1 - posX) * (1 - posX) * pow(posX, 2) - 2.0 * (1.0 - 6.0 * posX + 6.0 * pow(posX, 2)) * (1 - posY) * (1 - posY) * pow(posY, 2));
            double nodalErrorVelocityX = velX - expectedVelocityX;
            double nodalErrorVelocityY = velY - expectedVelocityY;
            double nodalErrorPressure = pressure - expectedPressure;
            double nodalErrorTauXX = tauXX - expectedTauXX;
            double nodalErrorTauYY = tauYY - expectedTauYY;
            double nodalErrorTauXY = tauXY - expectedTauXY;
            // Area-weighted squared errors (L2 norm contributions).
            sumErrorL2Velocity += (pow(nodalErrorVelocityX, 2) + pow(nodalErrorVelocityY, 2)) * nodalArea;
            sumErrorL2VelocityX += pow(nodalErrorVelocityX, 2) * nodalArea;
            sumErrorL2VelocityY += pow(nodalErrorVelocityY, 2) * nodalArea;
            sumErrorL2Pressure += pow(nodalErrorPressure, 2) * nodalArea;
            sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * nodalArea;
            sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * nodalArea;
            sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * nodalArea;
        }
    }
    // Appends one "time <tab> value" line to the given file.
    auto appendErrorToFile = [currentTime](const char *fileName, double value) {
        std::ofstream outFile(fileName, std::ios::app);
        outFile << currentTime << "\t" << value << "\n";
    };
    appendErrorToFile("errorL2VelocityFile.txt", sqrt(sumErrorL2Velocity));
    appendErrorToFile("errorL2VelocityXFile.txt", sqrt(sumErrorL2VelocityX));
    appendErrorToFile("errorL2VelocityYFile.txt", sqrt(sumErrorL2VelocityY));
    appendErrorToFile("errorL2PressureFile.txt", sqrt(sumErrorL2Pressure));
    appendErrorToFile("errorL2TauXXFile.txt", sqrt(sumErrorL2TauXX));
    appendErrorToFile("errorL2TauYYFile.txt", sqrt(sumErrorL2TauYY));
    appendErrorToFile("errorL2TauXYFile.txt", sqrt(sumErrorL2TauXY));
}
// Computes the L2-norm errors of the tangential velocity and the tangential
// deviatoric stress against the analytical circular-Couette solution
// (inner radius r_in rotating at omega inside a fixed outer radius R_out),
// then appends "time <tab> velocityError <tab> tauError" to errorL2Poiseuille.txt.
// Also stores the computed tangential velocity in NODAL_ERROR_XX per node.
void ComputeErrorL2NormCasePoiseuille()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double currentTime = rCurrentProcessInfo[TIME];
double sumErrorL2VelocityTheta = 0;
double sumErrorL2TauTheta = 0;
// Hard-coded parameters of the analytical benchmark solution.
double r_in = 0.2;
double R_out = 0.5;
double kappa = r_in / R_out;
double omega = 0.5;
double viscosity = 100.0;
// BUG FIX: both accumulators are shared across threads; the original
// "#pragma omp parallel" without a reduction made the "+=" statements
// below a data race. An OpenMP reduction gives each thread a private
// partial sum that is combined on exit of the parallel region.
#pragma omp parallel reduction(+ : sumErrorL2VelocityTheta, sumErrorL2TauTheta)
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const double posX = itNode->X();
const double posY = itNode->Y();
const double rPos = sqrt(pow(posX, 2) + pow(posY, 2));
const double cosalfa = posX / rPos;
const double sinalfa = posY / rPos;
const double sin2alfa = 2.0 * cosalfa * sinalfa;
const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2);
const double nodalArea = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
const double velX = itNode->FastGetSolutionStepValue(VELOCITY_X);
const double velY = itNode->FastGetSolutionStepValue(VELOCITY_Y);
const double tauXX = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0];
const double tauYY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1];
const double tauXY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2];
// Analytical tangential velocity and shear stress at radius rPos.
double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out);
double computedVelocityTheta = sqrt(pow(velX, 2) + pow(velY, 2));
double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;
double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2);
// Rotate the Cartesian stress components to the tangential direction.
double computedTauTheta = +(tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa;
double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;
// Save the computed tangential velocity for post-processing.
itNode->FastGetSolutionStepValue(NODAL_ERROR_XX) = computedVelocityTheta;
// Area-weighted squared nodal errors (L2 norm contributions).
sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * nodalArea;
sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * nodalArea;
}
}
double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta);
double errorL2TauTheta = sqrt(sumErrorL2TauTheta);
std::ofstream myfileVelocity;
myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app);
myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n";
myfileVelocity.close();
}
// Checks convergence of the pressure iteration: computes the relative error
// ||Dp|| / ||p|| (with ||p|| clamped to 1 when the pressure field is
// identically zero) and compares it against mPressureTolerance.
// The relative error is also returned through errorNormDp.
bool CheckPressureConvergence(const double NormDp, double &errorNormDp)
{
ModelPart &rModelPart = BaseType::GetModelPart();
double pressureNorm = 0.00;
errorNormDp = 0;
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
// Accumulate the sum of squared nodal pressures.
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const double nodalPressure = itNode->FastGetSolutionStepValue(PRESSURE);
pressureNorm += nodalPressure * nodalPressure;
}
// Combine the partial sums across MPI ranks before taking the root.
BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(pressureNorm);
pressureNorm = sqrt(pressureNorm);
// Guard against division by zero for a null pressure field.
if (pressureNorm == 0.0)
{
pressureNorm = 1.00;
}
errorNormDp = NormDp / pressureNorm;
if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
{
std::cout << " The norm of pressure increment is: " << NormDp << std::endl;
std::cout << " The norm of pressure is: " << pressureNorm << std::endl;
std::cout << " Pressure error: " << errorNormDp << std::endl;
}
return errorNormDp < mPressureTolerance;
}
// Reacts to a bad velocity error norm of the momentum iteration: flags
// BAD_VELOCITY_CONVERGENCE in the process info and, for a severe failure
// (error above 0.05), rolls velocity, pressure and acceleration back to the
// previous step's values. Returns true when the step proceeds with the
// previous fields (i.e. the time step is "fixed").
bool FixTimeStepMomentum(const double DvErrorNorm)
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double minTolerance = 0.005;
bool fixedTimeStep = false;
// Be lenient during the first few time steps, before the solution settles.
if (currentTime < 3 * timeInterval)
{
minTolerance = 10;
}
// BUG FIX: the original condition also tested
// "(DvErrorNorm < 0 && DvErrorNorm > 0)", which is always false (dead
// code) and has been removed. "DvErrorNorm != DvErrorNorm" is the NaN
// check (NaN compares unequal to itself) and is kept.
if ((DvErrorNorm > minTolerance || (DvErrorNorm != DvErrorNorm)) &&
DvErrorNorm != 0 &&
(DvErrorNorm != 1 || currentTime > timeInterval))
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval" << DvErrorNorm << std::endl;
minTolerance = 0.05;
if (DvErrorNorm > minTolerance)
{
std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << DvErrorNorm << std::endl;
fixedTimeStep = true;
// Roll every node back to the previous step's solution fields.
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
}
}
}
else
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
}
return fixedTimeStep;
}
// Detects divergence during the iterative momentum loop (error above
// 0.99999, or NaN): flags BAD_VELOCITY_CONVERGENCE and restores
// velocity/pressure/acceleration from the previous step. Returns true when
// the previous fields are re-used.
bool CheckMomentumConvergence(const double DvErrorNorm)
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double minTolerance = 0.99999;
bool fixedTimeStep = false;
// BUG FIX: the original condition also tested
// "(DvErrorNorm < 0 && DvErrorNorm > 0)", which is always false (dead
// code) and has been removed. "DvErrorNorm != DvErrorNorm" is the NaN
// check (NaN compares unequal to itself) and is kept.
if ((DvErrorNorm > minTolerance || (DvErrorNorm != DvErrorNorm)) &&
DvErrorNorm != 0 &&
(DvErrorNorm != 1 || currentTime > timeInterval))
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
fixedTimeStep = true;
// Roll back in parallel: each thread writes only to its own node
// partition, so no synchronization is required here.
#pragma omp parallel
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
}
}
}
else
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
}
return fixedTimeStep;
}
// Flags BAD_PRESSURE_CONVERGENCE when the continuity (pressure) error norm
// indicates bad convergence or NaN; tolerances are relaxed during the first
// few time steps. Returns true when the time step must be treated as fixed.
bool FixTimeStepContinuity(const double DvErrorNorm)
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
double minTolerance = 0.01;
bool fixedTimeStep = false;
// Be lenient during the first few time steps.
if (currentTime < 3 * timeInterval)
{
minTolerance = 10;
}
// BUG FIX: the original condition also tested
// "(DvErrorNorm < 0 && DvErrorNorm > 0)", which is always false (dead
// code) and has been removed. "DvErrorNorm != DvErrorNorm" is the NaN
// check (NaN compares unequal to itself) and is kept.
if ((DvErrorNorm > minTolerance || (DvErrorNorm != DvErrorNorm)) &&
DvErrorNorm != 0 &&
(DvErrorNorm != 1 || currentTime > timeInterval))
{
fixedTimeStep = true;
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true);
}
else
{
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
}
return fixedTimeStep;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
// private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
/// Relative tolerance for the momentum (velocity) iteration.
double mVelocityTolerance;
/// Relative tolerance for the continuity (pressure) iteration.
double mPressureTolerance;
/// Maximum number of pressure iterations per time step.
unsigned int mMaxPressureIter;
/// Spatial dimension of the problem (2 or 3), read from the solver settings.
unsigned int mDomainSize;
/// Order of the time integration scheme.
unsigned int mTimeOrder;
/// If true, the DOF set is rebuilt (read from the solver settings).
bool mReformDofSet;
// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
* 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
* 4 : Pressure step
* 5 : Computation of projections
* 6 : End of step velocity
*/
// unsigned int mStepId;
/// Scheme for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;
/// Scheme for the solution of the mass equation
StrategyPointerType mpPressureStrategy;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
// Initializes the momentum and pressure sub-strategies from the given solver
// configuration: reads domain size, DOF-set policy and echo level, then looks
// up the Velocity and Pressure strategies and their tolerances. Throws if
// either strategy is missing from the configuration.
void InitializeStrategy(SolverSettingsType &rSolverConfig)
{
KRATOS_TRY;
// Check that input parameters are reasonable and sufficient.
this->Check();
//ModelPart& rModelPart = this->GetModelPart();
mDomainSize = rSolverConfig.GetDomainSize();
mReformDofSet = rSolverConfig.GetReformDofSet();
BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());
// Initialize strategies for each step
bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy);
if (HaveVelStrategy)
{
rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance);
/* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */
}
else
{
KRATOS_THROW_ERROR(std::runtime_error, "NodalTwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", "");
}
bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy);
if (HavePressStrategy)
{
rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance);
rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter);
}
else
{
KRATOS_THROW_ERROR(std::runtime_error, "NodalTwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", "");
}
// Check input parameters
// NOTE(review): this->Check() is also called at the top of this function;
// the second call re-validates after the strategies are set. Confirm
// whether both calls are required before removing either.
this->Check();
KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator (private: the strategy is non-copyable).
/// BUG FIX: the body was empty although the function returns a reference;
/// flowing off the end of a value-returning function is undefined behavior
/// if it is ever invoked. Return *this (a no-op assignment).
NodalTwoStepVPStrategy &operator=(NodalTwoStepVPStrategy const &rOther) { return *this; }
/// Copy constructor (private: the strategy is non-copyable).
NodalTwoStepVPStrategy(NodalTwoStepVPStrategy const &rOther) {}
///@}
}; /// Class NodalTwoStepVPStrategy
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
|
GB_binop__first_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__first_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__first_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__first_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_fp32)
// A*D function (colscale): GB (_AxD__first_fp32)
// D*A function (rowscale): GB (_DxB__first_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__first_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__first_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_fp32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 1
// BinaryOp: cij = aij
// Macro configuration consumed by the template files #include'd below.
// These specialize the generic kernels for the FIRST operator on float
// (fp32): z = f(x,y) = x, so the values of B are never read (B is pattern).
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// NOTE(review): the trailing '\' after the macro body below splices the
// following comment line into the macro; harmless today (comments are
// removed after line splicing) but fragile — a generator quirk.
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_FP32 || GxB_NO_FIRST_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the FIRST operator (cij = aij), all three matrices dense.
// The numeric work lives in the included template, specialized by the GB_*
// macros above. Auto-generated file: do not edit by hand.
void GB (_Cdense_ewise3_noaccum__first_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (dense accumulate) with the FIRST accumulator. The template is
// compiled out (#if 0): with FIRST, cij = FIRST(cij,bij) = cij, so C is
// unchanged and the function simply reports success (unless disabled).
GrB_Info GB (_Cdense_accumB__first_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar accumulate into a dense matrix) with the FIRST accumulator.
// As above, the template is compiled out (#if 0) because FIRST leaves C
// unchanged; the call reports success (unless the operator is disabled).
GrB_Info GB (_Cdense_accumb__first_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by a diagonal matrix D, specialized for FIRST on
// fp32 via the macros above; the loop nest is in the included template.
GrB_Info GB (_AxD__first_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Raw output values; the template writes Cx[p] entries.
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by a diagonal matrix D, specialized for FIRST on fp32;
// the loop nest is in the included template.
GrB_Info GB (_DxB__first_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Raw output values; the template writes Cx[p] entries.
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M or !M) with the FIRST operator.
// For eWiseUnion, alpha/beta scalars substitute for entries missing from
// A or B respectively. All control flow is in GB_add_template.c.
GrB_Info GB (_AaddB__first_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Workspaces for slicing M, A and B; released by GB_FREE_WORKSPACE.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (optionally masked) where C is
// sparse/hypersparse; the work is in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__first_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for FIRST, so only the non-flipped
// branch of the preprocessor conditional below is compiled.
GrB_Info GB (_AemultB_02__first_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; the work is in GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__first_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where C is bitmap; the work is
// in GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__first_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
GB_binop__lxor_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint64)
// A*D function (colscale): GB (_AxD__lxor_uint64)
// D*A function (rowscale): GB (_DxB__lxor_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint64)
// C=scalar+B GB (_bind1st__lxor_uint64)
// C=scalar+B' GB (_bind1st_tran__lxor_uint64)
// C=A+scalar GB (_bind2nd__lxor_uint64)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) != (bij != 0))
// Macro configuration consumed by the template files #include'd below.
// These specialize the generic kernels for the LXOR operator on uint64:
// z = ((x != 0) != (y != 0)); both A and B values are read.
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// NOTE(review): the trailing '\' after the macro body below splices the
// following comment line into the macro; harmless today (comments are
// removed after line splicing) but fragile — a generator quirk.
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT64 || GxB_NO_LXOR_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the LXOR operator, all three matrices dense. The numeric
// work lives in the included template, specialized by the GB_* macros
// above. Auto-generated file: do not edit by hand.
void GB (_Cdense_ewise3_noaccum__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix with the LXOR
// accumulator; the loop nest is in GB_dense_subassign_23_template.c.
GrB_Info GB (_Cdense_accumB__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix with the LXOR
// accumulator; the loop nest is in GB_dense_subassign_22_template.c.
// The trailing second "return (GrB_SUCCESS)" is unreachable — a known
// artifact of the code generator, kept verbatim.
GrB_Info GB (_Cdense_accumb__lxor_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by a diagonal matrix D, specialized for LXOR on
// uint64 via the macros above; the loop nest is in the included template.
GrB_Info GB (_AxD__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Raw output values; the template writes Cx[p] entries.
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by a diagonal matrix D, specialized for LXOR on
// uint64; the loop nest is in the included template.
GrB_Info GB (_DxB__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Raw output values; the template writes Cx[p] entries.
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M or !M) with the LXOR operator.
// For eWiseUnion, alpha/beta scalars substitute for entries missing from
// A or B respectively. All control flow is in GB_add_template.c.
GrB_Info GB (_AaddB__lxor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Workspaces for slicing M, A and B; released by GB_FREE_WORKSPACE.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (optionally masked) where C is
// sparse/hypersparse; the work is in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__lxor_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 (LXOR is commutative), so only the
// non-flipped branch of the preprocessor conditional below is compiled.
GrB_Info GB (_AemultB_02__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 04: C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full, for the LXOR uint64_t operator.  The included
// template does all of the work.
GrB_Info GB (_AemultB_04__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held in bitmap form: C=A.*B, C<M>=A.*B,
// or C<!M>=A.*B, for the LXOR uint64_t operator.  The bitmap template
// performs the entire computation.
GrB_Info GB (_AemultB_bitmap__lxor_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the LXOR uint64_t operator with the scalar bound
// to the first argument: Cx [p] = (x != 0) XOR (Bx [p] != 0).  Entries not
// present in the bitmap Bb (if non-NULL) are skipped.
GrB_Info GB (_bind1st__lxor_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
uint64_t *out = (uint64_t *) Cx_output ;
uint64_t scalar = (*((uint64_t *) x_input)) ;
uint64_t *in = (uint64_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// only operate on entries present in the bitmap
if (GBB (Bb, k))
{
uint64_t bkj = GBX (in, k, false) ;
out [k] = ((scalar != 0) != (bkj != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the LXOR uint64_t operator with the scalar bound
// to the second argument: Cx [p] = (Ax [p] != 0) XOR (y != 0).  Entries
// not present in the bitmap Ab (if non-NULL) are skipped.
GrB_Info GB (_bind2nd__lxor_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
uint64_t *out = (uint64_t *) Cx_output ;
uint64_t *in = (uint64_t *) Ax_input ;
uint64_t scalar = (*((uint64_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only operate on entries present in the bitmap
if (GBB (Ab, k))
{
uint64_t aik = GBX (in, k, false) ;
out [k] = ((aik != 0) != (scalar != 0)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP computes cij = op (x, aij); despite the macro name, no
// typecasting occurs for this operator.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply the LXOR uint64_t operator with
// the scalar bound to the first argument.  The transpose machinery lives
// in the included GB_unop_transpose.c template.
GrB_Info GB (_bind1st_tran__lxor_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE to its prior definition (generated-code bookkeeping;
// these preprocessor lines emit no code)
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP computes cij = op (aij, y); despite the macro name, no
// typecasting occurs for this operator.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// C = op (A', y): transpose A and apply the LXOR uint64_t operator with
// the scalar bound to the second argument, via the GB_unop_transpose.c
// template.
GrB_Info GB (_bind2nd_tran__lxor_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
/*
Typedef declaration.
*/
/*
  Opaque matrix descriptor.  The element store lives either in heap memory,
  an anonymous memory map, or a disk file, as selected in AcquireMatrixInfo().
*/
struct _MatrixInfo
{
/* where the elements live: MemoryCache, MapCache, or DiskCache */
CacheType
type;
/* matrix geometry; stride is the size in bytes of one element */
size_t
columns,
rows,
stride;
/* total size of the element store in bytes (columns*rows*stride) */
MagickSizeType
length;
/* mapped: elements came from MapBlob(); synchronize: honor
   MAGICK_SYNCHRONIZE (posix_fallocate on extend) */
MagickBooleanType
mapped,
synchronize;
/* path of the backing file for DiskCache/MapCache */
char
path[MagickPathExtent];
/* file descriptor of the backing file, or -1 if none */
int
file;
/* the element store itself (NULL when type is DiskCache) */
void
*elements;
/* serializes non-pread/pwrite file access */
SemaphoreInfo
*semaphore;
/* MagickCoreSignature sanity marker */
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the ImageInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/*
  SIGBUS handler installed by SetMatrixExtent(): a bus error while touching
  a memory-mapped matrix region means the backing file could not be grown
  (e.g. disk full), which is fatal for the cache.
*/
static void MatrixSignalHandler(int status)
{
ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
/*
  Write `length` bytes from `buffer` to the matrix backing file starting at
  `offset`.  Uses pwrite() when available; otherwise serializes an
  lseek()+write() pair under the matrix semaphore.  Short writes are retried
  and EINTR is ignored; returns the number of bytes actually written (the
  caller compares this against `length`), or -1 if the initial seek fails.
*/
static inline MagickOffsetType WriteMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
/* no pwrite(): the seek+write pair must be atomic w.r.t. other threads */
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
/* retry on EINTR; abandon on any other error or EOF-like result */
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PWRITE)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
/*
  Grow the matrix backing file to at least `length` bytes by writing a single
  byte at offset length-1 (sparse extension).  Optionally pre-allocates real
  blocks via posix_fallocate() when MAGICK_SYNCHRONIZE is set, and installs
  the SIGBUS handler so a failed lazy extension of a mapping is fatal rather
  than undefined.  Returns MagickTrue on success.
*/
static MagickBooleanType SetMatrixExtent(
MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
MagickOffsetType
count,
extent,
offset;
/* reject lengths that do not fit in a signed offset */
if (length != (MagickSizeType) ((MagickOffsetType) length))
return(MagickFalse);
offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
if (offset < 0)
return(MagickFalse);
if ((MagickSizeType) offset >= length)
return(MagickTrue);
extent=(MagickOffsetType) length-1;
count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
if (matrix_info->synchronize != MagickFalse)
(void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
(void) signal(SIGBUS,MatrixSignalHandler);
#endif
return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
/*
  AcquireMatrixInfo() allocates and returns a MatrixInfo structure.  The
  element store is placed, in order of preference, in heap memory, an
  anonymous memory map, a memory-mapped disk file, or a plain disk file,
  subject to the resource limits.  Returns NULL on failure.

  Bug fix (review): the original overflow check divided by `rows` and
  `stride` without first rejecting zero values, which is undefined behavior
  (division by zero).  Zero-sized dimensions are now rejected up front.
*/
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
  const size_t rows,const size_t stride,ExceptionInfo *exception)
{
  char
    *synchronize;

  MagickBooleanType
    status;

  MatrixInfo
    *matrix_info;

  /*
    Reject degenerate geometry: the overflow check below divides by rows
    and stride, and a zero-length cache is useless in any case.
  */
  if ((columns == 0) || (rows == 0) || (stride == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return((MatrixInfo *) NULL);
    }
  matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
  if (matrix_info == (MatrixInfo *) NULL)
    return((MatrixInfo *) NULL);
  (void) memset(matrix_info,0,sizeof(*matrix_info));
  matrix_info->signature=MagickCoreSignature;
  matrix_info->columns=columns;
  matrix_info->rows=rows;
  matrix_info->stride=stride;
  matrix_info->semaphore=AcquireSemaphoreInfo();
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      matrix_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Detect arithmetic overflow in columns*rows*stride.
  */
  matrix_info->length=(MagickSizeType) columns*rows*stride;
  if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  /*
    First choice: heap memory, or an anonymous map if the heap allocation
    fails, provided the area and memory resource limits allow it.
  */
  matrix_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,matrix_info->length);
  if ((status != MagickFalse) &&
      (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,matrix_info->length);
      if (status != MagickFalse)
        {
          matrix_info->mapped=MagickFalse;
          matrix_info->elements=AcquireMagickMemory((size_t)
            matrix_info->length);
          if (matrix_info->elements == NULL)
            {
              matrix_info->mapped=MagickTrue;
              matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
                matrix_info->length);
            }
          if (matrix_info->elements == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,matrix_info->length);
        }
    }
  /*
    Fall back to a disk cache, memory-mapped when the map resource allows.
  */
  matrix_info->file=(-1);
  if (matrix_info->elements == (unsigned short *) NULL)
    {
      status=AcquireMagickResource(DiskResource,matrix_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'","matrix cache");
          return(DestroyMatrixInfo(matrix_info));
        }
      matrix_info->type=DiskCache;
      matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
      if (matrix_info->file == -1)
        return(DestroyMatrixInfo(matrix_info));
      status=AcquireMagickResource(MapResource,matrix_info->length);
      if (status != MagickFalse)
        {
          status=SetMatrixExtent(matrix_info,matrix_info->length);
          if (status != MagickFalse)
            matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
              (size_t) matrix_info->length);
          if (matrix_info->elements != NULL)
            matrix_info->type=MapCache;
          else
            RelinquishMagickResource(MapResource,matrix_info->length);
        }
    }
  return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This used to generate the two dimensional matrix, and vectors required
% for the GaussJordanElimination() method below, solving some system of
% simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
/*
  Allocate a number_rows x size matrix as an array of row pointers to arrays
  of doubles, with every element set to 0.0.  Returns NULL on allocation
  failure (any partially-allocated rows are released first).
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  double
    **matrix;

  register ssize_t
    i,
    j;

  matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (i=0; i < (ssize_t) number_rows; i++)
  {
    matrix[i]=(double *) AcquireQuantumMemory(size,sizeof(**matrix));
    if (matrix[i] == (double *) NULL)
      {
        /*
          Unwind: release the rows allocated so far, then the pointer array.
        */
        while (--i >= 0)
          matrix[i]=(double *) RelinquishMagickMemory(matrix[i]);
        return((double **) RelinquishMagickMemory(matrix));
      }
    for (j=0; j < (ssize_t) size; j++)
      matrix[i][j]=0.0;
  }
  return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyImage method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  Release all resources held by the matrix: the element store (heap, map, or
  disk file), the associated resource-limit accounting, the semaphore, and
  the MatrixInfo structure itself.  Always returns NULL for caller
  convenience (matrix_info=DestroyMatrixInfo(matrix_info)).
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
LockSemaphoreInfo(matrix_info->semaphore);
switch (matrix_info->type)
{
case MemoryCache:
{
if (matrix_info->mapped == MagickFalse)
matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
else
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=(unsigned short *) NULL;
}
RelinquishMagickResource(MemoryResource,matrix_info->length);
break;
}
case MapCache:
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=NULL;
RelinquishMagickResource(MapResource,matrix_info->length);
}
/* fall through: a map cache is disk-backed, so the DiskCache case must
   also run to close and remove the backing file */
case DiskCache:
{
if (matrix_info->file != -1)
(void) close(matrix_info->file);
(void) RelinquishUniqueFileResource(matrix_info->path);
RelinquishMagickResource(DiskResource,matrix_info->length);
break;
}
default:
break;
}
UnlockSemaphoreInfo(matrix_info->semaphore);
RelinquishSemaphoreInfo(&matrix_info->semaphore);
return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augmented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix argumenting the matrix for row reduction.
% Producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns).
% Also represents the number terms that need to be solved.
%
% o number_vectors: Number of vectors columns, argumenting the above matrix.
% Usually 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as a 'array of row pointers' of rank size.
% That is values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is the matrix is in the form of a 'row first array'.
%
% However 'vectors' is a 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially is only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles. when only one set of simultanious equations is to be solved from
% the given set of coefficient weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficents[8];
% ...
% GaussJordanElimination(matrix, &coefficents, 8UL, 1UL);
%
% However by specifing more 'columns' (as an 'array of vector columns',
% you can use this function to solve a set of 'separable' equations.
%
% For example a distortion function where u = U(x,y) v = V(x,y)
% And the functions U() and V() have separate coefficents, but are being
% generated from a common x,y->u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordients, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
/*
  Reduce `matrix` (rank x rank, array of row pointers) to reduced row echelon
  form by Gauss-Jordan elimination with full pivoting, simultaneously
  applying the same operations to the `number_vectors` result columns in
  `vectors` (array of column pointers), thereby solving the system(s).
  Returns MagickFalse on allocation failure or a singular matrix.

  Bug fix (review): the original returned MagickFalse from inside the
  elimination loop (duplicate pivot, singular matrix) without releasing the
  columns/rows/pivots workspace, leaking three allocations per failure.
  Failure paths now funnel through a single cleanup label.
*/
MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix,
  double **vectors,const size_t rank,const size_t number_vectors)
{
#define GaussJordanSwap(x,y) \
{ \
  if ((x) != (y)) \
    { \
      (x)+=(y); \
      (y)=(x)-(y); \
      (x)=(x)-(y); \
    } \
}

  double
    max,
    scale;

  register ssize_t
    i,
    j,
    k;

  ssize_t
    column,
    *columns,
    *pivots,
    row,
    *rows;

  /*
    Allocate pivot bookkeeping workspace.
  */
  columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
  rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
  pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
  if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
      (pivots == (ssize_t *) NULL))
    {
      if (pivots != (ssize_t *) NULL)
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
      if (columns != (ssize_t *) NULL)
        columns=(ssize_t *) RelinquishMagickMemory(columns);
      if (rows != (ssize_t *) NULL)
        rows=(ssize_t *) RelinquishMagickMemory(rows);
      return(MagickFalse);
    }
  (void) memset(columns,0,rank*sizeof(*columns));
  (void) memset(rows,0,rank*sizeof(*rows));
  (void) memset(pivots,0,rank*sizeof(*pivots));
  column=0;
  row=0;
  for (i=0; i < (ssize_t) rank; i++)
  {
    /*
      Full pivoting: find the largest remaining element.
    */
    max=0.0;
    for (j=0; j < (ssize_t) rank; j++)
      if (pivots[j] != 1)
        {
          for (k=0; k < (ssize_t) rank; k++)
            if (pivots[k] != 0)
              {
                if (pivots[k] > 1)
                  goto fail;  /* duplicate pivot: inconsistent system */
              }
            else
              if (fabs(matrix[j][k]) >= max)
                {
                  max=fabs(matrix[j][k]);
                  row=j;
                  column=k;
                }
        }
    pivots[column]++;
    /*
      Move the pivot onto the diagonal, mirroring the swap in the vectors.
    */
    if (row != column)
      {
        for (k=0; k < (ssize_t) rank; k++)
          GaussJordanSwap(matrix[row][k],matrix[column][k]);
        for (k=0; k < (ssize_t) number_vectors; k++)
          GaussJordanSwap(vectors[k][row],vectors[k][column]);
      }
    rows[i]=row;
    columns[i]=column;
    if (matrix[column][column] == 0.0)
      goto fail;  /* singularity */
    /*
      Normalize the pivot row, then eliminate the pivot column elsewhere.
    */
    scale=PerceptibleReciprocal(matrix[column][column]);
    matrix[column][column]=1.0;
    for (j=0; j < (ssize_t) rank; j++)
      matrix[column][j]*=scale;
    for (j=0; j < (ssize_t) number_vectors; j++)
      vectors[j][column]*=scale;
    for (j=0; j < (ssize_t) rank; j++)
      if (j != column)
        {
          scale=matrix[j][column];
          matrix[j][column]=0.0;
          for (k=0; k < (ssize_t) rank; k++)
            matrix[j][k]-=scale*matrix[column][k];
          for (k=0; k < (ssize_t) number_vectors; k++)
            vectors[k][j]-=scale*vectors[k][column];
        }
  }
  /*
    Undo the column permutations introduced by full pivoting.
  */
  for (j=(ssize_t) rank-1; j >= 0; j--)
    if (columns[j] != rows[j])
      for (i=0; i < (ssize_t) rank; i++)
        GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(MagickTrue);

fail:
  /*
    Failure inside the elimination loop: release the workspace (previously
    leaked) and report failure.
  */
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  Return the number of columns in the matrix.
*/
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specifed element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
/*
  Clamp an x offset into the valid column range [0, columns-1]
  (replicate-edge addressing policy).
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x < 0L ? 0L : x);
}
/*
  Clamp a y offset into the valid row range [0, rows-1]
  (replicate-edge addressing policy).
*/
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y < 0L ? 0L : y);
}
/*
  Read `length` bytes into `buffer` from the matrix backing file starting at
  `offset`.  Mirrors WriteMatrixElements(): pread() when available,
  otherwise a semaphore-guarded lseek()+read().  Short reads are retried and
  EINTR is ignored; returns the number of bytes actually read, or -1 if the
  initial seek fails.
*/
static inline MagickOffsetType ReadMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PREAD)
/* no pread(): the seek+read pair must be atomic w.r.t. other threads */
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PREAD)
count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
/* retry on EINTR; abandon on any other error or end-of-file */
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PREAD)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
/*
  Copy one element (stride bytes) at (x,y) into `value`.  Out-of-range
  offsets are clamped to the nearest edge (replicate policy) rather than
  rejected — note this differs from SetMatrixElement(), which bounds-checks.
  Returns MagickFalse only if a disk-cache read comes up short.
*/
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
const ssize_t x,const ssize_t y,void *value)
{
MagickOffsetType
count,
i;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
/* clamp to the matrix bounds, then compute the linear element index */
i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
EdgeX(x,matrix_info->columns);
if (matrix_info->type != DiskCache)
{
(void) memcpy(value,(unsigned char *) matrix_info->elements+i*
matrix_info->stride,matrix_info->stride);
return(MagickTrue);
}
count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
matrix_info->stride,(unsigned char *) value);
if (count != (MagickOffsetType) matrix_info->stride)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  Return the number of rows in the matrix.
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associate results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the AcquireMagickMatrix method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficent
% weights) that forms the equation being added.
%
% o results: the result(s) that should be generated from the given terms
% weighted by the yet-to-be-solved coefficents.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: Number of result vectors, and number or results being
% added. Also represents the number of separable systems of equations
% that is being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n");
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
/*
  Accumulate one observation into the normal equations for least-squares
  fitting: matrix[i][j] += terms[i]*terms[j] and
  vectors[i][j] += results[i]*terms[j].  Call once per data point, then
  solve with GaussJordanElimination().
*/
MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors,
  const double *terms,const double *results,const size_t rank,
  const size_t number_vectors)
{
  register ssize_t
    i,
    j;

  for (j=0; j < (ssize_t) rank; j++)
  {
    const double
      weight = terms[j];

    for (i=0; i < (ssize_t) rank; i++)
      matrix[i][j]+=terms[i]*weight;
    for (i=0; i < (ssize_t) number_vectors; i++)
      vectors[i][j]+=results[i]*weight;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Render the matrix as a grayscale image: each element (assumed to hold a
  double — the stride check below is the only guard) is linearly scaled from
  the observed [min,max] range to the quantum range.  Returns NULL if the
  stride is too small to hold a double, or if pixel synchronization fails.
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
double
max_value,
min_value,
scale_factor;
Image
*image;
MagickBooleanType
status;
ssize_t
y;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* elements must be at least double-sized for the reads below */
if (matrix_info->stride < sizeof(double))
return((Image *) NULL);
/*
Determine range of matrix.
*/
(void) GetMatrixElement(matrix_info,0,0,&min_value);
max_value=min_value;
for (y=0; y < (ssize_t) matrix_info->rows; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) matrix_info->columns; x++)
{
double
value;
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
if (value < min_value)
min_value=value;
else
if (value > max_value)
max_value=value;
}
}
/* all-zero matrix maps to black; constant matrix maps to white */
if ((min_value == 0.0) && (max_value == 0.0))
scale_factor=0;
else
if (min_value == max_value)
{
scale_factor=(double) QuantumRange/min_value;
min_value=0;
}
else
scale_factor=(double) QuantumRange/(max_value-min_value);
/*
Convert matrix to image.
*/
image=AcquireImage((ImageInfo *) NULL,exception);
image->columns=matrix_info->columns;
image->rows=matrix_info->rows;
image->colorspace=GRAYColorspace;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
value;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
/* elements that fail to read keep the previous scan value */
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
value=scale_factor*(value-min_value);
*q=ClampToQuantum(value);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the memset method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/*
  NullMatrix() sets all elements of the matrix to zero.  Returns MagickTrue
  on success.

  Bug fix (review): in the disk-cache path the original iterated the inner
  byte loop up to matrix_info->length (the TOTAL byte count) once per row,
  writing rows*length bytes and growing the backing file to rows times its
  intended size.  Exactly `length` bytes are now written.
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  ssize_t
    count,
    i;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  if (matrix_info->type != DiskCache)
    {
      /* in-memory caches can be cleared in one shot */
      (void) memset(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (i=0; i < (ssize_t) matrix_info->length; i++)
  {
    count=write(matrix_info->file,&value,sizeof(value));
    if (count != (ssize_t) sizeof(value))
      break;
  }
  return(i < (ssize_t) matrix_info->length ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
/*
  Free a matrix previously returned by AcquireMagickMatrix(): each of the
  number_rows row arrays, then the array of row pointers.  A NULL matrix is
  tolerated.  Always returns NULL.
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  register ssize_t
    i;

  if (matrix != (double **) NULL)
    {
      for (i=(ssize_t) number_rows-1; i >= 0; i--)
        matrix[i]=(double *) RelinquishMagickMemory(matrix[i]);
      matrix=(double **) RelinquishMagickMemory(matrix);
    }
  return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  MagickOffsetType
    count,
    offset;

  /*
    Store one element at (x,y); returns MagickFalse when the element
    offset falls outside the matrix.
  */
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  offset=(MagickOffsetType) y*matrix_info->columns+x;
  if ((offset < 0) ||
      ((MagickSizeType) (offset*matrix_info->stride) >= matrix_info->length))
    return(MagickFalse);
  if (matrix_info->type == DiskCache)
    {
      /* Disk-backed matrix: delegate to the element writer. */
      count=WriteMatrixElements(matrix_info,offset*matrix_info->stride,
        matrix_info->stride,(unsigned char *) value);
      return(count == (MagickOffsetType) matrix_info->stride ? MagickTrue :
        MagickFalse);
    }
  /* Memory-resident matrix: copy straight into the element buffer. */
  (void) memcpy((unsigned char *) matrix_info->elements+offset*
    matrix_info->stride,value,matrix_info->stride);
  return(MagickTrue);
}
|
graph-diameter-parallel.c | #include <stdio.h>
#include <sys/time.h>
#include <stdint.h>
#include <stdlib.h>
#include <limits.h>
#define MAX 10000
#define NOT_CONNECTED (INT_MAX)
int diameter(int distance[MAX][MAX], int nodesCount);
int distance[MAX][MAX];
/* initialize all distances to NOT_CONNECTED (no edge); self-distances to 0 */
/* Mark every ordered pair as unreachable, except each node to itself (0). */
void Initialize() {
    int from, to;
    for (from = 0; from < MAX; ++from) {
        for (to = 0; to < MAX; ++to) {
            distance[from][to] = (from == to) ? 0 : NOT_CONNECTED;
        }
    }
}
/* Wall-clock time in microseconds since the Unix epoch. */
uint64_t GetTimeStamp() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (uint64_t) now.tv_sec * 1000000u + (uint64_t) now.tv_usec;
}
/*
 * Reads a weighted directed graph from stdin (node count, edge count,
 * then "from to weight" triples), validates every value, and prints the
 * graph diameter plus the elapsed computation time in microseconds.
 * Exits with status 1 on malformed or out-of-range input.
 */
int main() {
    /* number of nodes */
    int nodeCount;
    /* Number of edges */
    int m;
    Initialize();
    /* get the node count */
    if (scanf("%d", &nodeCount) < 1) {
        fprintf(stderr, "Error reading node count\n");
        exit(1);
    }
    if (nodeCount < 1 || nodeCount > MAX) {
        fprintf(stderr, "Invalid number of nodes, %d. Must be 1 to %d\n",
                nodeCount, MAX);
        exit(1);
    }
    /* edge count */
    if (scanf("%d", &m) < 1) {
        fprintf(stderr, "Error reading edge count\n");
        exit(1);
    }
    if (m < nodeCount - 1 || m > nodeCount * (nodeCount - 1)) {
        fprintf(stderr, "Invalid number of edges, %d. Must be %d to %d\n",
                m, nodeCount - 1, nodeCount * (nodeCount - 1));
        exit(1);
    }
    while (m--) {
        /* nodes - let the indexation begin from 0 */
        int a, b;
        /* edge weight */
        int c;
        if (scanf("%d %d %d", &a, &b, &c) < 3) {
            fprintf(stderr, "Error reading edge\n");
            exit(1);
        }
        if (a < 0 || a >= nodeCount || b < 0 || b >= nodeCount || c <= 0) {
            fprintf(stderr, "Invalid edge: from %d to %d weight %d\n", a, b, c);
            exit(1);
        }
        distance[a][b] = c;
    }
    uint64_t start = GetTimeStamp();
    printf("Diameter %d\n", diameter(distance, nodeCount));
    /* Bug fix: %ld with a uint64_t argument is a printf format mismatch
       (undefined behavior where long is 32-bit, e.g. Win64/32-bit Linux);
       %llu with an explicit cast is portable. */
    printf("Time: %llu us\n", (unsigned long long) (GetTimeStamp() - start));
    return 0;
}
/******************************************************************************/
/* Your changes here */
#include "omp.h"
#include "pthread.h"
#include "threadqueue.c"
void *floydsParallel();
void *findDiameter();
/* A deferred distance-matrix relaxation: set distance[i][j] = value.
 * NOTE(review): tasks of this type are queued by floydsParallel but no
 * consumer ever drains the queue, so the updates are currently lost --
 * confirm the intended consumer (findDiameter?) before relying on this. */
struct DistanceUpdateTask {
    int i;      /* row index of the entry to relax */
    int j;      /* column index of the entry to relax */
    int value;  /* candidate shorter distance for distance[i][j] */
};
struct threadqueue *distanceUpdateQueue;
thread_queue_init(struct threadqueue *distanceUpdateQueue);
int distanceGlobal[MAX][MAX];
int nodesCountGlobal;
int nodesCountLocal;
#pragma omp threadprivate(nodesCountLocal)
/*
 * Computes the graph diameter: the largest finite shortest-path distance
 * over all ordered node pairs (-1 if nodesCount is 0).  The matrix is
 * relaxed in place.
 *
 * Bug fixes vs. the previous version: relaxations were only *queued*
 * (the queue was never drained and the worker thread's effect was
 * discarded), so the result was just the largest direct edge; and the
 * k loop of Floyd-Warshall was parallelized, which is both a data race
 * and wrong ordering.  The k stages must run sequentially; each stage is
 * safely parallelized over rows i because stage k never changes row k or
 * column k.
 */
int diameter(int distance[MAX][MAX], int nodesCount) {
    for (int k = 0; k < nodesCount; ++k) {
        #pragma omp parallel for
        for (int i = 0; i < nodesCount; ++i) {
            if (distance[i][k] == NOT_CONNECTED)
                continue;  /* no path through k from i */
            for (int j = 0; j < nodesCount; ++j) {
                if (distance[k][j] != NOT_CONNECTED &&
                    (distance[i][j] == NOT_CONNECTED ||
                     distance[i][k] + distance[k][j] < distance[i][j])) {
                    distance[i][j] = distance[i][k] + distance[k][j];
                }
            }
        }
    }
    /* look for the most distant connected pair */
    int result = -1;
    for (int i = 0; i < nodesCount; ++i) {
        for (int j = 0; j < nodesCount; ++j) {
            if (distance[i][j] != NOT_CONNECTED && result < distance[i][j]) {
                result = distance[i][j];
            }
        }
    }
    return result;
}
/*
 * pthread entry point: runs Floyd-Warshall relaxation in place on the
 * global distance matrix for nodesCountGlobal nodes.
 *
 * Bug fixes vs. the previous version: updates were pushed onto a queue
 * that nothing consumed (so the matrix was never relaxed); the k loop
 * was parallelized, which races and breaks the required stage ordering;
 * and the function fell off the end without returning a value (undefined
 * behavior for a non-void function whose result pthread_join collects).
 * The k stages now run sequentially and each stage parallelizes over
 * rows i, which is safe because stage k never modifies row k or column k.
 */
void *floydsParallel() {
    nodesCountLocal = nodesCountGlobal;  /* keep the threadprivate copy coherent */
    for (int k = 0; k < nodesCountGlobal; ++k) {
        #pragma omp parallel for
        for (int i = 0; i < nodesCountGlobal; ++i) {
            if (distance[i][k] == NOT_CONNECTED)
                continue;
            for (int j = 0; j < nodesCountGlobal; ++j) {
                if (distance[k][j] != NOT_CONNECTED &&
                    (distance[i][j] == NOT_CONNECTED ||
                     distance[i][k] + distance[k][j] < distance[i][j])) {
                    distance[i][j] = distance[i][k] + distance[k][j];
                }
            }
        }
    }
    return NULL;
}
/*
 * Placeholder pthread entry point (never started in this file).
 * Bug fix: the empty body fell off the end of a non-void function, which
 * is undefined behavior if the return value is ever used; return NULL.
 */
void *findDiameter() {
    return NULL;
}
/* The following is the exact command used to compile this code */
/* g++ -O2 graph-diameter.cpp -o graph-diameter */
|
lcpi_logic.c | /*
* Copyright (c) 2011-2016 University of Texas at Austin. All rights reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* This file is part of PerfExpert.
*
* PerfExpert is free software: you can redistribute it and/or modify it under
* the terms of the The University of Texas at Austin Research License
*
* PerfExpert is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE.
*
* Authors: Antonio Gomez-Iglesias, Leonardo Fialho and Ashay Rane
*
* $HEADER$
*/
#ifdef __cplusplus
extern "C" {
#endif
/* System standard headers */
#include <string.h>
/* Utility headers */
#include <matheval.h>
/* Modules headers */
#include "lcpi.h"
#include "lcpi_types.h"
/* PerfExpert common headers */
#include "common/perfexpert_alloc.h"
#include "common/perfexpert_constants.h"
#include "common/perfexpert_hash.h"
#include "common/perfexpert_list.h"
#include "common/perfexpert_output.h"
#include <omp.h>
#include <sched.h>
/* logic_lcpi_compute */
/*
 * Evaluates every LCPI metric expression for every hotspot of 'profile',
 * once per (MPI task, thread) measurement pair found in the database, and
 * attaches the evaluated metric to each hotspot's metric hash.
 * Returns PERFEXPERT_SUCCESS, or PERFEXPERT_ERROR on hound-import or
 * database-connection failure.
 */
int logic_lcpi_compute(lcpi_profile_t *profile) {
    lcpi_metric_t *h_lcpi = NULL, *l = NULL, *t = NULL;
    lcpi_hotspot_t *h = NULL;
    double *values = NULL;
    lcpi_hound_t *hound_info;
    char **names = NULL;
    int count = 0, i = 0;
    int mpi_tasks, threads, num_threads;
    int task, thread, my_thread;
    /* One SQLite connection per OpenMP worker thread; presumably to avoid
       sharing a handle across threads -- confirm against SQLite thread mode. */
    sqlite3 *db[MAX_THREADS];
    OUTPUT_VERBOSE((4, "%s", _YELLOW("Calculating LCPI metrics")));
    /* Lazily import the machine-characterization ("hound") table. */
    if (my_module_globals.hound_info==NULL) {
        if (PERFEXPERT_SUCCESS != import_hound (my_module_globals.hound_info)) {
            OUTPUT((_ERROR("importing hound")));
            return PERFEXPERT_ERROR;
        }
    }
    /* Measure the OpenMP team size.  NOTE(review): every thread stores to
       num_threads concurrently (same value, a benign-looking race), and the
       modulo yields 0 when the team size equals MAX_THREADS exactly, which
       would skip the connection loop below -- confirm intent. */
    #pragma omp parallel
    {
        num_threads = omp_get_num_threads()%MAX_THREADS;
    }
    /* Open one database connection per worker thread. */
    for (i = 0; i < num_threads; ++i) {
        if (PERFEXPERT_SUCCESS != perfexpert_database_connect(&(db[i]),globals.dbfile)) {
            OUTPUT(("ERROR creating DB number %d", i));
            return PERFEXPERT_ERROR;
        }
    }
    mpi_tasks = database_get_mpi_tasks();
    threads = database_get_threads();
    for (task = 0; task < mpi_tasks; task++) {
        /* Iterate over all threads */
        for (thread = 0; thread < threads; thread++) {
            /* With this if statement we don't need a break at the end -> good for OpenMP */
            if ((globals.output_mode != SERIAL_OUTPUT) || ((globals.output_mode == SERIAL_OUTPUT) && (task == 0) && (thread == 0))) {
                //#pragma omp parallel private (h, task, thread, count, l, hound_info, values, h_lcpi, names, i, t) num_threads(num_threads)
                /* For each LCPI definition... */
                perfexpert_hash_iter_str(my_module_globals.metrics_by_name, l, t) {
                    /* Get the list of variables and their values */
                    evaluator_get_variables(l->expression, &names, &count);
                    if (count <= 0) {
                        continue;
                    }
                    /* For each hotspot in this profile... */
                    /* One OpenMP task per hotspot; the 'single' block ensures a
                       lone thread creates the tasks while the team executes them. */
                    #pragma omp parallel private(i, h_lcpi, values, hound_info, my_thread, h) default(none) shared(profile, names, count, l, task, thread, my_module_globals, db) num_threads(num_threads)
                    {
                        #pragma omp single nowait
                        {
                            perfexpert_list_for(h, &(profile->hotspots), lcpi_hotspot_t) {
                                #pragma omp task
                                {
                                    my_thread = omp_get_thread_num();
                                    OUTPUT_VERBOSE((10, " %s (%s:%d@%s)", _YELLOW(h->name),
                                    h->file, h->line, h->module->name));
                                    /* Fresh metric instance for this (hotspot, task, thread). */
                                    PERFEXPERT_ALLOC(lcpi_metric_t, h_lcpi, sizeof(lcpi_metric_t));
                                    strcpy(h_lcpi->name_md5, l->name_md5);
                                    h_lcpi->expression = l->expression;
                                    h_lcpi->value = l->value;
                                    h_lcpi->name = l->name;
                                    h_lcpi->mpi_task = task;
                                    h_lcpi->thread_id = thread;
                                    /* NOTE(review): sizeof(double *) sizes an array of double --
                                       identical on LP64 but looks unintended; confirm. */
                                    PERFEXPERT_ALLOC(double, values, (sizeof(double *) * count));
                                    /* Iterate over all the events of each metric */
                                    for (i = 0; i < count; i++) {
                                        /* Check if the current event is in hound */
                                        perfexpert_hash_find_str(my_module_globals.hound_info, perfexpert_md5_string(names[i]), hound_info);
                                        if (hound_info) {
                                            /* If in hound, use that value */
                                            values[i] = hound_info->value;
                                            OUTPUT_VERBOSE((10, " Found name %s = %g", names[i], values[i]));
                                        } else {
                                            /* If not in hound, look that event in the DB for the task and thread */
                                            values[i] = database_get_event(db[my_thread], names[i], h->id, task, thread);
                                            if (values[i] != -1.0) {
                                                OUTPUT_VERBOSE((10, " [%d] Found name %s = %g", h->id, names[i], values[i]));
                                            }
                                            else {
                                                /* Value not found */
                                                values[i] = 0.0;
                                            }
                                        }
                                    }
                                    /* Evaluate the LCPI expression */
                                    #pragma omp critical
                                    {
                                        h_lcpi->value = evaluator_evaluate(h_lcpi->expression, count,
                                        names, values);
                                    }
                                    #pragma omp critical
                                    {
                                        /* Add the LCPI to the hotspot's list of LCPIs */
                                        perfexpert_hash_add_str(h->metrics_by_name, name_md5, h_lcpi);
                                    }
                                    OUTPUT_VERBOSE((10, " %s (%d - %d) = [%g]", h_lcpi->name, h_lcpi->mpi_task, h_lcpi->thread_id, h_lcpi->value));
                                    PERFEXPERT_DEALLOC(values);
                                } //task
                            }
                        } // single
                    } //parallel
                    // if it's serial (aggregated), we don't need to continue
                    //if (my_module_globals.output == SERIAL_OUTPUT)
                    // break;
                }
                // if it's serial (aggregated), we don't need to continue
                //if (my_module_globals.output == SERIAL_OUTPUT)
                // break;
            }
        } //thread
    }//mpi
    /* Close the per-thread database connections. */
    for (i = 0; i < num_threads; ++i) {
        if (PERFEXPERT_SUCCESS != perfexpert_database_disconnect(db[i])) {
            OUTPUT(("ERROR disconnecting DB number %d", i));
            return PERFEXPERT_ERROR;
        }
    }
    // PERFEXPERT_DEALLOC(db);
    return PERFEXPERT_SUCCESS;
}
#ifdef __cplusplus
}
#endif
// EOF
|
GB_unop__log2_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log2_fp32_fp32)
// op(A') function: GB (_unop_tran__log2_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log2f (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log2f (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = log2f (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = log2f (Ax [p]) for every entry, optionally restricted to the
// entries present in the bitmap Ab.  Returns GrB_NO_VALUE when this
// operator/type pairing is compiled out (GB_DISABLE above).
GrB_Info GB (_unop_apply__log2_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads                // OpenMP threads for the parallel loops
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = log2f (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = log2f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator;
// the actual work is performed by the shared GB_unop_transpose.c template
// using the GB_* macros defined above.
GrB_Info GB (_unop_tran__log2_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for timeval values, normalized so that
 * result->tv_usec stays in range.  y is used as scratch space and is
 * modified.  Returns 1 when the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    long borrow;

    /* Borrow whole seconds into y so x->tv_usec - y->tv_usec >= 0. */
    if (x->tv_usec < y->tv_usec) {
        borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Push excess microseconds (more than one second) back into y. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        borrow = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * borrow;
        y->tv_sec -= borrow;
    }
    /* tv_usec is certainly positive here. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Order-1, 3D 7-point variable-coefficient stencil benchmark driver.
 * Usage: prog [NX NY NZ [NT]]; runs TESTS timed sweeps and reports the
 * fastest.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    /* Bug fix: the previous version left the dimensions uninitialized when
       fewer than 4 arguments were given (undefined behavior); provide
       defaults (+2 accounts for the boundary halo). */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    // allocate the two time planes of the grid
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2; m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    // one coefficient array per stencil point
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for(m=0; m<7; m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 24;
    tile_size[3] = 1024;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;
    // initialize variables
    srand(42);
    /* Bug fix: the random fill below starts at index 1, but the stencil
       reads the index-0 boundary plane; zero everything first so no
       uninitialized memory is ever read. */
    for (m = 0; m < 2; m++)
        for (i = 0; i < Nz; i++)
            for (j = 0; j < Ny; j++)
                for (k = 0; k < Nx; k++)
                    A[m][i][j][k] = 0.0;
    for (m = 0; m < 7; m++)
        for (i = 0; i < Nz; i++)
            for (j = 0; j < Ny; j++)
                for (k = 0; k < Nx; k++)
                    coef[m][i][j][k] = 0.0;
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
    for (m=0; m<7; m++) {
        for (i=1; i<Nz; i++) {
            for (j=1; j<Ny; j++) {
                for (k=1; k<Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
                            coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
                            coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
                            coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
                            coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
                            coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
                            coef[6][i][j][k] * A[t%2][i ][j ][k+1];
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* Bug fix: this file defines MIN, not min; the lowercase call did
           not compile. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    for(m=0; m<7;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(tile_size);  /* was leaked previously */
    return 0;
}
|
GB_unaryop__minv_int16_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int16_bool
// op(A') function: GB_tran__minv_int16_bool
// C type: int16_t
// A type: bool
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 16)
#define GB_ATYPE \
bool
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 16) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_SIGNED ((int16_t) Ax [p], 16) for all anz entries:
// each bool is cast to int16_t, then the signed integer multiplicative
// inverse is applied (per the GB_OP/GB_CASTING macros above).  Returns
// GrB_NO_VALUE when this operator/type pairing is compiled out.
GrB_Info GB_unop__minv_int16_bool
(
    int16_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads            // OpenMP threads for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = op (cast (Ax [p]))
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast bool -> int16_t, and apply the
// unary operator; the work happens in the shared GB_unaryop_transpose.c
// template, compiled here as phase 2 of 2.
GrB_Info GB_tran__minv_int16_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for timeval values, normalized so that
 * result->tv_usec stays in range.  y is used as scratch space and is
 * modified.  Returns 1 when the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    long borrow;

    /* Borrow whole seconds into y so x->tv_usec - y->tv_usec >= 0. */
    if (x->tv_usec < y->tv_usec) {
        borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Push excess microseconds (more than one second) back into y. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        borrow = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * borrow;
        y->tv_sec -= borrow;
    }
    /* tv_usec is certainly positive here. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 4;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(4*t2-Nz,4));t3<=min(min(min(floord(4*t2+Ny,4),floord(Nt+Ny-4,4)),floord(2*t1+Ny+1,4)),floord(4*t1-4*t2+Nz+Ny-1,4));t3++) {
for (t4=max(max(max(0,ceild(t1-1023,1024)),ceild(4*t2-Nz-2044,2048)),ceild(4*t3-Ny-2044,2048));t4<=min(min(min(min(floord(4*t2+Nx,2048),floord(4*t3+Nx,2048)),floord(Nt+Nx-4,2048)),floord(2*t1+Nx+1,2048)),floord(4*t1-4*t2+Nz+Nx-1,2048));t4++) {
for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),4*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),4*t3+2),2048*t4+2046),4*t1-4*t2+Nz+1);t5++) {
for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_unaryop__ainv_bool_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_bool_uint8
// op(A') function: GB_tran__ainv_bool_uint8
// C type: bool
// A type: uint8_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (bool) Ax [p] for all anz entries: per the GB_OP definition
// above (z = x), AINV on bool reduces to the uint8_t -> bool cast itself.
// Returns GrB_NO_VALUE when this operator/type pairing is compiled out.
GrB_Info GB_unop__ainv_bool_uint8
(
    bool *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads            // OpenMP threads for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = op (cast (Ax [p]))
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast uint8_t -> bool, and apply the
// unary operator; the work happens in the shared GB_unaryop_transpose.c
// template, compiled here as phase 2 of 2.
GrB_Info GB_tran__ainv_bool_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
FastFourierTransform.h | //Copyright (c) 2019 Mauricio Kugler, Nagoya Institute of Technology
#ifndef FASTFOURIERTRANSFORMH
#define FASTFOURIERTRANSFORMH
#include <complex>
#include <cmath>
using namespace std;
// Fast Fourier transform for 1-D, 2-D and 3-D float data.
// fft1/fft2 produce complex-conjugate results (see notes below); the
// ifft* methods return pointers to internally owned real buffers
// (r1/r2/r3) that remain valid only until the next call.  All work
// buffers are set up by initFFT (defined elsewhere).
class FastFourierTransform
{
private:
    unsigned int N1,N2,N3;          // per-dimension transform lengths
    unsigned int M1,M2,M3;          // inner loop bounds of fft/ifft (presumably N/2 -- confirm in initFFT)
    unsigned int K1,K2,K3;          // number of butterfly stages (presumably log2 N -- confirm in initFFT)
    float F1,F2,F3;                 // scale factors applied by the inverse transforms
    unsigned int *D1, *D2, *D3;     // output reordering (permutation) tables
    complex<float> *w1, *w2, *w3;   // coefficient tables used by the forward transforms
    complex<float> *v1, *v2, *v3;   // coefficient tables used by the inverse transforms
    complex<float> **z1a;           // 1-D staging buffers, one row per stage
    complex<float> *y1;             // 1-D complex output buffer
    complex<float> ***z2a, ***u2a;  // 2-D staging buffers (pass 1 / pass 2)
    complex<float> **z2b, **u2b;    // 2-D per-pass intermediate results
    complex<float> **y2;            // 2-D complex output buffer
    complex<float> ****z3a, ****u3a, ****g3a;  // 3-D staging buffers, one group per pass
    complex<float> ***z3b, ***u3b, ***g3b;     // 3-D per-pass intermediate results
    complex<float> ***y3;           // 3-D complex output buffer
    float *r1;                      // real outputs of the inverse transforms
    float **r2;
    float ***r3;
    // Allocates/initializes all tables and buffers for d dimensions.
    void initFFT(unsigned int n1, unsigned int n2, unsigned int n3, unsigned int d);
    // Core butterfly kernel: K stages over staging rows z[0..K], output permuted by D.
    void inline fft(complex<float> *x, complex<float> **z, unsigned int N, unsigned int M, unsigned int K, complex<float> *w, unsigned int *D);
    // Inverse kernel: same structure, using table v and final scaling by F.
    void inline ifft(complex<float> *x, complex<float> **z, unsigned int N, unsigned int M, unsigned int K, complex<float> *v, unsigned int *D, float F);
public:
    FastFourierTransform(unsigned int n1);
    FastFourierTransform(unsigned int n1, unsigned int n2);
    FastFourierTransform(unsigned int n1, unsigned int n2, unsigned int n3);
    ~FastFourierTransform();
    complex<float> inline *fft1(float *x); //complex-conjugate results!
    complex<float> inline **fft2(float **x); //complex-conjugate results!
    complex<float> inline ***fft3(float ***x);
    float inline *ifft1(complex<float> *x);
    float inline **ifft2(complex<float> **x);
    float inline ***ifft3(complex<float> ***x);
};
// Core forward kernel: run K butterfly stages over the staging rows
// z[0]..z[K], then emit the result in D-permuted order.
void inline FastFourierTransform::fft(complex<float> *x, complex<float> **z, unsigned int N, unsigned int M, unsigned int K, complex<float> *w, unsigned int *D)
{
    for(unsigned int stage=0;stage<K;stage++) {
        const unsigned int mask = 0xffffffff<<stage;
        for(unsigned int m=0;m<M;m++) {
            const complex<float> sum  = z[stage][m] + z[stage][m+M];
            const complex<float> diff = z[stage][m] - z[stage][m+M];
            z[stage+1][m<<1]     = sum;
            z[stage+1][(m<<1)+1] = w[m&mask]*diff;
        }
    }
    // Gather the final stage through the permutation table D.
    for(unsigned int n=0;n<N;n++) {
        x[n] = z[K][D[n]];
    }
}
// Core inverse kernel: identical stage structure to fft(), but uses the
// inverse coefficient table v and scales each output sample by F.
void inline FastFourierTransform::ifft(complex<float> *x, complex<float> **z, unsigned int N, unsigned int M, unsigned int K, complex<float> *v, unsigned int *D, float F)
{
    for(unsigned int stage=0;stage<K;stage++) {
        const unsigned int mask = 0xffffffff<<stage;
        for(unsigned int m=0;m<M;m++) {
            const complex<float> sum  = z[stage][m] + z[stage][m+M];
            const complex<float> diff = z[stage][m] - z[stage][m+M];
            z[stage+1][m<<1]     = sum;
            z[stage+1][(m<<1)+1] = v[m&mask]*diff;
        }
    }
    // Gather through the permutation table D, applying the scale factor.
    for(unsigned int n=0;n<N;n++) {
        x[n] = z[K][D[n]]*F;
    }
}
// 1-D forward FFT of a real input vector of length N1.
// Real samples are widened to complex (imaginary part 0) in the stage-0
// scratch buffer, then the generic fft() driver writes the result into y1.
// Note (from the declaration): results are complex-conjugated w.r.t. the
// usual convention.
complex<float> inline *FastFourierTransform::fft1(float *x)
{
    for (unsigned int n = 0; n < N1; ++n)
        z1a[0][n] = complex<float>(x[n], 0);

    fft(y1, z1a, N1, M1, K1, w1, D1);
    return y1;
}
float inline *FastFourierTransform::ifft1(complex<float> *x)
{
for(unsigned int i=0;i<N1;i++) {
z1a[0][i] = x[i];
}
ifft(y1,z1a,N1,M1,K1,v1,D1,F1);
for(unsigned int i=0;i<N1;i++) {
r1[i] = y1[i].real();
}
return(r1);
}
// 2-D forward FFT of a real N1 x N2 array: row transforms, transpose,
// column transforms, transpose back. Each omp parallel-for iterates over
// independent rows/columns and each iteration uses its own scratch buffer
// (z2a[i]/u2a[i]), so iterations do not share state.
complex<float> inline **FastFourierTransform::fft2(float **x)
{
#pragma omp parallel for
for(int i=0;i<(int)N1;i++) {
for(unsigned int j=0;j<N2;j++) {
z2a[i][0][j] = complex<float>(x[i][j],0); // widen real input to complex
}
fft(z2b[i],z2a[i],N2,M2,K2,w2,D2); // FFT along dimension 2
}
#pragma omp parallel for
for(int i=0;i<(int)N2;i++) {
for(unsigned int j=0;j<N1;j++) {
u2a[i][0][j] = z2b[j][i]; // transpose
}
fft(u2b[i],u2a[i],N1,M1,K1,w1,D1); // FFT along dimension 1
}
#pragma omp parallel for
for(int i=0;i<(int)N1;i++) {
for(unsigned int j=0;j<N2;j++) {
y2[i][j] = u2b[j][i]; // transpose back to (N1,N2) layout
}
}
return(y2);
}
// 2-D inverse FFT: mirror of fft2() using the inverse twiddles v2/v1 and
// scale factors F2/F1; the final pass keeps only the real part in r2.
float inline **FastFourierTransform::ifft2(complex<float> **x)
{
#pragma omp parallel for
for(int i=0;i<(int)N1;i++) {
for(unsigned int j=0;j<N2;j++) {
z2a[i][0][j] = x[i][j];
}
ifft(z2b[i],z2a[i],N2,M2,K2,v2,D2,F2); // inverse along dimension 2
}
#pragma omp parallel for
for(int i=0;i<(int)N2;i++) {
for(unsigned int j=0;j<N1;j++) {
u2a[i][0][j] = z2b[j][i]; // transpose
}
ifft(u2b[i],u2a[i],N1,M1,K1,v1,D1,F1); // inverse along dimension 1
}
#pragma omp parallel for
for(int i=0;i<(int)N1;i++) {
for(unsigned int j=0;j<N2;j++) {
r2[i][j] = u2b[j][i].real(); // keep real part only
}
}
return(r2);
}
// 3-D forward FFT of a real N1 x N2 x N3 array: three transform passes,
// one per dimension, each followed by an index rotation
// (i,j,k) -> (j,k,i) realised through the z3/u3/g3 buffer families, plus a
// final gather that restores the (N1,N2,N3) layout in y3.
complex<float> inline ***FastFourierTransform::fft3(float ***x)
{
#pragma omp parallel for
for(int i=0;i<(int)N1;i++) {
for(unsigned int j=0;j<N2;j++) {
for(unsigned int k=0;k<N3;k++) {
z3a[i][j][0][k] = complex<float>(x[i][j][k],0); // widen real input
}
fft(z3b[i][j],z3a[i][j],N3,M3,K3,w3,D3); // FFT along dimension 3
}
}
#pragma omp parallel for
for(int i=0;i<(int)N2;i++) {
for(unsigned int j=0;j<N3;j++) {
for(unsigned int k=0;k<N1;k++) {
u3a[i][j][0][k] = z3b[k][i][j]; // rotate indices
}
fft(u3b[i][j],u3a[i][j],N1,M1,K1,w1,D1); // FFT along dimension 1
}
}
#pragma omp parallel for
for(int i=0;i<(int)N3;i++) {
for(unsigned int j=0;j<N1;j++) {
for(unsigned int k=0;k<N2;k++) {
g3a[i][j][0][k] = u3b[k][i][j]; // rotate indices again
}
fft(g3b[i][j],g3a[i][j],N2,M2,K2,w2,D2); // FFT along dimension 2
}
}
#pragma omp parallel for
for(int i=0;i<(int)N1;i++) {
for(unsigned int j=0;j<N2;j++) {
for(unsigned int k=0;k<N3;k++) {
y3[i][j][k] = g3b[k][i][j]; // gather back to (N1,N2,N3)
}
}
}
return(y3);
}
// 3-D inverse FFT: mirror of fft3() with inverse twiddles v3/v1/v2 and
// scale factors F3/F1/F2; the last pass keeps only the real part in r3.
float inline ***FastFourierTransform::ifft3(complex<float> ***x)
{
#pragma omp parallel for
for(int i=0;i<(int)N1;i++) {
for(unsigned int j=0;j<N2;j++) {
for(unsigned int k=0;k<N3;k++) {
z3a[i][j][0][k] = x[i][j][k];
}
ifft(z3b[i][j],z3a[i][j],N3,M3,K3,v3,D3,F3); // inverse along dimension 3
}
}
#pragma omp parallel for
for(int i=0;i<(int)N2;i++) {
for(unsigned int j=0;j<N3;j++) {
for(unsigned int k=0;k<N1;k++) {
u3a[i][j][0][k] = z3b[k][i][j]; // rotate indices
}
ifft(u3b[i][j],u3a[i][j],N1,M1,K1,v1,D1,F1); // inverse along dimension 1
}
}
#pragma omp parallel for
for(int i=0;i<(int)N3;i++) {
for(unsigned int j=0;j<N1;j++) {
for(unsigned int k=0;k<N2;k++) {
g3a[i][j][0][k] = u3b[k][i][j]; // rotate indices again
}
ifft(g3b[i][j],g3a[i][j],N2,M2,K2,v2,D2,F2); // inverse along dimension 2
}
}
#pragma omp parallel for
for(int i=0;i<(int)N1;i++) {
for(unsigned int j=0;j<N2;j++) {
for(unsigned int k=0;k<N3;k++) {
r3[i][j][k] = g3b[k][i][j].real(); // keep real part only
}
}
}
return(r3);
}
#endif
|
axcrypt_fmt_plug.c | /* AxCrypt 1.x encrypted files cracker patch for JtR
* 2016 by Fist0urs <eddy.maaalou at gmail.com>.
*
* This software is Copyright (c) 2016, Fist0urs <eddy.maaalou at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_axcrypt;
#elif FMT_REGISTERS_H
john_register_one(&fmt_axcrypt);
#else
#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "dyna_salt.h"
#include "sha.h"
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "axcrypt"
#define FORMAT_NAME "AxCrypt"
#define FORMAT_TAG "$axcrypt$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA1 AES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125 /* actual max is 250 */
#define BINARY_SIZE 0
#define SALT_SIZE sizeof(struct custom_salt *)
#define BINARY_ALIGN MEM_ALIGN_NONE
#define SALT_ALIGN sizeof(struct custom_salt *)
/* constant value recommended by FIPS */
#define AES_WRAPPING_IV "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6"
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* XOR a 32-bit little-endian counter into the first 4 bytes of cp.
 * NOTE(review): despite the "64BITS" in the name, only cp[0..3] are
 * touched; the key-wrap step counter used below never exceeds 32 bits. */
#define PUT_64BITS_XOR_MSB(cp, value) ( \
(cp)[0] ^= (unsigned char)((value)), \
(cp)[1] ^= (unsigned char)((value) >> 8), \
(cp)[2] ^= (unsigned char)((value) >> 16), \
(cp)[3] ^= (unsigned char)((value) >> 24 ) )
static struct fmt_tests axcrypt_tests[] = {
/*
formats can be:
$axcrypt$*version*iterations*salt*wrappedkey
$axcrypt$*version*iterations*salt*wrappedkey*key-file
*/
{"$axcrypt$*1*1337*0fd9e7e2f907f480f8af162564f8f94b*af10c88878ba4e2c89b12586f93b7802453121ee702bc362", "Bab00nmoNCo|\\|2$inge"},
{"$axcrypt$*1*60000*7522aa07694d441e47f8faad8a8cb984*95e02b7ccbdc27c227a80d1307505d8b769e87b32f312aa1", "nuNuche<3rewshauv"},
{"$axcrypt$*1*31014*3408ae91dddc0b1750ed4223fd843364*1cc0f8fa8d89f44d284d0562ac7e93848c86ce9605907129", "tr0pO$phere5apointzero"},
/* axcrypt created key-file */
{"$axcrypt$*1*38574*ce4f58c1e85df1ea921df6d6c05439b4*3278c3c730f7887b1008e852e59997e2196710a5c6bc1813*66664a6b2074434a4520374d73592055626979204a6b755520736d6b4b20394e694a205548444320524578562065674b33202f42593d", "0v3rgo2|<fc!"},
/* custom key-file */
{"$axcrypt$*1*130885*8eb4d745f7ac3f7505bcf14e8ce7e3b4*5221a6e8277e90b0b4f16f7871fca02986fca55c0dec5e59*22486520646f65736e2774206c696b652047656f726765204d69636861656c3a20426f6f6f6f6f6f220d0a0d0a49206665656c20736f20756e737572650d0a417320492074616b6520796f75722068616e6420616e64206c65616420796f7520746f207468652062616e6365666c6f6f720d0a417320746865206d75736963207374617274732c20736f6d657468696e6720696e20796f757220657965730d0a43616c6c7320746f206d696e642074686520676f6c64656e2073637265656e0d0a416e6420616c6c206974277320736169642069732068690d0a0d0a49276d206e6576657220676f6e6e612064616e636520616761696e0d0a4775696c74792066656574206861766520676f74206e6f2072687974686d0d0a54686f7567682069742773206561737920746f2070726574656e640d0a49206b6e6f7720796f277265206e6f74206120666f6f6c0d0a0d0a53686f756c64277665206b6e6f776e20626574746572207468616e20746f206368656174206120667269656e640d0a416e6420776173746520746865206368616e636520746861742049277665206265656e20676976656e0d0a536f2049276d206e6576657220676f6e6e612064616e636520616761696e0d0a5468652077617920492064616e636564207769746820796f750d0a0d0a54696d652063616e206e65766572206d656e640d0a54686520636172656c657373207768697370657273206f66206120676f6f6420667269656e640d0a546f2074686520686561727420616e64206d696e640d0a49676e6f72616e6365206973206b696e640d0a54686572652773206e6f20636f6d666f727420696e207468652074727574680d0a5061696e20697320616c6c20796f75276c6c2066696e640d0a0d0a49276d206e6576657220676f6e6e612064616e636520616761696e0d0a4775696c74792066656574206861766520676f74206e6f2072687974686d0d0a54686f7567682069742773206561737920746f2070726574656e640d0a49206b6e6f7720796f75277265206e6f74206120666f6f6c0d0a0d0a492073686f756c64277665206b6e6f776e20626574746572207468616e20746f206368656174206120667269656e640d0a416e6420776173746520746865206368616e636520746861742049277665206265656e20676976656e0d0a536f2049276d206e6576657220676f6e6e612064616e636520616761696e0d0a5468652077617920492064616e636564207769746820796f750d0a0d0a4e6576657220776974686f757420796f7572206c6f76650d0a0d0
a546f6e6967687420746865206d75736963207365656d7320736f206c6f75640d0a492077697368207468617420776520636f756c64206c6f736520746869732063726f77640d0a4d617962652069742773206265747465722074686973207761790d0a5765276420687572742065616368206f74686572207769746820746865207468696e677320776527642077616e7420746f207361790d0a0d0a576520636f756c642068617665206265656e20736f20676f6f6420746f6765746865720d0a576520636f756c642068617665206c6976656420746869732064616e636520666f72657665720d0a427574206e6f772077686f277320676f6e6e612064616e63652077697468206d650d0a506c6561736520737461790d0a0d0a416e642049276d206e6576657220676f6e6e612064616e636520616761696e0d0a4775696c74792066656574206861766520676f74206e6f2072687974686d0d0a54686f7567682069742773206561737920746f2070726574656e640d0a49206b6e6f7720796f75277265206e6f74206120666f6f6c0d0a0d0a53686f756c64277665206b6e6f776e20626574746572207468616e20746f206368656174206120667269656e640d0a416e6420776173746520746865206368616e636520746861742049277665206265656e20676976656e0d0a536f2049276d206e6576657220676f6e6e612064616e636520616761696e0d0a5468652077617920492064616e636564207769746820796f750d0a0d0a284e6f77207468617420796f7527726520676f6e6529204e6f77207468617420796f7527726520676f6e650d0a284e6f77207468617420796f7527726520676f6e65292057686174204920646964277320736f2077726f6e672c20736f2077726f6e670d0a5468617420796f752068616420746f206c65617665206d6520616c6f6e65", "careless whisper"},
{NULL}
};
/* per-candidate NUL-terminated plaintext buffers, sized in init() */
static char (*saved_key) [PLAINTEXT_LENGTH + 1];
/* any_cracked: global "something matched" flag; cracked[i]: per-index hit */
static int any_cracked, *cracked;
static size_t cracked_size;
/* parsed $axcrypt$ hash; dsalt must stay first (dyna-salt comparison header) */
static struct custom_salt {
dyna_salt dsalt;
int version;
uint32_t key_wrapping_rounds; /* AES key-wrap iteration count */
unsigned char salt[16];
unsigned char wrappedkey[24];
char* keyfile; /* decoded optional key-file contents, or NULL */
} *cur_salt;
/* One-time format setup: scale keys-per-crypt by the OpenMP thread count
 * and allocate the per-candidate key and result arrays (freed in done()). */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = 1;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE; /* extra batching factor to amortise fork/join */
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
any_cracked = 0;
cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
cracked = mem_calloc(cracked_size, 1);
}
/* Release the buffers allocated in init(). */
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
/* Syntactic validation of a candidate hash line:
 *   $axcrypt$*version*iterations*salt(32 hex)*wrappedkey(48 hex)[*keyfile]
 * Returns 1 when the line is well formed, 0 otherwise.
 * NOTE(review): the optional trailing key-file field is accepted without
 * any hex/length validation here; get_salt() assumes it is valid hex. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p;
char *ctcopy;
char *keeptr;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy; /* keep the strdup pointer so it can be freed after strtokm */
ctcopy += FORMAT_TAG_LEN; /* skip over "$axcrypt$*" */
if ((p = strtokm(ctcopy, "*")) == NULL) /* version */
goto err;
if (!isdec(p))
goto err;
if (!atoi(p)) /* must be a non-zero decimal */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
goto err;
if (!isdec(p))
goto err;
if (!atoi(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt */
goto err;
if (strlen(p) != 32 || !ishexlc(p)) /* 16 bytes, lowercase hex */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* wrappedkey */
goto err;
if (strlen(p) != 48 || !ishexlc(p)) /* 24 bytes, lowercase hex */
goto err;
/* optional key-file following */
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
/* Parse a validated ciphertext into a struct custom_salt and return a
 * pointer to a pointer to a tiny-allocated copy (dyna-salt convention:
 * SALT_SIZE is sizeof(struct custom_salt *)).  The dyna-salt compare
 * window covers salt..wrappedkey, so two hashes differing only in the
 * key-file would compare equal — matching upstream behaviour. */
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
int i;
static struct custom_salt cs; /* static scratch; the result is copied below */
static void *ptr;
cs.keyfile = NULL;
ctcopy += FORMAT_TAG_LEN; /* skip over "$axcrypt$*" */
p = strtokm(ctcopy, "*");
cs.version = atoi(p);
p = strtokm(NULL, "*");
cs.key_wrapping_rounds = (uint32_t) atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < 16; i++) /* decode 32 hex chars into the 16-byte salt */
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
for (i = 0; i < 24; i++) /* decode 48 hex chars into the wrapped key */
cs.wrappedkey[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
/* if key-file present */
if ((p = strtokm(NULL, "*")) != NULL){
/* hex-decoded key-file bytes; tiny allocation is never freed (by design) */
cs.keyfile = (char*) mem_calloc_tiny(strlen(p)/2 + 1, sizeof(char));
for (i = 0; i < strlen(p)/2; i++) /* note: int i vs size_t strlen — fine for sane lengths */
cs.keyfile[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
}
MEM_FREE(keeptr);
cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt, salt);
cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt, salt, wrappedkey, 0);
cs.dsalt.salt_alloc_needs_free = 0;
ptr = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
memcpy(ptr, &cs, sizeof(struct custom_salt));
return (void *) &ptr;
}
/* Install the current salt; `salt` is a struct custom_salt ** (dyna-salt). */
static void set_salt(void *salt)
{
cur_salt = *(struct custom_salt **) salt;
}
/* Try all queued candidates against the current salt.
 * KEK = SHA1(password [|| keyfile]) XOR salt (first 16 bytes used as the
 * AES-128 unwrap key); then the custom iterated AES key-unwrap loop is run
 * backwards and a candidate matches when the recovered MSB equals the FIPS
 * wrapping IV.  Sets cracked[index]/any_cracked for the cmp_* callbacks.
 *
 * Bug fix: the for-loop header was inside #ifdef _OPENMP, so a build
 * without OpenMP executed the body exactly once (index 0 only) instead of
 * iterating over all candidates.  Only the pragma is conditional now. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		/*
			NUMBER_AES_BLOCKS = 2
			AES_BLOCK_SIZE = 16
		*/
		unsigned char KEK[20], lsb[24], cipher[16];
		AES_KEY akey;
		SHA_CTX ctx;
		int i, j, nb_iterations = cur_salt->key_wrapping_rounds;

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, (unsigned char *) saved_key[index],
			strlen(saved_key[index]));
		/* if key-file provided */
		if (cur_salt->keyfile != NULL)
			SHA1_Update(&ctx, (unsigned char *) cur_salt->keyfile,
				strlen(cur_salt->keyfile));
		SHA1_Final( KEK, &ctx );

		/* hash XOR salt => KEK */
		for (i = 0; i < sizeof(cur_salt->salt); i++)
			KEK[i] ^= cur_salt->salt[i];

		memcpy(lsb, cur_salt->wrappedkey + 8, 16);
		memset(&akey, 0, sizeof(AES_KEY));
		AES_set_decrypt_key(KEK, 128, &akey);
		/* set msb */
		memcpy(cipher, cur_salt->wrappedkey, 8);

		/* custom AES un-wrapping loop */
		for (j = nb_iterations - 1; j >= 0; j--) {
			/* 1st block treatment */
			/* MSB XOR (NUMBER_AES_BLOCKS * j + i) */
			PUT_64BITS_XOR_MSB(cipher, 2 * j + 2);
			/* R[i] */
			memcpy(cipher + 8, lsb + 8, 8);
			/* AES_ECB(KEK, (MSB XOR (NUMBER_AES_BLOCKS * j + i)) | R[i]) */
			AES_decrypt(cipher, cipher, &akey);
			memcpy(lsb + 8, cipher + 8, 8);
			/* 2nd block treatment */
			PUT_64BITS_XOR_MSB(cipher, 2 * j + 1);
			memcpy(cipher + 8, lsb, 8);
			AES_decrypt(cipher, cipher, &akey);
			memcpy(lsb, cipher + 8, 8);
		}

		/* correct unwrap recovers the constant wrapping IV in the MSB */
		if (!memcmp(cipher, AES_WRAPPING_IV, 8)) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	}
	return count;
}
/* BINARY_SIZE is 0, so all comparisons reduce to the cracked[] flags
 * filled in by crypt_all(). */
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return cracked[index];
}
/* Store candidate `key` for slot `index`, NUL-terminated.
 * Robustness fix: clamp the copy to the buffer capacity so an overlong
 * candidate cannot overflow saved_key[index] (126 bytes incl. NUL). */
static void axcrypt_set_key(char *key, int index)
{
	int saved_len = strlen(key);

	if (saved_len > (int)sizeof(saved_key[index]) - 1)
		saved_len = (int)sizeof(saved_key[index]) - 1;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}
/* Return the stored candidate for slot `index` (inverse of set_key). */
static char *get_key(int index)
{
return saved_key[index];
}
/* Format descriptor wiring the AxCrypt methods above into the John the
 * Ripper formats framework (see formats.h for the interface contract). */
struct fmt_main fmt_axcrypt =
{
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
/* dyna-salt format; matches are reported via cracked[] (BINARY_SIZE==0) */
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT,
{ NULL },
{ FORMAT_TAG },
axcrypt_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_dyna_salt_hash,
NULL,
set_salt,
axcrypt_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
pf_fold.c | /*
* partiton function for single RNA secondary structures
*
* Simplified interfaces and backward compatibility
* wrappers
*
* Ivo L Hofacker + Ronny Lorenz
* Vienna RNA package
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
/*
* ###########################################
* # deprecated functions below #
*###########################################
*/
#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h> /* #defines FLT_MAX ... */
#include <limits.h>
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/mfe.h"
#include "ViennaRNA/part_func.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
PUBLIC int st_back = 0;
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
/* some backward compatibility stuff */
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
PRIVATE int backward_compat = 0;
#ifdef _OPENMP
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE float
wrap_pf_fold(const char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained,
int is_circular);
PRIVATE double
wrap_mean_bp_distance(FLT_OR_DBL *p,
int length,
int *index,
int turn);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/* Mean base pair distance of the thermodynamic ensemble:
 * <d> = \sum_{a,b} p_a p_b d(S_a,S_b) = 2 * \sum_{i<j} p_ij (1 - p_ij),
 * computed from the pair probability array p addressed through the
 * row-wise index table. */
PRIVATE double
wrap_mean_bp_distance(FLT_OR_DBL *p,
int length,
int *index,
int turn)
{
  int i, j;
  double sum;

  sum = 0.;
  for (i = 1; i <= length; i++) {
    int row = index[i]; /* hoisted row offset for position i */
    for (j = i + turn + 1; j <= length; j++)
      sum += p[row - j] * (1 - p[row - j]);
  }

  return 2 * sum;
}
/* Common backend for the deprecated pf_fold()/pf_circ_fold()/pf_fold_par()
 * wrappers: build a fold compound from the sequence (using either the
 * caller-supplied parameters' model details or the global defaults),
 * optionally apply `structure` as a dot-bracket constraint, cache the
 * compound in the global backward-compat state, and run vrna_pf().
 * Returns the ensemble free energy. */
PRIVATE float
wrap_pf_fold(const char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained,
int is_circular)
{
vrna_fold_compound_t *vc;
vrna_md_t md;
vc = NULL;
/* we need vrna_exp_param_t datastructure to correctly init default hard constraints */
if (parameters)
md = parameters->model_details;
else
set_model_details(&md); /* get global default parameters */
md.circ = is_circular;
md.compute_bpp = calculate_bppm;
vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_DEFAULT);
/* prepare exp_params and set global pf_scale */
vc->exp_params = vrna_exp_params(&(vc->params->model_details));
vc->exp_params->pf_scale = pf_scale;
if (is_constrained && structure) {
unsigned int constraint_options = 0;
constraint_options |= VRNA_CONSTRAINT_DB
| VRNA_CONSTRAINT_DB_PIPE
| VRNA_CONSTRAINT_DB_DOT
| VRNA_CONSTRAINT_DB_X
| VRNA_CONSTRAINT_DB_ANG_BRACK
| VRNA_CONSTRAINT_DB_RND_BRACK;
vrna_constraints_add(vc, (const char *)structure, constraint_options);
}
/* replace the previously cached compound with the fresh one */
if (backward_compat_compound && backward_compat)
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = vc;
backward_compat = 1;
iindx = backward_compat_compound->iindx; /* legacy global index table */
return vrna_pf(vc, structure);
}
/* Deprecated wrapper: stacked-pair probabilities above `cutoff` from the
 * cached compound; warns and returns NULL if pf_fold() was not run first. */
PUBLIC vrna_ep_t *
stackProb(double cutoff)
{
if (!(backward_compat_compound && backward_compat)) {
vrna_message_warning("stackProb: "
"run pf_fold() first!");
return NULL;
} else if (!backward_compat_compound->exp_matrices->probs) {
vrna_message_warning("stackProb: "
"probs == NULL!");
return NULL;
}
return vrna_stack_prob(backward_compat_compound, cutoff);
}
/* Deprecated wrapper: centroid structure from the global pair probability
 * array pr; `dist` receives the centroid distance. */
PUBLIC char *
centroid(int length,
double *dist)
{
if (pr == NULL) {
vrna_message_warning("centroid: "
"pr == NULL. You need to call pf_fold() before centroid()");
return NULL;
}
return vrna_centroid_from_probs(length, dist, pr);
}
/* Deprecated: ensemble mean base pair distance from the global pair
 * probability array pr:
 * <d> = \sum_{a,b} p_a p_b d(S_a,S_b) = 2 * \sum_{i<j} p_ij (1 - p_ij).
 * Returns 0 (with a warning) when pf_fold() has not been run. */
PUBLIC double
mean_bp_dist(int length)
{
  int i, j, *my_iindx;
  double sum = 0;

  if (pr == NULL) {
    vrna_message_warning("mean_bp_dist: "
"pr == NULL. You need to call pf_fold() before mean_bp_dist()");
    return sum;
  }

  my_iindx = vrna_idx_row_wise(length);

  for (i = 1; i <= length; i++)
    for (j = i + TURN + 1; j <= length; j++)
      sum += pr[my_iindx[i] - j] * (1 - pr[my_iindx[i] - j]);

  free(my_iindx);
  return 2 * sum;
}
/* get the free energy of a subsequence from the q[] array */
/* Deprecated: free energy of subsequence [i,j] in kcal/mol, derived from
 * the partition function matrix q of the cached compound, undoing the
 * per-nucleotide pf_scale factor. */
PUBLIC double
get_subseq_F(int i,
int j)
{
if (backward_compat_compound) {
if (backward_compat_compound->exp_matrices) {
if (backward_compat_compound->exp_matrices->q) {
int *my_iindx = backward_compat_compound->iindx;
vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
FLT_OR_DBL *q = backward_compat_compound->exp_matrices->q;
return (-log(q[my_iindx[i] - j]) - (j - i + 1) * log(pf_params->pf_scale)) * pf_params->kT /
1000.0;
}
}
}
vrna_message_warning("get_subseq_F: "
"call pf_fold() to fill q[] array before calling get_subseq_F()");
return 0.; /* reached only when no partition function data is cached */
}
/*----------------------------------------------------------------------*/
/* Deprecated: Boltzmann weight of a hairpin loop of size u closed by a pair
 * of the given type with mismatching bases si1/sj1; caller multiplies by
 * scale[u+2].  `string` points at the loop sequence so that tabulated
 * special loops (tetra-/hexa-/tri-loops) can be looked up.
 *
 * Bug fix: the hexaloop lookup copied only 6 characters of the loop
 * sequence, but entries in pf_params->Hexaloops are 8 characters long
 * (closing pair + 6 loop bases), so the strstr() match used a truncated
 * prefix.  Copy 8 characters, matching upstream ViennaRNA. */
PUBLIC double
expHairpinEnergy(int u,
int type,
short si1,
short sj1,
const char *string)
{
  /* compute Boltzmann weight of a hairpin loop, multiply by scale[u+2] */
  vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
  double q, kT;

  kT = pf_params->kT; /* kT in cal/mol */

  /* size contribution; sizes > 30 extrapolated logarithmically */
  if (u <= 30)
    q = pf_params->exphairpin[u];
  else
    q = pf_params->exphairpin[30] * exp(-(pf_params->lxc * log(u / 30.)) * 10. / kT);

  /* tabulated tetraloops: closing pair + 4 loop bases = 6 characters */
  if ((tetra_loop) && (u == 4)) {
    char tl[7] = {
      0
    }, *ts;
    strncpy(tl, string, 6);
    if ((ts = strstr(pf_params->Tetraloops, tl)))
      return pf_params->exptetra[(ts - pf_params->Tetraloops) / 7];
  }

  /* tabulated hexaloops: closing pair + 6 loop bases = 8 characters */
  if ((tetra_loop) && (u == 6)) {
    char tl[9] = {
      0
    }, *ts;
    strncpy(tl, string, 8);
    if ((ts = strstr(pf_params->Hexaloops, tl)))
      return pf_params->exphex[(ts - pf_params->Hexaloops) / 9];
  }

  if (u == 3) {
    /* tabulated triloops: closing pair + 3 loop bases = 5 characters */
    char tl[6] = {
      0
    }, *ts;
    strncpy(tl, string, 5);
    if ((ts = strstr(pf_params->Triloops, tl)))
      return pf_params->exptri[(ts - pf_params->Triloops) / 6];

    if (type > 2) /* AU/GU closing pair penalty */
      q *= pf_params->expTermAU;
  } else {
    /* no mismatches for tri-loops */
    q *= pf_params->expmismatchH[type][si1][sj1];
  }

  return q;
}
/* Deprecated: Boltzmann weight of an interior loop with unpaired stretches
 * u1/u2, closing pair types type/type2 and the four mismatch bases; caller
 * multiplies by scale[u1+u2+2].  Dispatches to the tabulated stack, bulge,
 * 1x1/2x1/2x2/2x3/1xn and generic interior-loop energy tables. */
PUBLIC double
expLoopEnergy(int u1,
int u2,
int type,
int type2,
short si1,
short sj1,
short sp1,
short sq1)
{
/* compute Boltzmann weight of interior loop,
* multiply by scale[u1+u2+2] for scaling */
double z = 0;
int no_close = 0;
vrna_exp_param_t *pf_params = backward_compat_compound->exp_params;
/* certain GU closings can be globally forbidden (no_closingGU) */
if ((no_closingGU) && ((type2 == 3) || (type2 == 4) || (type == 2) || (type == 4)))
no_close = 1;
if ((u1 == 0) && (u2 == 0)) {
/* stack */
z = pf_params->expstack[type][type2];
} else if (no_close == 0) {
if ((u1 == 0) || (u2 == 0)) {
/* bulge */
int u;
u = (u1 == 0) ? u2 : u1;
z = pf_params->expbulge[u];
if (u2 + u1 == 1) {
/* single-nucleotide bulge keeps the stacking term */
z *= pf_params->expstack[type][type2];
} else {
if (type > 2)
z *= pf_params->expTermAU;
if (type2 > 2)
z *= pf_params->expTermAU;
}
} else {
/* interior loop */
if (u1 + u2 == 2) {
/* size 2 is special */
z = pf_params->expint11[type][type2][si1][sj1];
} else if ((u1 == 1) && (u2 == 2)) {
z = pf_params->expint21[type][type2][si1][sq1][sj1];
} else if ((u1 == 2) && (u2 == 1)) {
/* mirrored 2x1 case: swap pair types and mismatches */
z = pf_params->expint21[type2][type][sq1][si1][sp1];
} else if ((u1 == 2) && (u2 == 2)) {
z = pf_params->expint22[type][type2][si1][sp1][sq1][sj1];
} else if (((u1 == 2) && (u2 == 3)) || ((u1 == 3) && (u2 == 2))) {
/*2-3 is special*/
z = pf_params->expinternal[5] *
pf_params->expmismatch23I[type][si1][sj1] *
pf_params->expmismatch23I[type2][sq1][sp1];
z *= pf_params->expninio[2][1];
} else if ((u1 == 1) || (u2 == 1)) {
/*1-n is special*/
z = pf_params->expinternal[u1 + u2] *
pf_params->expmismatch1nI[type][si1][sj1] *
pf_params->expmismatch1nI[type2][sq1][sp1];
z *= pf_params->expninio[2][abs(u1 - u2)];
} else {
/* generic interior loop: size + mismatches + asymmetry (ninio) */
z = pf_params->expinternal[u1 + u2] *
pf_params->expmismatchI[type][si1][sj1] *
pf_params->expmismatchI[type2][sq1][sp1];
z *= pf_params->expninio[2][abs(u1 - u2)];
}
}
}
return z;
}
/* Deprecated no-op stubs kept for API compatibility: matrix allocation is
 * handled by vrna_fold_compound() nowadays. */
PUBLIC void
init_pf_circ_fold(int length)
{
/* DO NOTHING */
}
PUBLIC void
init_pf_fold(int length)
{
/* DO NOTHING */
}
/**
*** Free the backward compatibility fold compound and reset the legacy globals
**/
/* Deprecated: drop the cached fold compound and invalidate the legacy
 * iindx global. */
PUBLIC void
free_pf_arrays(void)
{
if (backward_compat_compound && backward_compat) {
vrna_fold_compound_free(backward_compat_compound);
backward_compat_compound = NULL;
backward_compat = 0;
iindx = NULL;
}
}
/* Deprecated: hand out the base pair probability matrix of the last
 * pf_fold() run, or NULL when no probabilities have been computed. */
PUBLIC FLT_OR_DBL *
export_bppm(void)
{
  if (backward_compat_compound &&
      backward_compat_compound->exp_matrices &&
      backward_compat_compound->exp_matrices->probs)
    return backward_compat_compound->exp_matrices->probs;

  return NULL;
}
/*
* -------------------------------------------------------------------------
* make arrays used for pf_fold available to other routines
*/
/* Deprecated: expose internal arrays of the cached compound through the
 * supplied out-pointers.  Returns 1 on success, 0 (out-pointers untouched)
 * when no partition function data is available. */
PUBLIC int
get_pf_arrays(short **S_p,
short **S1_p,
char **ptype_p,
FLT_OR_DBL **qb_p,
FLT_OR_DBL **qm_p,
FLT_OR_DBL **q1k_p,
FLT_OR_DBL **qln_p)
{
if (backward_compat_compound) {
if (backward_compat_compound->exp_matrices) {
if (backward_compat_compound->exp_matrices->qb) {
*S_p = backward_compat_compound->sequence_encoding2;
*S1_p = backward_compat_compound->sequence_encoding;
*ptype_p = backward_compat_compound->ptype_pf_compat;
*qb_p = backward_compat_compound->exp_matrices->qb;
*qm_p = backward_compat_compound->exp_matrices->qm;
*q1k_p = backward_compat_compound->exp_matrices->q1k;
*qln_p = backward_compat_compound->exp_matrices->qln;
return 1;
}
}
}
return 0;
}
/*-----------------------------------------------------------------*/
/* Deprecated thin wrappers around wrap_pf_fold(), differing only in
 * circularity flag and parameter source; all honour the do_backtrack /
 * fold_constrained globals unless explicit arguments are given. */
PUBLIC float
pf_fold(const char *sequence,
char *structure)
{
return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 0);
}
PUBLIC float
pf_circ_fold(const char *sequence,
char *structure)
{
return wrap_pf_fold(sequence, structure, NULL, do_backtrack, fold_constrained, 1);
}
PUBLIC float
pf_fold_par(const char *sequence,
char *structure,
vrna_exp_param_t *parameters,
int calculate_bppm,
int is_constrained,
int is_circular)
{
return wrap_pf_fold(sequence, structure, parameters, calculate_bppm, is_constrained, is_circular);
}
/* Deprecated stochastic backtracking wrappers over the cached compound. */
PUBLIC char *
pbacktrack(char *seq)
{
int n = (int)strlen(seq);
return vrna_pbacktrack5(backward_compat_compound, n);
}
PUBLIC char *
pbacktrack5(char *seq,
int length)
{
/* the seq parameter must not differ from the one stored globally anyway, so we just ignore it */
return vrna_pbacktrack5(backward_compat_compound, length);
}
/* Circular variant: only valid when the cached compound was folded with
 * md.circ set and the qm2 matrices exist; returns NULL otherwise. */
PUBLIC char *
pbacktrack_circ(char *seq)
{
char *structure;
vrna_md_t *md;
structure = NULL;
if (backward_compat_compound) {
md = &(backward_compat_compound->exp_params->model_details);
if (md->circ && backward_compat_compound->exp_matrices->qm2)
structure = vrna_pbacktrack(backward_compat_compound);
}
return structure;
}
/* Deprecated: refresh the cached compound's exp-parameters from the global
 * model details (or from `parameters` in the _par variant) and propagate
 * the resulting pf_scale back into the legacy global. */
PUBLIC void
update_pf_params(int length)
{
if (backward_compat_compound && backward_compat) {
vrna_md_t md;
set_model_details(&md);
vrna_exp_params_reset(backward_compat_compound, &md);
/* compatibility with RNAup, may be removed sometime */
pf_scale = backward_compat_compound->exp_params->pf_scale;
}
}
PUBLIC void
update_pf_params_par(int length,
vrna_exp_param_t *parameters)
{
if (backward_compat_compound && backward_compat) {
vrna_md_t md;
if (parameters) {
vrna_exp_params_subst(backward_compat_compound, parameters);
} else {
set_model_details(&md);
vrna_exp_params_reset(backward_compat_compound, &md);
}
/* compatibility with RNAup, may be removed sometime */
pf_scale = backward_compat_compound->exp_params->pf_scale;
}
}
/* Deprecated: centroid structure of the cached compound; `dist` receives
 * the centroid distance.  `length` is ignored. */
PUBLIC char *
get_centroid_struct_gquad_pr(int length,
double *dist)
{
return vrna_centroid(backward_compat_compound, dist);
}
/* Deprecated: pair list above `cut_off` from the cached probabilities;
 * *pl is set to NULL when no probabilities are available. */
PUBLIC void
assign_plist_gquad_from_pr(vrna_ep_t **pl,
int length, /* ignored */
double cut_off)
{
if (!backward_compat_compound)
*pl = NULL;
else if (!backward_compat_compound->exp_matrices->probs)
*pl = NULL;
else
*pl = vrna_plist_from_probs(backward_compat_compound, cut_off);
}
/* Deprecated: ensemble mean base pair distance of the cached compound;
 * warns and returns 0 when no probabilities have been computed. */
PUBLIC double
mean_bp_distance(int length)
{
if (backward_compat_compound)
if (backward_compat_compound->exp_matrices)
if (backward_compat_compound->exp_matrices->probs)
return vrna_mean_bp_distance(backward_compat_compound);
vrna_message_warning("mean_bp_distance: "
"you need to call vrna_pf_fold first");
return 0.; /* reached only when no probability data is cached */
}
/* Deprecated: ensemble mean base pair distance computed from an explicit
 * probability array `p` of a sequence of the given length.
 * Bug fix: the row-wise index table was allocated *before* the p == NULL
 * check and leaked on the early-return path; allocate it only after the
 * input has been validated. */
PUBLIC double
mean_bp_distance_pr(int length,
FLT_OR_DBL *p)
{
  double d = 0;
  int *index;

  if (p == NULL) {
    vrna_message_warning("mean_bp_distance_pr: "
"p == NULL. You need to supply a valid probability matrix for mean_bp_distance_pr()");
    return d;
  }

  index = vrna_idx_row_wise((unsigned int)length);
  d = wrap_mean_bp_distance(p, length, index, TURN);
  free(index);
  return d;
}
#endif
|
lab-sheet-8-5.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
/* Shared synchronisation state for the two OpenMP threads in main().
 * Fix: declared volatile so the compiler cannot cache the flags in
 * registers inside the busy-wait loops (strictly, C11 atomics would be
 * required to remove the data race, but volatile matches the teaching
 * context of this lab sheet). File scope guarantees zero-initialisation. */
volatile int turn=0;          /* whose turn it is to enter the critical region */
volatile int s1=0, s2=0;      /* "wants to enter" flags for P0 and P1 */
/* Two-thread mutual-exclusion demo (Dekker-style algorithm) on OpenMP:
 * thread 0 and thread 1 each enter a critical region n=5 times, handing
 * over via the shared flags s1/s2 and the turn variable.
 * NOTE(review): "#pragma parallel section" is not a valid OpenMP directive
 * ("#pragma omp sections"/"section" would be); compilers ignore it, and the
 * code still works because each branch is guarded by omp_get_thread_num().
 * NOTE(review): s1/s2/turn are accessed by both threads without atomics or
 * flush — formally a data race; the busy-wait loops rely on the flags not
 * being cached in registers (see the volatile suggestion at their
 * declaration).
 * NOTE(review): the first printf prints the *values* of the flags s1/s2,
 * not process ids; the second printf prints the actual thread id. */
int main(int argc, char *argv[])
{
srand ( time(NULL) );//randomize random function generator
/* sequential code create two threads*/
#pragma omp parallel num_threads(2)
{
//first process P0
#pragma parallel section
{int i,n=5;
if(omp_get_thread_num()==0){ //run only for P0
for(i=0;i<n;i++)
{s1=1;
while(s2==1)
{
if (turn!=0)
{
s1=0;
while(turn!=0); // busy-wait until it is P0's turn
s1=1;
}
}
//CR
printf("\nprocess %d other %d in CR count%d \n",s1,s2,i);
printf("\nprocess %d other %d in CR count%d \n",omp_get_thread_num(),turn,i);
turn=1; // hand the turn to P1 on exit
s1=0;
}}
}
//second process P1
#pragma parallel section
{int i,n=5;
if(omp_get_thread_num()==1){ //run only for P1
for(i=0;i<n;i++){
s2=1;
while(s1==1)
{
if (turn!=1)
{
s2=0;
while(turn!=1); // busy-wait until it is P1's turn
s2=1;
}
}
//CR
printf("\nprocess %d other %d in CR count%d \n",s1,s2,i);
printf("\nprocess %d other %d in CR count%d \n",omp_get_thread_num(),turn,i);
turn=0; // hand the turn back to P0 on exit
s2=0; }}
}
}//end pragma
/* sequential code */
return 0;
}
jacobi.c | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <omp.h>
static int N;
static int IT_MAXIMAS;
static int SEED;
static int verbose, threads;
static double CONVERGENCIA;
#define SEPARADOR "------------------------------------\n"
// Devuelve el tiempo en segundos
double get_timestamp();
// parsear los argumentos
void parse_arguments(int argc, char *argv[]);
// Sequential Jacobi solve of A x = b (A is N x N, column-major: A[row+col*N]).
// Iterates until the update norm drops below CONVERGENCIA or IT_MAXIMAS
// sweeps have been done; returns the number of iterations performed.
// NOTE(review): the x_old/x_new pointer swap is local (pointers passed by
// value), so after an odd number of sweeps the newest iterate lives in the
// caller's x_new buffer while the caller reads x_old — confirm callers only
// compare relative behaviour of the two solvers.
int run(double *A, double *b, double *x_old, double *x_new)
{
int itr;
int row, col;
double dot;
double dif;
double sqdif;
double *xtemp;
// loop until convergence or the iteration limit
itr = 0;
do
{
// one Jacobi sweep: x_new[row] = (b[row] - sum_{col!=row} A*x_old) / A[row][row]
for (row = 0; row < N; row++)
{
dot = 0.0;
for (col = 0; col < N; col++)
{
if (row != col)
dot += A[row + col*N] * x_old[col];
}
x_new[row] = (b[row] - dot) / A[row + row*N];
}
// swap buffers: x_old now holds the newest iterate
xtemp = x_old;
x_old = x_new;
x_new = xtemp;
// convergence: squared norm of the difference between the last two iterates
sqdif = 0.0;
for (row = 0; row < N; row++)
{
dif = x_new[row] - x_old[row];
sqdif += dif * dif;
}
itr++;
} while ((itr < IT_MAXIMAS) && (sqrt(sqdif) > CONVERGENCIA));
return itr;
}
// Parallel Jacobi solve of A x = b using `threads` OpenMP threads.
// Same algorithm and convergence test as run(); returns the number of
// iterations performed.
// Fix: omp_set_num_threads() was called inside the do-while, re-setting the
// thread count on every sweep; it is loop-invariant, so it is now set once
// before iterating (behaviour is unchanged).
int run_parallel(double *A, double *b, double *x_old, double *x_new, int threads)
{
    int itr;
    int row, col;
    double dot;
    double dif;
    double sqdif;
    double *xtemp;

    // configure the team size once for all sweeps
    omp_set_num_threads(threads);

    // loop until convergence or the iteration limit
    itr = 0;
    do
    {
        // one Jacobi sweep; rows are independent, so split them across threads
        #pragma omp parallel private (row,col,dot)
        {
            #pragma omp for
            for (row = 0; row < N; row++)
            {
                dot = 0.0;
                for (col = 0; col < N; col++)
                {
                    if (row != col)
                        dot += A[row + col*N] * x_old[col];
                }
                x_new[row] = (b[row] - dot) / A[row + row*N];
            }
        }
        // swap buffers: x_old now holds the newest iterate
        xtemp = x_old;
        x_old = x_new;
        x_new = xtemp;
        // convergence: squared norm of the difference of the last two iterates
        sqdif = 0.0;
        for (row = 0; row < N; row++)
        {
            dif = x_new[row] - x_old[row];
            sqdif += dif * dif;
        }
        itr++;
    } while ((itr < IT_MAXIMAS) && (sqrt(sqdif) > CONVERGENCIA));
    return itr;
}
/* Driver: runs the sequential and/or parallel Jacobi solver depending on
 * the verbosity mode and reports residual error, iterations and timings.
 *   verbose = 0 -> run both, no output
 *   verbose = 1 -> run both, full output
 *   verbose = 2 -> parallel only, one-line summary
 *   verbose = 3 -> sequential only, one-line summary
 * NOTE(review): the malloc() results are not checked; a huge N with a
 * failed allocation would crash in the initialisation loops.
 * NOTE(review): err/itr are assigned only inside the branch that runs, but
 * every verbose value that prints them also runs the matching branch, so
 * no uninitialised read occurs for verbose in 0..3. */
int main(int argc, char *argv[])
{
// parse the command-line arguments (sets N, IT_MAXIMAS, SEED, verbose, threads, CONVERGENCIA)
parse_arguments(argc, argv);
double *A = malloc(N*N*sizeof(double));
double *b = malloc(N*sizeof(double));
double *x_old = malloc(N*sizeof(double));
double *x_new = malloc(N*sizeof(double));
if(verbose == 1){
printf(SEPARADOR);
printf("tamaño de matriz: %dx%d\n", N, N);
printf("iteraciones máximas: %d\n", IT_MAXIMAS);
printf("límite de convergencia: %lf\n", CONVERGENCIA);
printf(SEPARADOR);
}
double err;
double inicio_abs;
double final_abs;
double final_sol;
int itr;
double inicio_sol;
//Verbose = 0 -> no output
//Verbose = 1 -> all output
//Verbose = 2 -> parallel output only
//Verbose = 3 -> sequential output only
if(verbose == 1 || verbose == 3 || verbose == 0){
inicio_abs = get_timestamp();
// SEQUENTIAL run
// initialise data with the fixed SEED so both runs solve the same system
srand(SEED);
for (int row = 0; row < N; row++)
{
double rowsum = 0.0;
for (int col = 0; col < N; col++)
{
double value = rand()/(double)RAND_MAX;
A[row + col*N] = value;
rowsum += value;
}
// make the matrix diagonally dominant so Jacobi converges
A[row + row*N] += rowsum;
b[row] = rand()/(double)RAND_MAX;
x_old[row] = 0.0;
}
// run Jacobi
inicio_sol = get_timestamp();
itr = run(A, b, x_old, x_new);
final_sol = get_timestamp();
// compute the residual error ||b - A x||
err = 0.0;
for (int row = 0; row < N; row++)
{
double tmp = 0.0;
for (int col = 0; col < N; col++)
{
tmp += A[row + col*N] * x_old[col];
}
tmp = b[row] - tmp;
err += tmp*tmp;
}
err = sqrt(err);
final_abs = get_timestamp();
}
if(verbose == 1){
printf("SECUENCIAL\n");
printf("Error = %lf\n", err);
printf("Iteraciones = %d\n", itr);
printf("Tiempo total = %lf seconds\n", (final_abs-inicio_abs));
printf("Tiempo solución = %lf seconds\n", (final_sol-inicio_sol));
if (itr == IT_MAXIMAS)
printf("NO CONVERGENCIA\n");
printf(SEPARADOR);
}
if (verbose == 3) {
printf("size: \t %d \t iterations: \t %d \t threads: \t %d \t time: \t %lf \t seconds\n", (N*N), itr, threads, (final_sol-inicio_sol));
}
if(verbose == 1 || verbose == 2 || verbose == 0){
// PARALLEL run
// re-initialise data identically (same SEED) for a fair comparison
srand(SEED);
for (int row = 0; row < N; row++)
{
double rowsum = 0.0;
for (int col = 0; col < N; col++)
{
double value = rand()/(double)RAND_MAX;
A[row + col*N] = value;
rowsum += value;
}
// make the matrix diagonally dominant so Jacobi converges
A[row + row*N] += rowsum;
b[row] = rand()/(double)RAND_MAX;
x_old[row] = 0.0;
}
// run parallel Jacobi
inicio_sol = get_timestamp();
itr = run_parallel(A, b, x_old, x_new, threads);
final_sol = get_timestamp();
// compute the residual error ||b - A x||
err = 0.0;
for (int row = 0; row < N; row++)
{
double tmp = 0.0;
for (int col = 0; col < N; col++)
{
tmp += A[row + col*N] * x_old[col];
}
tmp = b[row] - tmp;
err += tmp*tmp;
}
err = sqrt(err);
final_abs = get_timestamp();
}
if(verbose == 1){
printf("PARALELO\n");
printf("Error = %lf\n", err);
printf("Iteraciones = %d\n", itr);
printf("Tiempo total = %lf seconds\n", (final_abs-inicio_abs));
printf("Tiempo solucion = %lf seconds\n", (final_sol-inicio_sol));
if (itr == IT_MAXIMAS)
printf("NO CONVERGENCIA\n");
printf(SEPARADOR);
}
if (verbose == 2) {
printf("size: \t %d \t iterations: \t %d \t threads: \t %d \t time: \t %lf \t seconds\n", (N*N), itr, threads, (final_sol-inicio_sol));
}
// free memory
free(A);
free(b);
free(x_old);
free(x_new);
return 0;
}
//Métodos entrada
/* Current wall-clock time in seconds, with microsecond resolution. */
double get_timestamp()
{
  struct timeval now;
  gettimeofday(&now, NULL);
  double seconds = (double)now.tv_sec;
  seconds += (double)now.tv_usec * 1e-6;
  return seconds;
}
/*
 * Parse a base-10 integer from str.
 * Returns -1 if str is empty or contains trailing non-digit characters.
 * (The previous version accepted "" as 0 and parsed through strtoul,
 * silently wrapping negative and out-of-range input.)
 */
int parse_int(const char *str)
{
  char *next;
  long value = strtol(str, &next, 10);
  // next == str: nothing was consumed (empty or non-numeric input)
  // *next != '\0': trailing garbage after the number
  if (next == str || *next != '\0')
    return -1;
  return (int)value;
}
/*
 * Parse a floating-point value from str.
 * Returns -1 if str is empty or contains trailing non-numeric characters.
 * (The previous version accepted "" as 0.0 because strlen("") == 0.)
 */
double parse_double(const char *str)
{
  char *next;
  double value = strtod(str, &next);
  // next == str: nothing consumed; *next != '\0': trailing garbage
  if (next == str || *next != '\0')
    return -1;
  return value;
}
/*
 * Parse command-line options into the solver's global configuration
 * (N, IT_MAXIMAS, CONVERGENCIA, SEED, verbose, threads).
 * Each option consumes the following argv entry as its value; a negative
 * result from parse_int/parse_double signals invalid input.
 * Prints a message and exits on any invalid or unknown argument.
 */
void parse_arguments(int argc, char *argv[])
{
  // Default values
  N = 1000;
  IT_MAXIMAS = 20000;
  CONVERGENCIA = 0.0001;
  SEED = 0;
  verbose = 1;
  threads = 4;
  for (int i = 1; i < argc; i++)
  {
    if (!strcmp(argv[i], "--convergencia") || !strcmp(argv[i], "-c"))
    {
      // convergence threshold (double, must be >= 0)
      if (++i >= argc || (CONVERGENCIA = parse_double(argv[i])) < 0)
      {
        printf("Límite de convergencia inválido\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--iteraciones") || !strcmp(argv[i], "-i"))
    {
      // maximum number of Jacobi iterations
      if (++i >= argc || (IT_MAXIMAS = parse_int(argv[i])) < 0)
      {
        printf("Numero de iteraciones inválido\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--norden") || !strcmp(argv[i], "-n"))
    {
      // order (dimension) of the square matrix
      if (++i >= argc || (N = parse_int(argv[i])) < 0)
      {
        printf("orden de la matriz inválido\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--seed") || !strcmp(argv[i], "-s"))
    {
      // RNG seed used to generate the system
      if (++i >= argc || (SEED = parse_int(argv[i])) < 0)
      {
        printf("Seed inválida\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--verbose") || !strcmp(argv[i], "-v"))
    {
      // verbosity level: 0 none, 1 all, 2 parallel only, 3 sequential only
      if (++i >= argc || (verbose = parse_int(argv[i])) < 0)
      {
        printf("Verbose inválido\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--threads") || !strcmp(argv[i], "-t"))
    {
      // number of OpenMP threads for the parallel solver
      if (++i >= argc || (threads = parse_int(argv[i])) < 0)
      {
        printf("threads inválidos\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h"))
    {
      // print usage and exit successfully
      printf("\n");
      printf("Uso: ./jacobi_omp [OPCIONES]\n\n");
      printf("Options:\n");
      printf(" -h --help Imprime este mensaje\n");
      printf(" -c --convergencia C Elige límite de convergencia\n");
      printf(" -i --iteraciones I Elige numero máximo de iteraciones\n");
      printf(" -n --norden N Elige el orden de la matriz (n)\n");
      printf(" -s --seed S Elige el numero del seed\n");
      printf(" -v --verbose V Elige nivel de verbosidad\n");
      printf(" -t --threads T Elige numero de hilos\n");
      printf("\n");
      exit(0);
    }
    else
    {
      // unknown option: report and fail
      printf("Argumento no reconocido '%s' (prueba '--help')\n", argv[i]);
      exit(1);
    }
  }
} |
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
 * Y is normalized in place as a side effect.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that x->tv_usec - y->tv_usec stays within range. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* The microsecond field of the result is now certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative iff x's seconds fall below the normalized y's seconds. */
  return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
/*
 * Driver for the order-2 25-point stencil benchmark: allocates the two
 * time planes of A plus the coefficient grid roc2, runs the stencil
 * TESTS times and reports per-test and minimum runtimes.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  // Default sizes: previously Nx..Nt were read uninitialized (undefined
  // behavior) whenever fewer than 3 (or 4) arguments were supplied.
  int Nx = 40, Ny = 40, Nz = 40, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;   // +8 accounts for the 4-deep halo on each side
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // Allocate the two time planes of A and the coefficient grid.
  // (roc2 was previously malloc'd twice; the first small block leaked.)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 32;
  tile_size[3] = 128;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // Initialize every cell of BOTH time planes, starting at index 0:
  // the stencil reads the halo (indices 0..3) and the update subtracts
  // A[(t+1)%2], so the previous init (from index 1, plane 0 only) left
  // values that were read uninitialized.
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        double v = 1.0 * (rand() % BASE);
        A[0][i][j][k] = v;
        A[1][i][j][k] = v;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays, including the top-level pointer array and the
  // tile-size list (both previously leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  free(roc2);
  free(tile_size);
  return 0;
}
|
main.c | #include <stdio.h>
#include<omp.h>
#include <stdlib.h>
/*
 * 2D linear convection solved with first-order upwind differencing,
 * run once with OpenMP and once serially, reporting wall times and
 * the resulting speedup.
 */
int main()
{
    // Define the domain
    double x_len = 2.0;
    double y_len = 2.0;
    int x_points = 351;
    int y_points = 351;
    double del_x = x_len/(x_points-1);
    double del_y = y_len/(y_points-1);
    double x[x_points], y[y_points];
    // The two grid axes are independent, so nowait between them is safe;
    // the second loop's implicit barrier closes the region.
    #pragma omp parallel
    {
        #pragma omp for nowait
        for(int i = 0; i < x_points; i++){
            x[i] = i * del_x;
        }
        #pragma omp for
        for(int j = 0; j < y_points; j++){
            y[j] = j * del_y;
        }
    }
    // u[i][j] lives at (x[j], y[i]): i indexes rows (y), j columns (x).
    double u[y_points][x_points];
    double u_new[y_points][x_points];
    // BUG FIX: the square initial condition previously tested x[i] and
    // y[i] (both with the row index), producing a stripe instead of the
    // intended 0.5 < x,y < 1.0 square.
    #pragma omp parallel for
    for(int i = 0; i < y_points; i++){
        for(int j = 0; j < x_points; j++){
            u[i][j] = 1.0;
            u_new[i][j] = 1.0;
            if(x[j] > 0.5 && x[j] < 1.0 && y[i] > 0.5 && y[i] < 1.0){
                u[i][j] = 2.0;
                u_new[i][j] = 2.0;
            }
        }
    }
    // Define the parameters
    int num_itrs = 700;
    double c = 1.0;
    double sigma = 0.2;
    double del_t = sigma * del_x; // CFL criteria
    // Iterations
    double par_start_time = omp_get_wtime();
    #pragma omp parallel
    for (int itr = 0; itr < num_itrs; itr ++){
        // BUG FIX: the previous `nowait` clauses let threads copy u_new
        // into u (and reset boundaries) while other threads were still
        // computing u_new — a data race. The implicit barrier after each
        // worksharing loop is required for correctness.
        #pragma omp for
        for (int i = 1; i < y_points; i++){
            for (int j = 1; j < x_points; j++){
                u_new[i][j] = u[i][j] - (c * del_t/del_x * (u[i][j] - u[i][j-1])) - (c * del_t/del_y * (u[i][j] - u[i-1][j]));
            }
        }
        #pragma omp for
        for (int i = 0; i < y_points; i++){
            for (int j = 0; j < x_points; j++){
                u[i][j] = u_new[i][j];
            }
        }
        // Setting the boundary value
        #pragma omp for
        for (int i = 0; i < y_points; i++){
            u[i][0] = 1.0;
            u[i][x_points-1] = 1.0;
        }
        #pragma omp for
        for (int j = 0; j < x_points; j++){
            u[0][j] = 1.0;
            u[y_points-1][j] = 1.0;
        }
    }
    double par_end_time = omp_get_wtime();
    printf("\n Time taken for parallel computing is: %f \t", par_end_time - par_start_time);
    // ------------------------------------------------------------------------- //
    // Serial computing - for time comparison
    // Reinitializing u velocity (same corrected square as above)
    for(int i = 0; i < y_points; i++){
        for(int j = 0; j < x_points; j++){
            u[i][j] = 1.0;
            u_new[i][j] = 1.0;
            if(x[j] > 0.5 && x[j] < 1.0 && y[i] > 0.5 && y[i] < 1.0){
                u[i][j] = 2.0;
                u_new[i][j] = 2.0;
            }
        }
    }
    // Iteration
    double ser_start_time = omp_get_wtime();
    for (int itr = 0; itr < num_itrs; itr ++){
        for (int i = 1; i < y_points; i++){
            for (int j = 1; j < x_points; j++){
                u_new[i][j] = u[i][j] - (c * del_t/del_x * (u[i][j] - u[i][j-1])) - (c * del_t/del_y * (u[i][j] - u[i-1][j]));
            }
        }
        for (int i = 0; i < y_points; i++){
            for (int j = 0; j < x_points; j++){
                u[i][j] = u_new[i][j];
            }
        }
        // Setting the boundary value
        for (int i = 0; i < y_points; i++){
            u[i][0] = 1.0;
            u[i][x_points-1] = 1.0;
        }
        for (int j = 0; j < x_points; j++){
            u[0][j] = 1.0;
            u[y_points-1][j] = 1.0;
        }
    }
    double ser_end_time = omp_get_wtime();
    printf("\n Time taken for serial computing is: %f \t", ser_end_time - ser_start_time);
    printf("\n Speedup is : %f \t", (ser_end_time - ser_start_time)/(par_end_time - par_start_time));
    return 0;
}
|
GB_unop__identity_int64_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_int32)
// op(A') function: GB (_unop_tran__identity_int64_int32)
// C type: int64_t
// A type: int32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int64_int32)
(
    int64_t *Cx,            // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b:
        // only entries present in the bitmap are cast and copied
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                int32_t aij = Ax [p] ;
                Cx [p] = (int64_t) aij ;
            }
        }
    }
    else
    {
        // dense case: cast every entry of Ax into Cx
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;
            Cx [p] = (int64_t) aij ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, casting int32_t entries to int64_t, into C.
// Returns GrB_NO_VALUE when this type pair is compiled out (GB_DISABLE).
GrB_Info GB (_unop_tran__identity_int64_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The shared transpose kernel body; it uses the GB_CAST_OP/GB_GETA
    // macros defined above for this specific type pair.
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_AxB_dot3_template.c | //------------------------------------------------------------------------------
// GB_AxB_dot3_template: C<M>=A'*B via dot products
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#ifndef GB_DOT3
#define GB_DOT3
#endif
// NOTE: this is a template body, #include'd with C, M, A, B, TaskList,
// ntasks, nthreads, and the GB_* type macros already in scope.
{
    //--------------------------------------------------------------------------
    // get M, A, B, and C
    //--------------------------------------------------------------------------
    const int64_t *GB_RESTRICT Cp = C->p ;
    const int64_t *GB_RESTRICT Ch = C->h ;
    int64_t *GB_RESTRICT Ci = C->i ;
    GB_CTYPE *GB_RESTRICT Cx = C->x ;
    const int64_t *GB_RESTRICT Bp = B->p ;
    const int64_t *GB_RESTRICT Bh = B->h ;
    const int64_t *GB_RESTRICT Bi = B->i ;
    const GB_BTYPE *GB_RESTRICT Bx = B_is_pattern ? NULL : B->x ;
    const int64_t bvlen = B->vlen ;
    const int64_t bnvec = B->nvec ;
    const bool B_is_hyper = B->is_hyper ;
    const int64_t *GB_RESTRICT Mi = M->i ;
    const GB_void *GB_RESTRICT Mx = (Mask_struct ? NULL : (M->x)) ;
    const size_t msize = M->type->size ;
    const int64_t *GB_RESTRICT Ah = A->h ;
    const int64_t *GB_RESTRICT Ap = A->p ;
    const int64_t *GB_RESTRICT Ai = A->i ;
    const int64_t anvec = A->nvec ;
    const bool A_is_hyper = GB_IS_HYPER (A) ;
    const GB_ATYPE *GB_RESTRICT Ax = A_is_pattern ? NULL : A->x ;
    //--------------------------------------------------------------------------
    // C<M> = A'*B
    //--------------------------------------------------------------------------
    // C and M have the same pattern, except some entries of C may become
    // zombies.
    int64_t nzombies = 0 ;
    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {
        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------
        int64_t kfirst = TaskList [taskid].kfirst ;
        int64_t klast  = TaskList [taskid].klast ;
        int64_t pC_first = TaskList [taskid].pC ;
        int64_t pC_last  = TaskList [taskid].pC_end ;
        int64_t task_nzombies = 0 ;
        int64_t bpleft = 0 ;
        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            //------------------------------------------------------------------
            // get C(:,k) and M(:k)
            //------------------------------------------------------------------
            int64_t j = (Ch == NULL) ? k : Ch [k] ;
            int64_t pC_start, pC_end ;
            if (k == kfirst)
            {
                // First vector for task; may only be partially owned.
                pC_start = pC_first ;
                pC_end   = GB_IMIN (Cp [k+1], pC_last) ;
            }
            else if (k == klast)
            {
                // Last vector for task; may only be partially owned.
                pC_start = Cp [k] ;
                pC_end   = pC_last ;
            }
            else
            {
                // task fully owns this vector C(:,k).
                pC_start = Cp [k] ;
                pC_end   = Cp [k+1] ;
            }
            //------------------------------------------------------------------
            // get B(:,j)
            //------------------------------------------------------------------
            // locate B(:,j): sets pB_start/pB_end; bpleft caches the search
            // position across the k loop
            int64_t pB_start, pB_end ;
            GB_lookup (B_is_hyper, Bh, Bp, &bpleft, bnvec-1, j,
                &pB_start, &pB_end) ;
            int64_t bjnz = pB_end - pB_start ;
            //------------------------------------------------------------------
            // C(:,j)<M(:,j)> = A(:,i)'*B(:,j)
            //------------------------------------------------------------------
            if (bjnz == 0)
            {
                //--------------------------------------------------------------
                // C(:,j) is empty if B(:,j) is empty
                //--------------------------------------------------------------
                task_nzombies += (pC_end - pC_start) ;
                for (int64_t pC = pC_start ; pC < pC_end ; pC++)
                {
                    // C(i,j) is a zombie
                    Ci [pC] = GB_FLIP (Mi [pC]) ;
                }
            }
            else
            {
                //--------------------------------------------------------------
                // B(:,j) not empty
                //--------------------------------------------------------------
                int64_t ib_first = Bi [pB_start] ;
                int64_t ib_last  = Bi [pB_end-1] ;
                int64_t apleft = 0 ;
                for (int64_t pC = pC_start ; pC < pC_end ; pC++)
                {
                    //----------------------------------------------------------
                    // compute C(i,j)
                    //----------------------------------------------------------
                    // get the value of M(i,j)
                    int64_t i = Mi [pC] ;
                    if (GB_mcast (Mx, pC, msize)) // note: Mx [pC], same as Cx
                    {
                        //------------------------------------------------------
                        // M(i,j) is true, so compute C(i,j)
                        //------------------------------------------------------
                        // get A(:,i), if it exists
                        int64_t pA, pA_end ;
                        GB_lookup (A_is_hyper, Ah, Ap, &apleft, anvec-1, i,
                            &pA, &pA_end) ;
                        // C(i,j) = A(:,i)'*B(:,j)
                        #include "GB_AxB_dot_cij.c"
                    }
                    else
                    {
                        //------------------------------------------------------
                        // M(i,j) is false, so C(i,j) is a zombie
                        //------------------------------------------------------
                        task_nzombies++ ;
                        Ci [pC] = GB_FLIP (i) ;
                    }
                }
            }
        }
        //----------------------------------------------------------------------
        // sum up the zombies found by this task
        //----------------------------------------------------------------------
        nzombies += task_nzombies ;
    }
    //--------------------------------------------------------------------------
    // finalize the zombie count for C
    //--------------------------------------------------------------------------
    C->nzombies = nzombies ;
}
#undef GB_DOT3
|
walsh.c | /* Copyright 2014. The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2013 Martin Uecker <uecker@eecs.berkeley.edu>
*/
#include <complex.h>
#include <math.h>
#include <assert.h>
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/fft.h"
#include "num/linalg.h"
#include "misc/mri.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "calib/calmat.h"
#include "walsh.h"
/*
 * Walsh-style coil sensitivity estimation: builds per-voxel channel
 * covariance entries from a sliding-block calibration matrix and writes
 * them into sens (cosize = channels*(channels+1)/2 entries per voxel).
 * NOTE(review): kdims has 4 elements but only [0..2] are set here —
 * presumably calibration_matrix() only reads the first 3; confirm.
 */
void walsh(const long bsize[3], const long dims[DIMS], complex float* sens, const long caldims[DIMS], const complex float* data)
{
	assert(1 == caldims[MAPS_DIM]);
	assert(1 == dims[MAPS_DIM]);
	int channels = caldims[COIL_DIM];
	// number of unique entries of a hermitian channels x channels matrix
	int cosize = channels * (channels + 1) / 2;
	assert(dims[COIL_DIM] == cosize);
	long dims1[DIMS];
	md_copy_dims(DIMS, dims1, dims);
	dims1[COIL_DIM] = channels;
	// clamp the block size to the image dimensions
	long kdims[4];
	kdims[0] = MIN(bsize[0], dims[0]);
	kdims[1] = MIN(bsize[1], dims[1]);
	kdims[2] = MIN(bsize[2], dims[2]);
	// zero-pad the calibration region to full size and go to image space
	md_resize_center(DIMS, dims1, sens, caldims, data, CFL_SIZE);
	ifftc(DIMS, dims1, FFT_FLAGS, sens, sens);
	// enlarge by the block size so every voxel has a full neighborhood
	long odims[DIMS];
	md_copy_dims(DIMS, odims, dims1);
	for (int i = 0; i < 3; i++)
		odims[i] = dims[i] + kdims[i] - 1;
	complex float* tmp = md_alloc(DIMS, odims, CFL_SIZE);
#if 0
	md_resizec(DIMS, odims, tmp, dims1, sens, CFL_SIZE);
#else
	// periodic extension followed by a circular shift to center the data
	long cen[DIMS] = { 0 };
	for (int i = 0; i < 3; i++)
		cen[i] = (odims[i] - dims[i] + 1) / 2;
	complex float* tmp1 = md_alloc(DIMS, odims, CFL_SIZE);
	md_circ_ext(DIMS, odims, tmp1, dims1, sens, CFL_SIZE);
//	md_resize(DIMS, odims, tmp1, dims1, sens, CFL_SIZE);
	md_circ_shift(DIMS, odims, cen, tmp, tmp1, CFL_SIZE);
	md_free(tmp1);
#endif
	long calmat_dims[2];
	complex float* cm = calibration_matrix(calmat_dims, kdims, odims, tmp);
	md_free(tmp);
	int xh = dims[0];
	int yh = dims[1];
	int zh = dims[2];
	// neighborhood samples per channel in the calibration matrix
	int pixels = calmat_dims[1] / channels;
	// one gram matrix per voxel; parallel over the slowest dimension
	#pragma omp parallel for
	for (int k = 0; k < zh; k++) {
		complex float in[channels][pixels];
		complex float cov[cosize];
		for (int j = 0; j < yh; j++) {
			for (int i = 0; i < xh; i++) {
				// gather this voxel's neighborhood for all channels
				for (int c = 0; c < channels; c++)
					for (int p = 0; p < pixels; p++)
						in[c][p] = cm[((((c * pixels + p) * zh) + k) * yh + j) * xh + i];
				gram_matrix2(channels, cov, pixels, in);
				// scatter the packed covariance into the COIL dimension
				for (int l = 0; l < cosize; l++)
					sens[(((l * zh) + k) * yh + j) * xh + i] = cov[l];
			}
		}
	}
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Subtract `struct timeval' Y from X, storing X - Y in RESULT.
 * Y is normalized (and therefore modified) as a side effect.
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-1, variable-coefficient 7-point stencil benchmark:
 * allocates the two time planes of A and the 7 coefficient grids, runs
 * the stencil TESTS times and reports per-test and minimum runtimes.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  // Default sizes: previously Nx..Nt were read uninitialized (undefined
  // behavior) whenever fewer than 3 (or 4) arguments were supplied.
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;   // +2 accounts for the 1-deep halo on each side
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 16;
  tile_size[3] = 32;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // Initialize every cell of BOTH time planes, starting at index 0: the
  // stencil reads the 1-deep halo (index 0) and, from t=1 on, the
  // boundary cells of the other plane, which the previous init (from
  // index 1, plane 0 only) left uninitialized.
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        double v = 1.0 * (rand() % BASE);
        A[0][i][j][k] = v;
        A[1][i][j][k] = v;
      }
    }
  }
  // coefficients are only read at interior indices >= 1, so this range
  // matches every access
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    // use the MIN macro defined in this file (lowercase `min` is not
    // defined here, unlike in the sibling benchmarks)
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays, including the top-level pointer arrays and
  // the tile-size list (previously leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
pdsytrf_aasen.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/pzhetrf_aasen.c, normal z -> d, Fri Sep 28 17:38:12 2018
*
**/
#include <math.h>
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>
#define A(m, n) ((double*)plasma_tile_addr(A, (m), (n)))
#define L(m, n) ((double*)plasma_tile_addr(A, (m), (n)-1))
#define U(m, n) ((double*)plasma_tile_addr(A, (m)-1, (n)))
#define T(m, n) ((double*)(W4((m)+((m)-(n))*A.mt)))
#define Tgb(m, n) ((double*)plasma_tile_addr(T, (m), (n)))
// W(m): nb*nb used to compute T(k,k)
#define W(m) ((double*)plasma_tile_addr(W, (m), 0))
// W2(m): mt*(nb*nb) to store H
#define W2(m) ((double*)plasma_tile_addr(W2, (m), 0))
// W3(m): mt*(nb*nb) used to form T(k,k)
#define W3(m) ((double*)plasma_tile_addr(W3, (m), 0))
// W4(m): mt*(nb*nb) used to store off-diagonal T
#define W4(m) ((double*)plasma_tile_addr(W4, (m), 0))
// W5(m): wmt used to update L(:,k)
#define W5(m) ((double*)plasma_tile_addr(W5, (m), 0))
#define H(m, n) (uplo == PlasmaLower ? W2((m)) : W2((n)))
#define IPIV(i) (ipiv + (i)*(A.mb))
/***************************************************************************//**
* Parallel tile LDLt factorization.
* @see plasma_omp_dsytrf_aasen
******************************************************************************/
void plasma_pdsytrf_aasen(plasma_enum_t uplo,
plasma_desc_t A, int *ipiv,
plasma_desc_t T,
plasma_desc_t W,
plasma_sequence_t *sequence,
plasma_request_t *request)
{
// Return if failed sequence.
if (sequence->status != PlasmaSuccess)
return;
// Read parameters from the context.
plasma_context_t *plasma = plasma_context_self();
int ib = plasma->ib;
int wmt = W.mt-(1+4*A.mt);
// Creaet views for the workspaces
plasma_desc_t W2
= plasma_desc_view(W, A.mb, 0, A.mt*A.mb, A.nb);
plasma_desc_t W3
= plasma_desc_view(W, (1+1*A.mt)*A.mb, 0, A.mt*A.mb, A.nb);
plasma_desc_t W4
= plasma_desc_view(W, (1+2*A.mt)*A.mb, 0, 2*A.mt*A.mb, A.nb);
plasma_desc_t W5
= plasma_desc_view(W, (1+4*A.mt)*A.mb, 0, wmt*A.mb, A.nb);
//==============
// PlasmaLower
//==============
// NOTE: In old PLASMA, we used priority.
if (uplo == PlasmaLower) {
for (int k = 0; k < A.mt; k++) {
int nvak = plasma_tile_nview(A, k);
int mvak = plasma_tile_mview(A, k);
int ldak = plasma_tile_mmain(A, k);
int ldtk = plasma_tile_mmain(W4, k);
// -- computing offdiagonals H(1:k-1, k) -- //
for (int m=1; m < k; m++) {
int mvam = plasma_tile_mview(A, m);
int ldtm = plasma_tile_mmain(W4, m);
plasma_core_omp_dgemm(
PlasmaNoTrans, PlasmaConjTrans,
mvam, mvak, mvam,
1.0, T(m, m), ldtm,
L(k, m), ldak,
0.0, H(m, k), A.mb,
sequence, request);
if (m > 1) {
plasma_core_omp_dgemm(
PlasmaNoTrans, PlasmaConjTrans,
mvam, mvak, A.mb,
1.0, T(m, m-1), ldtm,
L(k, m-1), ldak,
1.0, H(m, k), A.mb,
sequence, request);
}
int mvamp1 = plasma_tile_mview(A, m+1);
int ldtmp1 = plasma_tile_mmain(W4, m+1);
plasma_core_omp_dgemm(
PlasmaConjTrans, PlasmaConjTrans,
mvam, mvak, mvamp1,
1.0, T(m+1, m), ldtmp1,
L(k, m+1), ldak,
1.0, H(m, k), A.mb,
sequence, request);
}
// ---- end of computing H(1:(k-1),k) -- //
// -- computing diagonal T(k, k) -- //
double beta;
if (k > 1) {
int num = k-1;
for (int m = 1; m < k; m++) {
int mvam = plasma_tile_mview(A, m);
int id = (m-1) % num;
if (m < num+1)
beta = 0.0;
else
beta = 1.0;
plasma_core_omp_dgemm(
PlasmaNoTrans, PlasmaNoTrans,
mvak, mvak, mvam,
-1.0, L(k, m), ldak,
H(m, k), A.mb,
beta, W3(id), A.mb,
sequence, request);
}
// all-reduce W3 using a binary tree //
// NOTE: Old PLASMA had an option to reduce in a set of tiles //
// num_players: number of players
int num_players = num;
// num_rounds : height of tournament
int num_rounds = ceil( log10((double)num_players)/log10(2.0) );
// base: intervals between brackets
int base = 2;
for (int round = 1; round <= num_rounds; round++) {
int num_brackets = num_players / 2; // number of brackets
for (int bracket = 0; bracket < num_brackets; bracket++) {
// first contender
int m1 = base*bracket;
// second contender
int m2 = m1+base/2;
plasma_core_omp_dgeadd(
PlasmaNoTrans, mvak, mvak,
1.0, W3(m2), A.mb,
1.0, W3(m1), A.mb,
sequence, request);
}
num_players = ceil( ((double)num_players)/2.0 );
base = 2*base;
}
plasma_core_omp_dlacpy(
PlasmaLower, PlasmaNoTrans,
mvak, mvak,
A(k, k), ldak,
T(k, k), ldtk,
sequence, request);
plasma_core_omp_dgeadd(
PlasmaNoTrans, mvak, mvak,
1.0, W3(0), A.mb,
1.0, T(k, k), ldtk,
sequence, request);
}
else { // k == 0 or 1
plasma_core_omp_dlacpy(
PlasmaLower, PlasmaNoTrans,
mvak, mvak,
A(k, k), ldak,
T(k, k), ldtk,
sequence, request);
// expanding to full matrix
plasma_core_omp_dlacpy(
PlasmaLower, PlasmaConjTrans,
mvak, mvak,
T(k, k), ldtk,
T(k, k), ldtk,
sequence, request);
}
if (k > 0) {
if (k > 1) {
plasma_core_omp_dgemm(
PlasmaNoTrans, PlasmaNoTrans,
mvak, A.mb, mvak,
1.0, L(k, k), ldak,
T(k, k-1), ldtk,
0.0, W(0), A.mb,
sequence, request);
plasma_core_omp_dgemm(
PlasmaNoTrans, PlasmaConjTrans,
mvak, mvak, A.mb,
-1.0, W(0), A.mb,
L(k, k-1), ldak,
1.0, T(k, k), ldtk,
sequence, request);
}
// - symmetrically solve with L(k,k) //
plasma_core_omp_dsygst(
1, PlasmaLower, mvak,
T(k, k), ldtk,
L(k, k), ldak,
sequence, request);
// expand to full matrix
plasma_core_omp_dlacpy(
PlasmaLower, PlasmaConjTrans,
mvak, mvak,
T(k, k), ldtk,
T(k, k), ldtk,
sequence, request);
}
// computing H(k, k) //
beta = 0.0;
if (k > 1) {
plasma_core_omp_dgemm(
PlasmaNoTrans, PlasmaConjTrans,
mvak, mvak, A.nb,
1.0, T(k, k-1), ldtk,
L(k, k-1), ldak,
0.0, H(k, k), A.mb,
sequence, request);
beta = 1.0;
}
// computing the (k+1)-th column of L //
if (k+1 < A.nt) {
int ldakp1 = plasma_tile_mmain(A, k+1);
if (k > 0) {
// finish computing H(k, k) //
plasma_core_omp_dgemm(
PlasmaNoTrans, PlasmaConjTrans,
mvak, mvak, mvak,
1.0, T(k, k), ldtk,
L(k, k), ldak,
beta, H(k, k), A.mb,
sequence, request);
// computing the (k+1)-th column of L //
// - update with the previous column
if (A.mt-k < plasma->max_threads && k > 0) {
int num = imin(k, wmt/(A.mt-k-1)); // workspace per row
for (int n = 1; n <= k; n++) {
// update A(k+1:mt-1, k) using L(:,n) //
double *a1, *a2, *b, *c;
int ma1 = (A.mt-k)*A.mb;
int ma2 = plasma_tile_mmain(A, A.mt-1);
int mc = (A.mt-(k+1))*A.mb;
int na = plasma_tile_nmain(A, k);
a1 = L(k, n); // we need only L(k+1:mt-1, n)
a2 = L(k, A.mt-1); // left-over tile
b = H(n, k);
c = W5(((n-1)%num)*(A.mt-k-1));
int mvan = plasma_tile_mview(A, n);
#pragma omp task depend(in:a1[0:ma1*na]) \
depend(in:a2[0:ma2*na]) \
depend(in:b[0:A.mb*mvak]) \
depend(inout:c[0:mc*A.mb])
{
for (int m = k+1; m < A.mt; m++) {
int mvam = plasma_tile_mview(A, m);
int ldam = plasma_tile_mmain(A, m);
int id = (m-k-1)+((n-1)%num)*(A.mt-k-1);
if (n < num+1)
beta = 0.0;
else
beta = 1.0;
#pragma omp task
{
plasma_core_dgemm(
PlasmaNoTrans, PlasmaNoTrans,
mvam, mvak, mvan,
-1.0, L(m, n), ldam,
H(n, k), A.mb,
beta, W5(id), A.mb);
}
}
#pragma omp taskwait
}
}
// accumerate within workspace using a binary tree
// num_players: number of players
int num_players = num;
// num_rounds: height of tournament
int num_rounds = ceil( log10((double)num_players)/log10(2.0) );
// base: intervals between brackets
int base = 2;
for (int round = 1; round <= num_rounds; round++) {
int num_brackets = num_players / 2; // number of brackets
for (int bracket = 0; bracket < num_brackets; bracket++) {
// first contender
int m1 = base*bracket;
// second contender
int m2 = m1+base/2;
double *c1, *c2;
int mc = (A.mt-(k+1))*A.mb;
c1 = W5(m1*(A.mt-k-1));
c2 = W5(m2*(A.mt-k-1));
#pragma omp task depend(in:c2[0:mc*A.mb]) \
depend(inout:c1[0:mc*A.mb])
{
for (int m = k+1; m < A.mt; m++) {
int mvam = plasma_tile_mview(A, m);
#pragma omp task
{
plasma_core_dgeadd(
PlasmaNoTrans, mvam, mvak,
1.0, W5((m-k-1)+m2*(A.mt-k-1)), A.mb,
1.0, W5((m-k-1)+m1*(A.mt-k-1)), A.mb);
}
}
#pragma omp taskwait
}
}
num_players = ceil( ((double)num_players)/2.0 );
base = 2*base;
}
// accumelate into A(k+1:mt-1, k)
{
double *c1;
double *c2_in;
double *c3_in;
double *c2_out;
int mc = (A.mt-(k+1))*A.mb;
int mc2_in = (A.mt-k)*A.mb;
int mc3_in = plasma_tile_mmain(A, A.mt-1);
int mc2_out = (A.mt-(k+1))*A.mb;
int nc2 = plasma_tile_nmain(A, k);
c1 = W5(0);
c2_in = A(k, k); // we write only A(k+1,k), but dependency from sym-swap
c3_in = A(A.mt-1, k); // left-over tile
c2_out = A(k+1, k);
#pragma omp task depend(in:c1[0:mc*A.mb]) \
depend(in:c2_in[0:mc2_in*nc2]) \
depend(in:c3_in[0:mc3_in*nc2]) \
depend(out:c2_out[0:mc2_out*nc2])
{
for (int m = k+1; m < A.mt; m++) {
int mvam = plasma_tile_mview(A, m);
int ldam = plasma_tile_mmain(A, m);
#pragma omp task
{
plasma_core_dgeadd(
PlasmaNoTrans, mvam, mvak,
1.0, W5(m-k-1), A.mb,
1.0, A(m, k), ldam);
}
}
#pragma omp taskwait
}
}
}
else {
for (int n = 1; n <= k; n++) {
// update L(:,k+1) using L(:,n) //
double *a1, *a2;
double *b;
double *c1_in, *c2_in;
double *c_out;
int ma1 = (A.mt-k)*A.mb;
int ma2 = plasma_tile_mmain(A, A.mt-1);
int mc1_in = (A.mt-k)*A.mb;
int mc2_in = plasma_tile_mmain(A, A.mt-1);
int mc_out = (A.mt-(k+1))*A.mb;
int na = plasma_tile_nmain(A, k);
int nc = plasma_tile_nmain(A, k);
a1 = L(k, n); // we read only L(k+1:mt-1, n), dependency from row-swap
a2 = L(A.mt-1, n); // left-over tile
b = H(n, k);
c1_in = A(k, k); // we write only A(k+1,k), but dependency from sym-swap
c2_in = A(A.mt-1, k); // left-over tile
c_out = A(k+1, k);
int mvan = plasma_tile_mview(A, n);
#pragma omp task depend(in:a1[0:ma1*na]) \
depend(in:a2[0:ma2*na]) \
depend(in:b[0:A.mb*mvak]) \
depend(in:c1_in[0:mc1_in*nc]) \
depend(in:c2_in[0:mc2_in*nc]) \
depend(out:c_out[0:mc_out*nc])
{
for (int m = k+1; m < A.mt; m++) {
int mvam = plasma_tile_mview(A, m);
int ldam = plasma_tile_mmain(A, m);
#pragma omp task
{
plasma_core_dgemm(
PlasmaNoTrans, PlasmaNoTrans,
mvam, mvak, mvan,
-1.0, L(m, n), ldam,
H(n, k), A.mb,
1.0, A(m, k), ldam);
}
}
#pragma omp taskwait
}
}
}
} // end of if (k > 0)
// ============================= //
// == PLASMA LU panel == //
// ============================= //
// -- compute LU of the panel -- //
double *a1, *a2;
a1 = L(k+1, k+1);
a2 = L(A.mt-1, k+1);
int mlkk = A.m - (k+1)*A.mb; // dimension
int ma1 = (A.mt-(k+1)-1)*A.mb;
int ma2 = plasma_tile_mmain(A, A.mt-1);
int na = plasma_tile_nmain(A, k);
int k1 = 1+(k+1)*A.nb;
int k2 = imin(mlkk, mvak)+(k+1)*A.nb;
int num_panel_threads = imin(plasma->max_panel_threads,
A.mt-(k+1));
#pragma omp task depend(inout:a1[0:ma1*na]) \
depend(inout:a2[0:ma2*na]) \
depend(out:ipiv[k1-1:k2])
{
volatile int *max_idx =
(int*)malloc(num_panel_threads*sizeof(int));
if (max_idx == NULL)
plasma_request_fail(sequence, request,
PlasmaErrorOutOfMemory);
volatile double *max_val =
(double*)malloc(num_panel_threads*sizeof(
double));
if (max_val == NULL)
plasma_request_fail(sequence, request,
PlasmaErrorOutOfMemory);
volatile int info = 0;
plasma_barrier_t barrier;
plasma_barrier_init(&barrier);
if (sequence->status == PlasmaSuccess) {
for (int rank = 0; rank < num_panel_threads; rank++) {
#pragma omp task shared(barrier)
{
plasma_desc_t view =
plasma_desc_view(A,
(k+1)*A.mb, k*A.nb,
mlkk, mvak);
plasma_core_dgetrf(view, IPIV(k+1), ib,
rank, num_panel_threads,
max_idx, max_val, &info,
&barrier);
if (info != 0)
plasma_request_fail(sequence, request,
(k+1)*A.mb+info);
}
}
}
#pragma omp taskwait
free((void*)max_idx);
free((void*)max_val);
{
for (int i = 0; i < imin(mlkk, mvak); i++) {
IPIV(k+1)[i] += (k+1)*A.mb;
}
}
}
// ============================== //
// == end of PLASMA LU panel == //
// ============================== //
// -- apply pivoting to previous columns of L -- //
for (int n = 1; n < k+1; n++) {
ma1 = (A.mt-k-1)*A.mb;
ma2 = plasma_tile_mmain(A, A.mt-1);
na = plasma_tile_nmain(A, n-1);
a1 = L(k+1, n);
a2 = L(A.mt-1, n);
#pragma omp task depend(in:ipiv[(k1-1):k2]) \
depend(inout:a1[0:ma1*na]) \
depend(inout:a2[0:ma2*na])
{
if (sequence->status == PlasmaSuccess) {
plasma_desc_t view =
plasma_desc_view(A, 0, (n-1)*A.nb, A.m, na);
plasma_core_dgeswp(PlasmaRowwise, view, k1, k2, ipiv, 1);
}
}
}
// computing T(k+1, k) //
int mvakp1 = plasma_tile_mview(A, k+1);
int ldak_n = plasma_tile_nmain(A, k);
int ldtkp1 = plasma_tile_mmain(W4, k+1);
// copy upper-triangular part of L(k+1,k+1) to T(k+1,k)
// and then zero it out
plasma_core_omp_dlacpy(
PlasmaUpper, PlasmaNoTrans,
mvakp1, mvak,
L(k+1, k+1), ldakp1,
T(k+1, k ), ldtkp1,
sequence, request);
plasma_core_omp_dlaset(
PlasmaUpper,
ldakp1, ldak_n, 0, 0,
mvakp1, mvak,
0.0, 1.0,
L(k+1, k+1));
if (k > 0) {
plasma_core_omp_dtrsm(
PlasmaRight, PlasmaLower,
PlasmaConjTrans, PlasmaUnit,
mvakp1, mvak,
1.0, L(k, k), ldak,
T(k+1, k), ldtkp1,
sequence, request);
}
// -- symmetrically apply pivoting to trailing A -- //
{
int mnt1, mnt2;
mnt2 = A.mt-1-(k+1);
ma2 = plasma_tile_mmain(A, A.mt-1);
mnt1 = (mnt2*(mnt2+1))/2; // # of remaining tiles in a1
mnt1 *= A.mb*A.mb;
mnt2 *= A.mb*ma2; // a2
mnt2 += ma2*ma2; // last diagonal
a1 = A(k+1, k+1);
a2 = A(A.mt-1, k+1);
int num_swap_threads = imin(plasma->max_panel_threads,
A.mt-(k+1));
#pragma omp task depend(in:ipiv[(k1-1):k2]) \
depend(inout:a1[0:mnt1]) \
depend(inout:a2[0:mnt2])
{
plasma_barrier_t barrier;
plasma_barrier_init(&barrier);
for (int rank = 0; rank < num_swap_threads; rank++) {
#pragma omp task shared(barrier)
{
plasma_core_dsyswp(rank, num_swap_threads, PlasmaLower, A, k1, k2, ipiv, 1, &barrier);
}
}
#pragma omp taskwait
}
}
}
// copy T(k+1, k) to T(k, k+1) for dgbtrf,
// forcing T(k, k) and T(k+1, k) tiles are ready
double *Tin10 = NULL;
double *Tin11 = NULL;
double *Tin21 = NULL;
double *Tout01 = NULL;
double *Tout11 = NULL;
double *Tout21 = NULL;
double *Tout = NULL;
int km1 = imax(0, k-1);
int kp1 = imin(k+1, A.mt-1);
int ldtkm1 = plasma_tile_mmain(W4, km1);
int ldtkp1 = plasma_tile_mmain(W4, kp1);
int nvakp1 = plasma_tile_nview(A, kp1);
Tin10 = T(k, km1);
Tin11 = T(k, k);
Tin21 = T(kp1, k);
Tout01 = Tgb(km1, k);
Tout11 = Tgb(k, k);
Tout21 = Tgb(kp1, k);
Tout = Tgb(imax(0, k-T.kut+1), k);
#pragma omp task depend(in:Tin10[0:A.mb*ldtk]) \
depend(in:Tin11[0:nvak*ldtk]) \
depend(in:Tin21[0:nvakp1*ldtkp1]) \
depend(out:Tout01[0:nvak*ldtkm1]) \
depend(out:Tout11[0:nvak*ldtk]) \
depend(out:Tout21[0:nvak*ldtkp1]) \
depend(out:Tout[0:nvak*ldtkp1])
{
plasma_core_dlacpy(
PlasmaGeneral, PlasmaNoTrans,
mvak, mvak,
T(k, k), ldtk,
Tgb(k, k), ldtk);
if (k+1 < T.nt) {
int mvakp1 = plasma_tile_mview(A, k+1);
plasma_core_dlacpy(
PlasmaGeneral, PlasmaNoTrans,
mvakp1, mvak,
T(kp1, k), ldtkp1,
Tgb(kp1, k), ldtkp1);
}
if (k > 0) {
int nvakm1 = plasma_tile_nview(A, k-1);
plasma_core_dlacpy(
PlasmaGeneral, PlasmaConjTrans,
mvak, nvakm1,
T(k, km1), ldtk,
Tgb(km1, k), ldtkm1);
}
}
}
}
//==============
// PlasmaUpper
//==============
else {
// TODO: Upper
}
}
|
omp_test.c | // OpenMP header
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * Minimal OpenMP demo: every thread spawned for the parallel region
 * prints a greeting carrying its own thread id (order is unspecified).
 *
 * argc/argv are accepted for the standard main signature but unused.
 * Returns EXIT_SUCCESS on completion.
 */
int main(int argc, char *argv[]) {
  (void)argc; /* no command-line options are parsed */
  (void)argv;
  // Beginning of parallel region: the block below executes once per thread
  #pragma omp parallel
  { printf("Hello World... from thread = %d\n", omp_get_thread_num()); }
  // Ending of parallel region (implicit barrier joins all threads)
  return EXIT_SUCCESS; /* explicit return; uses the already-included <stdlib.h> */
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.