source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
qp_pixel.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include "qpoint.h"
#include "fast_math.h"
#include "vec3.h"
#include "quaternion.h"
#include "chealpix.h"
/* Compute the healpix pixel index at resolution nside for a pointing
 * given as ra/dec in degrees, honoring the ordering scheme in mem. */
long qp_radec2pix(qp_memory_t *mem, double ra, double dec, int nside) {
  /* healpix expects colatitude theta and longitude phi, in radians */
  double theta = M_PI_2 - deg2rad(dec);
  double phi = deg2rad(ra);
  long pix;
  if (mem->pix_order == QP_ORDER_NEST)
    ang2pix_nest(nside, theta, phi, &pix);
  else
    ang2pix_ring(nside, theta, phi, &pix);
  return pix;
}
/* Vectorized qp_radec2pix: fill pix[0..n-1] from ra/dec arrays of length n. */
void qp_radec2pixn(qp_memory_t *mem, double *ra, double *dec,
                   int nside, long *pix, int n) {
  for (int idx = 0; idx < n; idx++)
    pix[idx] = qp_radec2pix(mem, ra[idx], dec[idx], nside);
}
/* Lazily compute and cache the equatorial <-> galactic rotation quaternion
 * (mem->q_gal) and its inverse (mem->q_gal_inv).  No-op after the first
 * call, as recorded by mem->gal_init. */
void qp_init_gal(qp_memory_t *mem) {
  if (mem->gal_init)
    return;
  /* galactic pole cf. sofa/g2icrs */
  /* ICRS ra/dec of the north galactic pole in degrees, and the position
     angle term; values match the SOFA g2icrs routine. */
  double gp_ra = 192.85948;
  double gp_dec = 27.12825;
  double gp_pa = 90 + 32.93192;
  qp_radecpa2quat(mem, gp_ra, gp_dec, gp_pa, mem->q_gal);
  /* inverse rotation for the reverse transform */
  Quaternion_copy(mem->q_gal_inv, mem->q_gal);
  Quaternion_inv(mem->q_gal_inv);
  mem->gal_init = 1;
}
/* Rotate a pointing quaternion from equatorial to galactic coordinates,
 * in place, by left-multiplying with the cached inverse galactic rotation. */
void qp_radec2gal_quat(qp_memory_t *mem, quat_t q) {
  qp_init_gal(mem);
  Quaternion_mul_left(mem->q_gal_inv, q);
}
/* Rotate a pointing quaternion from galactic to equatorial coordinates,
 * in place, by left-multiplying with the cached galactic rotation. */
void qp_gal2radec_quat(qp_memory_t *mem, quat_t q) {
  qp_init_gal(mem);
  Quaternion_mul_left(mem->q_gal, q);
}
/* Apply qp_radec2gal_quat to each of n quaternions, in place. */
void qp_radec2gal_quatn(qp_memory_t *mem, quat_t *q, int n) {
  for (int idx = 0; idx < n; idx++)
    qp_radec2gal_quat(mem, q[idx]);
}
/* Convert a single pointing (ra/dec in degrees, polarization as
 * sin/cos of 2*psi) from equatorial to galactic coordinates, in place. */
void qp_radec2gal(qp_memory_t *mem, double *ra, double *dec,
                  double *sin2psi, double *cos2psi) {
  quat_t quat;
  qp_radec2quat(mem, *ra, *dec, *sin2psi, *cos2psi, quat);
  qp_radec2gal_quat(mem, quat);
  qp_quat2radec(mem, quat, ra, dec, sin2psi, cos2psi);
}
/* Convert a single pointing (ra/dec/position angle, degrees) from
 * equatorial to galactic coordinates, in place. */
void qp_radecpa2gal(qp_memory_t *mem, double *ra, double *dec,
                    double *pa) {
  quat_t quat;
  qp_radecpa2quat(mem, *ra, *dec, *pa, quat);
  qp_radec2gal_quat(mem, quat);
  qp_quat2radecpa(mem, quat, ra, dec, pa);
}
/* Vectorized qp_radec2gal over arrays of length n, updating in place. */
void qp_radec2galn(qp_memory_t *mem, double *ra, double *dec,
                   double *sin2psi, double *cos2psi, int n) {
  for (int idx = 0; idx < n; idx++)
    qp_radec2gal(mem, &ra[idx], &dec[idx], &sin2psi[idx], &cos2psi[idx]);
}
/* Vectorized qp_radecpa2gal over arrays of length n, updating in place. */
void qp_radecpa2galn(qp_memory_t *mem, double *ra, double *dec,
                     double *pa, int n) {
  for (int idx = 0; idx < n; idx++)
    qp_radecpa2gal(mem, &ra[idx], &dec[idx], &pa[idx]);
}
/* Apply qp_gal2radec_quat to each of n quaternions, in place. */
void qp_gal2radec_quatn(qp_memory_t *mem, quat_t *q, int n) {
  for (int idx = 0; idx < n; idx++)
    qp_gal2radec_quat(mem, q[idx]);
}
/* Convert a single pointing (ra/dec in degrees, polarization as
 * sin/cos of 2*psi) from galactic to equatorial coordinates, in place. */
void qp_gal2radec(qp_memory_t *mem, double *ra, double *dec,
                  double *sin2psi, double *cos2psi) {
  quat_t quat;
  qp_radec2quat(mem, *ra, *dec, *sin2psi, *cos2psi, quat);
  qp_gal2radec_quat(mem, quat);
  qp_quat2radec(mem, quat, ra, dec, sin2psi, cos2psi);
}
/* Convert a single pointing (ra/dec/position angle, degrees) from
 * galactic to equatorial coordinates, in place. */
void qp_gal2radecpa(qp_memory_t *mem, double *ra, double *dec,
                    double *pa) {
  quat_t quat;
  qp_radecpa2quat(mem, *ra, *dec, *pa, quat);
  qp_gal2radec_quat(mem, quat);
  qp_quat2radecpa(mem, quat, ra, dec, pa);
}
/* Vectorized qp_gal2radec over arrays of length n, updating in place. */
void qp_gal2radecn(qp_memory_t *mem, double *ra, double *dec,
                   double *sin2psi, double *cos2psi, int n) {
  for (int idx = 0; idx < n; idx++)
    qp_gal2radec(mem, &ra[idx], &dec[idx], &sin2psi[idx], &cos2psi[idx]);
}
/* Vectorized qp_gal2radecpa over arrays of length n, updating in place. */
void qp_gal2radecpan(qp_memory_t *mem, double *ra, double *dec,
                     double *pa, int n) {
  for (int idx = 0; idx < n; idx++)
    qp_gal2radecpa(mem, &ra[idx], &dec[idx], &pa[idx]);
}
/* Rotate a 3-component (T, Q, U) healpix map between celestial ('C') and
 * galactic ('G') coordinate systems.  map_in and map_out are arrays of
 * three npix-length channel arrays.  Unsupported coordinate codes, or
 * identical input and output systems, return immediately with map_out
 * untouched.  Pixels whose input T, Q, U are all zero are skipped
 * (their map_out entries are left unwritten). */
void qp_rotate_map(qp_memory_t *mem, int nside,
                   double **map_in, const char coord_in,
                   double **map_out, const char coord_out) {
  long npix = 12 * nside * nside;
  long pix, ipix[4];
  double ra, dec, sin2psi, cos2psi;
  double t, q, u, weight[4];
  /* check inputs */
  if (!(coord_in == 'C' || coord_in == 'G'))
    return;
  if (!(coord_out == 'C' || coord_out == 'G'))
    return;
  if (coord_in == coord_out)
    return;
  /* initialize */
  qp_init_gal(mem);
  qp_pixinfo_t *pixinfo = qp_init_pixinfo(nside, 1);
#pragma omp parallel for private(pix, ipix, weight, ra, dec, sin2psi, cos2psi, t, q, u)
  for (long ii=0; ii<npix; ii++) {
    /* ra/dec of output pixel */
    /* pix2ang returns colatitude (into dec) and longitude (into ra)
       in radians; convert to dec/ra in degrees */
    if (mem->pix_order == QP_ORDER_NEST)
      pix2ang_nest(nside, ii, &dec, &ra);
    else
      pix2ang_ring(nside, ii, &dec, &ra);
    dec = rad2deg(M_PI_2 - dec);
    ra = rad2deg(ra);
    sin2psi = 0;
    cos2psi = 1;
    /* find corresponding input pixel */
    /* inverse rotation: map the output-pixel center back into the
       input coordinate system */
    if (coord_in == 'C' && coord_out == 'G') {
      qp_gal2radec(mem, &ra, &dec, &sin2psi, &cos2psi);
    } else if (coord_in == 'G' && coord_out == 'C') {
      qp_radec2gal(mem, &ra, &dec, &sin2psi, &cos2psi);
    }
    if (mem->interp_pix) {
      /* bilinear interpolation over the four neighboring input pixels */
      qp_get_interpol(mem, pixinfo, ra, dec, ipix, weight);
      /* rotate input pixel to output pixel */
      t = q = u = 0;
      for (int jj = 0; jj < 4; jj++) {
        t += map_in[0][ipix[jj]] * weight[jj];
        q += map_in[1][ipix[jj]] * weight[jj];
        u += map_in[2][ipix[jj]] * weight[jj];
      }
    } else {
      /* nearest-pixel lookup */
      pix = qp_radec2pix(mem, ra, dec, nside);
      t = map_in[0][pix];
      q = map_in[1][pix];
      u = map_in[2][pix];
    }
    /* empty input pixel: leave output untouched */
    if (t == 0 && q == 0 && u == 0) continue;
    /* forward rotation of the input-frame position to recover the
       polarization rotation angle (sin/cos 2psi) for this pixel */
    cos2psi = 1.;
    sin2psi = 0.;
    if (coord_in == 'C' && coord_out == 'G') {
      qp_radec2gal(mem, &ra, &dec, &sin2psi, &cos2psi);
    } else if (coord_in == 'G' && coord_out == 'C') {
      qp_gal2radec(mem, &ra, &dec, &sin2psi, &cos2psi);
    }
    /* rotate Q/U by 2psi; T is unchanged */
    map_out[0][ii] = t;
    map_out[1][ii] = q * cos2psi + u * sin2psi;
    map_out[2][ii] = u * cos2psi - q * sin2psi;
  }
  qp_free_pixinfo(pixinfo);
}
/* Compute pixel number and pol angle given nside and quaternion */
/* With mem->fast_pix set, the pixel is found from the rotated z-axis
 * vector and sin/cos(2*psi) are extracted directly from quaternion
 * components, avoiding the trig in qp_quat2radec.  Otherwise falls back
 * to the ra/dec path. */
void qp_quat2pix(qp_memory_t *mem, quat_t q, int nside, long *pix,
                 double *sin2psi, double *cos2psi) {
  if (mem->fast_pix) {
    vec3_t vec;
    /* third column of the rotation matrix = pointing unit vector */
    Quaternion_to_matrix_col3(q, vec);
    if (mem->pix_order == QP_ORDER_NEST)
      vec2pix_nest(nside, vec, pix);
    else
      vec2pix_ring(nside, vec, pix);
    /* cosb2 = cos^2(lat)/4; vanishes at the poles, where the generic
       formula below would divide by zero */
    double cosb2 = (1 - vec[2] * vec[2]) / 4.;
    double norm, cosg, sing;
    if (cosb2 < DBL_EPSILON) {
      /* polar special case: psi from the surviving quaternion terms */
      if (vec[2] > 0) {
        cosg = q[3] * q[3] - q[0] * q[0];
        sing = 2 * q[0] * q[3];
      } else {
        cosg = q[1] * q[1] - q[2] * q[2];
        sing = 2 * q[1] * q[2];
      }
      norm = 2 * cosg;
    } else {
      cosg = q[1] * q[3] - q[0] * q[2];
      sing = q[0] * q[1] + q[2] * q[3];
      norm = 2. * cosg / cosb2;
    }
    /* double-angle identities: sin2psi = 2 sin g cos g / cos^2 b, etc. */
    *sin2psi = norm * sing;
    *cos2psi = norm * cosg - 1;
  } else {
    double ra, dec;
    qp_quat2radec(mem, q, &ra, &dec, sin2psi, cos2psi);
    *pix = qp_radec2pix(mem, ra, dec, nside);
  }
}
/* Compute pixel number and position angle (degrees) given nside and a
 * pointing quaternion.  The fast path mirrors qp_quat2pix but returns the
 * angle itself (via atan2) rather than sin/cos(2*psi). */
void qp_quat2pixpa(qp_memory_t *mem, quat_t q, int nside, long *pix, double *pa) {
  if (mem->fast_pix) {
    vec3_t vec;
    /* third column of the rotation matrix = pointing unit vector */
    Quaternion_to_matrix_col3(q, vec);
    if (mem->pix_order == QP_ORDER_NEST)
      vec2pix_nest(nside, vec, pix);
    else
      vec2pix_ring(nside, vec, pix);
    /* cosb2 = cos^2(lat)/4; used only to detect the polar limit here */
    double cosb2 = (1 - vec[2] * vec[2]) / 4.;
    double cosg, sing;
    if (cosb2 < DBL_EPSILON) {
      /* polar special case */
      if (vec[2] > 0) {
        cosg = q[3] * q[3] - q[0] * q[0];
        sing = 2 * q[0] * q[3];
      } else {
        cosg = q[1] * q[1] - q[2] * q[2];
        sing = 2 * q[1] * q[2];
      }
    } else {
      cosg = q[1] * q[3] - q[0] * q[2];
      sing = q[0] * q[1] + q[2] * q[3];
    }
    /* optionally use the polynomial atan2 approximation */
    if (mem->fast_math) {
      *pa = rad2deg(poly_atan2(sing, cosg));
    } else {
      *pa = rad2deg(atan2(sing, cosg));
    }
  } else {
    double ra, dec;
    qp_quat2radecpa(mem, q, &ra, &dec, pa);
    *pix = qp_radec2pix(mem, ra, dec, nside);
  }
}
/* Vectorized qp_quat2pix over n quaternions. */
void qp_quat2pixn(qp_memory_t *mem, quat_t *q, int nside, long *pix,
                  double *sin2psi, double *cos2psi, int n) {
  for (int idx = 0; idx < n; idx++)
    qp_quat2pix(mem, q[idx], nside, &pix[idx], &sin2psi[idx], &cos2psi[idx]);
}
/* Vectorized qp_quat2pixpa over n quaternions. */
void qp_quat2pixpan(qp_memory_t *mem, quat_t *q, int nside, long *pix,
                    double *pa, int n) {
  for (int idx = 0; idx < n; idx++)
    qp_quat2pixpa(mem, q[idx], nside, &pix[idx], &pa[idx]);
}
/* Pixel timestream for one detector: compose the boresight quaternion with
 * the detector offset at each sample, then pixelize. */
void qp_bore2pix(qp_memory_t *mem, quat_t q_off, double *ctime, quat_t *q_bore,
                 int nside, long *pix, double *sin2psi, double *cos2psi, int n) {
  quat_t q_det;
  for (int idx = 0; idx < n; idx++) {
    qp_bore2det(mem, q_off, ctime[idx], q_bore[idx], q_det);
    qp_quat2pix(mem, q_det, nside, &pix[idx], &sin2psi[idx], &cos2psi[idx]);
  }
}
/* Pixel + position-angle timestream for one detector: compose the
 * boresight quaternion with the detector offset, then pixelize. */
void qp_bore2pixpa(qp_memory_t *mem, quat_t q_off, double *ctime, quat_t *q_bore,
                   int nside, long *pix, double *pa, int n) {
  quat_t q_det;
  for (int idx = 0; idx < n; idx++) {
    qp_bore2det(mem, q_off, ctime[idx], q_bore[idx], q_det);
    qp_quat2pixpa(mem, q_det, nside, &pix[idx], &pa[idx]);
  }
}
/* Pixel timestream for one detector including a half-wave-plate rotation
 * per sample; otherwise identical to qp_bore2pix. */
void qp_bore2pix_hwp(qp_memory_t *mem, quat_t q_off, double *ctime,
                     quat_t *q_bore, quat_t *q_hwp, int nside, long *pix,
                     double *sin2psi, double *cos2psi, int n) {
  quat_t q_det;
  for (int idx = 0; idx < n; idx++) {
    qp_bore2det_hwp(mem, q_off, ctime[idx], q_bore[idx], q_hwp[idx], q_det);
    qp_quat2pix(mem, q_det, nside, &pix[idx], &sin2psi[idx], &cos2psi[idx]);
  }
}
/* Pixel + position-angle timestream for one detector including a
 * half-wave-plate rotation per sample; otherwise as qp_bore2pixpa. */
void qp_bore2pixpa_hwp(qp_memory_t *mem, quat_t q_off, double *ctime,
                       quat_t *q_bore, quat_t *q_hwp, int nside, long *pix,
                       double *pa, int n) {
  quat_t q_det;
  for (int idx = 0; idx < n; idx++) {
    qp_bore2det_hwp(mem, q_off, ctime[idx], q_bore[idx], q_hwp[idx], q_det);
    qp_quat2pixpa(mem, q_det, nside, &pix[idx], &pa[idx]);
  }
}
/* Angular offset in radians of the pointing (ra, dec in degrees) from the
 * center of healpix pixel pix, as colatitude/longitude deltas wrapped into
 * roughly (-pi/2, pi/2] and (-pi, pi]. */
void qp_pixel_offset(qp_memory_t *mem, int nside, long pix,
                     double ra, double dec, double *dtheta,
                     double *dphi) {
  double theta_pix, phi_pix;
  /* center of the pixel in healpix (theta, phi) coordinates */
  if (mem->pix_order == QP_ORDER_NEST)
    pix2ang_nest(nside, pix, &theta_pix, &phi_pix);
  else
    pix2ang_ring(nside, pix, &theta_pix, &phi_pix);
  *dtheta = M_PI_2 - deg2rad(dec) - theta_pix;
  if (*dtheta < -M_PI_2) *dtheta += M_PI;
  if (*dtheta > M_PI_2) *dtheta -= M_PI;
  *dphi = deg2rad(ra) - phi_pix;
  if (*dphi < -M_PI) *dphi += M_TWOPI;
  if (*dphi > M_PI) *dphi -= M_TWOPI;
}
/* copied get_interpol from healpix-cxx, because there is no C equivalent. */
/* Return geometry of ring iring (1 .. 4*nside-1): starting pixel index,
 * number of pixels in the ring, colatitude of the ring center, and whether
 * the ring's pixel centers are phi-shifted by half a pixel.  Results are
 * computed lazily and cached in pixinfo->rings; any output pointer may be
 * NULL to skip that value. */
void get_ring_info2(qp_pixinfo_t *pixinfo, long iring, long *startpix,
                    long *ringpix, double *theta, int *shifted) {
  qp_ring_t *ring = pixinfo->rings + iring;
  if (!ring->init) {
    ring->idx = iring;
    /* fold southern rings onto their northern mirror */
    long northring = \
      (iring > (2 * pixinfo->nside)) ? (4 * pixinfo->nside - iring) : iring;
    if (northring < pixinfo->nside) {
      /* polar cap ring: 4*northring pixels, always shifted */
      double tmp = northring * northring * pixinfo->fact2;
      double costheta = 1 - tmp;
      double sintheta = sqrt(tmp * (2 - tmp));
      ring->theta = atan2(sintheta, costheta);
      ring->ringpix = 4 * northring;
      ring->shifted = 1;
      ring->startpix = 2 * northring * (northring - 1);
    } else {
      /* equatorial ring: 4*nside pixels, shift alternates by ring parity */
      ring->theta = acos((2 * pixinfo->nside - northring) * pixinfo->fact1);
      ring->ringpix = 4 * pixinfo->nside;
      ring->shifted = (((northring - pixinfo->nside) & 1) == 0);
      ring->startpix = pixinfo->ncap + (northring - pixinfo->nside) * ring->ringpix;
    }
    if (northring != iring) {
      /* mirror back to the southern hemisphere */
      ring->theta = M_PI - ring->theta;
      ring->startpix = pixinfo->npix - ring->startpix - ring->ringpix;
    }
    ring->init = 1;
  }
  if (theta != NULL)
    *theta = ring->theta;
  if (ringpix != NULL)
    *ringpix = ring->ringpix;
  if (shifted != NULL)
    *shifted = ring->shifted;
  if (startpix != NULL)
    *startpix = ring->startpix;
}
/* Allocate and initialize pixelization geometry for the given nside.
 * If populate is nonzero, all ring entries are precomputed up front
 * (they are otherwise filled lazily by get_ring_info2).
 * Returns NULL on allocation failure; previously allocation results
 * were used unchecked. */
qp_pixinfo_t * qp_init_pixinfo(size_t nside, int populate) {
  qp_pixinfo_t *pixinfo = malloc(sizeof(*pixinfo));
  if (pixinfo == NULL)
    return NULL;
  pixinfo->nside = nside;
  pixinfo->npface = pixinfo->nside * pixinfo->nside;
  pixinfo->npix = 12 * pixinfo->npface;
  /* number of pixels in the north polar cap */
  pixinfo->ncap = (pixinfo->npface - pixinfo->nside) << 1;
  pixinfo->fact2 = 4. / pixinfo->npix;
  pixinfo->fact1 = (pixinfo->nside << 1) * pixinfo->fact2;
  pixinfo->rings = calloc(4 * pixinfo->nside, sizeof(qp_ring_t));
  if (pixinfo->rings == NULL) {
    free(pixinfo);
    return NULL;
  }
  pixinfo->rings_init = QP_ARR_MALLOC_1D;
  pixinfo->init = QP_STRUCT_INIT | QP_STRUCT_MALLOC;
  if (populate) {
    /* size_t index: 4*nside is size_t, so an int counter would mix
       signed/unsigned in the comparison and could overflow */
    for (size_t iring = 0; iring < 4 * pixinfo->nside; iring++)
      get_ring_info2(pixinfo, (long) iring, NULL, NULL, NULL, NULL);
  }
  return pixinfo;
}
/* Release resources held by a qp_pixinfo_t.  Frees the struct itself only
 * if it was heap-allocated by qp_init_pixinfo; otherwise clears it in
 * place.  Safe to call with NULL (previously this dereferenced NULL). */
void qp_free_pixinfo(qp_pixinfo_t *pixinfo) {
  if (pixinfo == NULL)
    return;
  if (pixinfo->rings_init & QP_ARR_MALLOC_1D)
    free(pixinfo->rings);
  if (pixinfo->init & QP_STRUCT_MALLOC)
    free(pixinfo);
  else
    memset(pixinfo, 0, sizeof(*pixinfo));
}
/* Find the four RING-ordered pixels surrounding (theta, phi) and their
 * bilinear interpolation weights; port of healpix-cxx get_interpol.
 * pix[0..1] lie on the ring above the point, pix[2..3] on the ring below,
 * with special handling at the poles.  Returns -1 (outputs untouched) if
 * theta is outside [0, pi], else 0. */
int get_interpol_ring(qp_pixinfo_t *pixinfo, double theta, double phi,
                      long pix[4], double weight[4]) {
  if (theta < 0 || theta > M_PI)
    return -1;
  int nside = pixinfo->nside;
  long npix = pixinfo->npix;
  double z = cos(theta);
  double az = fabs(z);
  /* ring index just above the point (truncated toward zero) */
  long ir1;
  if (az < 2. / 3.)
    ir1 = nside * (2 - 1.5*z);
  else {
    ir1 = nside * sqrt(3 * (1 - az));
    if (z <= 0)
      ir1 = 4 * nside - ir1 - 1;
  }
  long ir2 = ir1 + 1;
  double theta1=0, theta2=0, w1, tmp, dphi;
  long sp, nr;
  int shift;
  long i1, i2;
  if (ir1 > 0) {
    /* two pixels bracketing phi on the upper ring */
    get_ring_info2(pixinfo, ir1, &sp, &nr, &theta1, &shift);
    dphi = M_TWOPI / nr;
    tmp = phi / dphi - 0.5 * shift;
    i1 = (tmp < 0) ? tmp - 1 : tmp;
    w1 = (phi - (i1 + 0.5 * shift) * dphi) / dphi;
    if (i1 < 0) i1 += nr;
    i2 = i1 + 1;
    if (i2 >= nr) i2 -= nr;
    pix[0] = sp + i1;
    pix[1] = sp + i2;
    weight[0] = 1 - w1;
    weight[1] = w1;
  }
  if (ir2 < (4 * nside)) {
    /* two pixels bracketing phi on the lower ring */
    get_ring_info2(pixinfo, ir2, &sp, &nr, &theta2, &shift);
    dphi = M_TWOPI / nr;
    tmp = phi / dphi - 0.5 * shift;
    i1 = (tmp < 0) ? tmp - 1 : tmp;
    w1 = (phi - (i1 + 0.5 * shift) * dphi) / dphi;
    if (i1 < 0) i1 += nr;
    i2 = i1 + 1;
    if (i2 >= nr) i2 -= nr;
    pix[2] = sp + i1;
    pix[3] = sp + i2;
    weight[2] = 1 - w1;
    weight[3] = w1;
  }
  if (ir1 == 0) {
    /* north pole: upper "ring" is the four polar-cap pixels */
    double wtheta = theta / theta2;
    weight[2] *= wtheta;
    weight[3] *= wtheta;
    double fac = (1 - wtheta) * 0.25;
    weight[0] = fac;
    weight[1] = fac;
    weight[2] += fac;
    weight[3] += fac;
    pix[0] = (pix[2] + 2) & 3;
    pix[1] = (pix[3] + 2) & 3;
  } else if (ir2 == 4 * nside) {
    /* south pole: lower "ring" is the last four pixels */
    double wtheta = (theta - theta1)/(M_PI - theta1);
    weight[0] *= (1 - wtheta);
    weight[1] *= (1 - wtheta);
    double fac = wtheta * 0.25;
    weight[0] += fac;
    weight[1] += fac;
    weight[2] = fac;
    weight[3] = fac;
    pix[2] = ((pix[0] + 2) & 3) + npix - 4;
    pix[3] = ((pix[1] + 2) & 3) + npix - 4;
  } else {
    /* interior: blend the two rings by colatitude */
    double wtheta = (theta - theta1) / (theta2 - theta1);
    weight[0] *= (1 - wtheta);
    weight[1] *= (1 - wtheta);
    weight[2] *= wtheta;
    weight[3] *= wtheta;
  }
  return 0;
}
/* As get_interpol_ring, but return the four pixel indices in NEST order.
 * Returns -1 if theta is out of range, else 0. */
int get_interpol_nest(qp_pixinfo_t *pixinfo, double theta, double phi,
                      long pix[4], double weight[4]) {
  if (get_interpol_ring(pixinfo, theta, phi, pix, weight) != 0)
    return -1;
  for (int idx = 0; idx < 4; idx++)
    ring2nest(pixinfo->nside, pix[idx], &pix[idx]);
  return 0;
}
/* Bilinearly interpolate a RING-ordered map at (theta, phi).
 * Returns 0 if theta is outside [0, pi]; previously the error return of
 * get_interpol_ring was ignored and the pixel/weight arrays were read
 * uninitialized (undefined behavior). */
double get_interp_val_ring(qp_pixinfo_t *pixinfo, double *map,
                           double theta, double phi) {
  long pix[4];
  double weight[4];
  if (get_interpol_ring(pixinfo, theta, phi, pix, weight) != 0)
    return 0;
  double val = 0;
  for (int ii = 0; ii < 4; ii++)
    val += map[pix[ii]] * weight[ii];
  return val;
}
/* Bilinearly interpolate a NEST-ordered map at (theta, phi).
 * Returns 0 if theta is outside [0, pi]; previously the error return of
 * get_interpol_nest was ignored and the pixel/weight arrays were read
 * uninitialized (undefined behavior). */
double get_interp_val_nest(qp_pixinfo_t *pixinfo, double *map,
                           double theta, double phi) {
  long pix[4];
  double weight[4];
  if (get_interpol_nest(pixinfo, theta, phi, pix, weight) != 0)
    return 0;
  double val = 0;
  for (int ii = 0; ii < 4; ii++)
    val += map[pix[ii]] * weight[ii];
  return val;
}
/* Interpolation pixels and weights at (ra, dec) in degrees, dispatching
 * on the pixel ordering configured in mem. */
void qp_get_interpol(qp_memory_t *mem, qp_pixinfo_t *pixinfo, double ra,
                     double dec, long pix[4], double weight[4]) {
  double theta = M_PI_2 - deg2rad(dec);
  double phi = deg2rad(ra);
  if (mem->pix_order == QP_ORDER_RING)
    get_interpol_ring(pixinfo, theta, phi, pix, weight);
  else
    get_interpol_nest(pixinfo, theta, phi, pix, weight);
}
/* Interpolated map value at (ra, dec) in degrees, dispatching on the
 * pixel ordering configured in mem. */
double qp_get_interp_val(qp_memory_t *mem, qp_pixinfo_t *pixinfo, double *map,
                         double ra, double dec) {
  double theta = M_PI_2 - deg2rad(dec);
  double phi = deg2rad(ra);
  if (mem->pix_order == QP_ORDER_RING)
    return get_interp_val_ring(pixinfo, map, theta, phi);
  else
    return get_interp_val_nest(pixinfo, map, theta, phi);
}
/* Interpolate map at n (ra, dec) positions; ring geometry is allocated
 * once (lazily populated) and freed before returning. */
void qp_get_interp_valn(qp_memory_t *mem, int nside, double *map, double *ra,
                        double *dec, double *val, int n) {
  qp_pixinfo_t *pixinfo = qp_init_pixinfo(nside, 0);
  for (int idx = 0; idx < n; idx++)
    val[idx] = qp_get_interp_val(mem, pixinfo, map, ra[idx], dec[idx]);
  qp_free_pixinfo(pixinfo);
}
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/enhance.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/gem.h"
#include "magick/gem-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/utility.h"
/*
Typedef declarations.
*/
typedef struct _TransformPacket
{
MagickRealType
x,
y,
z;
} TransformPacket;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o l o r s p a c e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageColorspaceType() returns the potential colorspace of image:
% sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
% To ensure the image type matches its potential, use SetImageColorspaceType():
%
% (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
% exception);
%
% The format of the GetImageColorspaceType method is:
%
% ColorspaceType GetImageColorspaceType(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  ColorspaceType
    colorspace;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Start from the declared colorspace, then report GRAY when the actual
     pixel data is bilevel or grayscale (with or without matte). */
  colorspace=image->colorspace;
  type=IdentifyImageType(image,exception);
  if ((type == BilevelType) || (type == GrayscaleType) ||
      (type == GrayscaleMatteType))
    colorspace=GRAYColorspace;
  return(colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalized the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the RGBTransformImage method is:
%
% MagickBooleanType RGBTransformImage(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
*/
/*
  Convert an RGB pixel to normalized CMY: each output channel is the
  [0,1]-scaled complement of the corresponding RGB channel.
*/
static inline void ConvertRGBToCMY(const Quantum red,const Quantum green,
  const Quantum blue,double *cyan,double *magenta,double *yellow)
{
  *yellow=QuantumScale*(QuantumRange-blue);
  *magenta=QuantumScale*(QuantumRange-green);
  *cyan=QuantumScale*(QuantumRange-red);
}
/*
  Convert an RGB pixel to CIE Lab by way of the intermediate XYZ space.
*/
static void ConvertRGBToLab(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *a,double *b)
{
  double
    x,
    y,
    z;

  ConvertRGBToXYZ(red,green,blue,&x,&y,&z);
  ConvertXYZToLab(x,y,z,L,a,b);
}
/*
  Convert CIE XYZ to LMS cone response.  The matrix coefficients match the
  CAT02 chromatic adaptation transform.
*/
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  *L=0.7328*x+0.4296*y-0.1624*z;
  *M=(-0.7036*x+1.6975*y+0.0061*z);
  *S=0.0030*x+0.0136*y+0.9834*z;
}
/*
  Convert an RGB pixel to LMS cone response by way of XYZ.
*/
static void ConvertRGBToLMS(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *M,double *S)
{
  double
    x,
    y,
    z;

  ConvertRGBToXYZ(red,green,blue,&x,&y,&z);
  ConvertXYZToLMS(x,y,z,L,M,S);
}
/*
  Convert an RGB pixel to CIE Luv by way of the intermediate XYZ space.
*/
static void ConvertRGBToLuv(const Quantum red,const Quantum green,
  const Quantum blue,double *L,double *u,double *v)
{
  double
    x,
    y,
    z;

  ConvertRGBToXYZ(red,green,blue,&x,&y,&z);
  ConvertXYZToLuv(x,y,z,L,u,v);
}
/*
  Convert an RGB pixel to CIE xyY chromaticity: x and y are X and Y
  normalized by X+Y+Z (guarded against a zero sum), and the capital Y
  luminance is passed through.
*/
static void ConvertRGBToxyY(const Quantum red,const Quantum green,
  const Quantum blue,double *low_x,double *low_y,double *cap_Y)
{
  double
    gamma,
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  gamma=PerceptibleReciprocal(X+Y+Z);
  *cap_Y=Y;
  *low_y=gamma*Y;
  *low_x=gamma*X;
}
/*
  Convert an RGB pixel to analog YPbPr; the chroma components are offset
  by 0.5 so all outputs land in [0,1].
*/
static void ConvertRGBToYPbPr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Pb,double *Pr)
{
  const double
    b=(double) blue,
    g=(double) green,
    r=(double) red;

  *Y=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
  *Pb=QuantumScale*((-0.1687367)*r-0.331264*g+0.5*b)+0.5;
  *Pr=QuantumScale*(0.5*r-0.418688*g-0.081312*b)+0.5;
}
/*
  Convert an RGB pixel to YCbCr.  This uses the same analog YPbPr
  transform; no digital quantization offsets are applied here.
*/
static void ConvertRGBToYCbCr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Cb,double *Cr)
{
  ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}
/*
  Convert an RGB pixel to YUV; U and V are offset by 0.5 into [0,1].
*/
static void ConvertRGBToYUV(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *U,double *V)
{
  const double
    b=(double) blue,
    g=(double) green,
    r=(double) red;

  *Y=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
  *U=QuantumScale*((-0.147)*r-0.289*g+0.436*b)+0.5;
  *V=QuantumScale*(0.615*r-0.515*g-0.100*b)+0.5;
}
/*
  Convert an RGB pixel to YDbDr; Db and Dr are offset by 0.5 into [0,1].
*/
static void ConvertRGBToYDbDr(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *Db,double *Dr)
{
  const double
    b=(double) blue,
    g=(double) green,
    r=(double) red;

  *Y=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
  *Db=QuantumScale*(-0.450*r-0.883*g+1.333*b)+0.5;
  *Dr=QuantumScale*(-1.333*r+1.116*g+0.217*b)+0.5;
}
/*
  Convert an RGB pixel to YIQ; I and Q are offset by 0.5 into [0,1].
*/
static void ConvertRGBToYIQ(const Quantum red,const Quantum green,
  const Quantum blue,double *Y,double *I,double *Q)
{
  const double
    b=(double) blue,
    g=(double) green,
    r=(double) red;

  *Y=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
  *I=QuantumScale*(0.595716*r-0.274453*g-0.321263*b)+0.5;
  *Q=QuantumScale*(0.211456*r-0.522591*g+0.311135*b)+0.5;
}
MagickExport MagickBooleanType RGBTransformImage(Image *image,
const ColorspaceType colorspace)
{
#define RGBTransformImageTag "RGBTransform/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
primary_info;
register ssize_t
i;
ssize_t
y;
TransformPacket
*x_map,
*y_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(colorspace != sRGBColorspace);
assert(colorspace != TransparentColorspace);
assert(colorspace != UndefinedColorspace);
status=MagickTrue;
progress=0;
exception=(&image->exception);
switch (colorspace)
{
case CMYKColorspace:
{
MagickPixelPacket
zero;
/*
Convert RGB to CMYK colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
pixel.red=(MagickRealType) pixel.red;
pixel.green=(MagickRealType) pixel.green;
pixel.blue=(MagickRealType) pixel.blue;
ConvertRGBToCMYK(&pixel);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->type=image->matte == MagickFalse ? ColorSeparationType :
ColorSeparationMatteType;
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
case GRAYColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelGray(q,ClampToQuantum(GetPixelIntensity(image,q)));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case CMYColorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
/*
Transform image from sRGB to HSI.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
X,
Y,
Z;
Quantum
blue,
green,
red;
red=ClampToQuantum((MagickRealType) GetPixelRed(q));
green=ClampToQuantum((MagickRealType) GetPixelGreen(q));
blue=ClampToQuantum((MagickRealType) GetPixelBlue(q));
switch (colorspace)
{
case CMYColorspace:
{
ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
break;
}
case HCLColorspace:
{
ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
break;
}
case HCLpColorspace:
{
ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
break;
}
case HSBColorspace:
{
ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
break;
}
case HSIColorspace:
{
ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
break;
}
case HSLColorspace:
{
ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
break;
}
case HSVColorspace:
{
ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
break;
}
case HWBColorspace:
{
ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
break;
}
case LabColorspace:
{
ConvertRGBToLab(red,green,blue,&X,&Y,&Z);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z);
break;
}
case LCHuvColorspace:
{
ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z);
break;
}
case LMSColorspace:
{
ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
break;
}
case LuvColorspace:
{
ConvertRGBToLuv(red,green,blue,&X,&Y,&Z);
break;
}
case xyYColorspace:
{
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
break;
}
case XYZColorspace:
{
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
break;
}
case YCbCrColorspace:
{
ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
break;
}
case YDbDrColorspace:
{
ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
break;
}
case YIQColorspace:
{
ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
break;
}
case YPbPrColorspace:
{
ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
break;
}
case YUVColorspace:
{
ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
break;
}
default:
{
X=QuantumScale*red;
Y=QuantumScale*green;
Z=QuantumScale*blue;
break;
}
}
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X));
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y));
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
#define DisplayGamma (1.0/1.7)
#define FilmGamma 0.6
#define ReferenceBlack 95.0
#define ReferenceWhite 685.0
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform RGB to Log colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma");
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma");
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black");
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white");
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
film_gamma);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+
log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/
film_gamma))/1024.0));
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
SetPixelRed(q,logmap[ScaleQuantumToMap(red)]);
SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]);
SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform image from sRGB to linear RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelRed(q)));
green=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(DecodePixelGamma((MagickRealType)
GetPixelBlue(q)));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(&primary_info,0,sizeof(primary_info));
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
I and Q, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.33333*(double) i);
y_map[i].x=(MagickRealType) (0.33334*(double) i);
z_map[i].x=(MagickRealType) (0.33333*(double) i);
x_map[i].y=(MagickRealType) (0.50000*(double) i);
y_map[i].y=(MagickRealType) (0.00000*(double) i);
z_map[i].y=(MagickRealType) (-0.50000*(double) i);
x_map[i].z=(MagickRealType) (-0.25000*(double) i);
y_map[i].z=(MagickRealType) (0.50000*(double) i);
z_map[i].z=(MagickRealType) (-0.25000*(double) i);
}
break;
}
case Rec601LumaColorspace:
{
/*
Initialize Rec601 luma tables:
G = 0.298839*R+0.586811*G+0.114350*B
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839*(double) i);
y_map[i].x=(MagickRealType) (0.586811*(double) i);
z_map[i].x=(MagickRealType) (0.114350*(double) i);
x_map[i].y=(MagickRealType) (0.298839*(double) i);
y_map[i].y=(MagickRealType) (0.586811*(double) i);
z_map[i].y=(MagickRealType) (0.114350*(double) i);
x_map[i].z=(MagickRealType) (0.298839*(double) i);
y_map[i].z=(MagickRealType) (0.586811*(double) i);
z_map[i].z=(MagickRealType) (0.114350*(double) i);
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.601):
Y = 0.2988390*R+0.5868110*G+0.1143500*B
Cb= -0.1687367*R-0.3312640*G+0.5000000*B
Cr= 0.5000000*R-0.4186880*G-0.0813120*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839*(double) i);
y_map[i].x=(MagickRealType) (0.586811*(double) i);
z_map[i].x=(MagickRealType) (0.114350*(double) i);
x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
y_map[i].y=(MagickRealType) (-0.331264*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].z=(MagickRealType) (-0.418688*(double) i);
z_map[i].z=(MagickRealType) (-0.081312*(double) i);
}
break;
}
case Rec709LumaColorspace:
{
/*
Initialize Rec709 luma tables:
G = 0.212656*R+0.715158*G+0.072186*B
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.212656*(double) i);
y_map[i].x=(MagickRealType) (0.715158*(double) i);
z_map[i].x=(MagickRealType) (0.072186*(double) i);
x_map[i].y=(MagickRealType) (0.212656*(double) i);
y_map[i].y=(MagickRealType) (0.715158*(double) i);
z_map[i].y=(MagickRealType) (0.072186*(double) i);
x_map[i].z=(MagickRealType) (0.212656*(double) i);
y_map[i].z=(MagickRealType) (0.715158*(double) i);
z_map[i].z=(MagickRealType) (0.072186*(double) i);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.709):
Y = 0.212656*R+0.715158*G+0.072186*B
Cb= -0.114572*R-0.385428*G+0.500000*B
Cr= 0.500000*R-0.454153*G-0.045847*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.212656*(double) i);
y_map[i].x=(MagickRealType) (0.715158*(double) i);
z_map[i].x=(MagickRealType) (0.072186*(double) i);
x_map[i].y=(MagickRealType) (-0.114572*(double) i);
y_map[i].y=(MagickRealType) (-0.385428*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].z=(MagickRealType) (-0.454153*(double) i);
z_map[i].z=(MagickRealType) (-0.045847*(double) i);
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
Y = 0.298839*R+0.586811*G+0.114350*B
C1= -0.298839*R-0.586811*G+0.88600*B
C2= 0.70100*R-0.586811*G-0.114350*B
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
{
x_map[i].x=0.005382*i;
y_map[i].x=0.010566*i;
z_map[i].x=0.002052*i;
x_map[i].y=(-0.003296)*i;
y_map[i].y=(-0.006471)*i;
z_map[i].y=0.009768*i;
x_map[i].z=0.009410*i;
y_map[i].z=(-0.007880)*i;
z_map[i].z=(-0.001530)*i;
}
for ( ; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.298839*(1.099*i-0.099);
y_map[i].x=0.586811*(1.099*i-0.099);
z_map[i].x=0.114350*(1.099*i-0.099);
x_map[i].y=(-0.298839)*(1.099*i-0.099);
y_map[i].y=(-0.586811)*(1.099*i-0.099);
z_map[i].y=0.88600*(1.099*i-0.099);
x_map[i].z=0.70100*(1.099*i-0.099);
y_map[i].z=(-0.586811)*(1.099*i-0.099);
z_map[i].z=(-0.114350)*(1.099*i-0.099);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert from sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
register size_t
blue,
green,
red;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelRed(q)));
green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelGreen(q)));
blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelBlue(q)));
pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
(MagickRealType) primary_info.x;
pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
(MagickRealType) primary_info.y;
pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
(MagickRealType) primary_info.z;
SetPixelRed(q,ScaleMapToQuantum(pixel.red));
SetPixelGreen(q,ScaleMapToQuantum(pixel.green));
SetPixelBlue(q,ScaleMapToQuantum(pixel.blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RGBTransformImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
register size_t
blue,
green,
red;
/*
Convert PseudoClass image.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
MagickPixelPacket
pixel;
red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
image->colormap[i].red=ScaleMapToQuantum(pixel.red);
image->colormap[i].green=ScaleMapToQuantum(pixel.green);
image->colormap[i].blue=ScaleMapToQuantum(pixel.blue);
}
(void) SyncImage(image);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  ImageType
    type;

  MagickBooleanType
    status;

  /*
    Record the new colorspace on the image and reset its colorimetry (gamma,
    rendering intent, chromaticity) to values appropriate for that space.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(MagickTrue);
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;  /* default: sRGB-like transfer */
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Grayscale target: only the linear variant has unity gamma.
      */
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.0;
      type=GrayscaleType;
    }
  else
    {
      MagickBooleanType
        linear;

      linear=((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace)) ?
        MagickTrue : MagickFalse;
      if (linear != MagickFalse)
        image->gamma=1.0;
      else
        {
          /*
            Otherwise assume an sRGB-style space: perceptual intent and the
            standard primaries / white point (0.3127, 0.3290).
          */
          image->rendering_intent=PerceptualIntent;
          image->chromaticity.red_primary.x=0.6400;
          image->chromaticity.red_primary.y=0.3300;
          image->chromaticity.red_primary.z=0.0300;
          image->chromaticity.green_primary.x=0.3000;
          image->chromaticity.green_primary.y=0.6000;
          image->chromaticity.green_primary.z=0.1000;
          image->chromaticity.blue_primary.x=0.1500;
          image->chromaticity.blue_primary.y=0.0600;
          image->chromaticity.blue_primary.z=0.7900;
          image->chromaticity.white_point.x=0.3127;
          image->chromaticity.white_point.y=0.3290;
          image->chromaticity.white_point.z=0.3583;
        }
    }
  status=SyncImagePixelCache(image,&image->exception);
  image->type=type;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageGray() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities and changes the type of the image to
% bi-level or grayscale.
%
% The format of the SetImageGray method is:
%
% MagickBooleanType SetImageGray(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  ImageType
    type;

  register const PixelPacket
    *q;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Inspect every pixel: if all are gray, reclassify the image as bi-level
    (only pure black/white seen) or grayscale, move it to the GRAY
    colorspace, and return MagickTrue.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  value=GetImageProperty(image,"colorspace:auto-grayscale");
  if (IsStringNotFalse(value) == MagickFalse)
    return(MagickFalse);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (type != UndefinedType); y++)
  {
    q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++, q++)
    {
      if (IsGrayPixel(q) == MagickFalse)
        {
          type=UndefinedType;  /* found a colored pixel: not a gray image */
          break;
        }
      if ((type == BilevelType) && (IsMonochromePixel(q) == MagickFalse))
        type=GrayscaleType;  /* gray, but not pure black/white */
    }
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=type;
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    image->type=GrayscaleMatteType;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMonochrome() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange and changes the type of the image to bi-level.
%
% The format of the SetImageMonochrome method is:
%
% MagickBooleanType SetImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  ImageType
    type;

  register const PixelPacket
    *q;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Inspect every pixel: if all are pure black or white, reclassify the
    image as bi-level, move it to the GRAY colorspace, and return MagickTrue.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  value=GetImageProperty(image,"colorspace:auto-grayscale");
  if (IsStringNotFalse(value) == MagickFalse)
    return(MagickFalse);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (type != UndefinedType); y++)
  {
    q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++, q++)
      if (IsMonochromePixel(q) == MagickFalse)
        {
          type=UndefinedType;  /* a non-black/white pixel disqualifies */
          break;
        }
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  /*
    Convert the image pixels to the requested colorspace, pivoting through
    sRGB when neither endpoint is sRGB itself.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(MagickTrue);
  /*
    Any embedded color profile no longer describes the transformed pixels.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  switch (colorspace)
  {
    case LinearGRAYColorspace:
      return(GrayscaleImage(image,Rec709LuminancePixelIntensityMethod));
    case GRAYColorspace:
      return(GrayscaleImage(image,Rec709LumaPixelIntensityMethod));
    case UndefinedColorspace:
      return(SetImageColorspace(image,colorspace));
    default:
      break;
  }
  /*
    Target is sRGB: a single conversion back from the current space suffices.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformRGBImage(image,image->colorspace));
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    {
      /*
        First bring the pixels back to sRGB, then convert onward.
      */
      if (TransformRGBImage(image,image->colorspace) == MagickFalse)
        return(MagickFalse);
    }
  return(RGBTransformImage(image,colorspace));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformRGBImage() converts the reference image from an alternate
% colorspace to sRGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the TransformRGBImage method is:
%
% MagickBooleanType TransformRGBImage(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,Quantum *red,Quantum *green,Quantum *blue)
{
  /*
    CMY is the subtractive complement of RGB: each output channel is one
    minus the corresponding ink coverage, scaled to the quantum range.
  */
  double
    b,
    g,
    r;

  r=1.0-cyan;
  g=1.0-magenta;
  b=1.0-yellow;
  *red=ClampToQuantum(QuantumRange*r);
  *green=ClampToQuantum(QuantumRange*g);
  *blue=ClampToQuantum(QuantumRange*b);
}
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  /*
    Map cone-response (LMS) values back to XYZ tristimulus values; this is
    the inverse of the XYZ->LMS matrix used by the forward transform (each
    row sums to 1.0, so L=M=S=1 maps to X=Y=Z=1).
  */
  double
    x,
    y,
    z;

  x=1.096123820835514*L;
  x-=0.278869000218287*M;
  x+=0.182745179382773*S;
  y=0.454369041975359*L;
  y+=0.473533154307412*M;
  y+=0.072097803717229*S;
  z=(-0.009627608738429)*L;
  z-=0.005698031216113*M;
  z+=1.015325639954543*S;
  *X=x;
  *Y=y;
  *Z=z;
}
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,Quantum *red,Quantum *green,Quantum *blue)
{
  /*
    Cone-response (LMS) to RGB, going through XYZ tristimulus space.
  */
  double
    x,
    y,
    z;

  ConvertLMSToXYZ(L,M,S,&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,Quantum *red,Quantum *green,Quantum *blue)
{
  /*
    Undo the [0,1] packing of L*u*v* (L scaled by 100, u/v shifted into
    their native ranges), then convert through XYZ to RGB.
  */
  double
    x,
    y,
    z;

  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline ssize_t RoundToYCC(const MagickRealType value)
{
  /*
    Round to the nearest integer, clamped to the YCC lookup-table index
    range [0,1388]; out-of-range inputs saturate before any cast so the
    float-to-integer conversion is always in range.
  */
  return(value <= 0.0 ? 0 : value >= 1388.0 ? 1388 :
    (ssize_t) (value+0.5));
}
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,Quantum *red,Quantum *green,Quantum *blue)
{
  /*
    Unpack normalized CIE L*a*b* (L in [0,1]; a and b biased by 0.5 and
    scaled by 255), then convert through XYZ to RGB.
  */
  double
    x,
    y,
    z;

  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&x,&y,&z);
  ConvertXYZToRGB(x,y,z,red,green,blue);
}
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,Quantum *red,Quantum *green,Quantum *blue)
{
  /*
    Recover XYZ from chromaticity (x,y) and luminance Y, then map to RGB;
    PerceptibleReciprocal() guards against division by a vanishing y.
  */
  double
    ratio,
    X,
    Z;

  ratio=PerceptibleReciprocal(low_y)*cap_Y;
  X=ratio*low_x;
  Z=ratio*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,cap_Y,Z,red,green,blue);
}
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  /*
    Y'PbPr to RGB: Pb and Pr arrive biased into [0,1], so re-center them
    about zero before applying the inverse matrix.
  */
  double
    pb,
    pr;

  pb=Pb-0.5;
  pr=Pr-0.5;
  *red=ClampToQuantum(QuantumRange*(0.99999999999914679361*Y-
    1.2188941887145875e-06*pb+1.4019995886561440468*pr));
  *green=ClampToQuantum(QuantumRange*(0.99999975910502514331*Y-
    0.34413567816504303521*pb-0.71413649331646789076*pr));
  *blue=ClampToQuantum(QuantumRange*(1.00000124040004623180*Y+
    1.77200006607230409200*pb+2.1453384174593273e-06*pr));
}
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,Quantum *red,Quantum *green,Quantum *blue)
{
  /*
    Y'CbCr uses the same inverse matrix as Y'PbPr here: Cb/Cr are already
    normalized into [0,1] like Pb/Pr, so simply delegate.
  */
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  /*
    YDbDr to RGB: Db and Dr arrive biased into [0,1], so re-center them
    about zero before applying the inverse matrix.
  */
  double
    db,
    dr;

  db=Db-0.5;
  dr=Dr-0.5;
  *red=ClampToQuantum(QuantumRange*(Y+9.2303716147657e-05*db-
    0.52591263066186533*dr));
  *green=ClampToQuantum(QuantumRange*(Y-0.12913289889050927*db+
    0.26789932820759876*dr));
  *blue=ClampToQuantum(QuantumRange*(Y+0.66467905997895482*db-
    7.9202543533108e-05*dr));
}
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  Quantum *red,Quantum *green,Quantum *blue)
{
  /*
    YIQ to RGB: I and Q arrive biased into [0,1], so re-center them about
    zero before applying the inverse matrix.
  */
  double
    ci,
    cq;

  ci=I-0.5;
  cq=Q-0.5;
  *red=ClampToQuantum(QuantumRange*(Y+0.9562957197589482261*ci+
    0.6210244164652610754*cq));
  *green=ClampToQuantum(QuantumRange*(Y-0.2721220993185104464*ci-
    0.6473805968256950427*cq));
  *blue=ClampToQuantum(QuantumRange*(Y-1.1069890167364901945*ci+
    1.7046149983646481374*cq));
}
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  Quantum *red,Quantum *green,Quantum *blue)
{
  /*
    YUV to RGB: U and V arrive biased into [0,1], so re-center them about
    zero before applying the inverse matrix.
  */
  double
    cu,
    cv;

  cu=U-0.5;
  cv=V-0.5;
  *red=ClampToQuantum(QuantumRange*(Y-3.945707070708279e-05*cu+
    1.1398279671717170825*cv));
  *green=ClampToQuantum(QuantumRange*(Y-0.3946101641414141437*cu-
    0.5805003156565656797*cv));
  *blue=ClampToQuantum(QuantumRange*(Y+2.0319996843434342537*cu-
    4.813762626262513e-04*cv));
}
MagickExport MagickBooleanType TransformRGBImage(Image *image,
const ColorspaceType colorspace)
{
#define TransformRGBImageTag "Transform/Image"
static const float
YCCMap[1389] =
{
0.000000, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000
};
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
progress=0;
exception=(&image->exception);
switch (colorspace)
{
case CMYKColorspace:
{
MagickPixelPacket
zero;
/*
Transform image from CMYK to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
ConvertCMYKToRGB(&pixel);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
case Rec601LumaColorspace:
case Rec709LumaColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=(MagickRealType) GetPixelGray(q);
if ((image->intensity == Rec601LuminancePixelIntensityMethod) ||
(image->intensity == Rec709LuminancePixelIntensityMethod))
gray=EncodePixelGamma(gray);
SetPixelRed(q,ClampToQuantum(gray));
SetPixelGreen(q,ClampToQuantum(gray));
SetPixelBlue(q,ClampToQuantum(gray));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case CMYColorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
/*
Transform image from source colorspace to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
X,
Y,
Z;
Quantum
blue,
green,
red;
X=QuantumScale*GetPixelRed(q);
Y=QuantumScale*GetPixelGreen(q);
Z=QuantumScale*GetPixelBlue(q);
switch (colorspace)
{
case CMYColorspace:
{
ConvertCMYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLColorspace:
{
ConvertHCLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSBColorspace:
{
ConvertHSBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSIColorspace:
{
ConvertHSIToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSLColorspace:
{
ConvertHSLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSVColorspace:
{
ConvertHSVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HWBColorspace:
{
ConvertHWBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LabColorspace:
{
ConvertLabToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LMSColorspace:
{
ConvertLMSToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LuvColorspace:
{
ConvertLuvToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case xyYColorspace:
{
ConvertxyYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case XYZColorspace:
{
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YCbCrColorspace:
{
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YDbDrColorspace:
{
ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YIQColorspace:
{
ConvertYIQToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YPbPrColorspace:
{
ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YUVColorspace:
{
ConvertYUVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
default:
{
red=ClampToQuantum(QuantumRange*X);
green=ClampToQuantum(QuantumRange*Y);
blue=ClampToQuantum(QuantumRange*Z);
break;
}
}
SetPixelRed(q,ClampToQuantum((MagickRealType) red));
SetPixelGreen(q,ClampToQuantum((MagickRealType) green));
SetPixelBlue(q,ClampToQuantum((MagickRealType) blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to sRGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma");
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma");
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black");
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white");
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/
film_gamma);
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++)
logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/
film_gamma)-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=QuantumRange;
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(EncodePixelGamma((MagickRealType)
logmap[ScaleQuantumToMap(GetPixelRed(q))]));
green=ClampToQuantum(EncodePixelGamma((MagickRealType)
logmap[ScaleQuantumToMap(GetPixelGreen(q))]));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType)
logmap[ScaleQuantumToMap(GetPixelBlue(q))]));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
Quantum
blue,
green,
red;
red=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelRed(q)));
green=ClampToQuantum(EncodePixelGamma((MagickRealType)
GetPixelGreen(q)));
blue=ClampToQuantum(EncodePixelGamma((MagickRealType)
GetPixelBlue(q)));
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I and Q, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(1.0*(double) i);
y_map[i].x=(0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].x=(-0.5*0.66668*(2.0*(double) i-MaxMap));
x_map[i].y=(1.0*(double) i);
y_map[i].y=(0.5*0.00000*(2.0*(double) i-MaxMap));
z_map[i].y=(0.5*1.33333*(2.0*(double) i-MaxMap));
x_map[i].z=(1.0*(double) i);
y_map[i].z=(-0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].z=(-0.5*0.66668*(2.0*(double) i-MaxMap));
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*(double) i;
y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap);
z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap);
x_map[i].y=0.99999975910502514331*(double) i;
y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap);
z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap);
x_map[i].z=1.00000124040004623180*(double) i;
y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap);
z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap));
z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*(double) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*(double) i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*(double) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*(double) i-MaxMap));
z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap));
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.3584000*(double) i);
y_map[i].x=(MagickRealType) (0.0000000);
z_map[i].x=(MagickRealType) (1.8215000*((double) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].y=(MagickRealType) (1.3584000*(double) i);
y_map[i].y=(MagickRealType) ((-0.4302726)*((double) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].y=(MagickRealType) ((-0.9271435)*((double) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].z=(MagickRealType) (1.3584000*(double) i);
y_map[i].z=(MagickRealType) (2.2179000*((double) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].z=(MagickRealType) (0.0000000);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert to sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(q));
green=ScaleQuantumToMap(GetPixelGreen(q));
blue=ScaleQuantumToMap(GetPixelBlue(q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
SetPixelRed(q,ClampToQuantum(pixel.red));
SetPixelGreen(q,ClampToQuantum(pixel.green));
SetPixelBlue(q,ClampToQuantum(pixel.blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TransformRGBImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
MagickPixelPacket
pixel;
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(image->colormap[i].red);
green=ScaleQuantumToMap(image->colormap[i].green);
blue=ScaleQuantumToMap(image->colormap[i].blue);
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
image->colormap[i].red=ClampToQuantum(pixel.red);
image->colormap[i].green=ClampToQuantum(pixel.green);
image->colormap[i].blue=ClampToQuantum(pixel.blue);
}
(void) SyncImage(image);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,sRGBColorspace) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
|
dctz-comp-lib.c | /**
* @file dctz-comp-lib.c
* @author Seung Woo Son
* @date July 2019
* @brief DCTZ compression library routine
* (C) 2019 University of Massachuetts Lowell.
See LICENSE in top-level directory.
*/
#include <stdlib.h>
#include <memory.h>
#include <string.h>
#ifdef TIME_DEBUG
#include <sys/time.h>
#endif /* TIME_DEBUG */
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <pthread.h>
#include "zlib.h"
#include "dctz.h"
#include "dct.h"
#define DEF_MEM_LEVEL 8
void *compress_thread (void *arg)
{
z_stream *defstream = (z_stream *)arg;
#ifdef DEBUG
printf("compress started ...\n");
#endif
deflate (defstream, Z_FINISH);
#ifdef DEBUG
printf ("done! compression...\n");
#endif
uLong ret = defstream->total_out;
deflateEnd (defstream);
pthread_exit ((void *)ret);
}
/* Compress N doubles a[0..N-1] into the caller-provided buffer a_z while
 * honoring the given absolute error_bound.
 *
 * Pipeline: per-block DCT -> quantize AC coefficients into NBINS bins
 * (out-of-range coefficients stored exactly) -> zlib-compress the three
 * arrays (bin indices, DC terms, exact AC terms) on three pthreads ->
 * pack header + streams [+ quantizer table under USE_QTABLE] into a_z.
 *
 * Outputs: *outSize = total bytes written to a_z.  Returns 1 on success;
 * allocation or parameter failures call exit(1).
 * NOTE(review): a[] is modified in place (divided by 10^(SF-1)). */
int dctz_compress (double *a, int N, size_t *outSize, char *a_z, double error_bound)
{
int i, j, nblk, rem;
#ifdef TIME_DEBUG
struct timeval start_t, end_t, gstart_t;
double sf_t, dct_t, DC_AC_t, zlib_t, comp_t, malloc_t, genbin_t;
#endif
double SF;
double min, max;
double *a_x; /* buffer to store transformed coefficients */
double *bin_maxes, *bin_center, bin_width, range_min, range_max;
unsigned short *bin_index, *bin_indexz, *bin_indexz2;
#ifdef USE_TRUNCATE
float *DC, *DCz, *DCz2, *AC_exact, *AC_exactz, *AC_exactz2;
#else
double *DC, *DCz, *DCz2, *AC_exact, *AC_exactz, *AC_exactz2;
#endif
struct header h;
struct bstat bs;
size_t typesize = 0;
#ifdef USE_QTABLE
double *qtable; // Quantizer Table
#endif
typesize = sizeof(double);
if (NULL == (a_x = (double *)malloc (N*typesize))) {
fprintf (stderr, "Out of memory: a_x\n");
exit (1);
}
/* reject bounds below the supported minimum; NOTE(review): message has no
   trailing newline and exit status is 1 (failure), unlike the thread path */
if (error_bound < 1E-6) {
printf ("ERROR BOUND is not acceptable");
exit (1);
}
if (NULL == (bin_maxes = (double *)malloc (NBINS*sizeof(double)))) {
fprintf (stderr, "Out of memory: bin_maxes\n");
exit (1);
}
if (NULL == (bin_center = (double *)malloc (NBINS*sizeof(double)))) {
fprintf (stderr, "Out of memory: bin_center\n");
exit (1);
}
#ifdef DEBUG
for (i=0; i<BLK_SZ; i++) { // show the first block
printf ("a[%d] = %e\n", i, a[i]);
if (i%BLK_SZ == 0 && i != 0) printf ("\n");
}
#endif
#ifdef USE_QTABLE
// Start of Initialize Quantizer Table
if (NULL == (qtable = (double *)malloc (BLK_SZ*sizeof(double)))) {
fprintf (stderr, "Out of memory: qtable\n");
exit (1);
}
for (i=0; i<BLK_SZ; i++) {
qtable[i] = 0.0;
}
/* 2*N entries: indices [0,N) hold per-coefficient bin ids, indices [N,k)
   hold the re-binned ids for out-of-range coefficients (see second pass) */
if (NULL == (bin_index = (unsigned short *)malloc (2*N*sizeof(unsigned short)))) {
fprintf (stderr, "Out of memory: bin_index[]\n");
exit (1);
}
memset (bin_index, 0, sizeof(unsigned short)*2*N);
#ifdef DEBUG
for (i=0; i<BLK_SZ; i++) {
printf ("qtable[%d] = %e\n", i, qtable[i]);
}
#endif
// End of Initialize Quantizer Table
#else
if (NULL == (bin_index = (unsigned short *)malloc (N*sizeof(unsigned short)))) {
fprintf (stderr, "Out of memory: bin_index[]\n");
exit (1);
}
memset (bin_index, 0, sizeof(unsigned short)*N);
#endif /* USE_QTABLE */
#ifdef TIME_DEBUG
gettimeofday (&start_t, NULL);
gstart_t = start_t;
#endif
// determine scaling factor
calc_data_stat (a, &bs, N);
SF = bs.sf; min = bs.min; max = bs.max;
#ifdef DEBUG
printf("SF = %f\n", SF);
#endif
double xscale = pow(10, SF-1);
// apply scaling factor
/* in-place: a[] is scaled down so coefficients fit the binning range */
if (SF != 1.0)
#ifdef _OPENMP
#pragma omp parallel for private(i) shared(a, SF)
#endif
for (i=0; i<N; i++)
a[i] /= xscale;
#ifdef TIME_DEBUG
gettimeofday (&end_t, NULL);
sf_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
gettimeofday (&start_t, NULL);
#endif
// DCT over decomposed blocks
nblk = CEIL(N, BLK_SZ);
rem = N % BLK_SZ;
#ifdef DEBUG
printf ("\nnumber of blocks = %d, remainder = %d\n", nblk, rem);
#endif
#ifdef USE_TRUNCATE
if (NULL == (DC = (float *)malloc (nblk*sizeof(float)))) {
fprintf (stderr, "Out of memory: DC[]\n");
exit (1);
}
#else
if (NULL == (DC = (double *)malloc (nblk*sizeof(double)))) {
fprintf (stderr, "Out of memory: DC[]\n");
exit (1);
}
#endif
#ifdef USE_TRUNCATE
if (NULL == (DCz = (float *)malloc (nblk*sizeof(float)))) {
fprintf (stderr, "Out of memory: DCz[]\n");
exit (1);
}
memset (DCz, 0, sizeof(float)*nblk); /* TODO: is it necessary? */
#else
if (NULL == (DCz = (double *)malloc (nblk*sizeof(double)))) {
fprintf (stderr, "Out of memory: DCz[]\n");
exit (1);
}
#endif
/* NOTE(review): compressed bin-index buffer is sized N*sizeof(unsigned
   short) even under USE_QTABLE where the uncompressed input can be up to
   2*N entries; compressBound(input) may exceed it in theory -- confirm */
if (NULL == (bin_indexz = (unsigned short *)malloc (N*sizeof(unsigned short)))) {
fprintf (stderr, "Out of memory: bin_indexz[]\n");
exit (1);
}
memset (bin_indexz, 0, sizeof(unsigned short)*N);
#ifdef TIME_DEBUG
gettimeofday (&end_t, NULL);
malloc_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
gettimeofday (&start_t, NULL);
#endif
/* build the quantization bins, then derive the covered value range:
   bins span [range_min, range_max] with width 2*error_bound*BRSF each */
gen_bins (min, max, bin_maxes, bin_center, NBINS, error_bound);
int half=NBINS/2;
bin_width = error_bound*2*BRSF;
range_min = -(half*2+1)*(error_bound*BRSF);
range_max = (half*2+1)*(error_bound*BRSF);
#ifdef TIME_DEBUG
gettimeofday (&end_t, NULL);
genbin_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
gettimeofday (&start_t, NULL);
#endif
#ifdef USE_TRUNCATE
if (NULL == (AC_exact = (float *)malloc (N*sizeof(float)))) {
fprintf (stderr, "Out of memory: AC_exact\n");
exit (1);
}
memset (AC_exact, 0, sizeof(float)*N); /* TODO: is it necessary? */
#else
if (NULL == (AC_exact = (double *)malloc (N*sizeof(double)))) {
fprintf (stderr, "Out of memory: AC_exact\n");
exit (1);
}
memset (AC_exact, 0, sizeof(double)*N); /* TODO: is it necessary? */
#endif
#ifdef USE_TRUNCATE
if (NULL == (AC_exactz = (float *)malloc (N*sizeof(float)))) {
fprintf (stderr, "Out of memory: AC_exactz[]\n");
exit (1);
}
memset (AC_exactz, 0, sizeof(float)*N); /* TODO: is it necessary? */
#else
if (NULL == (AC_exactz = (double *)malloc (N*sizeof(double)))) {
fprintf (stderr, "Out of memory: AC_exactz[]\n");
exit (1);
}
memset (AC_exactz, 0, sizeof(double)*N); /* TODO: is it necessary? */
#endif
dct_init (BLK_SZ);
int tot_AC_exact_count = 0;
/* DCT block decomposed */
/* first pass: transform each block, keep DC exactly, and assign each AC
   coefficient a bin id (NBINS == sentinel for "out of range") */
for (i=0; i<nblk; i++) { // for each decomposed blk
int l_blk_sz = ((i==nblk-1)&&(rem!=0))?rem:BLK_SZ;
if ((i==nblk-1) && (rem!=0)) {
/* last, short block needs a DCT plan of its own size */
dct_finish ();
dct_init (rem);
}
dct_fftw (a+i*BLK_SZ, a_x+i*BLK_SZ, l_blk_sz, nblk);
#ifdef DEBUG
printf ("block %d: after DCT:\n", i);
for (j=0; j<BLK_SZ && (i<3); j++){ // show the first block only
printf ("a_x[%d] = %e \n", i*BLK_SZ+j, a_x[i*BLK_SZ+j]);
}
printf ("\n");
#endif
#ifdef USE_TRUNCATE
DC[i] = (float)(a_x[i*BLK_SZ]); /* save DC component in truncated*/
#else
DC[i] = a_x[i*BLK_SZ]; /* save DC component */
#endif
bin_index[i*BLK_SZ] = NBINS; /* store as it is */
double item;
unsigned short bin_id;
for (j=1; j<l_blk_sz; j++) {
item = a_x[i*BLK_SZ+j];
if (item < range_min || item > range_max) {
bin_id = NBINS;
#ifdef USE_QTABLE
/* The Start of Making Quantizer Table -QT applied to block coefficients */
/* qtable[j] tracks the largest out-of-range |coefficient| at position j */
if (fabs(item) >= qtable[j])
qtable[j] = fabs(item);
#endif /* USE_QTABLE */
}
else
bin_id = (unsigned short)((item-range_min)/bin_width);
#ifdef DEBUG
printf ("bin_id = %d\n", bin_id);
#endif
bin_index[i*BLK_SZ+j] = bin_id;
}
/* The End of of Making Quantizer Table */
#ifdef DEBUG
/* NOTE(review): prints item/bin_id left over from the loop's final
   iteration (j == l_blk_sz here) -- debug output only */
printf ("a_x[%d]=%e => %d\n", i*BLK_SZ+j, item, bin_id);
#endif
}
dct_finish ();
#ifdef DEBUG
FILE *fp = fopen ("dct_result_double.txt", "w+");
fwrite (a_x, sizeof(double), N, fp);
fclose (fp);
#endif
#ifdef USE_QTABLE
#ifdef DEBUG
printf ("Quantizer Table:\n");
for (j=0; j<BLK_SZ ; j++){ // Show Quantizer Table
printf ("before qtable[%d] = %e \n", j, qtable[j]);
}
#endif
/* clamp divisors to >= 1.0 so the normalization below never amplifies */
for (j=1; j<BLK_SZ ; j++){ // Show Quantizer Table
//if (qtable[j] < bin_maxes[NBINS-1]) {
if (qtable[j] < 1.0) {
qtable[j] = 1.0;
}
}
#ifdef DEBUG
printf ("Quantizer Table:\n");
for (j=0; j<BLK_SZ ; j++){ // Show Quantizer Table
printf ("after qtable[%d] = %e \n", j, qtable[j]);
}
#endif
#endif
/* second pass over out-of-range coefficients: under USE_QTABLE, try to
   normalize them back into the bin range (extra ids appended at k >= N);
   coefficients that still do not fit are stored exactly in AC_exact[] */
unsigned int k = N;
double qt_factor = (NBINS == 255 ? 10.0 : 2000.0);
for (i=0; i<nblk; i++) {
int l_blk_sz = ((i==nblk-1)&&(rem != 0))?rem:BLK_SZ;
for (j=1; j<l_blk_sz; j++) {
unsigned short bin_id;
bin_id = bin_index[i*BLK_SZ+j];
if (bin_id == NBINS) {
#ifdef USE_QTABLE
double item = a_x[i*BLK_SZ+j];
//if out of bin area, normalize it to the area from range_max/range_min to range_max/range_min +/- error_bound
if (item < range_min) {
item = (item/qtable[j])*error_bound*qt_factor + range_min;
} else if(item > range_max) {
item = (item/qtable[j])*error_bound*qt_factor + range_max;
}
a_x[i*BLK_SZ+j] = item; // update a_x with updated value
if (item < range_min || item > range_max) {
bin_id = NBINS;
#ifdef USE_TRUNCATE
AC_exact[tot_AC_exact_count++] = (float)(a_x[i*BLK_SZ+j]);
#else
AC_exact[tot_AC_exact_count++] = a_x[i*BLK_SZ+j];
#endif
}
else
bin_id = (unsigned short)((item-range_min)/bin_width);
bin_index[k++] = bin_id;
#ifdef DEBUG
printf ("a_x[%d]=%e => %d\n", i*BLK_SZ+j, item, bin_id);
#endif
#else
#ifdef USE_TRUNCATE
AC_exact[tot_AC_exact_count++] = (float)(a_x[i*BLK_SZ+j]);
#else
AC_exact[tot_AC_exact_count++] = a_x[i*BLK_SZ+j];
#endif
#endif /* USE_QTABLE */
}
}
}
#ifdef DEBUG
printf ("total AC_exact_count = %d\n", tot_AC_exact_count);
#endif
#ifdef TIME_DEBUG
gettimeofday (&end_t, NULL);
dct_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
gettimeofday (&start_t, NULL);
#endif
free (bin_maxes);
#ifdef DEBUG
int bin_freq[NBINS+1] = {0};
//unsigned short *temp = bin_index;
i=0;
while (i < N) {
bin_freq[(int)bin_index[i++]]++;
}
printf ("i=%d\n", i);
int sum = 0;
printf("bin_freq: ");
for (i=0; i<NBINS+1; i++) {
printf ("%d, ", bin_freq[i]);
sum += bin_freq[i];
}
printf ("sum=%d\n", sum);
#endif
#ifdef TIME_DEBUG
gettimeofday (&end_t, NULL);
DC_AC_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
gettimeofday (&start_t, NULL);
#endif
/* side effect: dump the raw bin indices to bdx.bin in the cwd.
   NOTE(review): fwrite writes N bytes, but bin_index holds unsigned short
   elements (and k entries under USE_QTABLE); likely should be
   fwrite(bin_index, sizeof(unsigned short), N-or-k, fp_index) -- confirm */
char zfile1[640];
FILE *fp_index;
sprintf (zfile1, "bdx.bin"); //, oriFilePath);
fp_index = fopen (zfile1, "wb");
fwrite (bin_index, N, 1, fp_index);
fclose (fp_index);
#ifdef DEBUG
printf ("tot_AC_exact_count=%d\n", tot_AC_exact_count);
#ifdef USE_QTABLE
printf ("bin_index before compression = %lu\n", k*sizeof(unsigned short));
#else
printf ("bin_index before compression = %lu\n", N*sizeof(unsigned short));
#endif
#ifdef USE_TRUNCATE
printf ("DC before compression = %lu\n", nblk*sizeof(float));
printf ("AC_exact before compression = %lu\n", tot_AC_exact_count*sizeof(float));
#else
printf ("DC before compression = %lu\n", nblk*sizeof(double));
printf ("AC_exact before compression = %lu\n", tot_AC_exact_count*sizeof(double));
#endif
#endif
/* compress the three arrays concurrently; each compress_thread returns
   the stream's total_out through pthread_join */
pthread_t thread[3];
pthread_attr_t attr; /* thread attributes (left at defaults) */
/* set defaults (not all pthread implementations default to joinable) */
pthread_attr_init (&attr);
pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_JOINABLE);
/* setup for compress */
z_stream defstream[3];
defstream[0].zalloc = Z_NULL;
defstream[0].zfree = Z_NULL;
defstream[0].opaque = Z_NULL;
/* compress bin_index */
#ifdef USE_QTABLE
uLong ucompSize_binindex = k*sizeof(unsigned short);
#else
uLong ucompSize_binindex = N*sizeof(unsigned short);
#endif
uLong compSize_binindex = compressBound (ucompSize_binindex);
int windowBits = 14;
deflateInit2 (&defstream[0], 1, Z_DEFLATED, windowBits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
defstream[0].avail_in = ucompSize_binindex;
defstream[0].next_in = (Bytef *)bin_index;
defstream[0].avail_out = compSize_binindex;
defstream[0].next_out = (Bytef *)bin_indexz;
defstream[0].data_type = Z_UNKNOWN; /* Z_ASCII, Z_BINARY, Z_UNKNOWN */
/* NOTE(review): exit(0) on failure reports success to the shell; should
   arguably be exit(1) like the allocation failures above */
if (pthread_create (&thread[0], &attr, compress_thread, (void *)&defstream[0])) {
fprintf (stderr, "Error creating thread\n");
exit (0);
}
/* compress DC */
defstream[1].zalloc = Z_NULL;
defstream[1].zfree = Z_NULL;
defstream[1].opaque = Z_NULL;
#ifdef USE_TRUNCATE
uLong ucompSize_DC = nblk*sizeof(float);
uLong compSize_DC = compressBound (ucompSize_DC);
#else
uLong ucompSize_DC = nblk*sizeof(double);
uLong compSize_DC = compressBound (ucompSize_DC);
#endif
deflateInit2 (&defstream[1], 1, Z_DEFLATED, windowBits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
defstream[1].avail_in = ucompSize_DC;
defstream[1].next_in = (Bytef *)DC;
defstream[1].avail_out = compSize_DC;
defstream[1].next_out = (Bytef *)DCz;
defstream[1].data_type = Z_UNKNOWN;
if (pthread_create (&thread[1], &attr, compress_thread, (void *)&defstream[1])) {
fprintf (stderr, "Error creating thread\n");
exit (0);
}
/* compress AC_exact */
defstream[2].zalloc = Z_NULL;
defstream[2].zfree = Z_NULL;
defstream[2].opaque = Z_NULL;
#ifdef USE_TRUNCATE
uLong ucompSize_AC_exact = N*sizeof(float);
uLong compSize_AC_exact = compressBound (ucompSize_AC_exact);
#else
uLong ucompSize_AC_exact = N*sizeof(double);
uLong compSize_AC_exact = compressBound (ucompSize_AC_exact);
#endif
deflateInit2 (&defstream[2], 1, Z_DEFLATED, windowBits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
defstream[2].avail_in = ucompSize_AC_exact;
defstream[2].next_in = (Bytef *)AC_exact;
defstream[2].avail_out = compSize_AC_exact;
defstream[2].next_out = (Bytef *)AC_exactz;
defstream[2].data_type = Z_UNKNOWN;
if (pthread_create (&thread[2], &attr, compress_thread, (void *)&defstream[2])) {
fprintf (stderr, "Error creating thread\n");
exit (0);
}
#ifdef USE_TRUNCATE
//uLong ucompSize_AC_exact = tot_AC_exact_count*sizeof(float);
// uLong compSize_AC_exact = compressBound(ucompSize_AC_exact);
#else
//uLong ucompSize_AC_exact = tot_AC_exact_count*sizeof(double);
// uLong compSize_AC_exact = compressBound(ucompSize_AC_exact);
#endif
/* join the workers; each returns its compressed size as the thread's
   exit value (uLong round-tripped through void*) */
void *ret;
for (i=0; i<3; i++) {
pthread_join (thread[i], &ret);
#ifdef DEBUG
printf ("thread %d joined\n", i);
#endif
switch (i) {
case 0:
compSize_binindex = (uLong)ret;
break;
case 1:
compSize_DC = (uLong)ret;
break;
case 2:
compSize_AC_exact = (uLong)ret;
break;
}
}
pthread_attr_destroy (&attr);
#if 0
compSize_binindex = defstream[0].total_out; /* update with actual size */
deflateEnd (&defstream[0]);
compSize_DC = defstream[1].total_out; /* update with actual size */
deflateEnd (&defstream[1]);
compSize_AC_exact_count = defstream[2].total_out; /* update with actual size */
deflateEnd (&defstream[2]);
#endif
/* shrink the compressed buffers to their actual sizes */
bin_indexz2 = (unsigned short*)realloc (bin_indexz, compSize_binindex); /* TODO: check error */
#ifdef SIZE_DEBUG
printf ("Compressed bin_index size is: %lu\n", compSize_binindex);
#endif
DCz2 = realloc (DCz, compSize_DC); /* TODO: check error */
#ifdef SIZE_DEBUG
printf ("Compressed DC size is: %lu\n", compSize_DC);
#endif
AC_exactz2 = realloc (AC_exactz, compSize_AC_exact); /* TODO: check error */
#ifdef SIZE_DEBUG
printf ("Compressed AC_exact size is: %lu\n", compSize_AC_exact);
#endif
#ifdef TIME_DEBUG
gettimeofday (&end_t, NULL);
double comp_rate;
zlib_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(start_t.tv_sec*1000000 + start_t.tv_usec));
comp_t = (double)((end_t.tv_sec*1000000 + end_t.tv_usec)-(gstart_t.tv_sec*1000000 + gstart_t.tv_usec));
comp_rate = (N*sizeof(double)/(double)(1024*1024))/(comp_t/1000000);
printf ("sf_t=%f(s), dct_t=%f(s), zlib_t(compress)=%f(s)\n", sf_t/1000000, dct_t/1000000, zlib_t/1000000);
printf ("malloc_t=%f(s), genbin=%f(s), DC_AC_t=%f(s)\n", malloc_t/1000000, genbin_t/1000000, DC_AC_t/1000000);
printf ("comp_time = %f (s), compression rate = %f (MB/s)\n", comp_t/1000000, comp_rate);
#endif
/* assemble the output: header, then the three compressed streams in
   order (bin indices, DC, exact AC), then the raw qtable if enabled.
   NOTE(review): a_z must have been allocated large enough by the caller;
   no bounds check is performed here */
*outSize = sizeof(struct header)+compSize_binindex+compSize_DC+compSize_AC_exact;
h.num_elements = N;
h.error_bound = error_bound;
h.tot_AC_exact_count = tot_AC_exact_count;
h.scaling_factor = SF;
h.bindex_sz_compressed = compSize_binindex;
h.DC_sz_compressed = compSize_DC;
h.AC_exact_sz_compressed = compSize_AC_exact;
#ifdef USE_QTABLE
h.bindex_count = k;
#endif
//h.AC_exact_count_sz_compressed = compSize_AC_exact_count;
char *cur_p = a_z;
memcpy (cur_p, &h, sizeof(struct header));
cur_p += sizeof(struct header);
memcpy (cur_p, bin_indexz2, compSize_binindex);
cur_p += compSize_binindex;
//memcpy (cur_p, AC_exact_countz2, compSize_AC_exact_count);
//cur_p += compSize_AC_exact_count;
memcpy (cur_p, DCz2, compSize_DC);
cur_p += compSize_DC;
memcpy (cur_p, AC_exactz2, compSize_AC_exact);
#ifdef USE_QTABLE
cur_p += compSize_AC_exact;
memcpy (cur_p, qtable, BLK_SZ*sizeof(double));
#endif /* USE_QTABLE */
free (a_x);
free (DC); free (DCz2);
free (bin_center);
//free(AC_exact_count);
//free(AC_exact_countz2);
free (AC_exact); free (AC_exactz2);
free (bin_index);
free (bin_indexz2);
#ifdef USE_QTABLE
free (qtable);
#endif
#ifndef SIZE_DEBUG
printf ("outSize = %zu\n", *outSize);
#endif
return (1);
}
|
bfecc_convection.h | // KRATOS ___ ___ _ ___ __ ___ ___ ___ ___
// / __/ _ \| \| \ \ / /__| \_ _| __| __|
// | (_| (_) | .` |\ V /___| |) | || _|| _|
// \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_BFECC_CONVECTION_INCLUDED )
#define KRATOS_BFECC_CONVECTION_INCLUDED
#define PRESSURE_ON_EULERIAN_MESH
#define USE_FEW_PARTICLES
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "utilities/geometry_utilities.h"
#include "geometries/tetrahedra_3d_4.h"
#include "includes/variables.h"
#include "spatial_containers/spatial_containers.h"
#include "utilities/timer.h"
#include "utilities/binbased_fast_point_locator.h"
#include <boost/timer.hpp>
#include "utilities/timer.h"
#include "utilities/openmp_utils.h"
namespace Kratos
{
/**
 * BFECC (Back and Forth Error Compensation and Correction) convection of a
 * scalar nodal variable along a convective velocity field, using
 * semi-Lagrangian backtracking on a BinBasedFastPointLocator.
 *
 * TDim: spatial dimension (2 or 3).
 */
template<std::size_t TDim> class BFECCConvection
{
public:
    KRATOS_CLASS_POINTER_DEFINITION(BFECCConvection<TDim>);

    /// Constructor: stores the point locator used to find the element
    /// containing an arbitrary spatial position.
    BFECCConvection(typename BinBasedFastPointLocator<TDim>::Pointer pSearchStructure)
        : mpSearchStructure(pSearchStructure)
    {
    }

    ~BFECCConvection()
    {
    }

    //**********************************************************************************************
    //**********************************************************************************************
    /**
     * Convects rVar over one time step (DELTA_TIME from the ProcessInfo).
     * Three sweeps: (1) backtrack each node and estimate rVar(n+1) from the
     * historical values; (2) forward-track to recover rVar(n) from the
     * estimate and store the BFECC-corrected value in the node's non-
     * historical database; (3) re-interpolate the corrected value at the
     * backtracked positions found in sweep 1.
     */
    void BFECCconvect(ModelPart& rModelPart, const Variable< double >& rVar, const Variable<array_1d<double,3> >& conv_var, const double substeps)
    {
        KRATOS_TRY
        const double dt = rModelPart.GetProcessInfo()[DELTA_TIME];

        //do movement
        Vector N(TDim + 1);
        Vector N_valid(TDim + 1);
        const int max_results = 10000;
        typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);

        const int nparticles = rModelPart.Nodes().size();

        // per-node data saved by the first sweep for reuse in the third
        PointerVector< Element > elem_backward( rModelPart.Nodes().size());
        std::vector< Vector > Ns( rModelPart.Nodes().size());
        std::vector< bool > found( rModelPart.Nodes().size());

        //FIRST LOOP: estimate rVar(n+1)
        #pragma omp parallel for firstprivate(results,N,N_valid)
        for (int i = 0; i < nparticles; i++)
        {
            typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
            ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;

            Element::Pointer pelement;
            Element::Pointer pelement_valid;

            array_1d<double,3> bckPos = iparticle->Coordinates();
            const array_1d<double,3>& vel = iparticle->FastGetSolutionStepValue(conv_var);
            bool has_valid_elem_pointer = false;
            // backtrack (velocity_sign = -1) the node position along vel
            bool is_found = ConvectBySubstepping(dt,bckPos,vel, N,N_valid, pelement,pelement_valid, result_begin, max_results, -1.0, substeps, conv_var, has_valid_elem_pointer);
            found[i] = is_found;

            if(is_found) {
                //save position backwards
                elem_backward(i) = pelement;
                Ns[i] = N;

                Geometry< Node < 3 > >& geom = pelement->GetGeometry();
                double phi1 = N[0] * ( geom[0].FastGetSolutionStepValue(rVar,1));
                for (unsigned int k = 1; k < geom.size(); k++) {
                    phi1 += N[k] * ( geom[k].FastGetSolutionStepValue(rVar,1) );
                }

                iparticle->FastGetSolutionStepValue(rVar) = phi1;
            }
            else if(has_valid_elem_pointer)
            {
                //save the last valid position found during substepping
                elem_backward(i) = pelement_valid;
                Ns[i] = N_valid;

                Geometry< Node < 3 > >& geom = pelement_valid->GetGeometry();
                // BUGFIX: seed the interpolation with N_valid[0] (the loop
                // below already uses N_valid[k]); the original used N[0],
                // mixing weights from two different elements.
                double phi1 = N_valid[0] * ( geom[0].FastGetSolutionStepValue(rVar,1));
                for (unsigned int k = 1; k < geom.size(); k++) {
                    phi1 += N_valid[k] * ( geom[k].FastGetSolutionStepValue(rVar,1) );
                }

                iparticle->FastGetSolutionStepValue(rVar) = phi1;
            }
        }

        //now obtain the value AT TIME STEP N by taking it from N+1
        #pragma omp parallel for firstprivate(results,N,N_valid)
        for (int i = 0; i < nparticles; i++)
        {
            typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
            ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;

            Element::Pointer pelement;
            Element::Pointer pelement_valid;

            array_1d<double,3> fwdPos = iparticle->Coordinates();
            const array_1d<double,3>& vel = iparticle->FastGetSolutionStepValue(conv_var,1);
            bool has_valid_elem_pointer = false;
            // forward-track (velocity_sign = +1) using the old velocity
            bool is_found = ConvectBySubstepping(dt,fwdPos,vel, N, N_valid, pelement, pelement_valid, result_begin, max_results, 1.0, substeps, conv_var,has_valid_elem_pointer);

            if(is_found) {
                Geometry< Node < 3 > >& geom = pelement->GetGeometry();
                double phi_old = N[0] * ( geom[0].FastGetSolutionStepValue(rVar));
                for (unsigned int k = 1; k < geom.size(); k++) {
                    phi_old += N[k] * ( geom[k].FastGetSolutionStepValue(rVar) );
                }

                //store correction: BFECC estimate 1.5*phi(n) - 0.5*phi_back
                iparticle->GetValue(rVar) = 1.5*iparticle->FastGetSolutionStepValue(rVar,1) - 0.5 * phi_old;
                //            iparticle->FastGetSolutionStepValue(rVar) = iparticle->GetValue(rVar) - 0.5 * (phi2 - iparticle->FastGetSolutionStepValue(rVar,1));
            }
            else
            {
                // no correction possible: fall back to the old value
                iparticle->GetValue(rVar) = iparticle->FastGetSolutionStepValue(rVar,1);
            }
        }

        // THIRD LOOP: interpolate the corrected value at the backtracked
        // positions found in the first sweep
        #pragma omp parallel for
        for (int i = 0; i < nparticles; i++)
        {
            ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i;
            bool is_found = found[i];
            if(is_found) {
                Vector N = Ns[i];
                Geometry< Node < 3 > >& geom = elem_backward[i].GetGeometry();
                double phi1 = N[0] * ( geom[0].GetValue(rVar));
                for (unsigned int k = 1; k < geom.size(); k++) {
                    phi1 += N[k] * ( geom[k].GetValue(rVar) );
                }

                iparticle->FastGetSolutionStepValue(rVar) = phi1;
            }
            //                 else
            //                     std::cout << "it should find it" << std::endl;
        }

        KRATOS_CATCH("")
    }

    /**
     * Moves `position` along the velocity field in `subdivisions` substeps,
     * re-interpolating the velocity at each intermediate position.
     *
     * velocity_sign > 0 integrates forward in time, < 0 backward; the
     * velocity is blended between the current and previous step values
     * according to the substep fraction.
     *
     * Returns true if the final position lies inside the mesh; N/pelement
     * then hold the interpolation data there.  N_valid/pelement_valid track
     * the last position that WAS inside the mesh (has_valid_elem_pointer is
     * set once any substep succeeds).
     */
    bool ConvectBySubstepping(
        const double dt,
        array_1d<double,3>& position, //IT WILL BE MODIFIED
        const array_1d<double,3>& initial_velocity,
        Vector& N,
        Vector& N_valid,
        Element::Pointer& pelement,
        Element::Pointer& pelement_valid,
        typename BinBasedFastPointLocator<TDim>::ResultIteratorType& result_begin,
        const unsigned int max_results,
        const double velocity_sign,
        const double subdivisions,
        const Variable<array_1d<double,3> >& conv_var,
        bool& has_valid_elem_pointer)
    {
        bool is_found = false;
        array_1d<double,3> veulerian;
        const double small_dt = dt/subdivisions;

        if(velocity_sign > 0.0) //going from the past to the future
        {
            noalias(position) += small_dt*initial_velocity;
            unsigned int substep=0;
            while(substep++ < subdivisions)
            {
                is_found = mpSearchStructure->FindPointOnMesh(position, N, pelement, result_begin, max_results);

                if (is_found == true)
                {
                    Geometry< Node < 3 > >& geom = pelement->GetGeometry();

                    // blend velocity between time steps n and n+1
                    const double new_step_factor = static_cast<double>(substep)/subdivisions;
                    const double old_step_factor = (1.0 - new_step_factor);

                    noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[0].FastGetSolutionStepValue(conv_var,1));
                    for (unsigned int k = 1; k < geom.size(); k++)
                        noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[k].FastGetSolutionStepValue(conv_var,1) );

                    noalias(position) += small_dt*veulerian;

                    N_valid = N;
                    pelement_valid = pelement;
                    has_valid_elem_pointer = true;
                }
                else
                    break;
            }
        }
        else //going from the future to the past
        {
            noalias(position) -= small_dt*initial_velocity;
            unsigned int substep=0;
            while(substep++ < subdivisions)
            {
                is_found = mpSearchStructure->FindPointOnMesh(position, N, pelement, result_begin, max_results);

                if (is_found == true)
                {
                    Geometry< Node < 3 > >& geom = pelement->GetGeometry();

                    //this factors get inverted from the other case
                    const double old_step_factor = static_cast<double>(substep)/subdivisions;
                    const double new_step_factor = (1.0 - old_step_factor);

                    noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[0].FastGetSolutionStepValue(conv_var,1));
                    for (unsigned int k = 1; k < geom.size(); k++)
                        noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[k].FastGetSolutionStepValue(conv_var,1) );

                    noalias(position) -= small_dt*veulerian;

                    N_valid = N;
                    pelement_valid = pelement;
                    has_valid_elem_pointer = true;
                }
                else
                    break;
            }
        }

        return is_found;
    }

    /// Restores the previous-step value on every node where rVar is fixed,
    /// re-imposing Dirichlet conditions after convection.
    void ResetBoundaryConditions(ModelPart& rModelPart, const Variable< double >& rVar)
    {
        KRATOS_TRY

        ModelPart::NodesContainerType::iterator inodebegin = rModelPart.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, rModelPart.Nodes().size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;

                if (inode->IsFixed(rVar))
                {
                    inode->FastGetSolutionStepValue(rVar)=inode->GetSolutionStepValue(rVar,1);
                }
            }
        }

        KRATOS_CATCH("")
    }

    /// Copies the current value of rVar into the previous-step slot on
    /// every node (advances the variable's history buffer).
    void CopyScalarVarToPreviousTimeStep(ModelPart& rModelPart, const Variable< double >& rVar)
    {
        KRATOS_TRY

        ModelPart::NodesContainerType::iterator inodebegin = rModelPart.NodesBegin();
        vector<unsigned int> node_partition;
        #ifdef _OPENMP
        int number_of_threads = omp_get_max_threads();
        #else
        int number_of_threads = 1;
        #endif
        OpenMPUtils::CreatePartition(number_of_threads, rModelPart.Nodes().size(), node_partition);

        #pragma omp parallel for
        for(int kkk=0; kkk<number_of_threads; kkk++)
        {
            for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
            {
                ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
                inode->GetSolutionStepValue(rVar,1) = inode->FastGetSolutionStepValue(rVar);
            }
        }

        KRATOS_CATCH("")
    }

private:
    // locator used to find the element containing a given spatial point
    typename BinBasedFastPointLocator<TDim>::Pointer mpSearchStructure;
};
} // namespace Kratos.
#endif // KRATOS_BFECC_CONVECTION_INCLUDED defined
|
parallel-4.c | // { dg-do compile }
extern void bar (void);
/* GCC testsuite case (dg-do compile): the OpenMP spec forbids the 'nowait'
   clause on a combined 'parallel for' construct, so the compiler must emit
   a diagnostic; the dg-error annotation below checks for it.  This file is
   intentionally invalid code and must not be "fixed". */
int main (void)
{
int i;
#pragma omp parallel for nowait /* { dg-error "'nowait'" } */
for (i = 0; i < 10; i++)
bar ();
}
|
GB_binop__isgt_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__isgt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__isgt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_fp64)
// A*D function (colscale): GB (_AxD__isgt_fp64)
// D*A function (rowscale): GB (_DxB__isgt_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_fp64)
// C=scalar+B GB (_bind1st__isgt_fp64)
// C=scalar+B' GB (_bind1st_tran__isgt_fp64)
// C=A+scalar GB (_bind2nd__isgt_fp64)
// C=A'+scalar GB (_bind2nd_tran__isgt_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij > bij)
// Macro configuration consumed by the template files included below: these
// select the A/B/C types (all double) and the ISGT operator, cij = (aij > bij),
// for every kernel in this translation unit.
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_FP64 || GxB_NO_ISGT_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGT is not one of those ops, so this dense-accum ewise3 kernel is
// compiled out (#if 0) and the function name is the placeholder (none).
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  The numeric work lives in the
// included template, specialized via the GB_* macros defined above.
// Returns GrB_NO_VALUE when this kernel is disabled at compile time.
GrB_Info GB (_Cdense_ewise3_noaccum__isgt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C with the ISGT
// op, using the B_ek_slicing task partition.  Returns GrB_NO_VALUE when this
// kernel is disabled at compile time.
GrB_Info GB (_Cdense_accumB__isgt_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (passed type-erased via p_bwork, holding a
// double) into the dense matrix C, via the included template.
// Returns GrB_NO_VALUE when this kernel is disabled at compile time.
GrB_Info GB (_Cdense_accumb__isgt_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// single exit point: the duplicate (unreachable) return inside the inner
// block was removed, matching the structure of _Cdense_accumB above
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; results are
// written directly into C->x as doubles by the included template.
GrB_Info GB (_AxD__isgt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; results are written
// directly into C->x as doubles by the included template.
GrB_Info GB (_DxB__isgt_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (optional mask M, structural and/or
// complemented per the flags).  Slicing workspace for M, A, and B is declared
// here, used by the included template, and released by GB_FREE_WORK.
GrB_Info GB (_AaddB__isgt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (general case); all numeric work is in
// the included meta template.
GrB_Info GB (_AemultB_01__isgt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for this operator (see the macro definition above), so
// only the non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__isgt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; iterates over M's pattern via the included template.
GrB_Info GB (_AemultB_03__isgt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as a bitmap;
// all numeric work is in the included template.
GrB_Info GB (_AemultB_bitmap__isgt_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]) for every entry present in B; entries absent from the
// bitmap Bb are skipped.  Cx and Bx may be aliased (read then write of the
// same slot is safe).
GrB_Info GB (_bind1st__isgt_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *out = (double *) Cx_output ;
double *Bvals = (double *) Bx_input ;
double scalar = (*((double *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (GBB (Bb, p))
{
double bval = GBX (Bvals, p, false) ;
out [p] = (scalar > bval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y) for every entry present in A; entries absent from the
// bitmap Ab are skipped.  Cx and Ax may be aliased (read then write of the
// same slot is safe).
GrB_Info GB (_bind2nd__isgt_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *out = (double *) Cx_output ;
double *Avals = (double *) Ax_input ;
double scalar = (*((double *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (GBB (Ab, p))
{
double aval = GBX (Avals, p, false) ;
out [p] = (aval > scalar) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// applied per entry while transposing A: Cx [pC] = (x > Ax [pA])
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
// C = op (x, A'): transpose A and apply the ISGT binop with scalar x bound to
// the first argument; the work happens inside GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__isgt_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for later code; this sits after the returns but is
// preprocessor-only, so it still takes effect at compile time
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// applied per entry while transposing A: Cx [pC] = (Ax [pA] > y)
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
// C = op (A', y): transpose A and apply the ISGT binop with scalar y bound to
// the second argument; the work happens inside GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__isgt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
reduce_weight_grads.c | int jobs = ofm * ifm * kh * kw;
int jn = jobs/gp->num_numa_nodes;
int jnv = jn/VLEN;
int jpt = (jnv % ntps == 0) ? (jnv/ntps)*VLEN : ((jnv/ntps)+1)*VLEN;
int ltid = tid - n*ntps;
int tb = (ltid * jpt < jn) ? ltid*jpt : jn;
int te = ((ltid+1)*jpt < jn) ? (ltid+1)*jpt : jn;
float *wgp = (float*)dwt_ptr[n]+n*jn;
for(int nn=0; nn<gp->num_numa_nodes; nn++)
{
if(n == nn) continue;
float *rgp = (float*)dwt_ptr[nn]+n*jn;
#pragma omp simd
for(int i=tb; i<te; i++)
wgp[i] += rgp[i];
}
#pragma omp barrier
for(int nn=0; nn<gp->num_numa_nodes; nn++)
{
if(n == nn) continue;
float *wgp = (float*)dwt_ptr[n]+nn*jn;
float *rgp = (float*)dwt_ptr[nn]+nn*jn;
#pragma vector nontemporal
#pragma omp simd
for(int i=tb; i<te; i++)
wgp[i] = rgp[i];
}
|
target_teams_distribute_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd foo
// Checks that the directive requires an associated for-loop statement.
void test_no_clause() {
int i;
#pragma omp target teams distribute simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp target teams distribute simd' must be a for loop}}
#pragma omp target teams distribute simd
++i;
}
// Checks that control flow may not cross the OpenMP region boundary:
// goto into/out of the region and return inside it are diagnosed, while a
// goto fully contained in the region (L2) is accepted.
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target teams distribute simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
// Checks that an unknown clause name only warns and is ignored.
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
#pragma omp target teams distribute simd foo bar
for (i = 0; i < 16; ++i)
;
}
// Checks handling of stray punctuation (';', ',') after the directive name
// and after a valid clause: warn and ignore the extra tokens.
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
#pragma omp target teams distribute simd;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
#pragma omp target teams distribute simd private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
#pragma omp target teams distribute simd, private(x);
for (i = 0; i < 16; ++i)
;
}
// Checks the 'collapse' clause: malformed argument lists, the requirement for
// a strictly positive integer constant, the required nesting depth, and the
// simd-region nesting restriction.
extern int foo();
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute simd collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute simd collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
#pragma omp target teams distribute simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}}
// expected-error@+1 {{integer constant expression}}
#pragma omp target teams distribute simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{integer constant expression}}
#pragma omp target teams distribute simd collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute simd collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute simd collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-error@+4 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp target teams distribute simd collapse(2) firstprivate(i) // expected-note {{defined as firstprivate}}
for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp target teams distribute simd' directive may not be firstprivate, predetermined as lastprivate}}
for (int j = 0; j < 16; ++j)
#pragma omp parallel for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
// Checks the 'private' clause: malformed argument lists, non-variable
// arguments, and valid one/two/three-variable lists.
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// Checks the 'lastprivate' clause: same malformed-list and argument-kind
// diagnostics as 'private', plus valid variable lists.
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target teams distribute simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Checks the 'firstprivate' clause: malformed lists, the conflict with
// 'lastprivate' on the same variable(s), and the simdlen<=safelen rule.
void test_firstprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
// expected-error@+1 {{lastprivate variable cannot be firstprivate}} expected-note@+1 {{defined as lastprivate}}
#pragma omp target teams distribute simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{lastprivate variable cannot be firstprivate}} expected-note@+1 2 {{defined as lastprivate}}
#pragma omp target teams distribute simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 3 {{lastprivate variable cannot be firstprivate}} expected-note@+1 3 {{defined as lastprivate}}
#pragma omp target teams distribute simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target teams distribute simd simdlen(64) safelen(8)
for (i = 0; i < 16; ++i)
;
}
// Checks that floating-point loop iteration variables are rejected.
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
// Checks the OpenMP 5.0 'nontemporal' clause (rejected under -fopenmp-version=45)
// and the 'order' clause: malformed lists, duplicate listing, interaction with
// data-sharing clauses, and 'order(concurrent)' parsing.
void test_nontemporal() {
int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd nontemporal(
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd nontemporal(,
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute simd nontemporal(, )
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd nontemporal()
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected expression}}
#pragma omp target teams distribute simd nontemporal(int)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp target teams distribute simd nontemporal(0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp target teams distribute simd nontemporal(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp target teams distribute simd nontemporal(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp target teams distribute simd nontemporal(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute simd nontemporal(x :)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp target teams distribute simd nontemporal(x :, )
for (i = 0; i < 16; ++i)
;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp target teams distribute simd nontemporal(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd private(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd nontemporal(x) private(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp target teams distribute simd nontemporal(x, y : 0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd nontemporal(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}}
#pragma omp target teams distribute simd lastprivate(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target teams distribute simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} expected-error {{expected '(' after 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp target teams distribute simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp target teams distribute simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}}
for (int i = 0; i < 10; ++i)
;
#pragma omp target teams distribute simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}}
for (int i = 0; i < 10; ++i)
;
#pragma omp target teams distribute simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}}
for (int i = 0; i < 10; ++i)
;
}
|
misc.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// Set every element of vector component_id on this level to 0.0, INCLUDING
// ghost zones: the grid pointer is offset by ghosts*(1+jStride+kStride) so
// the -ghosts..dim+ghosts index range reaches the ghost region.
// NOTE(review): relies on nested "omp parallel for" (across boxes, then
// within a box) via the OMP_THREAD_* macros -- assumes the build enables
// nested parallelism appropriately; confirm against the project's OpenMP setup.
void zero_vector(level_type * level, int component_id){
// zero's the entire grid INCLUDING ghost zones...
uint64_t _timeStart = CycleTime();
int box;
#pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes)
for(box=0;box<level->num_my_boxes;box++){
int i,j,k;
int jStride = level->my_boxes[box].jStride;
int kStride = level->my_boxes[box].kStride;
int ghosts = level->my_boxes[box].ghosts;
int dim = level->my_boxes[box].dim;
double * __restrict__ grid = level->my_boxes[box].vectors[component_id] + ghosts*(1+jStride+kStride);
#pragma omp parallel for private(k,j,i) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box)
for(k=-ghosts;k<dim+ghosts;k++){
for(j=-ghosts;j<dim+ghosts;j++){
for(i=-ghosts;i<dim+ghosts;i++){
int ijk = i + j*jStride + k*kStride;
grid[ijk] = 0.0;
}}}
}
// charge the elapsed cycles to the level's BLAS1 timing bucket
level->cycles.blas1 += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// Initialize VECTOR_VALID: every cell (ghosts included) starts at 1.0, which
// is correct for periodic BCs; under Dirichlet BCs, any cell whose global
// index falls outside the domain [0, level->dim) is marked 0.0 (invalid).
void initialize_valid_region(level_type * level){
uint64_t _timeStart = CycleTime();
int box;
#pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes)
for(box=0;box<level->num_my_boxes;box++){
int i,j,k;
int jStride = level->my_boxes[box].jStride;
int kStride = level->my_boxes[box].kStride;
int ghosts = level->my_boxes[box].ghosts;
int dim = level->my_boxes[box].dim;
double * __restrict__ valid = level->my_boxes[box].vectors[VECTOR_VALID] + ghosts*(1+jStride+kStride);
#pragma omp parallel for private(k,j,i) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box)
for(k=-ghosts;k<dim+ghosts;k++){
for(j=-ghosts;j<dim+ghosts;j++){
for(i=-ghosts;i<dim+ghosts;i++){
int ijk = i + j*jStride + k*kStride;
valid[ijk] = 1.0; // i.e. all cells including ghosts are valid for periodic BC's
if(level->domain_boundary_condition == BC_DIRICHLET){ // cells outside the domain boundaries are not valid
if(i + level->my_boxes[box].low.i < 0)valid[ijk] = 0.0;
if(j + level->my_boxes[box].low.j < 0)valid[ijk] = 0.0;
if(k + level->my_boxes[box].low.k < 0)valid[ijk] = 0.0;
if(i + level->my_boxes[box].low.i >= level->dim.i)valid[ijk] = 0.0;
if(j + level->my_boxes[box].low.j >= level->dim.j)valid[ijk] = 0.0;
if(k + level->my_boxes[box].low.k >= level->dim.k)valid[ijk] = 0.0;
}
}}}
}
// charge the elapsed cycles to the level's BLAS1 timing bucket
level->cycles.blas1 += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// Set every interior cell of component_id to 'scalar' while zero'ing the ghost zones.
void initialize_grid_to_scalar(level_type * level, int component_id, double scalar){
  uint64_t _timeStart = CycleTime();
  int box;
  #pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes)
  for(box=0;box<level->num_my_boxes;box++){
    int ii,jj,kk;
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int ghosts  = level->my_boxes[box].ghosts;
    const int dim     = level->my_boxes[box].dim;
    double * __restrict__ grid = level->my_boxes[box].vectors[component_id] + ghosts*(1+jStride+kStride);
    #pragma omp parallel for private(kk,jj,ii) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box)
    for(kk=-ghosts;kk<dim+ghosts;kk++){
    for(jj=-ghosts;jj<dim+ghosts;jj++){
    for(ii=-ghosts;ii<dim+ghosts;ii++){
      const int ijk = ii + jj*jStride + kk*kStride;
      // a cell is interior iff all three indices fall inside [0,dim)
      const int interior = (ii>=0) && (jj>=0) && (kk>=0) && (ii<dim) && (jj<dim) && (kk<dim);
      grid[ijk] = interior ? scalar : 0.0;
    }}}
  }
  level->cycles.blas1 += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// c[] = scale_a*a[] + scale_b*b[] over the interior cells of every local box.
void add_vectors(level_type * level, int id_c, double scale_a, int id_a, double scale_b, int id_b){
  uint64_t _timeStart = CycleTime();
  int box;
  #pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes)
  for(box=0;box<level->num_my_boxes;box++){
    int ii,jj,kk;
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int ghosts  = level->my_boxes[box].ghosts;
    const int dim     = level->my_boxes[box].dim;
    // offset pointers so [0] is the first non-ghost-zone point
    double       * __restrict__ c = level->my_boxes[box].vectors[id_c] + ghosts*(1+jStride+kStride);
    const double * __restrict__ a = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride);
    const double * __restrict__ b = level->my_boxes[box].vectors[id_b] + ghosts*(1+jStride+kStride);
    #pragma omp parallel for private(kk,jj,ii) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box)
    for(kk=0;kk<dim;kk++){
    for(jj=0;jj<dim;jj++){
    for(ii=0;ii<dim;ii++){
      const int ijk = ii + jj*jStride + kk*kStride;
      c[ijk] = scale_a*a[ijk] + scale_b*b[ijk];
    }}}
  }
  level->cycles.blas1 += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// c[] = scale * a[] * b[] (pointwise product) over the interior cells.
void mul_vectors(level_type * level, int id_c, double scale, int id_a, int id_b){
  uint64_t _timeStart = CycleTime();
  int box;
  #pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes)
  for(box=0;box<level->num_my_boxes;box++){
    int ii,jj,kk;
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int ghosts  = level->my_boxes[box].ghosts;
    const int dim     = level->my_boxes[box].dim;
    double       * __restrict__ c = level->my_boxes[box].vectors[id_c] + ghosts*(1+jStride+kStride);
    const double * __restrict__ a = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride);
    const double * __restrict__ b = level->my_boxes[box].vectors[id_b] + ghosts*(1+jStride+kStride);
    #pragma omp parallel for private(kk,jj,ii) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box)
    for(kk=0;kk<dim;kk++){
    for(jj=0;jj<dim;jj++){
    for(ii=0;ii<dim;ii++){
      const int ijk = ii + jj*jStride + kk*kStride;
      c[ijk] = scale*a[ijk]*b[ijk];
    }}}
  }
  level->cycles.blas1 += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// c[] = scale_a / a[] (pointwise reciprocal) over the interior cells.
// NOTE(review): no guard against a[ijk]==0 — assumes the source vector has
// no zero entries (TODO confirm against callers).
void invert_vector(level_type * level, int id_c, double scale_a, int id_a){
  uint64_t _timeStart = CycleTime();
  int box;
  #pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes)
  for(box=0;box<level->num_my_boxes;box++){
    int ii,jj,kk;
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int ghosts  = level->my_boxes[box].ghosts;
    const int dim     = level->my_boxes[box].dim;
    double       * __restrict__ c = level->my_boxes[box].vectors[id_c] + ghosts*(1+jStride+kStride);
    const double * __restrict__ a = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride);
    #pragma omp parallel for private(kk,jj,ii) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box)
    for(kk=0;kk<dim;kk++){
    for(jj=0;jj<dim;jj++){
    for(ii=0;ii<dim;ii++){
      const int ijk = ii + jj*jStride + kk*kStride;
      c[ijk] = scale_a/a[ijk];
    }}}
  }
  level->cycles.blas1 += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// c[] = scale_a * a[] over the interior cells of every local box.
void scale_vector(level_type * level, int id_c, double scale_a, int id_a){
  uint64_t _timeStart = CycleTime();
  int box;
  #pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes)
  for(box=0;box<level->num_my_boxes;box++){
    int ii,jj,kk;
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int ghosts  = level->my_boxes[box].ghosts;
    const int dim     = level->my_boxes[box].dim;
    double       * __restrict__ c = level->my_boxes[box].vectors[id_c] + ghosts*(1+jStride+kStride);
    const double * __restrict__ a = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride);
    #pragma omp parallel for private(kk,jj,ii) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box)
    for(kk=0;kk<dim;kk++){
    for(jj=0;jj<dim;jj++){
    for(ii=0;ii<dim;ii++){
      const int ijk = ii + jj*jStride + kk*kStride;
      c[ijk] = scale_a*a[ijk];
    }}}
  }
  level->cycles.blas1 += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// Global dot product of vectors id_a and id_b over all interior cells
// (MPI_Allreduce'd across ranks when USE_MPI is defined).
double dot(level_type * level, int id_a, int id_b){
  uint64_t _timeStart = CycleTime();
  int box;
  double level_sum = 0.0;
  // FIX, schedule(static) is a stand in to guarantee reproducibility...
  #pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes) reduction(+:level_sum) schedule(static)
  for(box=0;box<level->num_my_boxes;box++){
    int ii,jj,kk;
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int ghosts  = level->my_boxes[box].ghosts;
    const int dim     = level->my_boxes[box].dim;
    // i.e. [0] = first non ghost zone point
    const double * __restrict__ a = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride);
    const double * __restrict__ b = level->my_boxes[box].vectors[id_b] + ghosts*(1+jStride+kStride);
    double box_sum = 0.0;
    #pragma omp parallel for private(kk,jj,ii) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box) reduction(+:box_sum) schedule(static)
    for(kk=0;kk<dim;kk++){
    for(jj=0;jj<dim;jj++){
    for(ii=0;ii<dim;ii++){
      const int ijk = ii + jj*jStride + kk*kStride;
      box_sum += a[ijk]*b[ijk];
    }}}
    level_sum += box_sum;
  }
  level->cycles.blas1 += (uint64_t)(CycleTime()-_timeStart);
  #ifdef USE_MPI
  // combine the per-process partial sums
  uint64_t _timeStartAllReduce = CycleTime();
  double send = level_sum;
  MPI_Allreduce(&send,&level_sum,1,MPI_DOUBLE,MPI_SUM,level->MPI_COMM_ALLREDUCE);
  uint64_t _timeEndAllReduce = CycleTime();
  level->cycles.collectives += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  #endif
  return(level_sum);
}
//------------------------------------------------------------------------------------------------------------------------------
// Max (infinity) norm of component_id over all interior cells
// (MPI_Allreduce'd with MPI_MAX across ranks when USE_MPI is defined).
double norm(level_type * level, int component_id){
  uint64_t _timeStart = CycleTime();
  int box;
  double level_max = 0.0;
  // FIX, schedule(static) is a stand in to guarantee reproducibility...
  #pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes) reduction(max:level_max) schedule(static)
  for(box=0;box<level->num_my_boxes;box++){
    int ii,jj,kk;
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int ghosts  = level->my_boxes[box].ghosts;
    const int dim     = level->my_boxes[box].dim;
    // i.e. [0] = first non ghost zone point
    const double * __restrict__ u = level->my_boxes[box].vectors[component_id] + ghosts*(1+jStride+kStride);
    double box_max = 0.0;
    #pragma omp parallel for private(kk,jj,ii) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box) reduction(max:box_max) schedule(static)
    for(kk=0;kk<dim;kk++){
    for(jj=0;jj<dim;jj++){
    for(ii=0;ii<dim;ii++){
      const double mag = fabs(u[ii + jj*jStride + kk*kStride]);
      box_max = (mag>box_max) ? mag : box_max; // max norm
    }}}
    level_max = (box_max>level_max) ? box_max : level_max;
  } // box list
  level->cycles.blas1 += (uint64_t)(CycleTime()-_timeStart);
  #ifdef USE_MPI
  uint64_t _timeStartAllReduce = CycleTime();
  double send = level_max;
  MPI_Allreduce(&send,&level_max,1,MPI_DOUBLE,MPI_MAX,level->MPI_COMM_ALLREDUCE);
  uint64_t _timeEndAllReduce = CycleTime();
  level->cycles.collectives += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  #endif
  return(level_max);
}
//------------------------------------------------------------------------------------------------------------------------------
// Arithmetic mean of vector id_a over the entire global domain
// (sum is MPI_Allreduce'd before dividing by the global cell count).
double mean(level_type * level, int id_a){
  uint64_t _timeStart = CycleTime();
  int box;
  double level_sum = 0.0;
  #pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes) reduction(+:level_sum) schedule(static)
  for(box=0;box<level->num_my_boxes;box++){
    int ii,jj,kk;
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int ghosts  = level->my_boxes[box].ghosts;
    const int dim     = level->my_boxes[box].dim;
    // i.e. [0] = first non ghost zone point
    const double * __restrict__ a = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride);
    double box_sum = 0.0;
    #pragma omp parallel for private(kk,jj,ii) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box) reduction(+:box_sum) schedule(static)
    for(kk=0;kk<dim;kk++){
    for(jj=0;jj<dim;jj++){
    for(ii=0;ii<dim;ii++){
      box_sum += a[ii + jj*jStride + kk*kStride];
    }}}
    level_sum += box_sum;
  }
  level->cycles.blas1 += (uint64_t)(CycleTime()-_timeStart);
  const double ncells_level = (double)level->dim.i*(double)level->dim.j*(double)level->dim.k;
  #ifdef USE_MPI
  uint64_t _timeStartAllReduce = CycleTime();
  double send = level_sum;
  MPI_Allreduce(&send,&level_sum,1,MPI_DOUBLE,MPI_SUM,level->MPI_COMM_ALLREDUCE);
  uint64_t _timeEndAllReduce = CycleTime();
  level->cycles.collectives += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  #endif
  return(level_sum / ncells_level);
}
// c[] = a[] + shift_a over the interior cells of every local box.
void shift_vector(level_type * level, int id_c, int id_a, double shift_a){
  uint64_t _timeStart = CycleTime();
  int box;
  #pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes)
  for(box=0;box<level->num_my_boxes;box++){
    int ii,jj,kk;
    const int jStride = level->my_boxes[box].jStride;
    const int kStride = level->my_boxes[box].kStride;
    const int ghosts  = level->my_boxes[box].ghosts;
    const int dim     = level->my_boxes[box].dim;
    // i.e. [0] = first non ghost zone point
    double       * __restrict__ c = level->my_boxes[box].vectors[id_c] + ghosts*(1+jStride+kStride);
    const double * __restrict__ a = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride);
    #pragma omp parallel for private(kk,jj,ii) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box)
    for(kk=0;kk<dim;kk++){
    for(jj=0;jj<dim;jj++){
    for(ii=0;ii<dim;ii++){
      const int ijk = ii + jj*jStride + kk*kStride;
      c[ijk] = a[ijk] + shift_a;
    }}}
  }
  level->cycles.blas1 += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// Project cell-centered data (id_cell) onto faces (id_face) normal to 'dir'
// (0=i, 1=j, 2=k) via simple linear interpolation of the two adjacent cells.
// The <=dim loop bounds cover both the low and high faces of each box.
// FIX: 'stride' was previously uninitialized and the switch had no default,
// so an out-of-range 'dir' read an uninitialized variable (undefined behavior).
void project_cell_to_face(level_type * level, int id_cell, int id_face, int dir){
  uint64_t _timeStart = CycleTime();
  int box;
  #pragma omp parallel for private(box) OMP_THREAD_ACROSS_BOXES(level->concurrent_boxes)
  for(box=0;box<level->num_my_boxes;box++){
    int i,j,k;
    int jStride = level->my_boxes[box].jStride;
    int kStride = level->my_boxes[box].kStride;
    int ghosts  = level->my_boxes[box].ghosts;
    int dim     = level->my_boxes[box].dim;
    double * __restrict__ grid_cell = level->my_boxes[box].vectors[id_cell] + ghosts*(1+jStride+kStride);
    double * __restrict__ grid_face = level->my_boxes[box].vectors[id_face] + ghosts*(1+jStride+kStride);
    int stride = 1; // safe default (i-direction); guarantees 'stride' is never read uninitialized
    switch(dir){
      case 0: stride =       1;break;//i-direction
      case 1: stride = jStride;break;//j-direction
      case 2: stride = kStride;break;//k-direction
      default: stride =      1;break;//invalid dir: fall back to i-direction
    }
    #pragma omp parallel for private(k,j,i) OMP_THREAD_WITHIN_A_BOX(level->threads_per_box)
    for(k=0;k<=dim;k++){ // <= to ensure you do low and high faces
    for(j=0;j<=dim;j++){
    for(i=0;i<=dim;i++){
      int ijk = i + j*jStride + k*kStride;
      grid_face[ijk] = 0.5*(grid_cell[ijk-stride] + grid_cell[ijk]); // simple linear interpolation
    }}}
  }
  level->cycles.blas1 += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// Returns the max (infinity) norm of the difference (id_a - id_b),
// computed through the VECTOR_TEMP scratch vector.
// FIX: the original body contained an unreachable L2-norm computation (and an
// unused h3 local) after the return statement; the dead code has been removed.
// An L2 alternative would be: sqrt(dot(level,VECTOR_TEMP,VECTOR_TEMP)*h*h*h).
double error(level_type * level, int id_a, int id_b){
  add_vectors(level,VECTOR_TEMP,1.0,id_a,-1.0,id_b); // VECTOR_TEMP = id_a - id_b
  return norm(level,VECTOR_TEMP);                    // max norm of error function
}
|
/* Unfold a (channels, height, width) image into the column matrix used by
 * GEMM-based convolution.  Output row r corresponds to one (channel, ky, kx)
 * kernel tap; each row holds that tap sampled at every output position, with
 * out-of-bounds (zero-padded) samples written as 0. */
void im2col(double *img, double *col, int width, int height, int channels,
    int kernel_w, int kernel_h, int pad_w, int pad_h, int stride_w, int stride_h)
{
    const int out_h    = (height + 2 * pad_h - kernel_h) / stride_h + 1;
    const int out_w    = (width  + 2 * pad_w - kernel_w) / stride_w + 1;
    const int col_rows = channels * kernel_h * kernel_w;
    // This makes performance much worse
    // #pragma omp parallel for
    for (int row = 0; row < col_rows; ++row) {
        const int kx = row % kernel_w;               /* horizontal kernel offset */
        const int ky = (row / kernel_w) % kernel_h;  /* vertical kernel offset   */
        const int ch = row / (kernel_h * kernel_w);  /* source channel           */
        double *dst = col + (size_t)row * out_h * out_w;
        for (int oy = 0; oy < out_h; ++oy) {
            const int sy = oy * stride_h - pad_h + ky;   /* sample row in image */
            for (int ox = 0; ox < out_w; ++ox) {
                const int sx = ox * stride_w - pad_w + kx;
                const int inside = (sy >= 0) && (sy < height) &&
                                   (sx >= 0) && (sx < width);
                *dst++ = inside ? img[((size_t)ch * height + sy) * width + sx]
                                : 0.0;
            }
        }
    }
}
|
JeeIOrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_EEIJASTROW_OPTIMIZED_SOA_H
#include "Configuration.h"
#if QMC_BUILD_LEVEL<5
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#endif
#include "Particle/DistanceTableData.h"
#include <simd/allocator.hpp>
#include <simd/algorithm.hpp>
#include <map>
#include <numeric>
namespace qmcplusplus
{
/** @ingroup WaveFunctionComponent
* @brief Specialization for three-body Jastrow function using multiple functors
*
*Each pair-type can have distinct function \f$u(r_{ij})\f$.
*For electrons, distinct pair correlation functions are used
*for spins up-up/down-down and up-down/down-up.
*/
template<class FT>
class JeeIOrbitalSoA: public WaveFunctionComponent
{
///type of each component U, dU, d2U;
using valT=typename FT::real_type;
///element position type
using posT=TinyVector<valT,OHMMS_DIM>;
///use the same container
using RowContainer=DistanceTableData::RowContainer;
///table index for i-el, el-el is always zero
int myTableID;
//nuber of particles
int Nelec, Nion;
///number of particles + padded
size_t Nelec_padded;
//number of groups of the target particleset
int eGroups, iGroups;
///reference to the sources (ions)
const ParticleSet& Ions;
///diff value
RealType DiffVal;
///\f$Uat[i] = sum_(j) u_{i,j}\f$
Vector<valT> Uat,oldUk,newUk;
///\f$dUat[i] = sum_(j) du_{i,j}\f$
using gContainer_type=VectorSoaContainer<valT,OHMMS_DIM>;
gContainer_type dUat,olddUk,newdUk;
///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
Vector<valT> d2Uat,oldd2Uk,newd2Uk;
/// current values during PbyP
valT cur_Uat,cur_d2Uat;
posT cur_dUat, dUat_temp;
///container for the Jastrow functions
Array<FT*,3> F;
std::map<std::string,FT*> J3Unique;
//YYYY
std::map<FT*,int> J3UniqueIndex;
/// the cutoff for e-I pairs
std::vector<valT> Ion_cutoff;
/// the electrons around ions within the cutoff radius, grouped by species
Array<std::vector<int>,2> elecs_inside;
Array<std::vector<valT>,2> elecs_inside_dist;
Array<std::vector<posT>,2> elecs_inside_displ;
/// the ids of ions within the cutoff radius of an electron on which a move is proposed
std::vector<int> ions_nearby_old, ions_nearby_new;
/// work buffer size
size_t Nbuffer;
/// compressed distances
aligned_vector<valT> Distjk_Compressed, DistkI_Compressed, DistjI_Compressed;
std::vector<int> DistIndice_k;
/// compressed displacements
gContainer_type Disp_jk_Compressed, Disp_jI_Compressed, Disp_kI_Compressed;
/// work result buffer
VectorSoaContainer<valT,9> mVGL;
// Used for evaluating derivatives with respect to the parameters
int NumVars;
Array<std::pair<int,int>,3> VarOffset;
Vector<RealType> dLogPsi;
Array<PosType,2> gradLogPsi;
Array<RealType,2> lapLogPsi;
// Temporary store for parameter derivatives of functor
// The first index is the functor index in J3Unique. The second is the parameter index w.r.t. to that
// functor
std::vector<std::vector<RealType> > du_dalpha;
std::vector<std::vector<PosType> > dgrad_dalpha;
std::vector<std::vector<Tensor<RealType,3> > > dhess_dalpha;
public:
///alias FuncType
using FuncType=FT;
/** Constructor: bind the ion ParticleSet and register the e-I distance table.
 *  @param ions source (ion) particles; held by reference for this object's lifetime
 *  @param elecs target (electron) particles
 *  @param is_master unused in this constructor -- NOTE(review): confirm intended use
 */
JeeIOrbitalSoA(const ParticleSet& ions, ParticleSet& elecs, bool is_master=false)
  : Ions(ions), NumVars(0)
{
  ClassName = "JeeIOrbitalSoA";
  myTableID=elecs.addTable(Ions,DT_SOA);
  // full e-I table rows are required when loading a walker
  elecs.DistTables[myTableID]->Need_full_table_loadWalker=true;
  init(elecs);
}
~JeeIOrbitalSoA() { }
/** Deep-copy this Jastrow for a new electron ParticleSet.
 *  Each unique functor is cloned exactly once and shared across all
 *  (ion, e1, e2) slots that referenced the same functor in the source;
 *  optimizable-variable bookkeeping is then copied over. */
WaveFunctionComponentPtr makeClone(ParticleSet& elecs) const
{
  JeeIOrbitalSoA<FT>* eeIcopy= new JeeIOrbitalSoA<FT>(Ions, elecs, false);
  std::map<const FT*,FT*> fcmap; // original functor -> cloned functor
  for (int iG=0; iG<iGroups; iG++)
    for (int eG1=0; eG1<eGroups; eG1++)
      for (int eG2=0; eG2<eGroups; eG2++)
      {
        if(F(iG,eG1,eG2)==0)
          continue;
        typename std::map<const FT*,FT*>::iterator fit=fcmap.find(F(iG,eG1,eG2));
        if(fit == fcmap.end())
        {
          // first time we see this functor: clone it and let addFunc
          // propagate it to every slot it covers
          FT* fc=new FT(*F(iG,eG1,eG2));
          eeIcopy->addFunc(iG, eG1, eG2, fc);
          fcmap[F(iG,eG1,eG2)]=fc;
        }
      }
  // Ye: I don't like the following memory allocated by default.
  eeIcopy->myVars.clear();
  eeIcopy->myVars.insertFrom(myVars);
  eeIcopy->NumVars=NumVars;
  eeIcopy->dLogPsi.resize(NumVars);
  eeIcopy->gradLogPsi.resize(NumVars,Nelec);
  eeIcopy->lapLogPsi.resize(NumVars,Nelec);
  eeIcopy->VarOffset=VarOffset;
  eeIcopy->Optimizable = Optimizable;
  return eeIcopy;
}
/** Size every per-particle array and work buffer from the particle counts.
 *  Called once from the constructor with the electron ParticleSet. */
void init(ParticleSet& p)
{
  Nelec=p.getTotalNum();
  Nelec_padded=getAlignedSize<valT>(Nelec);
  Nion = Ions.getTotalNum();
  iGroups=Ions.getSpeciesSet().getTotalNum();
  eGroups=p.groups();
  // per-electron accumulators: value, gradient, laplacian plus the
  // old/new scratch copies used during particle-by-particle moves
  Uat.resize(Nelec);
  dUat.resize(Nelec);
  d2Uat.resize(Nelec);
  oldUk.resize(Nelec);
  olddUk.resize(Nelec);
  oldd2Uk.resize(Nelec);
  newUk.resize(Nelec);
  newdUk.resize(Nelec);
  newd2Uk.resize(Nelec);
  // functor table indexed by (ion group, e group, e group); filled by addFunc
  F.resize(iGroups,eGroups,eGroups);
  F=nullptr;
  elecs_inside.resize(eGroups,Nion);
  elecs_inside_dist.resize(eGroups,Nion);
  elecs_inside_displ.resize(eGroups,Nion);
  ions_nearby_old.resize(Nion);
  ions_nearby_new.resize(Nion);
  Ion_cutoff.resize(Nion, 0.0);
  //initialize buffers
  Nbuffer=Nelec;
  mVGL.resize(Nbuffer);
  Distjk_Compressed.resize(Nbuffer);
  DistjI_Compressed.resize(Nbuffer);
  DistkI_Compressed.resize(Nbuffer);
  Disp_jk_Compressed.resize(Nbuffer);
  Disp_jI_Compressed.resize(Nbuffer);
  Disp_kI_Compressed.resize(Nbuffer);
  DistIndice_k.resize(Nbuffer);
}
/// Rebuild the functor-to-index map over the unique J3 functors and size
/// the per-functor parameter-derivative workspaces to each functor's
/// parameter count.
void initUnique()
{
  const size_t nfunc = J3Unique.size();
  du_dalpha.resize(nfunc);
  dgrad_dalpha.resize(nfunc);
  dhess_dalpha.resize(nfunc);
  int ifunc = 0;
  for (auto& entry : J3Unique)
  {
    J3UniqueIndex[entry.second] = ifunc;
    FT& functor = *(entry.second);
    const int numParams = functor.getNumParameters();
    du_dalpha[ifunc].resize(numParams);
    dgrad_dalpha[ifunc].resize(numParams);
    dhess_dalpha[ifunc].resize(numParams);
    ifunc++;
  }
}
/** Register Jastrow functor j for the (iSpecies, eSpecies1, eSpecies2) triplet.
 *  When only the (0,0) electron pair is supplied, the functor fills every
 *  still-unassigned e-e slot of that ion species (spin-unpolarized
 *  correlations).  Each ion of iSpecies gets half the functor cutoff radius
 *  as its e-I cutoff.  Aborts if j is NULL. */
void addFunc(int iSpecies, int eSpecies1, int eSpecies2, FT* j)
{
  if(eSpecies1==eSpecies2)
  {
    //if only up-up is specified, assume spin-unpolarized correlations
    if(eSpecies1==0)
      for (int eG1=0; eG1<eGroups; eG1++)
        for (int eG2=0; eG2<eGroups; eG2++)
        {
          if(F(iSpecies,eG1,eG2)==0)
            F(iSpecies,eG1,eG2)=j;
        }
  }
  else
  {
    // distinct electron species: assign both orderings
    F(iSpecies,eSpecies1,eSpecies2) = j;
    F(iSpecies,eSpecies2,eSpecies1) = j;
  }
  if(j)
  {
    // e-I cutoff is half the functor's cutoff radius
    RealType rcut = 0.5 * j->cutoff_radius;
    for (int i=0; i<Nion; i++)
      if (Ions.GroupID[i] == iSpecies)
        Ion_cutoff[i] = rcut;
  }
  else
  {
    APP_ABORT("JeeIOrbitalSoA::addFunc Jastrow function pointer is NULL");
  }
  // key the functor by its species triplet and refresh derivative workspaces
  std::stringstream aname;
  aname << iSpecies << "_" << eSpecies1 << "_" << eSpecies2;
  J3Unique[aname.str()]=j;
  initUnique();
}
/** check that correlation information is complete
 *  Verifies that for every ion species the correlation slots are either all
 *  set or all empty, re-derives each ion's e-I cutoff from its (0,0)
 *  functor, and requires all functors of one ion species to share one
 *  cutoff radius.  Aborts on any inconsistency.
 */
void check_complete()
{
  //check that correlation pointers are either all 0 or all assigned
  bool complete = true;
  for(int i=0; i<iGroups; ++i)
  {
    int nfilled = 0;
    bool partial;
    for(int e1=0; e1<eGroups; ++e1)
      for(int e2=0; e2<eGroups; ++e2)
        if(F(i,e1,e2)!=0)
          nfilled++;
    // partially filled = some but not all eGroups*eGroups slots assigned
    partial = nfilled>0 && nfilled<eGroups*eGroups;
    if(partial)
      app_log() << "J3 eeI is missing correlation for ion "<<i<< std::endl;
    complete = complete && !partial;
  }
  if(!complete)
  {
    APP_ABORT("JeeIOrbitalSoA::check_complete J3 eeI is missing correlation components\n  see preceding messages for details");
  }
  //first set radii
  for(int i=0; i<Nion; ++i)
  {
    FT* f = F(Ions.GroupID[i],0,0);
    if(f!=0)
      Ion_cutoff[i] = .5*f->cutoff_radius; // same half-cutoff rule as addFunc
  }
  //then check radii
  bool all_radii_match = true;
  for(int i=0; i<iGroups; ++i)
  {
    if(F(i,0,0)!=0)
    {
      bool radii_match = true;
      RealType rcut = F(i,0,0)->cutoff_radius;
      for(int e1=0; e1<eGroups; ++e1)
        for(int e2=0; e2<eGroups; ++e2)
          radii_match = radii_match && F(i,e1,e2)->cutoff_radius==rcut;
      if(!radii_match)
        app_log() << "eeI functors for ion species " << i
                  << " have different radii"<< std::endl;
      all_radii_match = all_radii_match && radii_match;
    }
  }
  if(!all_radii_match)
  {
    APP_ABORT("JeeIOrbitalSoA::check_radii J3 eeI are inconsistent for some ion species\n  see preceding messages for details");
  }
}
//evaluate the distance table with els
/// no-op: nothing cached here depends on the target ParticleSet identity
void resetTargetParticleSet(ParticleSet& P) {}
/** check in an optimizable parameter
 * @param active a super set of optimizable variables
 * Registers every unique functor's parameters both with the global set
 * and with this component's own myVars list (rebuilt from scratch).
 */
void checkInVariables(opt_variables_type& active)
{
  myVars.clear();
  for (auto& entry : J3Unique)
  {
    entry.second->checkInVariables(active);
    entry.second->checkInVariables(myVars);
  }
}
/** check out optimizable variables
 *  Re-binds each functor's variables to indices in 'active', rebuilds
 *  myVars, and (when optimizable parameters exist) sizes the derivative
 *  buffers and the per-(ion,e,e)-slot [first,last) parameter-offset table.
 */
void checkOutVariables(const opt_variables_type& active)
{
  myVars.clear();
  typename std::map<std::string,FT*>::iterator it(J3Unique.begin()),it_end(J3Unique.end());
  while (it != it_end)
  {
    (*it).second->myVars.getIndex(active);
    myVars.insertFrom((*it).second->myVars);
    ++it;
  }
  myVars.getIndex(active);
  NumVars=myVars.size();
  if (NumVars)
  {
    dLogPsi.resize(NumVars);
    gradLogPsi.resize(NumVars,Nelec);
    lapLogPsi.resize(NumVars,Nelec);
    VarOffset.resize(iGroups, eGroups, eGroups);
    // offsets are relative to this component's first active index
    int varoffset=myVars.Index[0];
    for (int ig=0; ig<iGroups; ig++)
      for (int jg=0; jg<eGroups; jg++)
        for (int kg=0; kg<eGroups; kg++)
        {
          FT *func_ijk = F(ig, jg, kg);
          if(func_ijk==nullptr) continue;
          // [first, second) = half-open parameter range of this functor
          VarOffset(ig,jg,kg).first  = func_ijk->myVars.Index.front()-varoffset;
          VarOffset(ig,jg,kg).second = func_ijk->myVars.Index.size()+VarOffset(ig,jg,kg).first;
        }
  }
}
///reset the value of all the unique Two-Body Jastrow functions
void resetParameters(const opt_variables_type& active)
{
if(!Optimizable)
return;
typename std::map<std::string,FT*>::iterator it(J3Unique.begin()),it_end(J3Unique.end());
while(it != it_end)
{
(*it++).second->resetParameters(active);
}
for(int i=0; i<myVars.size(); ++i)
{
int ii=myVars.Index[i];
if(ii>=0)
myVars[i]= active[ii];
}
}
/** print the state, e.g., optimizables */
void reportStatus(std::ostream& os)
{
typename std::map<std::string,FT*>::iterator it(J3Unique.begin()),it_end(J3Unique.end());
while(it != it_end)
{
(*it).second->myVars.print(os);
++it;
}
}
/** Rebuild the per-ion compact lists: for each (electron group, ion) pair,
 *  the electrons inside that ion's cutoff sphere, with their e-I distances
 *  and displacements cached in the parallel _dist/_displ arrays. */
void build_compact_list(ParticleSet& P)
{
  const DistanceTableData& eI_table=(*P.DistTables[myTableID]);
  // clear all three parallel containers before refilling
  for(int iat=0; iat<Nion; ++iat)
    for(int jg=0; jg<eGroups; ++jg)
    {
      elecs_inside(jg,iat).clear();
      elecs_inside_dist(jg,iat).clear();
      elecs_inside_displ(jg,iat).clear();
    }
  for(int jg=0; jg<eGroups; ++jg)
    for(int jel=P.first(jg); jel<P.last(jg); jel++)
      for(int iat=0; iat<Nion; ++iat)
        if(eI_table.Distances[jel][iat]<Ion_cutoff[iat])
        {
          elecs_inside(jg,iat).push_back(jel);
          elecs_inside_dist(jg,iat).push_back(eI_table.Distances[jel][iat]);
          elecs_inside_displ(jg,iat).push_back(eI_table.Displacements[jel][iat]);
        }
}
/** Evaluate log(J3) and accumulate per-particle gradients/laplacians.
 *  Delegates to evaluateGL (the 'true' flag presumably requests a
 *  from-scratch evaluation -- confirm against evaluateGL's signature). */
RealType evaluateLog(ParticleSet& P,
                     ParticleSet::ParticleGradient_t& G,
                     ParticleSet::ParticleLaplacian_t& L)
{
  evaluateGL(P,G,L,true);
  return LogValue;
}
/** Wavefunction ratio for the proposed single-particle move of electron iat.
 *  Uses the Temp_r rows of the e-I and e-e distance tables (the proposed
 *  position) and returns exp(U_old - U_new). */
ValueType ratio(ParticleSet& P, int iat)
{
  UpdateMode=ORB_PBYP_RATIO; // value-only; derivatives deferred to acceptMove
  const DistanceTableData& eI_table=(*P.DistTables[myTableID]);
  const DistanceTableData& ee_table=(*P.DistTables[0]);
  cur_Uat=computeU(P, iat, P.GroupID[iat], eI_table.Temp_r.data(), ee_table.Temp_r.data(), ions_nearby_new);
  DiffVal=Uat[iat]-cur_Uat;
  return std::exp(DiffVal);
}
/** Ratios for every virtual-particle position of VP, each relative to the
 *  current position of the reference particle VP.refPtcl:
 *  ratios[k] = exp(U_current - U_at_virtual_position_k). */
void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios)
{
  for(int k=0; k<ratios.size(); ++k)
    ratios[k]=std::exp(Uat[VP.refPtcl] -
                       computeU(VP.refPS, VP.refPtcl, VP.refPS.GroupID[VP.refPtcl],
                                VP.DistTables[myTableID]->Distances[k],
                                VP.DistTables[0]->Distances[k], ions_nearby_old));
}
/** For the single proposed position held in the Temp_* distance-table rows,
 *  compute the ratio of moving each electron j there:
 *  ratios[j] = exp(Uat[j] + Uself - sumU), where sumU is the total U of the
 *  proposed position against all electrons (jel=-1 excludes none) and Uself
 *  removes the spurious interaction of the proposed position with electron
 *  j itself.
 *  FIX: removed the unused local 'd_table' (duplicate of ee_table). */
void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
  const DistanceTableData& eI_table=(*P.DistTables[myTableID]);
  const DistanceTableData& ee_table=(*P.DistTables[0]);
  for(int jg=0; jg<eGroups; ++jg)
  {
    const valT sumU=computeU(P, -1, jg, eI_table.Temp_r.data(), ee_table.Temp_r.data(), ions_nearby_new);
    for(int j=P.first(jg); j<P.last(jg); ++j)
    {
      // remove self-interaction
      valT Uself(0);
      for(int iat=0; iat<Nion; ++iat)
      {
        const valT &r_Ij = eI_table.Temp_r[iat];
        const valT &r_Ik = eI_table.Distances[j][iat];
        if(r_Ij<Ion_cutoff[iat]&&r_Ik<Ion_cutoff[iat])
        {
          const int ig=Ions.GroupID[iat];
          Uself+=F(ig,jg,jg)->evaluate(ee_table.Temp_r[j],r_Ij,r_Ik);
        }
      }
      ratios[j]=std::exp(Uat[j]+Uself-sumU);
    }
  }
}
/// Gradient of log(J3) w.r.t. electron iat, read from the dUat cache.
GradType evalGrad(ParticleSet& P, int iat)
{
  return GradType(dUat[iat]);
}
/** Ratio plus gradient for the proposed move of electron iat.
 *  computeU3 fills the cur_* scalars and the new* per-electron scratch
 *  arrays from the Temp_* (proposed-position) distance rows; the gradient
 *  at the proposed position is accumulated into grad_iat. */
ValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
  UpdateMode=ORB_PBYP_PARTIAL; // derivatives already computed here
  const DistanceTableData& eI_table=(*P.DistTables[myTableID]);
  const DistanceTableData& ee_table=(*P.DistTables[0]);
  computeU3(P, iat, eI_table.Temp_r.data(), eI_table.Temp_dr, ee_table.Temp_r.data(), ee_table.Temp_dr,
            cur_Uat, cur_dUat, cur_d2Uat, newUk, newdUk, newd2Uk, ions_nearby_new);
  DiffVal=Uat[iat]-cur_Uat;
  grad_iat+=cur_dUat;
  return std::exp(DiffVal);
}
inline void restore(int iat) {}
/** Accept the proposed move of electron iat: fold the (new - old)
 *  contributions into the Uat/dUat/d2Uat caches and update the per-ion
 *  compact lists (elecs_inside and its parallel dist/displ arrays). */
void acceptMove(ParticleSet& P, int iat)
{
  const DistanceTableData& eI_table=(*P.DistTables[myTableID]);
  const DistanceTableData& ee_table=(*P.DistTables[0]);
  // get the old value, grad, lapl
  computeU3(P, iat, eI_table.Distances[iat], eI_table.Displacements[iat], ee_table.Distances[iat], ee_table.Displacements[iat],
            Uat[iat], dUat_temp, d2Uat[iat], oldUk, olddUk, oldd2Uk, ions_nearby_old);
  if(UpdateMode == ORB_PBYP_RATIO)
  {//ratio-only during the move; need to compute derivatives
    computeU3(P, iat, eI_table.Temp_r.data(), eI_table.Temp_dr, ee_table.Temp_r.data(), ee_table.Temp_dr,
              cur_Uat, cur_dUat, cur_d2Uat, newUk, newdUk, newd2Uk, ions_nearby_new);
  }
  // apply the (new - old) delta to every electron's cached value/laplacian
  #pragma omp simd
  for(int jel=0; jel<Nelec; jel++)
  {
    Uat[jel]   += newUk[jel]-oldUk[jel];
    d2Uat[jel] += newd2Uk[jel]-oldd2Uk[jel];
  }
  // ...and to each Cartesian component of the cached gradients
  for(int idim=0; idim<OHMMS_DIM; ++idim)
  {
    valT* restrict save_g=dUat.data(idim);
    const valT* restrict new_g=newdUk.data(idim);
    const valT* restrict old_g=olddUk.data(idim);
    #pragma omp simd aligned(save_g,new_g,old_g)
    for(int jel=0; jel<Nelec; jel++)
      save_g[jel]+=new_g[jel]-old_g[jel];
  }
  LogValue += Uat[iat]-cur_Uat;
  Uat[iat]   = cur_Uat;
  dUat(iat)  = cur_dUat;
  d2Uat[iat] = cur_d2Uat;
  const int ig = P.GroupID[iat];
  // update compact list elecs_inside
  // if the old position exists in elecs_inside
  for (int iind=0; iind<ions_nearby_old.size(); iind++)
  {
    int jat=ions_nearby_old[iind];
    auto iter = find(elecs_inside(ig,jat).begin(), elecs_inside(ig,jat).end(), iat);
    // the three containers are parallel arrays: locate matching slots by offset
    auto iter_dist  = elecs_inside_dist(ig,jat).begin()+std::distance(elecs_inside(ig,jat).begin(),iter);
    auto iter_displ = elecs_inside_displ(ig,jat).begin()+std::distance(elecs_inside(ig,jat).begin(),iter);
    if(eI_table.Temp_r[jat] < Ion_cutoff[jat]) // the new position is still inside
    {
      // update in place and mark this ion handled (-1) in ions_nearby_new
      *iter_dist  = eI_table.Temp_r[jat];
      *iter_displ = eI_table.Temp_dr[jat];
      *std::find(ions_nearby_new.begin(), ions_nearby_new.end(), jat) = -1;
    }
    else
    {
      // swap-and-pop removal from all three parallel arrays
      *iter = elecs_inside(ig,jat).back();
      elecs_inside(ig,jat).pop_back();
      *iter_dist = elecs_inside_dist(ig,jat).back();
      elecs_inside_dist(ig,jat).pop_back();
      *iter_displ = elecs_inside_displ(ig,jat).back();
      elecs_inside_displ(ig,jat).pop_back();
    }
  }
  // if the old position doesn't exist in elecs_inside but the new position do
  for (int iind=0; iind<ions_nearby_new.size(); iind++)
  {
    int jat=ions_nearby_new[iind];
    if(jat>=0) // skip ions already handled above (marked -1)
    {
      elecs_inside(ig,jat).push_back(iat);
      elecs_inside_dist(ig,jat).push_back(eI_table.Temp_r[jat]);
      elecs_inside_displ(ig,jat).push_back(eI_table.Temp_dr[jat]);
    }
  }
}
/** Recompute all cached U, grad U and lap U from scratch.
 *  Iterates electrons j; computeU3 (with the triangle flag set) returns
 *  j's own totals plus the contributions it makes to electrons k<j, which
 *  are then accumulated — so each pair is visited exactly once. */
inline void recompute(ParticleSet& P)
{
  const DistanceTableData& eI_table=(*P.DistTables[myTableID]);
  const DistanceTableData& ee_table=(*P.DistTables[0]);
  build_compact_list(P);
  for(int jel=0; jel<Nelec; ++jel)
  {
    computeU3(P, jel, eI_table.Distances[jel], eI_table.Displacements[jel], ee_table.Distances[jel], ee_table.Displacements[jel],
              Uat[jel], dUat_temp, d2Uat[jel], newUk, newdUk, newd2Uk, ions_nearby_new, true);
    dUat(jel) = dUat_temp;
    // add the contribution from the upper triangle
    #pragma omp simd
    for(int kel=0; kel<jel; kel++)
    {
      Uat[kel]   += newUk[kel];
      d2Uat[kel] += newd2Uk[kel];
    }
    for(int idim=0; idim<OHMMS_DIM; ++idim)
    {
      valT* restrict save_g=dUat.data(idim);
      const valT* restrict new_g=newdUk.data(idim);
      #pragma omp simd aligned(save_g,new_g)
      for(int kel=0; kel<jel; kel++)
        save_g[kel]+=new_g[kel];
    }
  }
}
/** Total three-body value U_j for an electron of group jg located at the
 *  position described by distjI (its e-I distance row) and distjk (its e-e
 *  distance row).
 *  @param jel electron index to exclude from the sum; pass -1 to exclude
 *         none (used by evaluateRatiosAlltoOne)
 *  @param ions_nearby output: ions whose cutoff sphere contains the position
 *  Triplets (r_jk, r_jI, r_kI) are packed into the *_Compressed buffers and
 *  flushed through the functor's vectorized evaluateV either when the buffer
 *  fills or at the end of each run of same-species ions (the functor depends
 *  on the ion group ig). */
inline valT computeU(const ParticleSet& P, int jel, int jg,
                     const RealType* distjI, const RealType* distjk,
                     std::vector<int>& ions_nearby)
{
  const DistanceTableData& eI_table=(*P.DistTables[myTableID]);
  // collect the ions whose cutoff sphere contains this position
  ions_nearby.clear();
  for(int iat=0; iat<Nion; ++iat)
    if(distjI[iat]<Ion_cutoff[iat])
      ions_nearby.push_back(iat);
  valT Uj = valT(0);
  for(int kg=0; kg<eGroups; ++kg)
  {
    int kel_counter = 0;
    for(int iind=0; iind<ions_nearby.size(); ++iind)
    {
      const int iat = ions_nearby[iind];
      const int ig = Ions.GroupID[iat];
      const valT r_jI = distjI[iat];
      for(int kind=0; kind<elecs_inside(kg,iat).size(); kind++)
      {
        const int kel=elecs_inside(kg,iat)[kind];
        if(kel!=jel) // jel==-1 never matches: include every electron
        {
          // pack one (r_jk, r_jI, r_kI) triplet
          DistkI_Compressed[kel_counter]=elecs_inside_dist(kg,iat)[kind];
          Distjk_Compressed[kel_counter]=distjk[kel];
          DistjI_Compressed[kel_counter]=r_jI;
          kel_counter++;
          if(kel_counter==Nbuffer) // buffer full: flush through evaluateV
          {
            const FT& feeI(*F(ig,jg,kg));
            Uj += feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data());
            kel_counter = 0;
          }
        }
      }
      // flush at the end of a same-ion-species run (functor depends on ig)
      if((iind+1==ions_nearby.size() || ig!=Ions.GroupID[ions_nearby[iind+1]]) && kel_counter>0)
      {
        const FT& feeI(*F(ig,jg,kg));
        Uj += feeI.evaluateV(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data());
        kel_counter = 0;
      }
    }
  }
  return Uj;
}
/** Inner kernel of computeU3: evaluate one packed batch of (j,k,I) triplets.
 *
 * @param P target particle set (unused directly; kept for interface symmetry)
 * @param feeI the three-body functor for the current (ig,jg,kg) group triple
 * @param kel_counter number of valid entries in the compressed buffers
 * @param Uj,dUj,d2Uj accumulated value/gradient/laplacian for electron j
 * @param Uk,dUk,d2Uk per-electron accumulators for the k electrons
 *
 * mVGL rows hold the value, the three first derivatives and five second
 * derivatives returned by evaluateVGL.  Several buffers are deliberately
 * recycled/destroyed in the SIMD loops (hessF11, jk, kI) — see comments.
 */
inline void computeU3_engine(const ParticleSet& P,
const FT &feeI, int kel_counter,
valT& Uj, posT& dUj, valT& d2Uj,
Vector<valT>& Uk, gContainer_type& dUk, Vector<valT>& d2Uk)
{
constexpr valT czero(0);
constexpr valT cone(1);
constexpr valT ctwo(2);
constexpr valT lapfac=OHMMS_DIM-cone;
// aliases into the mVGL scratch matrix, one row per derivative component
valT* restrict val=mVGL.data(0);
valT* restrict gradF0=mVGL.data(1);
valT* restrict gradF1=mVGL.data(2);
valT* restrict gradF2=mVGL.data(3);
valT* restrict hessF00=mVGL.data(4);
valT* restrict hessF11=mVGL.data(5);
valT* restrict hessF22=mVGL.data(6);
valT* restrict hessF01=mVGL.data(7);
valT* restrict hessF02=mVGL.data(8);
feeI.evaluateVGL(kel_counter, Distjk_Compressed.data(), DistjI_Compressed.data(), DistkI_Compressed.data(),
val, gradF0, gradF1, gradF2, hessF00, hessF11, hessF22, hessF01, hessF02);
// compute the contribution to jel, kel
Uj=simd::accumulate_n(val,kel_counter,Uj);
valT gradF0_sum=simd::accumulate_n(gradF0,kel_counter,czero);
valT gradF1_sum=simd::accumulate_n(gradF1,kel_counter,czero);
valT hessF00_sum=simd::accumulate_n(hessF00,kel_counter,czero);
valT hessF11_sum=simd::accumulate_n(hessF11,kel_counter,czero);
d2Uj-=hessF00_sum+hessF11_sum+lapfac*(gradF0_sum+gradF1_sum);
// hessF11 is recycled below as the accumulator for dot(kI, jk)
std::fill_n(hessF11,kel_counter,czero);
for(int idim=0; idim<OHMMS_DIM; ++idim)
{
valT *restrict jk = Disp_jk_Compressed.data(idim);
valT *restrict jI = Disp_jI_Compressed.data(idim);
valT *restrict kI = Disp_kI_Compressed.data(idim);
valT dUj_x(0);
#pragma omp simd aligned(gradF0,gradF1,gradF2,hessF11,jk,jI,kI) reduction(+:dUj_x)
for(int kel_index=0; kel_index<kel_counter; kel_index++)
{
// recycle hessF11
hessF11[kel_index] += kI[kel_index] * jk[kel_index];
dUj_x += gradF1[kel_index] * jI[kel_index];
// destroy jk, kI
const valT temp = jk[kel_index] * gradF0[kel_index];
dUj_x += temp;
jk[kel_index] *= jI[kel_index];
kI[kel_index] = kI[kel_index] * gradF2[kel_index] - temp;
}
dUj[idim] += dUj_x;
// fold all dimensions of jk*jI into row 0 for the cross term below
valT *restrict jk0 = Disp_jk_Compressed.data(0);
if(idim>0)
{
#pragma omp simd aligned(jk,jk0)
for(int kel_index=0; kel_index<kel_counter; kel_index++)
jk0[kel_index] += jk[kel_index];
}
// scatter the k-electron gradient contributions (kI now holds them)
valT *restrict dUk_x = dUk.data(idim);
for(int kel_index=0; kel_index<kel_counter; kel_index++)
dUk_x[DistIndice_k[kel_index]] += kI[kel_index];
}
valT sum(0);
valT *restrict jk0 = Disp_jk_Compressed.data(0);
#pragma omp simd aligned(jk0,hessF01) reduction(+:sum)
for(int kel_index=0; kel_index<kel_counter; kel_index++)
sum += hessF01[kel_index] * jk0[kel_index];
d2Uj -= ctwo * sum;
// hessF00 is reused as the per-entry laplacian contribution for electron k
#pragma omp simd aligned(hessF00,hessF22,gradF0,gradF2,hessF02,hessF11)
for(int kel_index=0; kel_index<kel_counter; kel_index++)
hessF00[kel_index] = hessF00[kel_index] + hessF22[kel_index]
+ lapfac*(gradF0[kel_index] + gradF2[kel_index])
- ctwo*hessF02[kel_index] * hessF11[kel_index];
// scatter value and laplacian back to the k electrons
for(int kel_index=0; kel_index<kel_counter; kel_index++)
{
const int kel=DistIndice_k[kel_index];
Uk[kel] += val[kel_index];
d2Uk[kel] -= hessF00[kel_index];
}
}
/** Compute value/gradient/laplacian contributions of electron jel and its
 * partner electrons from the eeI three-body Jastrow.
 *
 * @param P target particle set
 * @param jel index of the moved electron
 * @param distjI,displjI distances/displacements from jel to the ions
 * @param distjk,displjk distances/displacements from jel to the electrons
 * @param Uj,dUj,d2Uj [out] accumulators for electron jel (zeroed here)
 * @param Uk,dUk,d2Uk [out] per-electron accumulators (zeroed up to kelmax)
 * @param ions_nearby [out] ions within cutoff of jel
 * @param triangle when true only k<jel partners are processed (upper triangle)
 */
inline void computeU3(const ParticleSet& P, int jel,
const RealType* distjI, const RowContainer& displjI,
const RealType* distjk, const RowContainer& displjk,
valT& Uj, posT& dUj, valT& d2Uj,
Vector<valT>& Uk, gContainer_type& dUk, Vector<valT>& d2Uk,
std::vector<int>& ions_nearby, bool triangle=false)
{
constexpr valT czero(0);
Uj = czero;
dUj = posT();
d2Uj = czero;
const int jg=P.GroupID[jel];
// triangle mode restricts partners to k < jel
const int kelmax=triangle?jel:Nelec;
std::fill_n(Uk.data(),kelmax,czero);
std::fill_n(d2Uk.data(),kelmax,czero);
for(int idim=0; idim<OHMMS_DIM; ++idim)
std::fill_n(dUk.data(idim),kelmax,czero);
// collect ions whose cutoff sphere contains electron jel
ions_nearby.clear();
for(int iat=0; iat<Nion; ++iat)
if(distjI[iat]<Ion_cutoff[iat])
ions_nearby.push_back(iat);
for(int kg=0; kg<eGroups; ++kg)
{
int kel_counter = 0;
for(int iind=0; iind<ions_nearby.size(); ++iind)
{
const int iat = ions_nearby[iind];
const int ig = Ions.GroupID[iat];
const valT r_jI = distjI[iat];
const posT disp_Ij = displjI[iat];
for(int kind=0; kind<elecs_inside(kg,iat).size(); kind++)
{
const int kel=elecs_inside(kg,iat)[kind];
if(kel<kelmax && kel!=jel)
{
// pack one (j,k,I) triplet into the compressed SoA buffers
DistkI_Compressed[kel_counter]=elecs_inside_dist(kg,iat)[kind];
DistjI_Compressed[kel_counter]=r_jI;
Distjk_Compressed[kel_counter]=distjk[kel];
Disp_kI_Compressed(kel_counter)=elecs_inside_displ(kg,iat)[kind];
Disp_jI_Compressed(kel_counter)=disp_Ij;
Disp_jk_Compressed(kel_counter)=displjk[kel];
DistIndice_k[kel_counter]=kel;
kel_counter++;
if(kel_counter==Nbuffer)
{
// buffer full: evaluate the batch and reset
const FT& feeI(*F(ig,jg,kg));
computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk);
kel_counter = 0;
}
}
}
// flush the partial buffer before the ion group (and functor) changes
if((iind+1==ions_nearby.size() || ig!=Ions.GroupID[ions_nearby[iind+1]]) && kel_counter>0)
{
const FT& feeI(*F(ig,jg,kg));
computeU3_engine(P, feeI, kel_counter, Uj, dUj, d2Uj, Uk, dUk, d2Uk);
kel_counter = 0;
}
}
}
}
/** Register the per-electron accumulators (Uat, dUat, d2Uat) with the walker
 * buffer.  On the first call the data is copied into the buffer and the local
 * storage is released; afterwards the arrays live in the buffer and are
 * re-attached by copyFromBuffer.  Subsequent calls only advance the cursor.
 */
inline void registerData(ParticleSet& P, WFBufferType& buf)
{
if ( Bytes_in_WFBuffer == 0 )
{
// first registration: record how many bytes this component occupies
Bytes_in_WFBuffer = buf.current();
buf.add(Uat.begin(), Uat.end());
buf.add(dUat.data(), dUat.end());
buf.add(d2Uat.begin(), d2Uat.end());
Bytes_in_WFBuffer = buf.current()-Bytes_in_WFBuffer;
// free local space
Uat.free();
dUat.free();
d2Uat.free();
}
else
{
// already registered: just skip over our slice of the buffer
buf.forward(Bytes_in_WFBuffer);
}
}
/** Recompute G and L contributions and advance the walker-buffer cursor past
 * this component's data (the accumulators already live in the buffer).
 * @return the updated log value
 */
inline RealType updateBuffer(ParticleSet& P, WFBufferType& buf,
bool fromscratch=false)
{
evaluateGL(P, P.G, P.L, false);
buf.forward(Bytes_in_WFBuffer);
return LogValue;
}
/** Re-attach Uat/dUat/d2Uat to their slices of the walker buffer (no copy)
 * and rebuild the per-ion electron lists for the new configuration.
 */
inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
{
Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
dUat.attachReference(Nelec, Nelec_padded, buf.lendReference<valT>(Nelec_padded*OHMMS_DIM));
d2Uat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
build_compact_list(P);
}
/** Accumulate this component's gradient and Laplacian into G and L and
 * refresh LogValue from the per-electron accumulators.
 * @param fromscratch when true, rebuild the accumulators first via recompute
 */
void evaluateGL(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L,
bool fromscratch=false)
{
if(fromscratch)
recompute(P);
valT usum(0);
for(int iel=0; iel<Nelec; ++iel)
{
usum += Uat[iel];
G[iel] += dUat[iel];
L[iel] += d2Uat[iel];
}
// apply the -1/2 prefactor to obtain the log value
LogValue = valT(-0.5)*usum;
}
/** Compute parameter derivatives of log(psi) and of H psi / psi for the
 * optimizable variables of this eeI Jastrow.
 *
 * @param P target particle set (P.G is read in the final reduction)
 * @param optvars active optimizable variables
 * @param dlogpsi [out] d log(psi) / d alpha, indexed by global variable index
 * @param dhpsioverpsi [out] kinetic-term derivative, same indexing
 */
void evaluateDerivatives(ParticleSet& P,
const opt_variables_type& optvars,
std::vector<RealType>& dlogpsi,
std::vector<RealType>& dhpsioverpsi)
{
bool recalculate(false);
// NOTE(review): rcsingles is filled but never read afterwards in this method
std::vector<bool> rcsingles(myVars.size(),false);
for (int k=0; k<myVars.size(); ++k)
{
int kk=myVars.where(k);
if (kk<0)
continue;
if (optvars.recompute(kk))
recalculate=true;
rcsingles[k]=true;
}
if (recalculate)
{
constexpr valT czero(0);
constexpr valT cone(1);
constexpr valT cminus(-1);
constexpr valT ctwo(2);
constexpr valT lapfac=OHMMS_DIM-cone;
const DistanceTableData& ee_table=(*P.DistTables[0]);
// NOTE(review): eI_table is unused below; ion data comes from elecs_inside_*
const DistanceTableData& eI_table=(*P.DistTables[myTableID]);
build_compact_list(P);
dLogPsi = czero;
gradLogPsi = PosType();
lapLogPsi = czero;
// loop over ions; (jel,kel) electron pairs are visited once with kel<jel
for(int iat=0; iat<Nion; ++iat)
{
const int ig=Ions.GroupID[iat];
for(int jg=0; jg<eGroups; ++jg)
for(int jind=0; jind<elecs_inside(jg,iat).size(); jind++)
{
const int jel=elecs_inside(jg,iat)[jind];
const valT r_Ij = elecs_inside_dist(jg,iat)[jind];
// stored displacements are ion->electron; flip sign here
const posT disp_Ij = cminus*elecs_inside_displ(jg,iat)[jind];
const valT r_Ij_inv = cone/r_Ij;
for(int kg=0; kg<eGroups; ++kg)
for(int kind=0; kind<elecs_inside(kg,iat).size(); kind++)
{
const int kel=elecs_inside(kg,iat)[kind];
if(kel<jel)
{
const FT& feeI(*F(ig,jg,kg));
const valT r_Ik = elecs_inside_dist(kg,iat)[kind];
const posT disp_Ik = cminus*elecs_inside_displ(kg,iat)[kind];
const valT r_Ik_inv = cone/r_Ik;
const valT r_jk = ee_table.Distances[jel][kel];
const posT disp_jk = ee_table.Displacements[jel][kel];
const valT r_jk_inv = cone/r_jk;
FT &func = *F(ig, jg, kg);
int idx = J3UniqueIndex[F(ig, jg, kg)];
// per-parameter value/gradient/hessian derivatives of the functor
func.evaluateDerivatives(r_jk, r_Ij, r_Ik, du_dalpha[idx],
dgrad_dalpha[idx], dhess_dalpha[idx]);
int first = VarOffset(ig,jg,kg).first;
int last = VarOffset(ig,jg,kg).second;
std::vector<RealType> &dlog = du_dalpha[idx];
std::vector<PosType> &dgrad = dgrad_dalpha[idx];
std::vector<Tensor<RealType,3> > &dhess = dhess_dalpha[idx];
for (int p=first,ip=0; p<last; p++,ip++)
{
RealType& dval = dlog[ip];
PosType& dg = dgrad[ip];
Tensor<RealType,3>& dh = dhess[ip];
// convert radial derivatives to cartesian prefactors
dg[0]*=r_jk_inv;
dg[1]*=r_Ij_inv;
dg[2]*=r_Ik_inv;
PosType gr_ee = dg[0] * disp_jk;
gradLogPsi(p,jel) -= dg[1] * disp_Ij - gr_ee;
lapLogPsi(p,jel) -= (dh(0,0) + lapfac*dg[0] -
ctwo*dh(0,1)*dot(disp_jk,disp_Ij)*r_jk_inv*r_Ij_inv
+ dh(1,1) + lapfac*dg[1]);
gradLogPsi(p,kel) -= dg[2] * disp_Ik + gr_ee;
lapLogPsi(p,kel) -= (dh(0,0) + lapfac*dg[0] +
ctwo*dh(0,2)*dot(disp_jk,disp_Ik)*r_jk_inv*r_Ik_inv
+ dh(2,2) + lapfac*dg[2]);
dLogPsi[p] -= dval;
}
}
}
}
}
// reduce per-parameter arrays into the caller's output vectors
for (int k=0; k<myVars.size(); ++k)
{
int kk=myVars.where(k);
if (kk<0)
continue;
dlogpsi[kk]=dLogPsi[k];
RealType sum = 0.0;
for (int i=0; i<Nelec; i++)
{
#if defined(QMC_COMPLEX)
sum -= 0.5*lapLogPsi(k,i);
for(int jdim=0; jdim<OHMMS_DIM; ++jdim)
sum -= P.G[i][jdim].real()*gradLogPsi(k,i)[jdim];
#else
sum -= 0.5*lapLogPsi(k,i) + dot(P.G[i], gradLogPsi(k,i));
#endif
}
dhpsioverpsi[kk] = sum;
}
}
};
}
#endif
|
GB_binop__rminus_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_int64)
// A*D function (colscale): GB (_AxD__rminus_int64)
// D*A function (rowscale): GB (_DxB__rminus_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_int64)
// C=scalar+B GB (_bind1st__rminus_int64)
// C=scalar+B' GB (_bind1st_tran__rminus_int64)
// C=A+scalar GB (_bind2nd__rminus_int64)
// C=A'+scalar GB (_bind2nd_tran__rminus_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (no trailing backslash here: a stray line continuation would splice the
// next source line into the macro definition)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator: rminus computes z = y - x (reversed minus)
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_INT64 || GxB_NO_RMINUS_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// All of the work is done by the shared template; GB_BINOP supplies the
// rminus semantics (cij = bij - aij).
void GB (_Cdense_ewise3_accum__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Same as above but C is overwritten rather than accumulated into.
void GB (_Cdense_ewise3_noaccum__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rminus_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns; harmless
// artifact of code generation
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes directly into C's value array
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes directly into C's value array
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only meaningful for GxB_eWiseUnion; plain eWiseAdd
// leaves them uninitialized and the template does not read them
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rminus_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for rminus, so the #else branch below is compiled
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rminus_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
const int64_t x = (*((const int64_t *) x_input)) ;
const int64_t *Bx = (const int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only entries present in the bitmap Bb are computed
if (GBB (Bb, p))
{
const int64_t bij = GBX (Bx, p, false) ;
// rminus bound to first input: cij = bij - x
Cx [p] = (bij - x) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rminus_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
const int64_t *Ax = (const int64_t *) Ax_input ;
const int64_t y = (*((const int64_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only entries present in the bitmap Ab are computed
if (GBB (Ab, p))
{
const int64_t aij = GBX (Ax, p, false) ;
// rminus bound to second input: cij = y - aij
Cx [p] = (y - aij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// For rminus with x bound first: cij = aij - x
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB (_bind1st_tran__rminus_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// For rminus with y bound second: cij = y - aij
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB (_bind2nd_tran__rminus_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__isfinite_bool_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isfinite_bool_fc64)
// op(A') function: GB (_unop_tran__isfinite_bool_fc64)
// C type: bool
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = GB_cisfinite (aij)
// operator/type plumbing consumed by the templates included below
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cisfinite (x) ;
// casting (identity cast: input stays double complex)
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = GB_cisfinite (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__isfinite_bool_fc64)
(
bool *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// dense/sparse case: every entry is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// dead for isfinite (GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0)
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = (aij) ;
Cx [p] = GB_cisfinite (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = (aij) ;
Cx [p] = GB_cisfinite (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// all of the work is done by the shared transpose template via GB_CAST_OP
GrB_Info GB (_unop_tran__isfinite_bool_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
J2OrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#include <map>
#include <numeric>
#include "Configuration.h"
#if !defined(QMC_BUILD_SANDBOX_ONLY)
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffTwoBodyJastrowOrbital.h"
#endif
#include "Particle/DistanceTableData.h"
#include "LongRange/StructFact.h"
#include <simd/allocator.hpp>
#include <simd/algorithm.hpp>
namespace qmcplusplus
{
// helper class to activate KEcorr during optimizing Jastrow
template<typename RT, class FT>
class J2KECorrection
{
  size_t num_groups_;                      // number of particle groups in the target set
  std::vector<size_t> num_elec_in_groups_; // electron count per group
  RT num_elecs_;                           // total number of electrons
  RT vol;                                  // simulation cell volume
  RT G0mag;                                // |k| of the smallest k-vector (valid only if SK_enabled)
  const std::vector<FT*>& F_;              // group-pair Jastrow functors, indexed i*num_groups_+j
  bool SK_enabled;                         // true when the structure factor is available

public:
  J2KECorrection(const ParticleSet& targetPtcl, const std::vector<FT*>& F)
      : num_groups_(targetPtcl.groups()),
        num_elecs_(targetPtcl.getTotalNum()),
        vol(targetPtcl.Lattice.Volume),
        F_(F),
        SK_enabled(targetPtcl.SK != nullptr)
  {
    // compute num_elec_in_groups_ (reserve the actual group count instead of
    // a hard-coded capacity)
    num_elec_in_groups_.reserve(num_groups_);
    for (int i = 0; i < num_groups_; i++)
      num_elec_in_groups_.push_back(targetPtcl.last(i) - targetPtcl.first(i));
    if (SK_enabled)
      G0mag = std::sqrt(targetPtcl.SK->KLists.ksq[0]);
  }

  /** Compute the kinetic-energy correction from the two-body Jastrow.
   *  Integrates u(r) against sin(kr)/k on a fixed radial grid, then solves
   *  a small fixed-point iteration for the long-range parameter a.
   *  @return the correction; 0 when no structure factor is available
   */
  RT computeKEcorr()
  {
    if (!SK_enabled)
      return 0;
    const int numPoints = 1000; // radial quadrature points per functor
    RT uk = 0.0;
    RT a  = 1.0;
    for (int i = 0; i < num_groups_; i++)
    {
      int Ni = num_elec_in_groups_[i];
      for (int j = 0; j < num_groups_; j++)
      {
        int Nj = num_elec_in_groups_[j];
        if (F_[i * num_groups_ + j])
        {
          FT& ufunc = *(F_[i * num_groups_ + j]);
          RT radius = ufunc.cutoff_radius;
          RT k      = G0mag;
          RT dr     = radius / (RT)(numPoints - 1);
          for (int ir = 0; ir < numPoints; ir++)
          {
            RT r = dr * (RT)ir;
            RT u = ufunc.evaluate(r);
            uk += 0.5 * 4.0 * M_PI * r * std::sin(k * r) / k * u * dr * (RT)Nj / (RT)(Ni + Nj);
          }
        }
      }
    }
    // fixed-point iteration for a; 20 steps matches the original implementation
    for (int iter = 0; iter < 20; iter++)
      a = uk / (4.0 * M_PI * (1.0 / (G0mag * G0mag) - 1.0 / (G0mag * G0mag + 1.0 / a)));
    return 4.0 * M_PI * a / (4.0 * vol) * num_elecs_;
  }
};
/** @ingroup WaveFunctionComponent
* @brief Specialization for two-body Jastrow function using multiple functors
*
* Each pair-type can have distinct function \f$u(r_{ij})\f$.
* For electrons, distinct pair correlation functions are used
* for spins up-up/down-down and up-down/down-up.
*
* Based on J2OrbitalSoA.h with these considerations
* - DistanceTableData using SoA containers
* - support mixed precision: FT::real_type != OHMMS_PRECISION
 * - loops over the groups: eliminated PairID
* - support simd function
* - double the loop counts
* - Memory use is O(N).
*/
template<class FT>
class J2OrbitalSoA : public WaveFunctionComponent
{
public:
///alias FuncType
using FuncType = FT;
///type of each component U, dU, d2U;
using valT = typename FT::real_type;
///element position type
using posT = TinyVector<valT, OHMMS_DIM>;
///use the same container
using RowContainer = DistanceTableData::RowContainer;
using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>;
// Ye: leaving this public is bad but currently used by unit tests.
///Container for \f$F[ig*NumGroups+jg]\f$.
std::vector<FT*> F;
protected:
///number of particles
size_t N;
///number of particles + padded
size_t N_padded;
///number of groups of the target particleset
size_t NumGroups;
///diff value
RealType DiffVal;
///Correction
RealType KEcorr;
///\f$Uat[i] = sum_(j) u_{i,j}\f$
Vector<valT> Uat;
///\f$dUat[i] = sum_(j) du_{i,j}\f$
gContainer_type dUat;
///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
Vector<valT> d2Uat;
// value of u for the particle being moved (single-particle move workspace)
valT cur_Uat;
// scratch arrays for the current/old particle during a move
aligned_vector<valT> cur_u, cur_du, cur_d2u;
aligned_vector<valT> old_u, old_du, old_d2u;
// compressed distances and their original indices (SIMD-friendly layout)
aligned_vector<valT> DistCompressed;
aligned_vector<int> DistIndice;
///Unique J2 set for cleanup
std::map<std::string, FT*> J2Unique;
/// e-e table ID
const int my_table_ID_;
// helper for compute J2 Chiesa KE correction
J2KECorrection<RealType, FT> j2_ke_corr_helper;
J2OrbitalSoA(ParticleSet& p, int tid);
J2OrbitalSoA(const J2OrbitalSoA& rhs) = delete;
~J2OrbitalSoA();
/* initialize storage */
void init(ParticleSet& p);
/** add functor for (ia,ib) pair */
void addFunc(int ia, int ib, FT* j);
void resetTargetParticleSet(ParticleSet& P)
{
if (dPsi)
dPsi->resetTargetParticleSet(P);
}
/** check in an optimizable parameter
 * @param active a super set of optimizable variables
 */
void checkInVariables(opt_variables_type& active)
{
  myVars.clear();
  // register every unique functor's variables with both the global set
  // and this component's own list
  for (auto& uniqueJ2 : J2Unique)
  {
    uniqueJ2.second->checkInVariables(active);
    uniqueJ2.second->checkInVariables(myVars);
  }
}
/** check out optimizable variables
*/
void checkOutVariables(const opt_variables_type& active)
{
myVars.getIndex(active);
Optimizable = myVars.is_optimizable();
typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
while (it != it_end)
{
(*it).second->checkOutVariables(active);
++it;
}
if (dPsi)
dPsi->checkOutVariables(active);
}
///reset the value of all the unique Two-Body Jastrow functions
void resetParameters(const opt_variables_type& active)
{
if (!Optimizable)
return;
typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
while (it != it_end)
{
(*it).second->resetParameters(active);
++it;
}
if (dPsi)
dPsi->resetParameters(active);
for (int i = 0; i < myVars.size(); ++i)
{
int ii = myVars.Index[i];
if (ii >= 0)
myVars[i] = active[ii];
}
}
// recompute the Chiesa KE correction once optimization is done
void finalizeOptimization() { KEcorr = j2_ke_corr_helper.computeKEcorr(); }
/** print the state, e.g., optimizables */
void reportStatus(std::ostream& os)
{
typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
while (it != it_end)
{
// print each unique functor's variables
(*it).second->myVars.print(os);
++it;
}
}
WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;
LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L);
void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi);
/** recompute internal data assuming distance table is fully ready */
void recompute(ParticleSet& P);
ValueType ratio(ParticleSet& P, int iat);
/** compute wavefunction ratios for a set of virtual particle moves
 *  ratio[k] = exp(U_old - U_new) for moving the reference particle to
 *  virtual position k; Uat caches the old per-particle value and
 *  computeU evaluates the new sum over the VP distance row. */
void evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios)
{
  for (int k = 0; k < ratios.size(); ++k)
    ratios[k] =
        std::exp(Uat[VP.refPtcl] - computeU(VP.refPS, VP.refPtcl, VP.getDistTable(my_table_ID_).Distances[k]));
}
void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);
GradType evalGrad(ParticleSet& P, int iat);
ValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat);
void acceptMove(ParticleSet& P, int iat);
inline void restore(int iat) {}
/** compute G and L after the sweep
*/
void evaluateGL(ParticleSet& P,
ParticleSet::ParticleGradient_t& G,
ParticleSet::ParticleLaplacian_t& L,
bool fromscratch = false);
/** register internal storage (Uat, dUat, d2Uat) with the walker buffer.
 *  First call: records the byte span, copies data in, then frees the
 *  local arrays (they will be re-attached to buffer memory by
 *  copyFromBuffer). Subsequent calls just advance the buffer cursor. */
inline void registerData(ParticleSet& P, WFBufferType& buf)
{
  if (Bytes_in_WFBuffer == 0)
  {
    Bytes_in_WFBuffer = buf.current();
    buf.add(Uat.begin(), Uat.end());
    buf.add(dUat.data(), dUat.end());
    buf.add(d2Uat.begin(), d2Uat.end());
    Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
    // free local space
    Uat.free();
    dUat.free();
    d2Uat.free();
  }
  else
  {
    buf.forward(Bytes_in_WFBuffer);
  }
}
/** re-attach Uat, dUat, d2Uat as views into the walker buffer memory;
 *  must mirror the layout written by registerData (N values, then
 *  N_padded * OHMMS_DIM gradient values, then N laplacian values). */
inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
{
  Uat.attachReference(buf.lendReference<valT>(N), N);
  dUat.attachReference(N, N_padded, buf.lendReference<valT>(N_padded * OHMMS_DIM));
  d2Uat.attachReference(buf.lendReference<valT>(N), N);
}
/** refresh G/L contributions and advance the buffer past this
 *  component's span.
 *  NOTE(review): evaluateGL is called with fromscratch hard-coded to
 *  false, ignoring the parameter — presumably intentional (internal
 *  state is kept current by acceptMove), but confirm. */
LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
{
  evaluateGL(P, P.G, P.L, false);
  buf.forward(Bytes_in_WFBuffer);
  return LogValue;
}
/*@{ internal compute engines*/
/** accumulate sum_j u(r_ij) for particle iat over all groups
 * @param P particle set (provides group ranges and group id of iat)
 * @param iat particle index (excluded inside evaluateV)
 * @param dist distance row for iat
 * @return total pair-correlation value for iat
 */
inline valT computeU(const ParticleSet& P, int iat, const RealType* restrict dist)
{
  valT curUat(0);
  const int igt = P.GroupID[iat] * NumGroups;
  for (int jg = 0; jg < NumGroups; ++jg)
  {
    // functor for the (group of iat, jg) pair
    const FuncType& f2(*F[igt + jg]);
    int iStart = P.first(jg);
    int iEnd   = P.last(jg);
    curUat += f2.evaluateV(iat, iStart, iEnd, dist, DistCompressed.data());
  }
  return curUat;
}
inline void computeU3(const ParticleSet& P,
int iat,
const RealType* restrict dist,
RealType* restrict u,
RealType* restrict du,
RealType* restrict d2u,
bool triangle = false);
/** compute gradient
*/
/** compute gradient: grad[idim] = sum_j du[j] * displ[idim][j]
 * @param du per-particle du/dr values
 * @param displ SoA displacement table row (one contiguous array per dim)
 */
inline posT accumulateG(const valT* restrict du, const RowContainer& displ) const
{
  posT grad;
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    const valT* restrict dX = displ.data(idim);
    valT s                  = valT();
// vectorized dot product over all N particles
#pragma omp simd reduction(+ : s) aligned(du, dX)
    for (int jat = 0; jat < N; ++jat)
      s += du[jat] * dX[jat];
    grad[idim] = s;
  }
  return grad;
}
/**@} */
RealType ChiesaKEcorrection() { return KEcorr = j2_ke_corr_helper.computeKEcorr(); }
RealType KECorrection() { return KEcorr; }
};
/** constructor: registers the SoA e-e distance table on the target
 *  particle set and sizes all internal storage via init().
 *  @param p target particle set
 *  @param tid thread id (unused here beyond the signature)
 */
template<typename FT>
J2OrbitalSoA<FT>::J2OrbitalSoA(ParticleSet& p, int tid) : my_table_ID_(p.addTable(p, DT_SOA)), j2_ke_corr_helper(p, F)
{
  init(p);
  KEcorr    = 0.0;
  ClassName = "J2OrbitalSoA";
}
/** destructor: this component owns the unique radial functors stored
 *  in J2Unique (F holds non-owning duplicates), so delete them here. */
template<typename FT>
J2OrbitalSoA<FT>::~J2OrbitalSoA()
{
  for (auto& entry : J2Unique)
    delete entry.second;
}
/** size all internal storage for N particles and NumGroups groups.
 *  Allocates the cached per-particle values (Uat/dUat/d2Uat), the
 *  scratch arrays for the current and previous move, the functor table
 *  F (NumGroups^2 entries), and compressed-distance work buffers. */
template<typename FT>
void J2OrbitalSoA<FT>::init(ParticleSet& p)
{
  N         = p.getTotalNum();
  N_padded  = getAlignedSize<valT>(N); // padded for aligned SIMD access
  NumGroups = p.groups();
  Uat.resize(N);
  dUat.resize(N);
  d2Uat.resize(N);
  cur_u.resize(N);
  cur_du.resize(N);
  cur_d2u.resize(N);
  old_u.resize(N);
  old_du.resize(N);
  old_d2u.resize(N);
  F.resize(NumGroups * NumGroups, nullptr);
  DistCompressed.resize(N);
  DistIndice.resize(N);
}
/** add a pair functor for species groups (ia,ib)
 * @param ia first group index
 * @param ib second group index
 * @param j radial functor; ownership is taken via J2Unique
 *
 * Assignment rules:
 *  - (0,0): fill every still-empty slot (acts as a default for all pairs)
 *  - (a,a), a>0: assign the single diagonal slot
 *  - (a,b), a!=b, N==2: one-up/one-down special case, use j for all pairs
 *  - (a,b), a!=b, otherwise: assign both symmetric off-diagonal slots
 */
template<typename FT>
void J2OrbitalSoA<FT>::addFunc(int ia, int ib, FT* j)
{
  if (ia == ib)
  {
    if (ia == 0) //first time, assign everything
    {
      int ij = 0;
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg, ++ij)
          if (F[ij] == nullptr)
            F[ij] = j;
    }
    else
      F[ia * NumGroups + ib] = j;
  }
  else
  {
    if (N == 2)
    {
      // a very special case, 1 up + 1 down
      // uu/dd was prevented by the builder
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg)
          F[ig * NumGroups + jg] = j;
    }
    else
    {
      // generic case
      F[ia * NumGroups + ib] = j;
      F[ib * NumGroups + ia] = j;
    }
  }
  // key "iaib" registers j as a uniquely-owned functor
  std::stringstream aname;
  aname << ia << ib;
  J2Unique[aname.str()] = j;
}
/** create a deep clone bound to particle set tqp
 *
 * Each unique functor is cloned exactly once: fcmap maps an original
 * functor pointer to its clone so that slots sharing a functor in this
 * object also share the clone. Only the upper triangle (jg >= ig) is
 * walked; addFunc restores the symmetric entries in the clone.
 * @return a new J2OrbitalSoA owned by the caller
 */
template<typename FT>
WaveFunctionComponentPtr J2OrbitalSoA<FT>::makeClone(ParticleSet& tqp) const
{
  J2OrbitalSoA<FT>* j2copy = new J2OrbitalSoA<FT>(tqp, -1);
  if (dPsi)
    j2copy->dPsi = dPsi->makeClone(tqp);
  std::map<const FT*, FT*> fcmap;
  for (int ig = 0; ig < NumGroups; ++ig)
    for (int jg = ig; jg < NumGroups; ++jg)
    {
      int ij = ig * NumGroups + jg;
      if (F[ij] == nullptr) // was `== 0`; compare pointers against nullptr
        continue;
      typename std::map<const FT*, FT*>::iterator fit = fcmap.find(F[ij]);
      if (fit == fcmap.end())
      {
        FT* fc = new FT(*F[ij]);
        j2copy->addFunc(ig, jg, fc);
        //if (dPsi) (j2copy->dPsi)->addFunc(aname.str(),ig,jg,fc);
        fcmap[F[ij]] = fc;
      }
    }
  j2copy->Optimizable = Optimizable;
  return j2copy;
}
/** intenal function to compute \f$\sum_j u(r_j), du/dr, d2u/dr2\f$
* @param P particleset
* @param iat particle index
* @param dist starting distance
* @param u starting value
* @param du starting first deriv
* @param d2u starting second deriv
*/
/** fill u/du/d2u for particle iat from its distance row.
 *  With triangle=true only particles j < iat are evaluated (used by
 *  recompute to avoid double counting); otherwise all N are filled.
 *  Arrays are zeroed first because evaluateVGL accumulates per group. */
template<typename FT>
inline void J2OrbitalSoA<FT>::computeU3(const ParticleSet& P,
                                        int iat,
                                        const RealType* restrict dist,
                                        RealType* restrict u,
                                        RealType* restrict du,
                                        RealType* restrict d2u,
                                        bool triangle)
{
  const int jelmax = triangle ? iat : N;
  constexpr valT czero(0);
  std::fill_n(u, jelmax, czero);
  std::fill_n(du, jelmax, czero);
  std::fill_n(d2u, jelmax, czero);
  const int igt = P.GroupID[iat] * NumGroups;
  for (int jg = 0; jg < NumGroups; ++jg)
  {
    const FuncType& f2(*F[igt + jg]);
    int iStart = P.first(jg);
    // clamp group range to the (possibly triangular) particle limit
    int iEnd   = std::min(jelmax, P.last(jg));
    f2.evaluateVGL(iat, iStart, iEnd, dist, u, du, d2u, DistCompressed.data(), DistIndice.data());
  }
  //u[iat]=czero;
  //du[iat]=czero;
  //d2u[iat]=czero;
}
/** acceptance ratio for a proposed single-particle move of iat:
 *  exp(U_old - U_new) using the temporary distance row of the move.
 *  cur_Uat is cached for a possible acceptMove. */
template<typename FT>
typename J2OrbitalSoA<FT>::ValueType J2OrbitalSoA<FT>::ratio(ParticleSet& P, int iat)
{
  //only ratio, ready to compute it again
  UpdateMode = ORB_PBYP_RATIO;
  cur_Uat    = computeU(P, iat, P.getDistTable(my_table_ID_).Temp_r.data());
  return std::exp(Uat[iat] - cur_Uat);
}
/** ratios for moving each existing particle to the single position in
 *  Temp_r. sumU is the group-summed pair energy at the new position
 *  (computed once per group of the moved particle); the self term for
 *  particle i is added back before exponentiation. */
template<typename FT>
inline void J2OrbitalSoA<FT>::evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
  const auto& d_table         = P.getDistTable(my_table_ID_);
  const auto* restrict dist   = d_table.Temp_r.data();
  for (int ig = 0; ig < NumGroups; ++ig)
  {
    const int igt = ig * NumGroups;
    valT sumU(0);
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt + jg]);
      int iStart = P.first(jg);
      int iEnd   = P.last(jg);
      // iat = -1: no particle excluded from the sum
      sumU += f2.evaluateV(-1, iStart, iEnd, dist, DistCompressed.data());
    }
    for (int i = P.first(ig); i < P.last(ig); ++i)
    {
      // remove self-interaction
      const valT Uself = F[igt + ig]->evaluate(dist[i]);
      ratios[i]        = std::exp(Uat[i] + Uself - sumU);
    }
  }
}
/** return the cached gradient for particle iat (kept current by
 *  recompute/acceptMove) */
template<typename FT>
typename J2OrbitalSoA<FT>::GradType J2OrbitalSoA<FT>::evalGrad(ParticleSet& P, int iat)
{
  return GradType(dUat[iat]);
}
/** ratio and gradient at the proposed position of particle iat;
 *  computes u/du/d2u into the cur_* scratch arrays so acceptMove can
 *  reuse them without recomputation. */
template<typename FT>
typename J2OrbitalSoA<FT>::ValueType J2OrbitalSoA<FT>::ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
  UpdateMode = ORB_PBYP_PARTIAL;
  computeU3(P, iat, P.getDistTable(my_table_ID_).Temp_r.data(), cur_u.data(), cur_du.data(), cur_d2u.data());
  cur_Uat = simd::accumulate_n(cur_u.data(), N, valT());
  DiffVal = Uat[iat] - cur_Uat;
  grad_iat += accumulateG(cur_du.data(), P.getDistTable(my_table_ID_).Temp_dr);
  return std::exp(DiffVal);
}
/** accept the proposed move of particle iat and update all cached
 *  per-particle values (Uat, dUat, d2Uat) incrementally.
 *  old_* is evaluated from the pre-move distance row; cur_* is either
 *  already available (ratioGrad path) or recomputed here (ratio-only
 *  path). Each neighbor j then receives the delta between new and old
 *  contributions; the moved particle's own entries are overwritten. */
template<typename FT>
void J2OrbitalSoA<FT>::acceptMove(ParticleSet& P, int iat)
{
  // get the old u, du, d2u
  const auto& d_table = P.getDistTable(my_table_ID_);
  computeU3(P, iat, d_table.Distances[iat], old_u.data(), old_du.data(), old_d2u.data());
  if (UpdateMode == ORB_PBYP_RATIO)
  { //ratio-only during the move; need to compute derivatives
    const auto* restrict dist = d_table.Temp_r.data();
    computeU3(P, iat, dist, cur_u.data(), cur_du.data(), cur_d2u.data());
  }
  valT cur_d2Uat(0);
  const auto& new_dr = d_table.Temp_dr;
  const auto& old_dr = d_table.Displacements[iat];
  // laplacian prefactor: (D-1) from the radial part of the Laplacian
  constexpr valT lapfac = OHMMS_DIM - RealType(1);
#pragma omp simd reduction(+ : cur_d2Uat)
  for (int jat = 0; jat < N; jat++)
  {
    const valT du   = cur_u[jat] - old_u[jat];
    const valT newl = cur_d2u[jat] + lapfac * cur_du[jat];
    const valT dl   = old_d2u[jat] + lapfac * old_du[jat] - newl;
    Uat[jat] += du;
    d2Uat[jat] += dl;
    cur_d2Uat -= newl;
  }
  posT cur_dUat;
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    const valT* restrict new_dX    = new_dr.data(idim);
    const valT* restrict old_dX    = old_dr.data(idim);
    const valT* restrict cur_du_pt = cur_du.data();
    const valT* restrict old_du_pt = old_du.data();
    valT* restrict save_g          = dUat.data(idim);
    valT cur_g                     = cur_dUat[idim];
#pragma omp simd reduction(+ : cur_g) aligned(old_dX, new_dX, save_g, cur_du_pt, old_du_pt)
    for (int jat = 0; jat < N; jat++)
    {
      const valT newg = cur_du_pt[jat] * new_dX[jat];
      const valT dg   = newg - old_du_pt[jat] * old_dX[jat];
      save_g[jat] -= dg;
      cur_g += newg;
    }
    cur_dUat[idim] = cur_g;
  }
  // moved particle: replace (not increment) its cached values
  LogValue += Uat[iat] - cur_Uat;
  Uat[iat]   = cur_Uat;
  dUat(iat)  = cur_dUat;
  d2Uat[iat] = cur_d2Uat;
}
/** rebuild Uat, dUat, d2Uat from scratch assuming the distance table
 *  is up to date. computeU3 is called with triangle=true so each pair
 *  (i,j), j<i is evaluated once; the j-side ("upper triangle")
 *  contributions are scattered back after the i-side is stored. */
template<typename FT>
void J2OrbitalSoA<FT>::recompute(ParticleSet& P)
{
  const auto& d_table = P.getDistTable(my_table_ID_);
  for (int ig = 0; ig < NumGroups; ++ig)
  {
    for (int iat = P.first(ig), last = P.last(ig); iat < last; ++iat)
    {
      computeU3(P, iat, d_table.Distances[iat], cur_u.data(), cur_du.data(), cur_d2u.data(), true);
      Uat[iat] = simd::accumulate_n(cur_u.data(), iat, valT());
      posT grad;
      valT lap(0);
      const valT* restrict u    = cur_u.data();
      const valT* restrict du   = cur_du.data();
      const valT* restrict d2u  = cur_d2u.data();
      const RowContainer& displ = d_table.Displacements[iat];
      // laplacian prefactor: (D-1) from the radial part of the Laplacian
      constexpr valT lapfac = OHMMS_DIM - RealType(1);
#pragma omp simd reduction(+ : lap) aligned(du, d2u)
      for (int jat = 0; jat < iat; ++jat)
        lap += d2u[jat] + lapfac * du[jat];
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        const valT* restrict dX = displ.data(idim);
        valT s                  = valT();
#pragma omp simd reduction(+ : s) aligned(du, dX)
        for (int jat = 0; jat < iat; ++jat)
          s += du[jat] * dX[jat];
        grad[idim] = s;
      }
      dUat(iat)  = grad;
      d2Uat[iat] = -lap;
      // add the contribution from the upper triangle
#pragma omp simd aligned(u, du, d2u)
      for (int jat = 0; jat < iat; jat++)
      {
        Uat[jat] += u[jat];
        d2Uat[jat] -= d2u[jat] + lapfac * du[jat];
      }
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        valT* restrict save_g   = dUat.data(idim);
        const valT* restrict dX = displ.data(idim);
#pragma omp simd aligned(save_g, du, dX)
        for (int jat = 0; jat < iat; jat++)
          save_g[jat] -= du[jat] * dX[jat];
      }
    }
  }
}
/** full log-value evaluation: recompute everything, then accumulate
 *  G and L and return the log value. */
template<typename FT>
typename J2OrbitalSoA<FT>::LogValueType J2OrbitalSoA<FT>::evaluateLog(ParticleSet& P,
                                                                      ParticleSet::ParticleGradient_t& G,
                                                                      ParticleSet::ParticleLaplacian_t& L)
{
  evaluateGL(P, G, L, true);
  return LogValue;
}
/** accumulate this component's gradient and laplacian into G and L.
 *  LogValue = -0.5 * sum_i Uat[i]; the factor 0.5 compensates for the
 *  double counting of pairs in the per-particle sums. */
template<typename FT>
void J2OrbitalSoA<FT>::evaluateGL(ParticleSet& P,
                                  ParticleSet::ParticleGradient_t& G,
                                  ParticleSet::ParticleLaplacian_t& L,
                                  bool fromscratch)
{
  if (fromscratch)
    recompute(P);
  LogValue = valT(0);
  for (int iat = 0; iat < N; ++iat)
  {
    LogValue += Uat[iat];
    G[iat] += dUat[iat];
    L[iat] += d2Uat[iat];
  }
  LogValue = -LogValue * 0.5;
}
/** accumulate the pair-wise Hessian of log(psi) into grad_grad_psi.
 *  For each pair (i,j), j<i the rank-one term along the displacement
 *  plus the isotropic du/r term is subtracted from both particles'
 *  Hessians; LogValue is rebuilt as -sum u(r_ij) along the way. */
template<typename FT>
void J2OrbitalSoA<FT>::evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi)
{
  LogValue = 0.0;
  const DistanceTableData& d_ee(P.getDistTable(my_table_ID_));
  valT dudr, d2udr2;
  Tensor<valT, DIM> ident;
  grad_grad_psi = 0.0;
  ident.diagonal(1.0);
  for (int i = 1; i < N; ++i)
  {
    const valT* dist          = d_ee.Distances[i];
    const RowContainer& displ = d_ee.Displacements[i];
    auto ig                   = P.GroupID[i];
    const int igt             = ig * NumGroups;
    for (int j = 0; j < i; ++j)
    {
      auto r    = dist[j];
      auto rinv = 1.0 / r;
      auto dr   = displ[j];
      auto jg   = P.GroupID[j];
      auto uij  = F[igt + jg]->evaluate(r, dudr, d2udr2);
      LogValue -= uij;
      // hess = (d2u - du/r) * (dr ⊗ dr) / r^2 + (du/r) * I
      auto hess = rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv;
      grad_grad_psi[i] -= hess;
      grad_grad_psi[j] -= hess;
    }
  }
}
} // namespace qmcplusplus
#endif
|
GB_binop__isle_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__isle_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int8)
// A*D function (colscale): GB (_AxD__isle_int8)
// D*A function (rowscale): GB (_DxB__isle_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int8)
// C=scalar+B GB (_bind1st__isle_int8)
// C=scalar+B' GB (_bind1st_tran__isle_int8)
// C=A+scalar GB (_bind2nd__isle_int8)
// C=A'+scalar GB (_bind2nd_tran__isle_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_INT8 || GxB_NO_ISLE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all 3 matrices are dense; numerical work is in the
// included template, specialized here for int8_t with z = (x <= y).
GrB_Info GB (_Cdense_ewise3_noaccum__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the
// task slicing precomputed by the caller (B_ek_slicing).
GrB_Info GB (_Cdense_accumB__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// NOTE(review): the second `return (GrB_SUCCESS)` after the brace is
// unreachable (the template path returns inside the block); harmless
// generated-code artifact.
GrB_Info GB (_Cdense_accumb__isle_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by a diagonal matrix D; Cx is written directly.
GrB_Info GB (_AxD__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by a diagonal matrix D; Cx is written directly.
GrB_Info GB (_DxB__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B; the workspace declarations are
// consumed/freed by the included template via GB_FREE_WORK.
GrB_Info GB (_AaddB__isle_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C sparse/hyper, with optional mask M.
GrB_Info GB (_AemultB_08__isle_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: A sparse/hyper, B bitmap/full. GB_BINOP_FLIP is
// 0 for this operator, so only the non-flipped template path compiles.
GrB_Info GB (_AemultB_02__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: M sparse/hyper, A and B bitmap/full.
GrB_Info GB (_AemultB_04__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where C is bitmap, with optional mask M.
GrB_Info GB (_AemultB_bitmap__isle_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x <= Bx [p]) for each entry present in B; entries absent
// from the bitmap Bb are skipped. Cx and Bx may be aliased.
GrB_Info GB (_bind1st__isle_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs/outputs
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    const int8_t xval = (*((int8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        if (GBB (Bb, k))
        {
            int8_t bval = GBX (Bx, k, false) ;
            Cx [k] = (xval <= bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] <= y) for each entry present in A; entries absent
// from the bitmap Ab are skipped. Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__isle_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs/outputs
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    const int8_t yval = (*((int8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            int8_t aval = GBX (Ax, k, false) ;
            Cx [k] = (aval <= yval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose and apply with the scalar bound first.
// GB_ATYPE is temporarily redefined because A is the *second* operand
// of z=f(x,y) inside GB_unop_transpose.c, then restored afterward.
GrB_Info GB (_bind1st_tran__isle_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose and apply with the scalar bound second;
// uses the GB_CAST_OP defined just above for cij = (aij <= y).
GrB_Info GB (_bind2nd_tran__isle_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
yescrypt-simd.c | /*-
* Copyright 2009 Colin Percival
* Copyright 2012-2014 Alexander Peslyak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* This file was originally written by Colin Percival as part of the Tarsnap
* online backup system.
*/
/*
* On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding
* gcc bug 54349 (fixed for gcc 4.9+). On 32-bit, it's of direct help. AVX
* and XOP are of further help either way.
*/
#ifndef __SSE4_1__
#warning "Consider enabling SSE4.1, AVX, or XOP in the C compiler for significantly better performance"
#endif
#include <emmintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "sha256_Y.h"
#include "sysendian.h"
#include "yescrypt.h"
#include "yescrypt-platform.h"
#include "compat.h"
#if __STDC_VERSION__ >= 199901L
/* have restrict */
#elif defined(__GNUC__)
#define restrict __restrict
#else
#define restrict
#endif
#define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint));
#define PREFETCH_OUT(x, hint) /* disabled */
#ifdef __XOP__
#define ARX(out, in1, in2, s) \
out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s));
#else
#define ARX(out, in1, in2, s) \
{ \
__m128i T = _mm_add_epi32(in1, in2); \
out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \
out = _mm_xor_si128(out, _mm_srli_epi32(T, 32-s)); \
}
#endif
#define SALSA20_2ROUNDS \
/* Operate on "columns" */ \
ARX(X1, X0, X3, 7) \
ARX(X2, X1, X0, 9) \
ARX(X3, X2, X1, 13) \
ARX(X0, X3, X2, 18) \
\
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x93); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x39); \
\
/* Operate on "rows" */ \
ARX(X3, X0, X1, 7) \
ARX(X2, X3, X0, 9) \
ARX(X1, X2, X3, 13) \
ARX(X0, X1, X2, 18) \
\
/* Rearrange data */ \
X1 = _mm_shuffle_epi32(X1, 0x39); \
X2 = _mm_shuffle_epi32(X2, 0x4E); \
X3 = _mm_shuffle_epi32(X3, 0x93);
/**
* Apply the salsa20/8 core to the block provided in (X0 ... X3).
*/
#define SALSA20_8_BASE(maybe_decl, out) \
{ \
maybe_decl Y0 = X0; \
maybe_decl Y1 = X1; \
maybe_decl Y2 = X2; \
maybe_decl Y3 = X3; \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
SALSA20_2ROUNDS \
(out)[0] = X0 = _mm_add_epi32(X0, Y0); \
(out)[1] = X1 = _mm_add_epi32(X1, Y1); \
(out)[2] = X2 = _mm_add_epi32(X2, Y2); \
(out)[3] = X3 = _mm_add_epi32(X3, Y3); \
}
#define SALSA20_8(out) \
SALSA20_8_BASE(__m128i, out)
/**
* Apply the salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3).
*/
#define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \
X0 = _mm_xor_si128(X0, Z0); \
X1 = _mm_xor_si128(X1, Z1); \
X2 = _mm_xor_si128(X2, Z2); \
X3 = _mm_xor_si128(X3, Z3); \
SALSA20_8_BASE(maybe_decl, out)
#define SALSA20_8_XOR_MEM(in, out) \
SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out)
#define SALSA20_8_XOR_REG(out) \
SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out)
typedef union {
uint32_t w[16];
__m128i q[4];
} salsa20_blk_t;
/**
* blockmix_salsa8(Bin, Bout, r):
* Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r
* bytes in length; the output Bout must also be the same size.
*/
static inline void
blockmix_salsa8(const salsa20_blk_t *restrict Bin,
    salsa20_blk_t *restrict Bout, size_t r)
{
	/* Running salsa state kept in SSE registers across the whole block. */
	__m128i X0, X1, X2, X3;
	size_t i;

	/* Work with r-1 so that index arithmetic below matches the
	 * interleaved output layout (even sub-blocks first, then odd). */
	r--;
	PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin[i * 2], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
	}
	PREFETCH(&Bin[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	X0 = Bin[r * 2 + 1].q[0];
	X1 = Bin[r * 2 + 1].q[1];
	X2 = Bin[r * 2 + 1].q[2];
	X3 = Bin[r * 2 + 1].q[3];

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	SALSA20_8_XOR_MEM(Bin[0].q, Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	/* Two sub-blocks per iteration: odd input goes to the second half
	 * of Bout, the following even input to the first half. */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	SALSA20_8_XOR_MEM(Bin[r * 2 + 1].q, Bout[r * 2 + 1].q)
}
/*
 * (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs
 * starting with Sandy Bridge. Additionally, PSHUFD uses separate source and
 * destination registers, whereas the shifts would require an extra move
 * instruction for our code when building without AVX. Unfortunately, PSHUFD
 * is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ)
 * and somewhat slower on some non-Intel CPUs (luckily not including AMD
 * Bulldozer and Piledriver). Since for many other CPUs using (V)PSHUFD is a
 * win in terms of throughput or/and not needing a move instruction, we
 * currently use it despite of the higher latency on some older CPUs. As an
 * alternative, the #if below may be patched to only enable use of (V)PSHUFD
 * when building with SSE4.1 or newer, which is not available on older CPUs
 * where this instruction has higher latency.
 */
#if 1
/* HI32(X): bring the high 32 bits of each 64-bit lane into the low half
 * (lane-wise 32-bit swap via shuffle; see rationale above). */
#define HI32(X) \
	_mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1))
#elif 0
#define HI32(X) \
	_mm_srli_si128((X), 4)
#else
#define HI32(X) \
	_mm_srli_epi64((X), 32)
#endif

/* EXTRACT64(X): read the low 64 bits of an __m128i as a scalar, picking an
 * intrinsic that is known-good for the current compiler/target. */
#if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__))
/* Intel's name, also supported by recent gcc */
#define EXTRACT64(X) _mm_cvtsi128_si64(X)
#elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__)
/* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */
#define EXTRACT64(X) _mm_cvtsi128_si64x(X)
#elif defined(__x86_64__) && defined(__SSE4_1__)
/* No known bugs for this intrinsic */
#include <smmintrin.h>
#define EXTRACT64(X) _mm_extract_epi64((X), 0)
#elif defined(__SSE4_1__)
/* 32-bit */
#include <smmintrin.h>
#if 0
/* This is currently unused by the code below, which instead uses these two
 * intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */
#define EXTRACT64(X) \
	((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
	((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32))
#endif
#else
/* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */
#define EXTRACT64(X) \
	((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \
	((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32))
#endif

/* pwxform parameters.  S_BITS selects the S-box size. */
/* This is tunable */
#define S_BITS 8
/* Not tunable in this implementation, hard-coded in a few places */
#define S_SIMD 2
#define S_P 4
/* Number of S-boxes. Not tunable by design, hard-coded in a few places. */
#define S_N 2
/* Derived values. Not tunable except via S_BITS above. */
#define S_SIZE1 (1 << S_BITS)
#define S_MASK ((S_SIZE1 - 1) * S_SIMD * 8)
/* S_MASK replicated into both 32-bit halves of a 64-bit word, so one AND
 * can mask two S-box indices at once. */
#define S_MASK2 (((uint64_t)S_MASK << 32) | S_MASK)
/* Total byte size of all S-boxes for one pwxform context. */
#define S_SIZE_ALL (S_N * S_SIZE1 * S_SIMD * 8)

#if !defined(__x86_64__) && defined(__SSE4_1__)
/* 32-bit with SSE4.1 */
/* One pwxform lane: mask out two S-box indices from X, then
 * X <-- (hi(X) * lo(X) + S0[idx0]) xor S1[idx1], per 64-bit lane. */
#define PWXFORM_X_T __m128i
#define PWXFORM_SIMD(X, x, s0, s1) \
	x = _mm_and_si128(X, _mm_set1_epi64x(S_MASK2)); \
	s0 = *(const __m128i *)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \
	s1 = *(const __m128i *)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \
	X = _mm_mul_epu32(HI32(X), X); \
	X = _mm_add_epi64(X, s0); \
	X = _mm_xor_si128(X, s1);
#else
/* 64-bit, or 32-bit without SSE4.1 */
#define PWXFORM_X_T uint64_t
#define PWXFORM_SIMD(X, x, s0, s1) \
	x = EXTRACT64(X) & S_MASK2; \
	s0 = *(const __m128i *)(S0 + (uint32_t)x); \
	s1 = *(const __m128i *)(S1 + (x >> 32)); \
	X = _mm_mul_epu32(HI32(X), X); \
	X = _mm_add_epi64(X, s0); \
	X = _mm_xor_si128(X, s1);
#endif

/* One pwxform round applied to all four 128-bit state registers. */
#define PWXFORM_ROUND \
	PWXFORM_SIMD(X0, x0, s00, s01) \
	PWXFORM_SIMD(X1, x1, s10, s11) \
	PWXFORM_SIMD(X2, x2, s20, s21) \
	PWXFORM_SIMD(X3, x3, s30, s31)

/* Full pwxform: six rounds over X0..X3 with fresh temporaries. */
#define PWXFORM \
{ \
	PWXFORM_X_T x0, x1, x2, x3; \
	__m128i s00, s01, s10, s11, s20, s21, s30, s31; \
	PWXFORM_ROUND PWXFORM_ROUND \
	PWXFORM_ROUND PWXFORM_ROUND \
	PWXFORM_ROUND PWXFORM_ROUND \
}

/* XOR a 64-byte block (four __m128i) into the X registers. */
#define XOR4(in) \
	X0 = _mm_xor_si128(X0, (in)[0]); \
	X1 = _mm_xor_si128(X1, (in)[1]); \
	X2 = _mm_xor_si128(X2, (in)[2]); \
	X3 = _mm_xor_si128(X3, (in)[3]);

/* Store the X registers to a 64-byte block. */
#define XOUT(out) \
	(out)[0] = X0; \
	(out)[1] = X1; \
	(out)[2] = X2; \
	(out)[3] = X3;
/**
 * blockmix_pwxform(Bin, Bout, r, S):
 * Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin). The input Bin must
 * be 128r bytes in length; the output Bout must also be the same size.
 * If S is NULL, falls back to the classic scrypt BlockMix_salsa20/8.
 */
static void
blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3;
	size_t i;

	/* No pwxform S-boxes supplied: classic scrypt BlockMix. */
	if (!S) {
		blockmix_salsa8(Bin, Bout, r);
		return;
	}

	/* The two S-boxes occupy the two halves of the S region. */
	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;
	r--;

	/* Warm the cache for every input and output sub-block. */
	PREFETCH(&Bin[r], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)

	/* X <-- B_{r1 - 1} */
	X0 = Bin[r].q[0];
	X1 = Bin[r].q[1];
	X2 = Bin[r].q[2];
	X3 = Bin[r].q[3];

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		/* X <-- H'(X \xor B_i) */
		XOR4(Bin[i].q)
		PWXFORM
		/* B'_i <-- X */
		XOUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin[i].q)
	PWXFORM

	/* B'_i <-- H(B'_i): only the final sub-block goes through Salsa20/8. */
	SALSA20_8(Bout[i].q)
}
/* Load the XOR of two 64-byte blocks into the X registers. */
#define XOR4_2(in1, in2) \
	X0 = _mm_xor_si128((in1)[0], (in2)[0]); \
	X1 = _mm_xor_si128((in1)[1], (in2)[1]); \
	X2 = _mm_xor_si128((in1)[2], (in2)[2]); \
	X3 = _mm_xor_si128((in1)[3], (in2)[3]);

/*
 * blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM):
 * Compute Bout = BlockMix_salsa20/8(Bin1 xor Bin2).  Bin2_in_ROM only
 * selects the prefetch hint (NTA for ROM data we don't want cached).
 * Returns the low 32 bits of the final X0, which callers use as the next
 * Integerify value.
 */
static inline uint32_t
blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM)
{
	__m128i X0, X1, X2, X3;
	size_t i;

	r--;
	/* Prefetch all sub-blocks; the two branches differ only in the
	 * cache hint used for Bin2. */
	if (Bin2_in_ROM) {
		PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_NTA)
		PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i * 2], _MM_HINT_NTA)
			PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_NTA)
			PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
		}
		PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	} else {
		PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
		PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
			PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
			PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
		}
		PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	}
	PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[0].q)
	SALSA20_8_XOR_MEM(Bin2[0].q, Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	/* Even sub-blocks land in Bout[0..r], odd ones in Bout[r+1..2r+1]. */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2 + 1].q)
		SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2].q)
		SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[r * 2 + 1].q)
	SALSA20_8_XOR_MEM(Bin2[r * 2 + 1].q, Bout[r * 2 + 1].q)

	return _mm_cvtsi128_si32(X0);
}
/*
 * blockmix_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM, S):
 * pwxform variant of blockmix_salsa8_xor: Bout = BlockMix_pwxform(Bin1 xor
 * Bin2).  Falls back to the salsa8 version when S is NULL.  Returns the low
 * 32 bits of the final X0 for use as the next Integerify value.
 */
static uint32_t
blockmix_xor(const salsa20_blk_t *restrict Bin1,
    const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, int Bin2_in_ROM, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3;
	size_t i;

	if (!S)
		return blockmix_salsa8_xor(Bin1, Bin2, Bout, r, Bin2_in_ROM);

	/* The two S-boxes occupy the two halves of the S region. */
	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;
	r--;

	/* Prefetch; NTA hint keeps ROM data from polluting the cache. */
	if (Bin2_in_ROM) {
		PREFETCH(&Bin2[r], _MM_HINT_NTA)
		PREFETCH(&Bin1[r], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i], _MM_HINT_NTA)
			PREFETCH(&Bin1[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		}
	} else {
		PREFETCH(&Bin2[r], _MM_HINT_T0)
		PREFETCH(&Bin1[r], _MM_HINT_T0)
		for (i = 0; i < r; i++) {
			PREFETCH(&Bin2[i], _MM_HINT_T0)
			PREFETCH(&Bin1[i], _MM_HINT_T0)
			PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		}
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

	/* X <-- B_{r1 - 1} */
	XOR4_2(Bin1[r].q, Bin2[r].q)

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		/* X <-- H'(X \xor B_i) */
		XOR4(Bin1[i].q)
		XOR4(Bin2[i].q)
		PWXFORM
		/* B'_i <-- X */
		XOUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin1[i].q)
	XOR4(Bin2[i].q)
	PWXFORM

	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)

	return _mm_cvtsi128_si32(X0);
}
#undef XOR4
/* "Save" variant of XOR4: XOR 'in' into 'out' IN MEMORY (out <-- in xor out,
 * i.e. V_j <-- Xprev xor V_j), also leaving the result in Y0..Y3 for the
 * subsequent SALSA20_8_XOR_REG. */
#define XOR4(in, out) \
	(out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \
	(out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \
	(out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \
	(out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]);

/*
 * blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r):
 * Like blockmix_salsa8_xor, but additionally writes Bin1 xor Bin2 back into
 * Bin2 (the YESCRYPT_RW write-back).  Returns the low 32 bits of the final
 * X0.
 */
static inline uint32_t
blockmix_salsa8_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r)
{
	__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
	size_t i;

	r--;
	PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0)
	PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin2[i * 2], _MM_HINT_T0)
		PREFETCH(&Bin1[i * 2], _MM_HINT_T0)
		PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0)
		PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[r + 1 + i], _MM_HINT_T0)
	}
	PREFETCH(&Bin2[r * 2], _MM_HINT_T0)
	PREFETCH(&Bin1[r * 2], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0)
	PREFETCH_OUT(&Bout[r * 2 + 1], _MM_HINT_T0)

	/* 1: X <-- B_{2r - 1} */
	XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[0].q, Bin2[0].q)
	SALSA20_8_XOR_REG(Bout[0].q)

	/* 2: for i = 0 to 2r - 1 do */
	for (i = 0; i < r;) {
		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2 + 1].q, Bin2[i * 2 + 1].q)
		SALSA20_8_XOR_REG(Bout[r + 1 + i].q)

		i++;

		/* 3: X <-- H(X \xor B_i) */
		/* 4: Y_i <-- X */
		/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
		XOR4(Bin1[i * 2].q, Bin2[i * 2].q)
		SALSA20_8_XOR_REG(Bout[i].q)
	}

	/* 3: X <-- H(X \xor B_i) */
	/* 4: Y_i <-- X */
	/* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */
	XOR4(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q)
	SALSA20_8_XOR_REG(Bout[r * 2 + 1].q)

	return _mm_cvtsi128_si32(X0);
}
/* XOR the Y registers (set by the save-variant XOR4 above) into X. */
#define XOR4_Y \
	X0 = _mm_xor_si128(X0, Y0); \
	X1 = _mm_xor_si128(X1, Y1); \
	X2 = _mm_xor_si128(X2, Y2); \
	X3 = _mm_xor_si128(X3, Y3);

/*
 * blockmix_xor_save(Bin1, Bin2, Bout, r, S):
 * pwxform variant of blockmix_salsa8_xor_save: also writes Bin1 xor Bin2
 * back into Bin2.  Falls back to the salsa8 version when S is NULL.
 * Returns the low 32 bits of the final X0.
 */
static uint32_t
blockmix_xor_save(const salsa20_blk_t *restrict Bin1,
    salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout,
    size_t r, const __m128i *restrict S)
{
	const uint8_t * S0, * S1;
	__m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3;
	size_t i;

	if (!S)
		return blockmix_salsa8_xor_save(Bin1, Bin2, Bout, r);

	/* The two S-boxes occupy the two halves of the S region. */
	S0 = (const uint8_t *)S;
	S1 = (const uint8_t *)S + S_SIZE_ALL / 2;

	/* Convert 128-byte blocks to 64-byte blocks */
	r *= 2;
	r--;

	PREFETCH(&Bin2[r], _MM_HINT_T0)
	PREFETCH(&Bin1[r], _MM_HINT_T0)
	for (i = 0; i < r; i++) {
		PREFETCH(&Bin2[i], _MM_HINT_T0)
		PREFETCH(&Bin1[i], _MM_HINT_T0)
		PREFETCH_OUT(&Bout[i], _MM_HINT_T0)
	}
	PREFETCH_OUT(&Bout[r], _MM_HINT_T0);

	/* X <-- B_{r1 - 1} */
	XOR4_2(Bin1[r].q, Bin2[r].q)

	/* for i = 0 to r1 - 1 do */
	for (i = 0; i < r; i++) {
		/* Write Bin1 xor Bin2 back into Bin2, result also in Y. */
		XOR4(Bin1[i].q, Bin2[i].q)
		/* X <-- H'(X \xor B_i) */
		XOR4_Y
		PWXFORM
		/* B'_i <-- X */
		XOUT(Bout[i].q)
	}

	/* Last iteration of the loop above */
	XOR4(Bin1[i].q, Bin2[i].q)
	XOR4_Y
	PWXFORM

	/* B'_i <-- H(B'_i) */
	SALSA20_8(Bout[i].q)

	return _mm_cvtsi128_si32(X0);
}
/* The SIMD helper macros are local to the blockmix implementations above. */
#undef ARX
#undef SALSA20_2ROUNDS
#undef SALSA20_8
#undef SALSA20_8_XOR_ANY
#undef SALSA20_8_XOR_MEM
#undef SALSA20_8_XOR_REG
#undef PWXFORM_SIMD_1
#undef PWXFORM_SIMD_2
#undef PWXFORM_ROUND
#undef PWXFORM
#undef OUT
#undef XOR4
#undef XOR4_2

/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 * Only the low 32 bits (word 0 of the last 64-byte sub-block) are used.
 */
static inline uint32_t
integerify(const salsa20_blk_t * B, size_t r)
{
	return B[2 * r - 1].w[0];
}
/**
 * smix1(B, r, N, flags, V, NROM, shared, XY, S):
 * Compute first loop of B = SMix_r(B, N). The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 128r bytes in length. The value N must be even and no
 * smaller than 2. The array V must be aligned to a multiple of 64 bytes, and
 * arrays B and XY to a multiple of at least 16 bytes (aligning them to 64
 * bytes as well saves cache lines, but might result in cache bank conflicts).
 *
 * This loop both fills V and, with YESCRYPT_RW or a ROM, already reads back
 * earlier V entries ("Wrap") and/or the ROM while doing so.
 */
static void
smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
	const salsa20_blk_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 2 * r;
	salsa20_blk_t * X = V, * Y;
	uint32_t i, j;
	size_t k;

	/* 1: X <-- B */
	/* 3: V_i <-- X */
	/* Load B directly into V[0], undoing the (i * 5 mod 16) word
	 * permutation used for external storage of blocks. */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	if (NROM && (VROM_mask & 1)) {
		uint32_t n;
		salsa20_blk_t * V_n;
		const salsa20_blk_t * V_j;

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[s];
		blockmix(X, Y, r, S);

		X = &V[2 * s];
		/* Always true here (the enclosing check required bit 0 of
		 * VROM_mask); kept for symmetry with the masked tests in the
		 * loop below. */
		if ((1 & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j = integerify(Y, r) & (NROM - 1);
			V_j = &VROM[j * s];

			/* X <-- H(X \xor VROM_j) */
			j = blockmix_xor(Y, V_j, X, r, 1, S);
		} else {
			/* X <-- H(X) */
			blockmix(Y, X, r, S);
			j = integerify(X, r);
		}

		for (n = 2; n < N; n <<= 1) {
			uint32_t m = (n < N / 2) ? n : (N - 1 - n);
			V_n = &V[n * s];

			/* 2: for i = 0 to N - 1 do */
			for (i = 1; i < m; i += 2) {
				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i - 1;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				Y = &V_n[i * s];
				j = blockmix_xor(X, V_j, Y, r, 0, S);

				/* Interleave a ROM access whenever the block
				 * index hits the ROM access mask. */
				if (((n + i) & VROM_mask) == 1) {
					/* j <-- Integerify(X) mod NROM */
					j &= NROM - 1;
					V_j = &VROM[j * s];
				} else {
					/* j <-- Wrap(Integerify(X), i) */
					j &= n - 1;
					j += i;
					V_j = &V[j * s];
				}

				/* X <-- H(X \xor VROM_j) */
				X = &V_n[(i + 1) * s];
				j = blockmix_xor(Y, V_j, X, r, 1, S);
			}
		}

		n >>= 1;

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 2 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[(N - 1) * s];
		j = blockmix_xor(X, V_j, Y, r, 0, S);

		if (((N - 1) & VROM_mask) == 1) {
			/* j <-- Integerify(X) mod NROM */
			j &= NROM - 1;
			V_j = &VROM[j * s];
		} else {
			/* j <-- Wrap(Integerify(X), i) */
			j &= n - 1;
			j += N - 1 - n;
			V_j = &V[j * s];
		}

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		X = XY;
		blockmix_xor(Y, V_j, X, r, 1, S);
	} else if (flags & YESCRYPT_RW) {
		uint32_t n;
		salsa20_blk_t * V_n, * V_j;

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[s];
		blockmix(X, Y, r, S);

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		X = &V[2 * s];
		blockmix(Y, X, r, S);
		j = integerify(X, r);

		for (n = 2; n < N; n <<= 1) {
			uint32_t m = (n < N / 2) ? n : (N - 1 - n);
			V_n = &V[n * s];

			/* 2: for i = 0 to N - 1 do */
			for (i = 1; i < m; i += 2) {
				Y = &V_n[i * s];

				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i - 1;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				j = blockmix_xor(X, V_j, Y, r, 0, S);

				/* j <-- Wrap(Integerify(X), i) */
				j &= n - 1;
				j += i;
				V_j = &V[j * s];

				/* X <-- X \xor V_j */
				/* 4: X <-- H(X) */
				/* 3: V_i <-- X */
				X = &V_n[(i + 1) * s];
				j = blockmix_xor(Y, V_j, X, r, 0, S);
			}
		}

		n >>= 1;

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 2 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[(N - 1) * s];
		j = blockmix_xor(X, V_j, Y, r, 0, S);

		/* j <-- Wrap(Integerify(X), i) */
		j &= n - 1;
		j += N - 1 - n;
		V_j = &V[j * s];

		/* X <-- X \xor V_j */
		/* 4: X <-- H(X) */
		X = XY;
		blockmix_xor(Y, V_j, X, r, 0, S);
	} else {
		/* Classic scrypt: sequentially fill V without read-backs. */
		/* 2: for i = 0 to N - 1 do */
		for (i = 1; i < N - 1; i += 2) {
			/* 4: X <-- H(X) */
			/* 3: V_i <-- X */
			Y = &V[i * s];
			blockmix(X, Y, r, S);

			/* 4: X <-- H(X) */
			/* 3: V_i <-- X */
			X = &V[(i + 1) * s];
			blockmix(Y, X, r, S);
		}

		/* 4: X <-- H(X) */
		/* 3: V_i <-- X */
		Y = &V[i * s];
		blockmix(X, Y, r, S);

		/* 4: X <-- H(X) */
		X = XY;
		blockmix(Y, X, r, S);
	}

	/* B' <-- X */
	/* Store back with the (i * 5 mod 16) word permutation. */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
		}
	}
}
/**
 * smix2(B, r, N, Nloop, flags, V, NROM, shared, XY, S):
 * Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in
 * length; the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r bytes in length. The value N must be a power of 2
 * greater than 1. The value Nloop must be even. The array V must be aligned
 * to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16
 * bytes (aligning them to 64 bytes as well saves cache lines, but might result
 * in cache bank conflicts).
 */
static void
smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop,
    yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM,
    const yescrypt_shared_t * shared, salsa20_blk_t * XY, void * S)
{
	const salsa20_blk_t * VROM = shared->shared1.aligned;
	uint32_t VROM_mask = shared->mask1;
	size_t s = 2 * r;
	salsa20_blk_t * X = XY, * Y = &XY[s];
	uint64_t i;
	uint32_t j;
	size_t k;

	if (Nloop == 0)
		return;

	/* X <-- B' */
	/* 3: V_i <-- X */
	/* Load B into X, undoing the (i * 5 mod 16) storage permutation. */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	i = Nloop / 2;

	/* 7: j <-- Integerify(X) mod N */
	j = integerify(X, r) & (N - 1);

/*
 * Normally, NROM implies YESCRYPT_RW, but we check for these separately
 * because YESCRYPT_PARALLEL_SMIX resets YESCRYPT_RW for the smix2() calls
 * operating on the entire V.
 */
	if (NROM && (flags & YESCRYPT_RW)) {
		/* 6: for i = 0 to N - 1 do */
		/* Each iteration does two blockmix steps (X->Y, Y->X), the
		 * second going to the ROM when the mask matches. */
		for (i = 0; i < Nloop; i += 2) {
			salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* j <-- Integerify(X) mod NROM */
			j = blockmix_xor_save(X, V_j, Y, r, S);

			if (((i + 1) & VROM_mask) == 1) {
				const salsa20_blk_t * VROM_j;

				j &= NROM - 1;
				VROM_j = &VROM[j * s];

				/* X <-- H(X \xor VROM_j) */
				/* 7: j <-- Integerify(X) mod N */
				j = blockmix_xor(Y, VROM_j, X, r, 1, S);
			} else {
				j &= N - 1;
				V_j = &V[j * s];

				/* 8: X <-- H(X \xor V_j) */
				/* V_j <-- Xprev \xor V_j */
				/* j <-- Integerify(X) mod NROM */
				j = blockmix_xor_save(Y, V_j, X, r, S);
			}
			j &= N - 1;
			V_j = &V[j * s];
		}
	} else if (NROM) {
		/* 6: for i = 0 to N - 1 do */
		for (i = 0; i < Nloop; i += 2) {
			const salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* j <-- Integerify(X) mod NROM */
			j = blockmix_xor(X, V_j, Y, r, 0, S);

			if (((i + 1) & VROM_mask) == 1) {
				j &= NROM - 1;
				V_j = &VROM[j * s];
			} else {
				j &= N - 1;
				V_j = &V[j * s];
			}

			/* X <-- H(X \xor VROM_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(Y, V_j, X, r, 1, S);
			j &= N - 1;
			V_j = &V[j * s];
		}
	} else if (flags & YESCRYPT_RW) {
		/* 6: for i = 0 to N - 1 do */
		do {
			salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor_save(X, V_j, Y, r, S);
			j &= N - 1;
			V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* V_j <-- Xprev \xor V_j */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor_save(Y, V_j, X, r, S);
			j &= N - 1;
		} while (--i);
	} else {
		/* Classic scrypt second loop (read-only V). */
		/* 6: for i = 0 to N - 1 do */
		do {
			const salsa20_blk_t * V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(X, V_j, Y, r, 0, S);
			j &= N - 1;
			V_j = &V[j * s];

			/* 8: X <-- H(X \xor V_j) */
			/* 7: j <-- Integerify(X) mod N */
			j = blockmix_xor(Y, V_j, X, r, 0, S);
			j &= N - 1;
		} while (--i);
	}

	/* 10: B' <-- X */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]);
		}
	}
}
/**
 * p2floor(x):
 * Largest power of 2 not greater than argument.
 * Returns 0 when x is 0 (no power of 2 qualifies).
 */
static uint64_t
p2floor(uint64_t x)
{
	/*
	 * x & (x - 1) clears the lowest set bit.  Keep clearing bits until
	 * exactly one remains: that bit is the highest set bit of the
	 * original value, i.e. the largest power of 2 <= x.
	 */
	for (;;) {
		uint64_t stripped = x & (x - 1);
		if (stripped == 0)
			return x;
		x = stripped;
	}
}
/**
 * smix(B, r, N, p, t, flags, V, NROM, shared, XY, S):
 * Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the
 * temporary storage V must be 128rN bytes in length; the temporary storage XY
 * must be 256r or 256rp bytes in length (the larger size is required with
 * OpenMP-enabled builds). The value N must be a power of 2 greater than 1.
 * The array V must be aligned to a multiple of 64 bytes, and arrays B and
 * XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well
 * saves cache lines and helps avoid false sharing in OpenMP-enabled builds
 * when p > 1, but it might also result in cache bank conflicts).
 */
static void
smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t,
    yescrypt_flags_t flags,
    salsa20_blk_t * V, uint32_t NROM, const yescrypt_shared_t * shared,
    salsa20_blk_t * XY, void * S)
{
	size_t s = 2 * r;
	uint32_t Nchunk = N / p;
	uint64_t Nloop_all, Nloop_rw;
	uint32_t i;

	/* Scale the second-loop iteration count by the time parameter t. */
	Nloop_all = Nchunk;
	if (flags & YESCRYPT_RW) {
		if (t <= 1) {
			if (t)
				Nloop_all *= 2; /* 2/3 */
			Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */
		} else {
			Nloop_all *= t - 1;
		}
	} else if (t) {
		if (t == 1)
			Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */
		Nloop_all *= t;
	}

	/* Nloop_rw: iterations done with read-write access to this thread's
	 * own V chunk; the remainder is done read-only over all of V. */
	Nloop_rw = 0;
	if (flags & __YESCRYPT_INIT_SHARED)
		Nloop_rw = Nloop_all;
	else if (flags & YESCRYPT_RW)
		Nloop_rw = Nloop_all / p;

	Nchunk &= ~(uint32_t)1; /* round down to even */
	Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */
	Nloop_rw &= ~(uint64_t)1; /* round down to even */

#ifdef _OPENMP
#pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, shared, XY, S, s, Nchunk, Nloop_all, Nloop_rw)
	{
#pragma omp for
#endif
	for (i = 0; i < p; i++) {
		uint32_t Vchunk = i * Nchunk;
		uint8_t * Bp = &B[128 * r * i];
		salsa20_blk_t * Vp = &V[Vchunk * s];
#ifdef _OPENMP
		salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
		salsa20_blk_t * XYp = XY;
#endif
		/* The last chunk absorbs the rounding remainder of N / p. */
		uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk);
		void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;

		/* Fill this lane's pwxform S-box region from its B chunk
		 * (smix1 with the S region in the V position, no pwxform). */
		if (Sp)
			smix1(Bp, 1, S_SIZE_ALL / 128,
			    flags & ~YESCRYPT_PWXFORM,
			    Sp, NROM, shared, XYp, NULL);

		if (!(flags & __YESCRYPT_INIT_SHARED_2))
			smix1(Bp, r, Np, flags, Vp, NROM, shared, XYp, Sp);

		smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp,
		    NROM, shared, XYp, Sp);
	}

	/* Remaining iterations run read-only over the entire V. */
	if (Nloop_all > Nloop_rw) {
#ifdef _OPENMP
#pragma omp for
#endif
		for (i = 0; i < p; i++) {
			uint8_t * Bp = &B[128 * r * i];
#ifdef _OPENMP
			salsa20_blk_t * XYp = &XY[i * (2 * s)];
#else
			salsa20_blk_t * XYp = XY;
#endif
			void * Sp = S ? ((uint8_t *)S + i * S_SIZE_ALL) : S;
			smix2(Bp, r, N, Nloop_all - Nloop_rw,
			    flags & ~YESCRYPT_RW, V, NROM, shared, XYp, Sp);
		}
	}
#ifdef _OPENMP
	}
#endif
}
/**
 * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen,
 *     N, r, p, t, flags, buf, buflen):
 * Compute scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r,
 * p, buflen), or a revision of scrypt as requested by flags and shared, and
 * write the result into buf. The parameters r, p, and buflen must satisfy
 * r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N must be a power
 * of 2 greater than 1. (This optimized implementation currently additionally
 * limits N to the range from 8 to 2^31, but other implementation might not.)
 *
 * t controls computation time while not affecting peak memory usage. shared
 * and flags may request special modes as described in yescrypt.h. local is
 * the thread-local data structure, allowing to preserve and reuse a memory
 * allocation across calls, thereby reducing its overhead.
 *
 * Return 0 on success; or -1 on error (with errno set).
 */
int
yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local,
    const uint8_t * passwd, size_t passwdlen,
    const uint8_t * salt, size_t saltlen,
    uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags,
    uint8_t * buf, size_t buflen)
{
	uint8_t _ALIGN(128) sha256[32];
	yescrypt_region_t tmp;
	uint64_t NROM;
	size_t B_size, V_size, XY_size, need;
	uint8_t * B, * S;
	salsa20_blk_t * V, * XY;

	/*
	 * YESCRYPT_PARALLEL_SMIX is a no-op at p = 1 for its intended purpose,
	 * so don't let it have side-effects. Without this adjustment, it'd
	 * enable the SHA-256 password pre-hashing and output post-hashing,
	 * because any deviation from classic scrypt implies those.
	 */
	if (p == 1)
		flags &= ~YESCRYPT_PARALLEL_SMIX;

	/* Sanity-check parameters */
	if (flags & ~YESCRYPT_KNOWN_FLAGS) {
		errno = EINVAL;
		return -1;
	}
#if SIZE_MAX > UINT32_MAX
	if (buflen > (((uint64_t)(1) << 32) - 1) * 32) {
		errno = EFBIG;
		return -1;
	}
#endif
	if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) {
		errno = EFBIG;
		return -1;
	}
	if (N > UINT32_MAX) {
		errno = EFBIG;
		return -1;
	}
	/* N must be a power of 2, at least 8 (see function comment). */
	if (((N & (N - 1)) != 0) || (N <= 7) || (r < 1) || (p < 1)) {
		errno = EINVAL;
		return -1;
	}
	if ((flags & YESCRYPT_PARALLEL_SMIX) && (N / p <= 7)) {
		errno = EINVAL;
		return -1;
	}
	/* Guard the size computations below against size_t overflow. */
	if ((r > SIZE_MAX / 256 / p) ||
	    (N > SIZE_MAX / 128 / r)) {
		errno = ENOMEM;
		return -1;
	}
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX) &&
	    (N > SIZE_MAX / 128 / (r * p))) {
		errno = ENOMEM;
		return -1;
	}
#endif
	if ((flags & YESCRYPT_PWXFORM) &&
#ifndef _OPENMP
	    (flags & YESCRYPT_PARALLEL_SMIX) &&
#endif
	    p > SIZE_MAX / S_SIZE_ALL) {
		errno = ENOMEM;
		return -1;
	}

	/* Derive the ROM size in 128r-byte blocks, if a ROM is attached. */
	NROM = 0;
	if (shared->shared1.aligned) {
		NROM = shared->shared1.aligned_size / ((size_t)128 * r);
		if (NROM > UINT32_MAX) {
			errno = EFBIG;
			return -1;
		}
		/* A ROM requires YESCRYPT_RW and a power-of-2 size >= 8. */
		if (((NROM & (NROM - 1)) != 0) || (NROM <= 7) ||
		    !(flags & YESCRYPT_RW)) {
			errno = EINVAL;
			return -1;
		}
	}

	/* Allocate memory */
	V = NULL;
	V_size = (size_t)128 * r * N;
#ifdef _OPENMP
	if (!(flags & YESCRYPT_PARALLEL_SMIX))
		V_size *= p;
#endif
	need = V_size;
	if (flags & __YESCRYPT_INIT_SHARED) {
		/* ROM initialization: V lives in the caller's region. */
		if (local->aligned_size < need) {
			if (local->base || local->aligned ||
			    local->base_size || local->aligned_size) {
				errno = EINVAL;
				return -1;
			}
			if (!alloc_region(local, need))
				return -1;
		}
		V = (salsa20_blk_t *)local->aligned;
		need = 0;
	}
	/* Accumulate 'need' with overflow checks after each addition. */
	B_size = (size_t)128 * r * p;
	need += B_size;
	if (need < B_size) {
		errno = ENOMEM;
		return -1;
	}
	XY_size = (size_t)256 * r;
#ifdef _OPENMP
	XY_size *= p;
#endif
	need += XY_size;
	if (need < XY_size) {
		errno = ENOMEM;
		return -1;
	}
	if (flags & YESCRYPT_PWXFORM) {
		size_t S_size = S_SIZE_ALL;
#ifdef _OPENMP
		S_size *= p;
#else
		if (flags & YESCRYPT_PARALLEL_SMIX)
			S_size *= p;
#endif
		need += S_size;
		if (need < S_size) {
			errno = ENOMEM;
			return -1;
		}
	}
	if (flags & __YESCRYPT_INIT_SHARED) {
		if (!alloc_region(&tmp, need))
			return -1;
		B = (uint8_t *)tmp.aligned;
		XY = (salsa20_blk_t *)((uint8_t *)B + B_size);
	} else {
		init_region(&tmp);
		if (local->aligned_size < need) {
			if (free_region(local))
				return -1;
			if (!alloc_region(local, need))
				return -1;
		}
		/* Carve B, V, and XY out of the single local allocation. */
		B = (uint8_t *)local->aligned;
		V = (salsa20_blk_t *)((uint8_t *)B + B_size);
		XY = (salsa20_blk_t *)((uint8_t *)V + V_size);
	}
	S = NULL;
	if (flags & YESCRYPT_PWXFORM)
		S = (uint8_t *)XY + XY_size;

	/* Any deviation from classic scrypt (t or flags nonzero) pre-hashes
	 * the password with SHA-256. */
	if (t || flags) {
		SHA256_CTX_Y ctx;
		SHA256_Init_Y(&ctx);
		SHA256_Update_Y(&ctx, passwd, passwdlen);
		SHA256_Final_Y(sha256, &ctx);
		passwd = sha256;
		passwdlen = sizeof(sha256);
	}

	/* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */
	PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size);

	if (t || flags)
		memcpy(sha256, B, sizeof(sha256));

	if (p == 1 || (flags & YESCRYPT_PARALLEL_SMIX)) {
		smix(B, r, N, p, t, flags, V, NROM, shared, XY, S);
	} else {
		uint32_t i;

		/* 2: for i = 0 to p - 1 do */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, shared, XY, S)
#endif
		for (i = 0; i < p; i++) {
			/* 3: B_i <-- MF(B_i, N) */
#ifdef _OPENMP
			smix(&B[(size_t)128 * r * i], r, N, 1, t, flags,
			    &V[(size_t)2 * r * i * N],
			    NROM, shared,
			    &XY[(size_t)4 * r * i],
			    S ? &S[S_SIZE_ALL * i] : S);
#else
			smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V,
			    NROM, shared, XY, S);
#endif
		}
	}

	/* 5: DK <-- PBKDF2(P, B, 1, dkLen) */
	PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen);

	/*
	 * Except when computing classic scrypt, allow all computation so far
	 * to be performed on the client. The final steps below match those of
	 * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so
	 * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of
	 * SCRAM's use of SHA-1) would be usable with yescrypt hashes.
	 */
	if ((t || flags) && buflen == sizeof(sha256)) {
		/* Compute ClientKey */
		{
			HMAC_SHA256_CTX_Y ctx;
			HMAC_SHA256_Init_Y(&ctx, buf, buflen);
#if 0
			/* Proper yescrypt */
			HMAC_SHA256_Update_Y(&ctx, "Client Key", 10);
#else
			/* GlobalBoost-Y buggy yescrypt */
			HMAC_SHA256_Update_Y(&ctx, salt, saltlen);
#endif
			HMAC_SHA256_Final_Y(sha256, &ctx);
		}
		/* Compute StoredKey */
		{
			SHA256_CTX_Y ctx;
			SHA256_Init_Y(&ctx);
			SHA256_Update_Y(&ctx, sha256, sizeof(sha256));
			SHA256_Final_Y(buf, &ctx);
		}
	}

	if (free_region(&tmp))
		return -1;

	/* Success! */
	return 0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
  Define declarations.
*/
/* Number of sample points used when flattening Bezier curves. */
#define BezierQuantum 200
/* Slack applied when growing primitive-info arrays — TODO confirm against
   the Trace*() growth logic elsewhere in this file. */
#define PrimitiveExtentPad 2048
/* Hard upper bound on Bezier coordinates, limiting resource use on
   pathological MVG input. */
#define MaxBezierCoordinates 6291456
/* Raise a "point expected" drawing exception for a malformed token, mark the
   enclosing parser's status as failed, and break out of the enclosing
   switch/loop.  Relies on a 'status' variable being in scope at the use
   site. */
#define ThrowPointExpectedException(token,exception) \
{ \
  (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}
/*
  Typedef declarations.
*/

/* One polygon edge used by the scan-line fill code: an ordered run of
   points plus cached bounds and traversal state. */
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;             /* bounding box of this edge's points */

  double
    scanline;           /* NOTE(review): presumably the current/last scanline
                           processed for this edge — confirm against
                           the polygon rasterizer */

  PointInfo
    *points;            /* vertex array (ownership: the containing
                           PolygonInfo — TODO confirm) */

  size_t
    number_points;      /* number of entries in points */

  ssize_t
    direction;          /* edge winding direction */

  MagickBooleanType
    ghostline;          /* NOTE(review): presumably marks edges that close a
                           path without being stroked — confirm */

  size_t
    highwater;          /* resume index for incremental scanning —
                           TODO confirm */
} EdgeInfo;

/* Parameters of an ellipse-like element: center, axes, and rotation. */
typedef struct _ElementInfo
{
  double
    cx,                 /* center x */
    cy,                 /* center y */
    major,              /* major axis length */
    minor,              /* minor axis length */
    angle;              /* rotation angle */
} ElementInfo;

/* Mutable state threaded through the MVG primitive tracers (TracePath and
   friends): the growable primitive array, its capacity, the write offset,
   the current point, and where to report errors. */
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;   /* address of the growable primitive array */

  size_t
    *extent;            /* address of the array's allocated extent */

  ssize_t
    offset;             /* current write position within the array */

  PointInfo
    point;              /* current point */

  ExceptionInfo
    *exception;         /* sink for drawing errors */
} MVGInfo;

/* A polygon as a set of edges for scan-line conversion. */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;             /* edge array */

  size_t
    number_edges;       /* number of entries in edges */
} PolygonInfo;

/* Path vertex classification codes emitted while converting primitives to
   a path representation. */
typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

/* One path vertex: a point plus its classification code. */
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *info;

  /*
    Allocate a DrawInfo structure (AcquireCriticalMemory() aborts on
    allocation failure) and initialize every member to its default value.
    Caller owns the result and releases it with DestroyDrawInfo().
  */
  info=(DrawInfo *) AcquireCriticalMemory(sizeof(*info));
  GetDrawInfo((ImageInfo *) NULL,info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Start from a default-initialized DrawInfo; when draw_info is NULL the
    caller just wants the defaults.  Caller owns the result
    (DestroyDrawInfo()).
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  /* Local exception sink for the CloneImage() calls below; any errors they
     raise are discarded when it is destroyed at the end. */
  exception=AcquireExceptionInfo();
  /*
    Deep-copy string members (CloneString() allocates fresh storage) and
    copy scalar members by value.
  */
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /* Pattern images are deep-cloned so the clone owns its own copies. */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /*
        The dash pattern is a list of doubles terminated by a (near-)zero
        entry; count the significant entries, then allocate a zero-filled
        buffer with room to spare and copy the entries plus the terminator.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      /* The struct copy above aliased the source's stop array; replace it
         with a deep copy. */
      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): message reused from the dash-pattern path; looks
           like a copy/paste -- confirm against upstream. */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  /* Debug flag reflects the current logging configuration, not the source. */
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  double
    delta;

  register const PointInfo
    *p,
    *q;

  /*
    qsort() comparator: order edges for a right-handed coordinate system by
    the first point's y then x, then by the slope of the leading segment
    (expressed as a cross product to avoid division), then by the second
    point's y and x.  Each criterion only decides when it differs.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  delta=p[0].y-q[0].y;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[0].x-q[0].x;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=(p[1].x-p[0].x)*(q[1].y-q[0].y)-(p[1].y-p[0].y)*(q[1].x-q[0].x);
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[1].y-q[1].y;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[1].x-q[1].x;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/* Emit a human-readable dump of every edge in the polygon (direction,
   ghostline flag, bounds, and points) to the Draw event log. */
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  register EdgeInfo
    *p;

  register ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      edge %.20g:",
      (double) i);
    /* direction == 1 means the edge runs toward increasing y ("down"). */
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"      ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"        %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end active-edge");
}
/* Reverse the order of a point list in place. */
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    swap;

  register ssize_t
    head,
    tail;

  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
/*
  Convert an EndCode-terminated PathInfo array into a PolygonInfo: the path
  is split into y-monotone edges (splitting whenever the sweep direction
  flips), "up" edges are reversed so every edge's points run toward
  increasing y, and the edges are sorted for the rasterizer.
  NOTE(review): the allocation-failure paths return NULL without releasing
  polygon_info, its edges, or the in-flight points buffer -- leaks on OOM;
  confirm against upstream fixes before relying on them.
*/
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  /* direction: 0 = undetermined, 1 = toward increasing y, -1 = decreasing. */
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  /* Seed edge slot 0 with empty defaults. */
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to: flush the edge in progress (if it has at least two
          points), then start collecting a new subpath.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                /* Grow the edge array geometrically. */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            /* Normalize "up" edges so points always run toward +y. */
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to: determine the sweep direction of this segment; ties in y are
      broken by x so direction is never 0 here.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: the sweep direction flipped, so close the current
          monotone run and start a fresh one seeded with its last point.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* Grow the point buffer geometrically. */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    /* Track the x-extent incrementally; the y-extent is taken from the
       first/last points when the edge is flushed. */
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* Flush the final edge; a run of fewer than two points is dropped. */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  /* Sort edges for the rasterizer's top-to-bottom sweep. */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
/* Dump an EndCode-terminated vector path to the Draw event log, one line
   per element with its opcode spelled out. */
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end vector-path");
}
/*
  Convert an UndefinedPrimitive-terminated PrimitiveInfo array into a
  vector path (PathInfo array).  Consecutive duplicate points are dropped,
  and every open (unclosed) subpath is closed with a transparent
  "ghostline" segment back to its starting point so the rasterizer sees a
  closed contour.  Returns NULL for primitives that carry no path geometry.
  Caller owns the result.
*/
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,
    q;

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /* Count the primitives; worst case each one contributes three path
     elements (point + ghostline pair), plus the End terminator. */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  /* p: first point of the current subpath; q: last point emitted. */
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath: `coordinates' counts down the points belonging to it.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed: rewrite the
      subpath's first element as OpenCode and append a transparent
      ghostline back to the starting point.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /* Trim the allocation down to the elements actually written.
     NOTE(review): the resize result is returned unchecked -- on failure the
     caller receives NULL; verify ResizeQuantumMemory's failure contract. */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
/*
  Release a DrawInfo and every heap member it owns (strings, pattern and
  mask images, dash pattern, gradient stops), invalidate its signature to
  catch use-after-free, and return NULL for the caller to assign back.
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* Poison the signature so stale pointers fail the assert above. */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
/*
  Destroy one polygon edge: release its point list and shift the
  remaining edges down to close the gap.  Returns the new edge count.
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  EdgeInfo
    *edges;

  size_t
    remaining;

  assert(edge < polygon_info->number_edges);
  edges=polygon_info->edges;
  edges[edge].points=(PointInfo *) RelinquishMagickMemory(edges[edge].points);
  polygon_info->number_edges--;
  remaining=polygon_info->number_edges;
  if (edge < remaining)
    (void) memmove(edges+edge,edges+edge+1,(size_t) (remaining-edge)*
      sizeof(*edges));
  return(remaining);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
/*
  Destroy a PolygonInfo: free every edge's point list, then the edge
  array, then the structure itself.  Returns NULL for reassignment.
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    i;

  for (i=(ssize_t) polygon_info->number_edges-1; i >= 0; i--)
    polygon_info->edges[i].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[i].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Clip the scanline span `edge' at destination row y to the region that
  maps back inside the source image under `affine'.  The x-extent is
  tightened by intersecting the row with the lines x'=0 and x'=columns of
  the source (first block), and with y'=0 and y'=rows (second block).  The
  sign of sx/rx decides which intercept bounds which side; when the
  coefficient is (near) zero the row is either wholly in or wholly out.
  Returns a span with x2 < x1 when nothing of this row maps into source.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  /* z is the source x' at destination x=0 for this row. */
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* Negative scale: the two intercepts swap roles. */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* sx ~ 0: x' is constant along the row; empty span if outside. */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /* rx ~ 0: y' is constant along the row; empty span if outside. */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
/*
  Return the inverse of a 2x3 affine transform: the 2x2 linear part is
  inverted via its determinant (PerceptibleReciprocal() guards against a
  singular matrix), and the translation is mapped through the inverted
  linear part.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse;

  double
    scale;

  scale=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*affine->ry);
  inverse.sx=scale*affine->sy;
  inverse.rx=scale*(-affine->rx);
  inverse.ry=scale*(-affine->ry);
  inverse.sy=scale*affine->sx;
  inverse.tx=(-affine->tx)*inverse.sx-affine->ty*inverse.ry;
  inverse.ty=(-affine->tx)*inverse.rx-affine->ty*inverse.sy;
  return(inverse);
}
/*
  Composite `source' over `image' as dictated by `affine'.  The four source
  corners are mapped forward to find the destination bounding box; each
  destination row in that box is then clipped with AffineEdge() and its
  pixels are sampled from the source through the inverse transform.
  Fix: removed the local `x_offset', which was declared, zeroed, and
  incremented but never read (dead code; -Wunused-but-set-variable).
*/
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box: map the four source corners forward and take
    the min/max of the transformed points.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: clamp the box to the destination and render
    row by row (optionally in parallel).
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    /* Clip this row against the source's footprint; skip empty spans. */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* Map the destination pixel back into source coordinates and sample. */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Clamp the requested stroke width to a sane upper bound proportional to
  the image's larger dimension, so absurd widths cannot blow up rendering.
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  double
    limit;

  limit=(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows);
  return(MagickMin((double) draw_info->stroke_width,limit));
}
/*
  Debug aid: outline each polygon edge's bounding box (red for "down"
  edges, green for "up") and the overall bounds (blue), all expanded by
  half the effective stroke width.  Only useful when debugging the
  rendering algorithm.
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* Default to 96 DPI unless the draw info carries a density geometry. */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* Half the effective (scaled, clamped) stroke width, in pixels. */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* Union of all edge bounds, padded by mid and clamped to the image. */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /* One rectangle per edge: red = down edges, green = up edges. */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* The loop bailed out early: propagate the failure status. */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* Overall bounds rectangle in blue. */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Render the clip path registered as image artifact `id' into a clipping
  mask and install it as the image's write mask.  Returns MagickFalse when
  the artifact is missing or the mask cannot be drawn.
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *path;

  Image
    *mask_image;

  MagickBooleanType
    status;

  path=GetImageArtifact(image,id);
  if (path == (const char *) NULL)
    return(MagickFalse);
  mask_image=DrawClippingMask(image,draw_info,draw_info->clip_mask,path,
    exception);
  if (mask_image == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,mask_image,exception);
  mask_image=DestroyImage(mask_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path: render the MVG in `clip_path' as opaque-white artwork
    on a transparent canvas, then separate and negate the alpha channel to
    produce a grayscale clipping-mask image.  Returns NULL on failure; on
    success the caller owns the returned image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  /*
    Start from a fully transparent canvas with no inherited write mask.
  */
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /*
    Render the path with a white fill and a transparent stroke so the alpha
    channel records pixel coverage only.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
    }
  /*
    Destroy the mask exactly once on failure (DestroyImage() returns NULL).
    The previous code also destroyed it inside the branch above when
    NegateImage() failed, causing a double destroy here.
  */
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path: render the MVG in `mask_path' as opaque-white artwork
    on a transparent canvas, then separate and negate the alpha channel to
    produce a grayscale composite-mask image.  Returns NULL on failure; on
    success the caller owns the returned image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  /*
    Start from a fully transparent canvas with no inherited composite mask.
  */
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /*
    Render the path with a white fill and a transparent stroke so the alpha
    channel records pixel coverage only.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
    }
  /*
    Destroy the mask exactly once on failure (DestroyImage() returns NULL).
    The previous code also destroyed it inside the branch above when
    NegateImage() failed, causing a double destroy here.
  */
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,          /* remaining length of the current dash-pattern entry */
    maximum_length,  /* length of the current polygon segment */
    offset,          /* scaled dash offset still to be consumed */
    scale,           /* affine expansion factor applied to the pattern */
    total_length;    /* distance already walked along the current segment */

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;   /* scratch polygon holding one dash at a time */

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,   /* next free slot in dash_polygon */
    n;   /* index into draw_info->dash_pattern; odd entries are gaps */

  /*
    Stroke the polygon in primitive_info as a sequence of dashes, honoring
    the dash pattern and dash offset of draw_info.  Each complete dash is
    rendered via DrawStrokePolygon().
  */
  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /*
    Count the vertices (terminated by UndefinedPrimitive) and allocate a
    zeroed scratch polygon with slack for the dash end-points.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance through the pattern entries until the
    offset is exhausted, leaving `length' as what remains of the entry the
    first dash starts in.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each polygon segment, emitting dashes as the pattern dictates.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /* current pattern entry exhausted: advance (wrap at terminator) */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* odd entry = gap: restart the scratch polygon at the gap end */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* even entry = dash: close it off and stroke it */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* carry the unconsumed pattern remainder into the next segment */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    /* in a dash: keep this vertex as part of the dash under construction */
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /*
    Flush a trailing partial dash, nudged by epsilon so it has extent.
  */
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  /*
    Map pixel (x,y) to a position along the gradient.  For linear gradients
    this is the (unnormalized) projection of the pixel onto the gradient
    vector; for radial gradients it is a radial distance.
  */
  if ((gradient->type == UndefinedGradient) ||
      (gradient->type == LinearGradient))
    {
      const SegmentInfo
        *gradient_vector;

      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        direction,
        delta;

      gradient_vector=(&gradient->gradient_vector);
      direction.x=gradient_vector->x2-gradient_vector->x1;
      direction.y=gradient_vector->y2-gradient_vector->y1;
      delta.x=(double) x-gradient_vector->x1;
      delta.y=(double) y-gradient_vector->y1;
      length=sqrt(delta.x*delta.x+delta.y*delta.y);
      gamma=sqrt(direction.x*direction.x+direction.y*direction.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=direction.x*delta.x+direction.y*delta.y;
      offset=gamma*scale*length;
      return(offset);
    }
  if (gradient->type == RadialGradient)
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* repeat spread uses the raw Euclidean distance from center */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* rotate into the gradient frame and normalize by the radii */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
  const StopInfo
    *a,
    *b;

  /*
    qsort() comparator ordering gradient stops by ascending offset.  Note
    the strict-greater test comes first: a tiny positive difference still
    compares greater, only the remaining near-equal cases compare equal.
  */
  a=(const StopInfo *) x;
  b=(const StopInfo *) y;
  if (a->offset > b->offset)
    return(1);
  if (fabs(a->offset-b->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;  /* length of the linear gradient vector */

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.  For each pixel inside the
    gradient bounding box a normalized offset is computed, mapped through
    the spread method (pad, reflect, or repeat), and the color interpolated
    between the two surrounding gradient stops is composited over the
    existing pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /* stops must be ascending by offset for the interval searches below */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;

    double
      alpha,
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /* seed the offset at the row start; refined per-pixel below */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);  /* normalize to [0,1] range */
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: offsets outside [0,1] clamp to the first or last
            stop color.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          /* find the first stop beyond the offset */
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* linearly blend between stops i-1 and i */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: the gradient ping-pongs, mirroring on every
            other repetition of the unit interval.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;  /* pixel straddles a repeat seam */

          double
            repeat;

          /*
            Repeat spread: the gradient tiles; the seam between
            repetitions is antialiased by blending the boundary stops.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* blend across the seam: last stop back to first */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives; grow the
    primitive-info array when needed.  The required extent is computed in
    double precision and bounded against both SSIZE_MAX and the configured
    memory limit before any size_t conversion, so an attacker-controlled
    `pad' cannot overflow the allocation size.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);  /* current allocation already suffices */
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          /* mark the newly grown tail as unused */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
  /*
    Splay-tree key comparator: order two MVG macro names as C strings.
  */
  return(strcmp((const char *) target,(const char *) source));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.  Each named
    "push ... \"name\"" / "pop" pair has its body copied into a splay tree
    keyed by name, so later MVG statements can reference it.  Returns NULL
    when `primitive' is NULL; the caller owns the returned tree.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;  /* push/pop nesting depth of the macro body */

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            n=1;
            /* scan forward for the matching pop, tracking nesting */
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;  /* body ends before "pop" */
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *end;

  double
    value;

  /*
    Return MagickTrue when `point' parses as a numeric coordinate: the
    conversion must either consume at least one character or produce a
    value distinguishable from zero.
  */
  value=StringToDouble(point,&end);
  if ((end == point) && (fabs(value) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Record a one-coordinate primitive at `point'; a single point can never
    close a subpath.
  */
  primitive_info->point=point;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->coordinates=1;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
break;
}
default:
break;
}
if (coordinates > MaxBezierCoordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates == 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  /*
    Public entry point: render the MVG primitives described by draw_info onto
    image.  Delegates to RenderMVGContent() with a recursion depth of zero.
  */
  MagickBooleanType
    status;

  status=RenderMVGContent(image,draw_info,0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
%    o pattern: the address of the pattern image; updated with the rendered
%      pattern.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  /*
    Render the MVG path stored in the "<name>" image artifact into *pattern,
    an image sized by the "<name>-geometry" artifact.  Any prior *pattern is
    destroyed first.  Returns MagickFalse when either artifact is missing or
    the pattern image cannot be acquired.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(property,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  if (*pattern == (Image *) NULL)
    return(MagickFalse);  /* fix: AcquireImage() may fail; avoid NULL deref */
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  /* an optional "<name>-type" artifact selects the gradient type */
  (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  /*
    Release every per-thread PolygonInfo in the set, then the set itself.
    Returns NULL so callers can write polygon_info=DestroyPolygonThreadSet(...).
  */
  register ssize_t
    i;

  assert(polygon_info != (PolygonInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
  {
    if (polygon_info[i] == (PolygonInfo *) NULL)
      continue;
    polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
  }
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  /*
    Compute the fill and stroke opacity contributed by polygon_info at pixel
    (x,y).  mid is half the (affine-scaled) stroke width.  The stroke opacity
    in [0,1] is returned through *stroke_alpha; the fill opacity in [0,1] is
    the return value (0.0 whenever fill is MagickFalse).
  */
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /*
      NOTE(review): the break below implies edges are ordered by y1, so no
      later edge can start above this scanline — confirm against
      ConvertPathToPolygon.
    */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* the scanline has passed this edge entirely; retire it */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* resume the point scan where the previous scanline left off */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          /* first hit on a new scanline: remember the restart index */
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.  beta is the dot
        product of the segment vector with (x,y)-q, i.e. the projection
        parameter (unnormalized).
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* closest to the segment's first endpoint */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* closest to the segment's second endpoint */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* squared perpendicular distance to the segment's interior */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.  distance is still squared here.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;  /* fully inside the stroke */
              else
                {
                  /* anti-aliased falloff at the stroke boundary */
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          /* beta was not computed in the stroke branch above; derive it */
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number: count signed crossings of edges left of (x,y).
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* edge lies wholly left of x: it crosses the ray once */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* cross product decides which side of the crossing segment (x,y) is on */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule: odd winding number means inside */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    /* non-zero rule: any net winding means inside */
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  /*
    Rasterize a polygon (or line/stroke) primitive onto image.  Converts the
    primitive to per-thread polygon edge lists, clips the bounding box to the
    image, then composites fill and stroke colors scanline by scanline,
    optionally in parallel via OpenMP.
  */
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  /* nothing to rasterize for fewer than two coordinates */
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  /* debug-only block, compiled out via if (0) */
  DisableMSCWarning(4127)
  if (0)
    {
      status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
      if (status == MagickFalse)
        {
          polygon_info=DestroyPolygonThreadSet(polygon_info);
          return(status);
        }
    }
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid is half the stroke width in device space */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  /* union the bounding boxes of all edges */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* pad for the stroke width plus one pixel of anti-aliasing */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp the bounding box to the image proper */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    NOTE(review): coordinates == 1 was already rejected above, so this branch
    effectively handles primitives whose path degenerated to zero edges.
  */
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* only the single pixel nearest the primitive point is painted */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;

      PixelInfo
        fill_color,
        stroke_color;

      /*
        Fill and/or stroke.  Each thread works on its own polygon_info[id]
        copy since GetFillAlpha mutates per-edge scanline state.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* no anti-aliasing: snap coverage to all-or-nothing */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      /* stroke composites over the fill */
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double ConstrainCoordinate(double x)
{
  /*
    Clamp a primitive coordinate to a range that converts safely to ssize_t
    (with 512 units of headroom below SSIZE_MAX).
  */
  const double
    limit = (double) (SSIZE_MAX-512);

  if (x < -limit)
    return(-limit);
  if (x > limit)
    return(limit);
  return(x);
}
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  /*
    Emit a DrawEvent trace describing primitive_info.  Point-style primitives
    (alpha, color, image, point, text) are logged as one line and return
    early; everything else is walked point-by-point, marking duplicate
    consecutive points and whether each subpath ends open or closed.
  */
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    point,
    q;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  /* pixel coordinates of the primitive's first point */
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Walk the point list; coordinates counts down the points remaining in the
    current subpath, p holds the subpath's first point and q the previous one.
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* start of a new subpath */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    point=primitive_info[i].point;
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      /* consecutive points coincide within MagickEpsilon */
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* subpath complete: closed if the last point returned to the first */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickStatusType
status;
register ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
status=MagickTrue;
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
(IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
status=SetImageColorspace(image,sRGBColorspace,exception);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
exception);
status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
exception);
}
x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5));
y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5));
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case AlphaPrimitive:
{
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
(void) SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel,
target;
(void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
ChannelType
channel_mask;
PixelInfo
target;
(void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
(void) SetImageChannelMask(image,channel_mask);
break;
}
case ResetMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
}
break;
}
case ColorPrimitive:
{
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelInfo
pixel;
register Quantum
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetPixelInfo(image,&pixel);
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
(void) SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel,
target;
(void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
q+=GetPixelChannels(image);
continue;
}
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
PixelInfo
target;
(void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(double) draw_info->border_color.red;
target.green=(double) draw_info->border_color.green;
target.blue=(double) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,draw_info,&target,x,y,
primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue,exception);
break;
}
case ResetMethod:
{
MagickBooleanType
sync;
PixelInfo
pixel;
GetPixelInfo(image,&pixel);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetFillColor(draw_info,x,y,&pixel,exception);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
}
break;
}
case ImagePrimitive:
{
AffineMatrix
affine;
char
composite_geometry[MagickPathExtent];
Image
*composite_image,
*composite_images;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
composite_images=(Image *) NULL;
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_images=ReadInlineImage(clone_info,primitive_info->text,
exception);
else
if (*primitive_info->text != '\0')
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
composite_images=ReadImage(clone_info,exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_images == (Image *) NULL)
{
status=0;
break;
}
composite_image=RemoveFirstImageFromList(&composite_images);
composite_images=DestroyImageList(composite_images);
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
/*
Resize image.
*/
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
(void) TransformImage(&composite_image,(char *) NULL,
composite_geometry,exception);
}
if (composite_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
exception);
if (draw_info->alpha != OpaqueAlpha)
(void) SetImageAlpha(composite_image,draw_info->alpha,exception);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
if ((draw_info->compose == OverCompositeOp) ||
(draw_info->compose == SrcOverCompositeOp))
(void) DrawAffineImage(image,composite_image,&affine,exception);
else
(void) CompositeImage(image,composite_image,draw_info->compose,
MagickTrue,geometry.x,geometry.y,exception);
composite_image=DestroyImage(composite_image);
break;
}
case PointPrimitive:
{
PixelInfo
fill_color;
register Quantum
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (Quantum *) NULL)
break;
GetFillColor(draw_info,x,y,&fill_color,exception);
CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
(double) GetPixelAlpha(image,q),q);
(void) SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
break;
}
default:
{
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
(fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
(draw_info->stroke.alpha != (Quantum) TransparentAlpha))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
status=DrawDashPolygon(draw_info,primitive_info,image,exception);
break;
}
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
if ((mid > 1.0) &&
((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
(draw_info->stroke_pattern != (Image *) NULL)))
{
double
x,
y;
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
closed_path=primitive_info[0].closed_subpath;
i=(ssize_t) primitive_info[0].coordinates;
x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
closed_path=MagickTrue;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
status=DrawPolygonPrimitive(image,draw_info,primitive_info,
exception);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
exception);
clone_info=DestroyDrawInfo(clone_info);
status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
break;
}
}
image_view=DestroyCacheView(image_view);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  /*
    Render a round line cap at the endpoint described by primitive_info:
    synthesize a minuscule 4-vertex polygon (2*MagickEpsilon on a side)
    anchored at the endpoint and hand it to the polygon rasterizer.
  */
  linecap[0]=(*primitive_info);
  linecap[1]=(*primitive_info);
  linecap[2]=(*primitive_info);
  linecap[3]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  /* sentinel terminating the primitive list */
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon: each subpath's stroke outline is traced into a
    filled polygon which is rendered with a clone of draw_info whose fill
    is the stroke color and whose own stroke is disabled.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* fill the traced outline with the stroke color/pattern */
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  /* disable stroking on the clone so only the outline fill is rendered */
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    /* a single-coordinate subpath has no stroke outline */
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    /* q points at the last coordinate of this subpath */
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    /* round caps are drawn at both ends of an open subpath */
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Initialize the affine matrix to the identity transform: all
    coefficients zeroed, then sx and sy (the scale terms) set to 1.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes: zero the structure, apply built-in
    defaults, then override from image_info settings and image options.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* default fill is opaque black, default stroke is transparent white */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Inherit font/density/pointsize and related settings from image_info.
  */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Apply per-image "define"/option overrides (e.g. -define or -set).
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* accept either a symbolic weight (e.g. "bold") or a number */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the permutation of (n,k).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    product;

  ssize_t
    i;

  /*
    Binomial coefficient C(n,k)=n!/(k!*(n-k)!), computed as the product
    of the terms k+1..n divided by the terms 1..(n-k).
  */
  product=1.0;
  i=k+1;
  while (i <= n)
    product*=i++;
  i=1;
  while (i <= (n-k))
    product/=i++;
  return(product);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  /*
    An MVG arc is an ellipse centered at the midpoint of the segment
    from start to end, with radii spanning back to the start point.
  */
  center.x=0.5*(start.x+end.x);
  center.y=0.5*(start.y+end.y);
  radius.x=fabs(start.x-center.x);
  radius.y=fabs(start.y-center.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  /*
    Trace an SVG-style elliptical arc (endpoint parameterization: radii,
    x-axis rotation, large-arc and sweep flags) as a sequence of cubic
    Bezier segments, each spanning at most a quarter turn.
  */
  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* coincident endpoints degenerate to a single point */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  /* a zero radius degenerates to a straight line */
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /* half-chord vector expressed in the rotated (x-axis-angle) frame */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /* scale the radii up when they are too small to span the endpoints */
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /* endpoints mapped onto the unit circle in the rotated frame */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /* choose the circle center on the side selected by large_arc/sweep */
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /* start angle alpha and signed angular extent theta of the arc */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* one Bezier segment per (at most) quarter turn */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /* gamma is the Bezier control-point distance for this segment's span */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* segment start: path start for the first segment, else the previous
       segment's endpoint */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    /* map unit-circle control points back to image space (unrotate/scale) */
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    /* force the last segment to land exactly on the requested endpoint */
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    /* TraceBezier may reallocate the primitive buffer; re-resolve p */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  /* rewind to the subpath head and stamp the coordinate count/primitive */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Trace a Bezier curve of number_coordinates control points (already
    stored at the current primitive offset) as a polyline, evaluating
    the Bernstein form at evenly spaced parameter values.
  */
  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /* size the sample count from the largest pairwise coordinate span */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /* cap the per-coordinate sample density at BezierQuantum */
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  /* CheckPrimitiveExtent may reallocate; re-resolve primitive_info */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  /* binomial coefficients of the Bernstein basis */
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    /* alpha walks the Bernstein terms (1-t)^(n-1-j)*t^j incrementally */
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /* close the polyline exactly on the final control point */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* back-fill the primitive type over every emitted coordinate */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    dx,
    dy,
    radius;

  PointInfo
    degrees,
    radii;

  /*
    A circle is a full (0..360 degree) ellipse whose radii both equal
    the distance from the center (start) to a perimeter point (end).
  */
  dx=end.x-start.x;
  dy=end.y-start.y;
  radius=hypot(dx,dy);
  radii.x=radius;
  radii.y=radius;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,radii,degrees));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys: sample the parametric form
    from arc.x to arc.y degrees at an angular step sized by the radii.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* a zero radius produces no coordinates */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  /* shrink the step for large radii so the polygon stays smooth */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  /* normalize the end angle so the sweep is non-negative */
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (coordinates > (double) SSIZE_MAX)
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  /* CheckPrimitiveExtent may reallocate; re-resolve primitive_info */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* emit the exact end-angle point so the arc terminates precisely */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* mark the subpath closed when first and last points coincide */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  /* back-fill the primitive type over every emitted coordinate */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  MagickBooleanType
    degenerate;

  /*
    Trace a line segment as two coordinates; coincident endpoints
    collapse to a single point primitive.
  */
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  degenerate=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (degenerate != MagickFalse)
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  primitive_info[1].primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
/*
  TracePath() parses an SVG-style path string (M/L/H/V/C/S/Q/T/A/Z commands,
  absolute upper-case or relative lower-case) and traces it into point
  primitives at mvg_info->offset.  Returns the number of coordinates traced,
  or 0 on error (parse failure or primitive-extent exhaustion).

  NOTE(review): the Bezier cases (c/C, q/Q, s/S, t/T) stage control points
  directly at q with no CheckPrimitiveExtent immediately before the writes;
  presumably the pad from the preceding commands' extent checks covers those
  few slots -- verify against CheckPrimitiveExtent's padding policy.
*/
static size_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx,ry rotation large-arc-flag sweep-flag x,y.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          /* Lower-case commands are relative to the current point. */
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: close out the bookkeeping for the previous subpath, then
          start a new one at the given point.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;  /* remember subpath origin for 'Z' */
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: the first control point is the
          reflection of the previous curve's second control point.
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* No preceding cubic: the reflected control point collapses. */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve: control point is the reflection of
          the previous curve's control point.
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: line back to the subpath origin and finish the subpath.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(0);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* Unknown command; presumably the Throw macro clears status so the
           outer loop exits -- verify against the macro's definition. */
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  /*
    Close out the final subpath and back-fill the primitive type.
  */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    corner[5];

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  /*
    Trace an axis-aligned rectangle as a closed 5-point subpath:
    start -> (start.x,end.y) -> end -> (end.x,start.y) -> start.
  */
  corner[0]=start;
  corner[1].x=start.x;
  corner[1].y=end.y;
  corner[2]=end;
  corner[3].x=end.x;
  corner[3].y=start.y;
  corner[4]=start;
  q=primitive_info;
  for (i=0; i < 5; i++)
  {
    if (TracePoint(q,corner[i]) == MagickFalse)
      return(MagickFalse);
    q+=q->coordinates;
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
  return(MagickTrue);
}
/*
  TraceRoundRectangle() traces a rectangle with elliptically rounded corners
  into mvg_info->primitive_info at the current offset.  The four corners are
  traced as 90-degree ellipse segments and the path is closed by repeating
  the first point.  Degenerate rectangles (width or height smaller than
  MagickEpsilon) trace nothing and succeed.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /*
    Clamp corner radii so opposite arcs cannot overlap.
  */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /*
    Top-right corner arc (270..360 degrees).
  */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /*
    Bottom-right corner arc (0..90 degrees).
  */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /*
    Bottom-left corner arc (90..180 degrees).
  */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /*
    Top-left corner arc (180..270 degrees).
  */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /*
    Close the path back to the first traced point, then rewind the offset so
    the whole round rectangle reads as one subpath starting at `offset'.
  */
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
/*
  TraceSquareLinecap() extends both ends of an open polyline outward by
  `offset' along the local path direction so a square line cap covers the
  endpoints.

  Fix: the original divided by `distance' unconditionally.  When every
  vertex coincides with the endpoint, distance is (near) zero, producing
  NaN/Inf coordinates, and the backward scan could fall through with
  j == -1, reading primitive_info[-1] out of bounds.  Both assignments are
  now guarded so degenerate segments leave the endpoint unchanged.
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /*
    Find the first vertex measurably distinct from vertex 0.
  */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  if (distance >= MagickEpsilon)
    {
      primitive_info[0].point.x=(double) (primitive_info[i].point.x+
        dx*(distance+offset)/distance);
      primitive_info[0].point.y=(double) (primitive_info[i].point.y+
        dy*(distance+offset)/distance);
    }
  /*
    Find the last vertex measurably distinct from the final vertex.
  */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  if (distance >= MagickEpsilon)
    {
      primitive_info[number_vertices-1].point.x=(double)
        (primitive_info[j].point.x+dx*(distance+offset)/distance);
      primitive_info[number_vertices-1].point.y=(double)
        (primitive_info[j].point.y+dy*(distance+offset)/distance);
    }
  return(MagickTrue);
}
/*
  TraceStrokePolygon() computes the outline polygon produced by stroking the
  open or closed polyline in `primitive_info' with the stroke width, line
  join (bevel/miter/round), and line cap from `draw_info'.  Two point
  buffers are built -- path_p along one side of the stroke and path_q along
  the other -- then concatenated into a single closed stroke polygon.
  Returns a newly allocated PrimitiveInfo array (caller frees) or NULL on
  memory-allocation failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
/* Grow both side buffers by `pad' entries; on allocation failure release
   everything and bail out of the enclosing function with NULL. */
#define CheckPathExtent(pad) \
  if ((ssize_t) (q+(pad)) >= (ssize_t) max_strokes) \
    { \
      if (~max_strokes < (pad)) \
        { \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        } \
      else \
        { \
          max_strokes+=(pad); \
          path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \
            sizeof(*path_p)); \
          path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \
            sizeof(*path_q)); \
        } \
      if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
        { \
          if (path_p != (PointInfo *) NULL) \
            path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          if (path_q != (PointInfo *) NULL) \
            path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
          polygon_primitive=(PrimitiveInfo *) \
            RelinquishMagickMemory(polygon_primitive); \
          return((PrimitiveInfo *) NULL); \
        } \
    }

  /* Pairs of per-segment values: .p for the previous segment, .q for the
     current one. */
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      /* Duplicate the second vertex so the closing join is computed too. */
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /* Near-vertical and near-horizontal segments get huge sentinel slopes
     (1/MagickEpsilon) instead of dividing by ~0. */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid is half the stroke width in device space. */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  /* NOTE(review): the indices look swapped here (p indexes path_q, q indexes
     path_p), but both are zero so the stores land at element 0 of each
     buffer and both counters end up at 1 -- effect is identical either way;
     verify against upstream before "fixing". */
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    if (dot_product < 0.25)
      continue;  /* skip sub-half-pixel segments */
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_*[4] is the intersection of the two offset edges (the miter tip);
       parallel segments just reuse the shared endpoint. */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    /* Sign of the cross product selects which side is the convex side of
       the turn; the join geometry is emitted on that side. */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /* Approximate the round join with short chords of radius mid. */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* Shift: the current segment becomes the previous one. */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon: path_p forward, then path_q in reverse.
    NOTE(review): the size arithmetic below relies on MagickBooleanType
    closed_path being exactly 0 or 1.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
rose_functionCall.c | //! Contributed by Jeff Keasler
#include <omp.h>
typedef double real8;
extern void OtherFunc(int k,real8 *l,real8 *m,real8 *n,real8 *o,real8 *p,real8 q,real8 r,real8 s[3]);
/*
 * For each element i in [istart, iend), call OtherFunc to fill the
 * 3-vector s, then scatter s into c[3*i .. 3*i+2] in parallel.
 *
 * Fix: the inner loop previously declared `int k', shadowing the `k'
 * parameter that is passed to OtherFunc; the index is renamed `j' so the
 * two meanings of `k' can no longer be confused.
 */
void foo(int istart,int iend,real8 *a,real8 *b,real8 *c,int k,real8 *l,real8 *m,real8 *n,real8 *o,real8 *p)
{
  for (int i = istart; i <= iend - 1; i += 1) {
    real8 s[3];
    real8 afi = a[i];
    real8 bfi = b[i];
    OtherFunc(k,l,m,n,o,p,afi,bfi,s);
#pragma omp parallel for
    for (int j = 0; j <= 2; j += 1) {
      c[3 * i + j] = s[j];
    }
  }
}
|
matmult.c | #include <stdio.h>
#include <stdlib.h>
#include "matmult_initialize.h"
#ifndef MATRIX_SIZE
#define MATRIX_SIZE 512
#endif
#define NRA MATRIX_SIZE /* number of rows in matrix A */
#define NCA MATRIX_SIZE /* number of columns in matrix A */
#define NCB MATRIX_SIZE /* number of columns in matrix B */
/* Allocate a rows x cols matrix as an array of row pointers.
   Returns NULL on allocation failure (all partial allocations are freed),
   otherwise a matrix the caller releases with freeMatrix(). */
double** allocateMatrix(int rows, int cols) {
  int i;
  double **matrix = (double**)malloc((sizeof(double*)) * rows);
  if (matrix == NULL) {
    return NULL;
  }
  for (i=0; i<rows; i++) {
    matrix[i] = (double*)malloc((sizeof(double)) * cols);
    if (matrix[i] == NULL) {
      /* Roll back rows already allocated so nothing leaks. */
      while (--i >= 0) {
        free(matrix[i]);
      }
      free(matrix);
      return NULL;
    }
  }
  return matrix;
}
/* Release a matrix allocated by allocateMatrix(). Safe to call with NULL
   (a no-op), which pairs with allocateMatrix's NULL failure return.
   cols is unused but kept for interface compatibility with callers. */
void freeMatrix(double** matrix, int rows, int cols) {
  int i;
  (void) cols;
  if (matrix == NULL) {
    return;
  }
  for (i=0; i<rows; i++) {
    free(matrix[i]);
  }
  free(matrix);
}
/* Multiply two doubles.  `static inline' replaces the nonstandard
   compiler-specific `__inline' keyword; the function is only used within
   this translation unit. */
static inline double multiply(double a, double b) {
  return a * b;
}
// cols_a and rows_b are the same value
/* Matrix multiply, c += a * b, in the classic row/column/inner (i-j-k)
   loop order.  The outer loop's iterations are shared across the OpenMP
   team; `nowait' drops the implicit barrier at the end of the loop. */
void compute(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) {
  int row, col, inner;
#pragma omp parallel private(row,col,inner) shared(a,b,c)
  {
#pragma omp for nowait
    for (row = 0; row < rows_a; row++) {
      for (col = 0; col < cols_b; col++) {
        for (inner = 0; inner < cols_a; inner++) {
          c[row][col] += multiply(a[row][inner], b[inner][col]);
        }
      }
    }
  } /* end of parallel region */
}
/* Matrix multiply, c += a * b, with the j and k loops interchanged
   (i-k-j order) for cache-friendlier row-wise access to b and c.  Same
   OpenMP worksharing scheme as compute(). */
void compute_interchange(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) {
  int row, col, inner;
#pragma omp parallel private(row,col,inner) shared(a,b,c)
  {
#pragma omp for nowait
    for (row = 0; row < rows_a; row++) {
      for (inner = 0; inner < cols_a; inner++) {
        for (col = 0; col < cols_b; col++) {
          c[row][col] += multiply(a[row][inner], b[inner][col]);
        }
      }
    }
  } /* end of parallel region */
}
/* Allocate and initialize the three matrices, run both multiply kernels,
   and return one element of the result as a spot check. */
double do_work(void) {
  double **a,    /* matrix A to be multiplied */
         **b,    /* matrix B to be multiplied */
         **c;    /* result matrix C */
  a = allocateMatrix(NRA, NCA);
  b = allocateMatrix(NCA, NCB);
  c = allocateMatrix(NRA, NCB);
  /*** Spawn a parallel region explicitly scoping all variables ***/
  initialize(a, NRA, NCA);
  initialize(b, NCA, NCB);
  initialize(c, NRA, NCB);
  compute(a, b, c, NRA, NCA, NCB);
  compute_interchange(a, b, c, NRA, NCA, NCB);
  double result = c[0][1];
  freeMatrix(a, NRA, NCA);
  freeMatrix(b, NCA, NCB);
  /* Fix: c was allocated with NRA rows, but was freed with NCA rows;
     this only worked because NRA == NCA by default. */
  freeMatrix(c, NRA, NCB);
  return result;
}
/* Entry point: run the benchmark once and report completion. */
int main (int argc, char *argv[])
{
  (void) argc;
  (void) argv;
  do_work();
  printf("Done.\n");
  return 0;
}
|
Step_OMP.h | #ifndef __Step_OMP_h__
#define __Step_OMP_h__
#include <chrono>
#include "Step.h"
// Per-substep callback type invoked by each OpenMP thread: _self is the
// owning Step_OMP instance, my_th_id the calling thread's id.
typedef int (*CalSubstepFuncOMP)(void* _self, size_t my_th_id,
	double dt, double cur_time, size_t substp_id);

// Default substep callback installed by the Step_OMP constructor when no
// user callback is supplied (defined elsewhere).
int substep_func_omp_default(void *_self, size_t my_th_id,
	double dt, double cur_time, size_t substp_id);

// OpenMP-threaded time-stepping driver derived from Step.  solve() runs the
// step loop and accumulates its wall-clock duration in cpu_time; the
// *_calculation() members steer the loop from the master thread.
class Step_OMP : public Step
{
protected:
	size_t thread_num;                 // number of threads used by solve()
	double new_time;                   // presumably time after the current substep -- confirm in solve()
	double step_time_minus_tol;        // step end time minus tolerance (loop bound)
	double next_output_time_minus_tol; // next output time minus tolerance
	bool output_not_needed, step_not_end;  // step-loop control flags
	bool continue_cal;                 // cleared to stop the calculation loop
	CalSubstepFuncOMP cal_substep_func_omp;  // per-substep callback
	std::chrono::steady_clock::time_point t0, t1;  // wall-clock markers around solve()
	std::chrono::nanoseconds cpu_time; // elapsed duration measured by solve()
public:
	Step_OMP(const char *_name, const char *_type = "Step_OMP",
		CalSubstepFuncOMP _func_omp = &substep_func_omp_default);
	~Step_OMP();
	// Set the thread count before calling solve().
	inline void set_thread_num(size_t th_num) noexcept { thread_num = th_num; }
	int solve() override;
	// NOTE: these functions need to be put into
	// #pragma omp master
	void continue_calculation();
	void exit_calculation();
	void abort_calculation();
	// Elapsed solve() time in microseconds.
	inline long long get_time() const noexcept { return std::chrono::duration_cast<std::chrono::microseconds>(cpu_time).count(); }
};
#endif |
distribute_parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}}
#pragma omp distribute parallel for simd foo
// Clang -verify test: a bare clause-less directive is accepted, but the
// following statement must be a for loop. expected-* comments are assertions
// with line-relative @+N anchors — do not insert lines inside this function.
void test_no_clause() {
int i;
#pragma omp distribute parallel for simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp distribute parallel for simd' must be a for loop}}
#pragma omp distribute parallel for simd
++i;
}
// Clang -verify test: branches (goto/return) may not cross the OpenMP region
// boundary in either direction; labels inside/outside the region are mutually
// undeclared. expected-* comments are assertions — do not edit or move them.
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
// Clang -verify test: unknown tokens after the directive are ignored with a
// warning. expected-* comments are line-relative assertions — do not edit.
void test_invalid_clause() {
int i;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd foo bar
for (i = 0; i < 16; ++i)
;
}
// Clang -verify test: stray non-identifier tokens (';', ',') after the
// directive or a valid clause only produce the extra-tokens warning.
// expected-* comments are line-relative assertions — do not edit them.
void test_non_identifiers() {
int i, x;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd;
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
#pragma omp distribute parallel for simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
// Clang -verify test: malformed, non-constant, and non-positive arguments to
// the 'safelen' clause. expected-* comments are assertions with line-relative
// @+N anchors — do not insert or remove lines inside this function.
void test_safelen() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd safelen
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd safelen()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd safelen 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd safelen(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// Clang -verify test: same malformed/non-constant/non-positive argument
// matrix as test_safelen, but for the 'simdlen' clause. expected-* comments
// are line-relative assertions — do not insert lines inside this function.
void test_simdlen() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd simdlen
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd simdlen()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd simdlen 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd simdlen(4, 8)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd simdlen(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd simdlen(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd simdlen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// Clang -verify test: simdlen must be <= safelen regardless of clause order.
// expected-* comments are line-relative assertions — do not edit them.
void test_safelen_simdlen() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute parallel for simd simdlen(6) safelen(5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute parallel for simd safelen(5) simdlen(6)
for (i = 0; i < 16; ++i)
;
}
// Clang -verify test: 'collapse' clause — malformed arguments, loop-nest
// depth requirements (collapse(4) needs 4 perfectly nested loops), constant
// and positivity constraints, and illegal nesting inside a simd region.
// expected-* comments are line-relative assertions — do not insert lines.
void test_collapse() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd collapse
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute parallel for simd collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute parallel for simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}}
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute parallel for simd collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp distribute parallel for simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd collapse(2)
for (i = 0; i < 16; ++i)
for (int j = 0; j < 16; ++j)
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp distribute parallel for simd reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
// Clang -verify test: 'linear' clause — malformed argument lists, undeclared
// variables, step expressions (x : step), zero-step warning, and conflicts
// with private/lastprivate/duplicate linear. expected-* comments are
// line-relative assertions — do not insert lines inside this function.
void test_linear() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd linear(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd linear(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd linear(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd linear(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd linear(x :)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd linear(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd linear(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(x : 1, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd linear(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp distribute parallel for simd linear(x) linear(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp distribute parallel for simd private(x) linear(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp distribute parallel for simd linear(x) private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp distribute parallel for simd linear(x, y : 0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp distribute parallel for simd linear(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp distribute parallel for simd lastprivate(x) linear(x)
for (i = 0; i < 16; ++i)
;
}
// Clang -verify test: 'aligned' clause — malformed lists, undeclared names,
// alignment expressions (x : N), the array-or-pointer type requirement, and
// duplicate-aligned conflicts. expected-* comments are line-relative
// assertions — do not insert or remove lines inside this function.
void test_aligned() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd aligned(0)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp distribute parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp distribute parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp distribute parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(z)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd aligned(x :)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x :, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd aligned(x : 2 * 2)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x : 1, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd aligned(x : 1, y, z : 1)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y, z)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp distribute parallel for simd aligned(x) aligned(z, x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp distribute parallel for simd aligned(x, y, z) aligned(y, z)
for (i = 0; i < 16; ++i)
;
}
// Clang -verify test: 'private' clause — malformed argument lists plus
// well-formed single/multi-variable uses. expected-* comments are
// line-relative assertions — do not insert lines inside this function.
void test_private() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute parallel for simd private(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd private(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd private()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// Clang -verify test: 'lastprivate' clause — same malformed-argument matrix
// as test_private plus valid uses. expected-* comments are line-relative
// assertions — do not insert lines inside this function.
void test_lastprivate() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Clang -verify test: 'firstprivate' clause — malformed arguments, then valid
// combinations with 'lastprivate' on the same variables. expected-* comments
// are line-relative assertions — do not insert lines inside this function.
void test_firstprivate() {
int i;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 2 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute parallel for simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected variable name}}
#pragma omp distribute parallel for simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Clang -verify test: the canonical-loop-form rule — the loop variable must
// be of integer or pointer type (float/double rejected). expected-* comments
// are line-relative assertions — do not insert lines inside this function.
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp target
#pragma omp teams
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute parallel for simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp target
#pragma omp teams
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp distribute parallel for simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
|
common.c | /****************************************************************************
* *
* OpenMP MicroBenchmark Suite - Version 3.1 *
* *
* produced by *
* *
* Mark Bull, Fiona Reid and Nix Mc Donnell *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk *
* *
* *
* This version copyright (c) The University of Edinburgh, 2015. *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
#include "common.h"
#define CONF95 1.96  /* 95% confidence interval multiplier (z-score) */
int nthreads = 1; // Number of OpenMP threads
int delaylength = -1; // The number of iterations to delay for
int outerreps = -1; // Outer repetitions
double delaytime = -1.0; // Length of time to delay for in microseconds
double targettesttime = 0.0; // The length of time in microseconds that the test
// should run for.
unsigned long innerreps; // Inner repetitions
/* NOTE(review): this macro silently renames every later use of 'times' to
 * 'TIMES' — presumably to avoid clashing with the POSIX times() function
 * declared in <sys/times.h>. Confirm intent; a direct rename of the
 * variable would be clearer. */
#define times TIMES
double *times; // Array of doubles storing the benchmark times in microseconds
double referencetime; // The average reference time in microseconds to perform
// outerreps runs
double referencesd; // The standard deviation in the reference time in
// microseconds for outerreps runs.
double testtime; // The average test time in microseconds for
// outerreps runs
double testsd; // The standard deviation in the test time in
// microseconds for outerreps runs.
/* Print the command-line usage summary for this benchmark binary.
 * argv[0] is used as the program name; the DEFAULT_* macros come from
 * common.h and show the values used when an option is omitted. */
void usage(char *argv[]) {
  printf("Usage: %s.x \n"
         "\t--outer-repetitions <outer-repetitions> (default %d)\n"
         "\t--test-time <target-test-time> (default %0.2f microseconds)\n"
         "\t--delay-time <delay-time> (default %0.4f microseconds)\n"
         "\t--delay-length <delay-length> "
         "(default auto-generated based on processor speed)\n",
         argv[0],
         DEFAULT_OUTER_REPS, DEFAULT_TEST_TARGET_TIME, DEFAULT_DELAY_TIME);
}
/* Parse command-line options into the global configuration variables
 * (delaytime, outerreps, targettesttime).  On any malformed, unknown,
 * or incomplete option, print usage and exit with failure.
 *
 * Fixes vs. previous version:
 *  - strcmp() returns int; it was compared against the float literal 0.0.
 *  - a flag given as the last argument made argv[++arg] read argv[argc]
 *    (NULL), which was then passed to atof/atoi — undefined behavior. */
void parse_args(int argc, char *argv[]) {
  int arg;
  for (arg = 1; arg < argc; arg++) {
    if (strcmp(argv[arg], "--delay-time") == 0) {
      if (arg + 1 >= argc) {
        printf("Missing argument for --delay-time\n");
        usage(argv);
        exit(EXIT_FAILURE);
      }
      delaytime = atof(argv[++arg]);
      if (delaytime == 0.0) {
        printf("Invalid float:--delay-time: %s\n", argv[arg]);
        usage(argv);
        exit(EXIT_FAILURE);
      }
    } else if (strcmp(argv[arg], "--outer-repetitions") == 0) {
      if (arg + 1 >= argc) {
        printf("Missing argument for --outer-repetitions\n");
        usage(argv);
        exit(EXIT_FAILURE);
      }
      outerreps = atoi(argv[++arg]);
      if (outerreps == 0) {
        printf("Invalid integer:--outer-repetitions: %s\n", argv[arg]);
        usage(argv);
        exit(EXIT_FAILURE);
      }
    } else if (strcmp(argv[arg], "--test-time") == 0) {
      if (arg + 1 >= argc) {
        printf("Missing argument for --test-time\n");
        usage(argv);
        exit(EXIT_FAILURE);
      }
      targettesttime = atof(argv[++arg]);
      if (targettesttime == 0) {
        printf("Invalid integer:--test-time: %s\n", argv[arg]);
        usage(argv);
        exit(EXIT_FAILURE);
      }
    } else if (strcmp(argv[arg], "-h") == 0) {
      usage(argv);
      exit(EXIT_SUCCESS);
    } else {
      printf("Invalid parameters: %s\n", argv[arg]);
      usage(argv);
      exit(EXIT_FAILURE);
    }
  }
}
/* Calibrate: find the number of delay() iterations that takes roughly
 * `delaytime` microseconds on this machine.  Grows the candidate length
 * geometrically (x1.1 + 1) until the average time over 1000 reps of
 * delay() reaches the target.  Returns the length; also leaves it in the
 * global `delaylength` as a side effect. */
int getdelaylengthfromtime(double delaytime) {
  int i, reps;
  double lapsedtime, starttime; // seconds
  reps = 1000;
  lapsedtime = 0.0;
  delaytime = delaytime/1.0E6; // convert from microseconds to seconds
  // Note: delaytime is local to this function and thus the conversion
  // does not propagate to the main code.
  // Here we want to use the delaytime in microseconds to find the
  // delaylength in iterations. We start with delaylength=0 and
  // increase until we get a large enough delaytime, return delaylength
  // in iterations.
  delaylength = 0;
  delay(delaylength); // warm the code path before timing
  while (lapsedtime < delaytime) {
    delaylength = delaylength * 1.1 + 1; // geometric growth, at least +1
    starttime = getclock();
    for (i = 0; i < reps; i++) {
      delay(delaylength);
    }
    // average seconds per single delay() call
    lapsedtime = (getclock() - starttime) / (double) reps;
  }
  return delaylength;
}
/* Find a repetition count for `test` large enough that one timed
 * invocation takes at least targettesttime microseconds.  The global
 * innerreps is doubled until a measurement passes the target.
 * NOTE(review): `test` presumably reads the global innerreps to size its
 * internal loop — confirm against the test implementations.  Also note
 * innerreps is doubled once more AFTER the passing measurement, so the
 * returned value is twice the count that was actually timed. */
unsigned long getinnerreps(void (*test)(void)) {
  innerreps = 10L; // some initial value
  double time = 0.0;
  while (time < targettesttime) {
    double start = getclock();
    test();
    time = (getclock() - start) * 1.0e6; // seconds -> microseconds
    innerreps *=2;
    // Test to stop code if compiler is optimising reference time expressions away
    if (innerreps > (targettesttime*1.0e15)) {
      printf("Compiler has optimised reference loop away, STOP! \n");
      printf("Try recompiling with lower optimisation level \n");
      exit(1);
    }
  }
  return innerreps;
}
/* Print the banner announcing which benchmark is being timed and with
 * how many inner repetitions (global innerreps). */
void printheader(char *name) {
  printf("\n");
  printf("--------------------------------------------------------\n");
  printf("Computing %s time using %lu reps\n", name, innerreps);
}
/* Compute mean and sample standard deviation of the timings in
 * times[1..outerreps] and return them through mtp/sdp, printing a
 * one-line summary.  times[0] is deliberately excluded (the callers fill
 * indices 0..outerreps; index 0 is presumably a warm-up run — confirm).
 * Runs more than 3 s.d. from the mean are counted as outliers and
 * reported, but are NOT removed from the statistics. */
void stats(double *mtp, double *sdp) {
  double meantime, totaltime, sumsq, mintime, maxtime, sd, cutoff;
  int i, nr;
  mintime = 1.0e10;
  maxtime = 0.;
  totaltime = 0.;
  for (i = 1; i <= outerreps; i++) {
    mintime = (mintime < times[i]) ? mintime : times[i];
    maxtime = (maxtime > times[i]) ? maxtime : times[i];
    totaltime += times[i];
  }
  meantime = totaltime / outerreps;
  sumsq = 0;
  for (i = 1; i <= outerreps; i++) {
    sumsq += (times[i] - meantime) * (times[i] - meantime);
  }
  // sample standard deviation (n-1 denominator)
  sd = sqrt(sumsq / (outerreps - 1));
  cutoff = 3.0 * sd;
  nr = 0;
  for (i = 1; i <= outerreps; i++) {
    if (fabs(times[i] - meantime) > cutoff)
      nr++;
  }
  printf("\n");
  printf("Sample_size Average Min Max S.D. Outliers\n");
  printf(" %d %f %f %f %f %d\n",
         outerreps, meantime, mintime, maxtime, sd, nr);
  printf("\n");
  *mtp = meantime;
  *sdp = sd;
}
/* Print the measured test time and the overhead relative to the
 * reference time, each with a 95% confidence interval (CONF95 = 1.96).
 * Fix: the refsd parameter was previously ignored in favour of the
 * global `referencesd`; use the parameter as the signature promises.
 * (The only caller passes the global, so current output is unchanged.) */
void printfooter(char *name, double testtime, double testsd,
                 double referencetime, double refsd) {
  printf("%s time = %f microseconds +/- %f\n",
         name, testtime, CONF95*testsd);
  printf("%s overhead = %f microseconds +/- %f\n",
         name, testtime-referencetime, CONF95*(testsd+refsd));
}
/* Print the reference time with its 95% confidence interval
 * (CONF95 = 1.96).  The parameter `referencesd` shadows the file-scope
 * global of the same name; the parameter is what is printed. */
void printreferencefooter(char *name, double referencetime, double referencesd) {
  printf("%s time = %f microseconds +/- %f\n",
         name, referencetime, CONF95 * referencesd);
}
/* Initialise the benchmark harness: discover the OpenMP thread count,
 * parse the command line, fill in defaults for unset options, calibrate
 * the delay length, allocate the timing array, and print the run
 * configuration.  Fix: the malloc of `times` was previously unchecked. */
void ompbench_init(int argc, char **argv)
{
#pragma omp parallel
  {
#pragma omp master
    {
      nthreads = omp_get_num_threads();
    }
  }
  parse_args(argc, argv);
  if (outerreps == -1) {
    outerreps = DEFAULT_OUTER_REPS;
  }
  if (targettesttime == 0.0) {
    targettesttime = DEFAULT_TEST_TARGET_TIME;
  }
  if (delaytime == -1.0) {
    delaytime = DEFAULT_DELAY_TIME;
  }
  delaylength = getdelaylengthfromtime(delaytime); // Always need to compute delaylength in iterations
  // one extra slot: the timing loops fill indices 0..outerreps
  times = malloc((outerreps+1) * sizeof(double));
  if (times == NULL) {
    printf("Failed to allocate timing array\n");
    exit(EXIT_FAILURE);
  }
  printf("Running OpenMP benchmark version 3.0\n"
         "\t%d thread(s)\n"
         "\t%d outer repetitions\n"
         "\t%0.2f test time (microseconds)\n"
         "\t%d delay length (iterations) \n"
         "\t%f delay time (microseconds)\n",
         nthreads,
         outerreps, targettesttime,
         delaylength, delaytime);
}
/* Release the timing array.  The pointer is nulled afterwards so a
 * second call (or a stray use after shutdown) cannot double-free or
 * touch freed memory; free(NULL) is a no-op. */
void finalise(void) {
  free(times);
  times = NULL;
}
/* Start of a reference measurement: print the banner. */
void initreference(char *name) {
  printheader(name);
}
/* Calculate the reference time: size innerreps so one call of `refer`
 * is long enough to time reliably, then record outerreps+1 per-iteration
 * timings (microseconds) in times[0..outerreps].  stats() later ignores
 * index 0.  Results are reported via finalisereference(). */
void reference(char *name, void (*refer)(void)) {
  int k;
  double start;
  // Calculate the required number of innerreps
  innerreps = getinnerreps(refer);
  initreference(name);
  for (k = 0; k <= outerreps; k++) {
    start = getclock();
    refer();
    // per-iteration time in microseconds
    times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
  }
  finalisereference(name);
}
/* End of a reference measurement: compute the global reference mean and
 * s.d. from the recorded times and print them. */
void finalisereference(char *name) {
  stats(&referencetime, &referencesd);
  printreferencefooter(name, referencetime, referencesd);
}
/* Start of a timed test: print the banner.  (The name is a long-standing
 * typo for "inittest"; kept unchanged to preserve the public interface.) */
void intitest(char *name) {
  printheader(name);
}
/* End of a timed test: compute the global test mean and s.d. and print
 * the time and the overhead over the reference time. */
void finalisetest(char *name) {
  stats(&testtime, &testsd);
  printfooter(name, testtime, testsd, referencetime, referencesd);
}
/* Function to run a microbenchmark test: size innerreps for `test`,
 * record outerreps+1 per-iteration timings (microseconds) in
 * times[0..outerreps], then report via finalisetest().  Mirrors
 * reference() but feeds the test-side statistics. */
void benchmark(char *name, void (*test)(void))
{
  int k;
  double start;
  // Calculate the required number of innerreps
  innerreps = getinnerreps(test);
  intitest(name);
  for (k=0; k<=outerreps; k++) {
    start = getclock();
    test();
    // per-iteration time in microseconds
    times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
  }
  finalisetest(name);
}
// For the Cray compiler on HECToR we need to turn off optimisation
// for the delay and array_delay functions. Other compilers should
// not be afffected.
//#pragma _CRI noopt
/* Busy-wait for `delaylength` iterations of trivial float work.
 * Marked optnone so the loop is not optimised away; the final branch can
 * never fire (the sum of non-negative terms is non-negative) and exists
 * only to keep the accumulator observable. */
void __attribute__((optnone)) delay(int delaylength) {
  float accum = 0.f;
  for (int it = 0; it < delaylength; it++) {
    accum += it;
  }
  if (accum < 0) {
    printf("%f \n", accum);
  }
}
/* Busy-wait variant that accumulates through memory: sets a[0] to 1.0
 * and adds 0..delaylength-1 into it.  Marked optnone so the loop is not
 * optimised away; the final branch can never fire and only keeps the
 * store observable. */
void __attribute__((optnone)) array_delay(int delaylength, double a[1]) {
  a[0] = 1.0;
  for (int it = 0; it < delaylength; it++) {
    a[0] += it;
  }
  if (a[0] < 0) {
    printf("%f \n", a[0]);
  }
}
// Re-enable optimisation for remainder of source.
#pragma _CRI opt
/* Seconds elapsed from some arbitrary but consistent reference point.
 * omp_get_wtime is forward-declared locally so this file does not need
 * to include omp.h here. */
double getclock() {
  double omp_get_wtime(void);
  return omp_get_wtime();
}
/* Always returns 0 ("false"); used as an opaque predicate the compiler
 * cannot fold away across translation units. */
int returnfalse() {
  int false_value = 0;
  return false_value;
}
|
nest_call_par2.c | #include <stdio.h>
#define N 10
/* Sum 0..N-1 with an OpenMP parallel reduction, repeated N+2 times;
 * returns (N+2) * N*(N-1)/2. */
int foobar(void) {
  int total = 0;
  for (int rep = 0; rep < N + 2; rep++) {
#pragma omp parallel for reduction(+:total)
    for (int i = 0; i < N; i++) {
      total += i;
    }
  }
  return total;
}
int main (void)
{
  /* Nested-parallelism test: a `target teams distribute` outer loop, an
   * inner collapsed parallel-for, and each inner iteration calls
   * foobar(), which opens yet another parallel region.  The atomic
   * accumulation into `aa` is checked against a closed-form total. */
  long int aa=0;
  int res = 0; // (unused)
  int ng =12;
  int cmom = 14;
  int nxyz = 5;
  #pragma omp target teams distribute num_teams(nxyz) thread_limit(4) map(tofrom:aa)
  for (int gid = 0; gid < nxyz; gid++) {
    #pragma omp parallel for collapse(2)
    for (unsigned int g = 0; g < ng; g++) {
      for (unsigned int l = 0; l < cmom-1; l++) {
        int a = foobar(); // contributes (N+2)*N*(N-1)/2 per call
        // aa is shared across all teams and threads
        #pragma omp atomic
        aa += a;
      }
    }
  }
  // ng*(cmom-1)*nxyz calls, each returning (N*(N-1)/2)*(N+2)
  long exp = (long)ng*(cmom-1)*nxyz*(N*(N-1)/2)*(N+2);
  printf ("The result is = %ld exp:%ld!\n", aa,exp);
  if (aa != exp) {
    printf("Failed %ld\n",aa);
    return 1;
  }
  return 0;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y with tv_usec normalised to [0, 1000000).
 * Returns 1 when the difference is negative, 0 otherwise.
 * NOTE: *y is mutated as scratch space during the carry/borrow steps. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microseconds so x.usec >= y.usec. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds back into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalisation the microsecond difference is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Driver: run the 7-point variable-coefficient stencil TESTS times over
 * a double-buffered Nz x Ny x Nx grid and report the best time.
 * Fixes vs. previous version:
 *  - `min(min_tdiff, tdiff)` called an undeclared `min`; the macro
 *    defined above is MIN.
 *  - Nx/Ny/Nz/Nt were read uninitialized when too few command-line
 *    arguments were given; they now have small defaults.
 *  - the top-level A/coef/tile_size allocations are now freed. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults (incl. +2 halo) used when arguments are absent. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 4;
  if (argc > 3) {
    Nx = atoi(argv[1])+2; /* +2: one halo layer on each side */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays: A[2][Nz][Ny][Nx] is the double buffer
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // one coefficient grid per stencil point
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 32;
  tile_size[3] = 2048;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables with a fixed seed for reproducibility
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff); /* was `min`, which is undeclared */
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (innermost dimension first)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
GridInit.c | #include "XSbench_header.h"
#ifdef MPI
#include<mpi.h>
#endif
// Fill every nuclide grid point with pseudo-random values in [0, 1].
// This runs during (serial) initialization, so plain rand() is fine.
// The six fields are drawn in a fixed order so results match the
// previous implementation for a given seed.
void generate_grids( NuclideGridPoint ** nuclide_grids,
                     long n_isotopes, long n_gridpoints ) {
	for( long i = 0; i < n_isotopes; i++ ) {
		for( long j = 0; j < n_gridpoints; j++ ) {
			NuclideGridPoint *pt = &nuclide_grids[i][j];
			pt->energy        = ((double)rand()/(double)RAND_MAX);
			pt->total_xs      = ((double)rand()/(double)RAND_MAX);
			pt->elastic_xs    = ((double)rand()/(double)RAND_MAX);
			pt->absorbtion_xs = ((double)rand()/(double)RAND_MAX);
			pt->fission_xs    = ((double)rand()/(double)RAND_MAX);
			pt->nu_fission_xs = ((double)rand()/(double)RAND_MAX);
		}
	}
}
// Verification variant of generate_grids: identical structure, but all
// values come from rn_v(), the project's controlled RNG, so runs are
// reproducible for verification.  Field order is preserved.
void generate_grids_v( NuclideGridPoint ** nuclide_grids,
                       long n_isotopes, long n_gridpoints ) {
	for( long i = 0; i < n_isotopes; i++ ) {
		for( long j = 0; j < n_gridpoints; j++ ) {
			NuclideGridPoint *pt = &nuclide_grids[i][j];
			pt->energy        = rn_v();
			pt->total_xs      = rn_v();
			pt->elastic_xs    = rn_v();
			pt->absorbtion_xs = rn_v();
			pt->fission_xs    = rn_v();
			pt->nu_fission_xs = rn_v();
		}
	}
}
// Sort each nuclide's grid in place by energy (lowest -> highest),
// using the project's NGP_compare ordering.
void sort_nuclide_grids( NuclideGridPoint ** nuclide_grids, long n_isotopes,
                         long n_gridpoints )
{
	for( long i = 0; i < n_isotopes; i++ ) {
		qsort( nuclide_grids[i], n_gridpoints, sizeof(NuclideGridPoint),
		       NGP_compare );
	}
}
// Allocates the unionized energy grid (n_isotopes * n_gridpoints points)
// and fills it with the sorted union of all nuclide energy levels.
// Also allocates the xs_ptrs index table (n_isotopes ints per grid
// point) that set_grid_ptrs() fills in later; each grid point's xs_ptrs
// aliases a row of one large `full` allocation, whose ownership passes
// to the returned grid (free energy_grid[0].xs_ptrs, then energy_grid).
// Fix: these very large mallocs were previously unchecked.
GridPoint * generate_energy_grid( long n_isotopes, long n_gridpoints,
                                  NuclideGridPoint ** nuclide_grids) {
	int mype = 0;
	#ifdef MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &mype);
	#endif
	if( mype == 0 ) printf("Generating Unionized Energy Grid...\n");
	long n_unionized_grid_points = n_isotopes*n_gridpoints;
	GridPoint * energy_grid = (GridPoint *)malloc( n_unionized_grid_points
	                                               * sizeof( GridPoint ) );
	if( energy_grid == NULL ) {
		fprintf(stderr, "Failed to allocate unionized energy grid\n");
		exit(1);
	}
	if( mype == 0 ) printf("Copying and Sorting all nuclide grids...\n");
	// Flatten all nuclide grids into one array and sort it globally;
	// gpmatrix() allocates the temporary contiguous copy.
	NuclideGridPoint ** n_grid_sorted = gpmatrix( n_isotopes, n_gridpoints );
	memcpy( n_grid_sorted[0], nuclide_grids[0], n_isotopes*n_gridpoints*
	                                            sizeof( NuclideGridPoint ) );
	qsort( &n_grid_sorted[0][0], n_unionized_grid_points,
	       sizeof(NuclideGridPoint), NGP_compare);
	if( mype == 0 ) printf("Assigning energies to unionized grid...\n");
	for( long i = 0; i < n_unionized_grid_points; i++ )
		energy_grid[i].energy = n_grid_sorted[0][i].energy;
	gpmatrix_free(n_grid_sorted);
	// One contiguous block backing every grid point's xs_ptrs row.
	int * full = (int *) malloc( n_isotopes * n_unionized_grid_points
	                             * sizeof(int) );
	if( full == NULL ) {
		fprintf(stderr, "Failed to allocate xs_ptrs index table\n");
		exit(1);
	}
	for( long i = 0; i < n_unionized_grid_points; i++ )
		energy_grid[i].xs_ptrs = &full[n_isotopes * i];
	return energy_grid;
}
// Searches each nuclide grid for the closest energy level and assigns
// pointer from unionized grid to the correct spot in the nuclide grid.
// This process is time consuming, as the number of binary searches
// required is: binary searches = n_gridpoints * n_isotopes^2
void set_grid_ptrs( GridPoint * energy_grid, NuclideGridPoint ** nuclide_grids,
                    long n_isotopes, long n_gridpoints )
{
	int mype = 0;
	#ifdef MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &mype);
	#endif
	if( mype == 0 ) printf("Assigning pointers to Unionized Energy Grid...\n");
	// Each unionized grid point is independent, so the outer loop is
	// parallelized; all writes go to distinct xs_ptrs rows.
	#pragma omp parallel for default(none) \
	shared( energy_grid, nuclide_grids, n_isotopes, n_gridpoints, mype )
	for( long i = 0; i < n_isotopes * n_gridpoints ; i++ )
	{
		double quarry = energy_grid[i].energy;
		// Progress indicator from thread 0 only (INFO is a compile-time
		// flag from the project header; percentage is approximate).
		if( INFO && mype == 0 && omp_get_thread_num() == 0 && i % 200 == 0 )
			printf("\rAligning Unionized Grid...(%.0lf%% complete)",
			       100.0 * (double) i / (n_isotopes*n_gridpoints /
			                             omp_get_num_threads()) );
		for( long j = 0; j < n_isotopes; j++ )
		{
			// j is the nuclide i.d.
			// log n binary search for `quarry` in nuclide j's grid
			energy_grid[i].xs_ptrs[j] =
				binary_search( nuclide_grids[j], quarry, n_gridpoints);
		}
	}
	if( mype == 0 ) printf("\n");
}
|
GB_unaryop__abs_int8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int8_int32
// op(A') function: GB_tran__abs_int8_int32
// C type: int8_t
// A type: int32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Elementwise apply: Cx [p] = (int8_t) GB_IABS (Ax [p]) for p in
 * [0, anz), parallelized statically over nthreads.  Generated code —
 * the per-entry work is defined entirely by the GB_* macros above.
 * Returns GrB_NO_VALUE when this operator/type combination is compiled
 * out via GB_DISABLE. */
GrB_Info GB_unop__abs_int8_int32
(
    int8_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, typecast int32 -> int8, and apply
 * GB_IABS, sliced over naslice tasks.  The actual loop body lives in the
 * shared template GB_unaryop_transpose.c, specialized here through the
 * GB_* macros defined above.  Returns GrB_NO_VALUE when compiled out. */
GrB_Info GB_tran__abs_int8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
SparseDenseProduct.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H
namespace Eigen {
namespace internal {
template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
typename AlphaType,
int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;
// Specialization: row-major sparse lhs, rhs/res processed one column at
// a time.  Computes res += alpha * (lhs * rhs), one sparse-row dot
// product per (row, column) pair; rows may be processed in parallel.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  typedef typename evaluator<Lhs>::type LhsEval;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    LhsEval lhsEval(lhs);
    Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
    Eigen::initParallel();
    Index threads = Eigen::nbThreads();
#endif
    for(Index c=0; c<rhs.cols(); ++c)
    {
#ifdef EIGEN_HAS_OPENMP
      // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
      // It basically represents the minimal amount of work to be done to be worth it.
      if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
      {
        #pragma omp parallel for schedule(static) num_threads(threads)
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
      else
#endif
      {
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
    }
  }
  // res(i, col) += alpha * dot(sparse row i of lhs, rhs column `col`)
  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
  {
    typename Res::Scalar tmp(0);
    for(LhsInnerIterator it(lhsEval,i); it ;++it)
      tmp += it.value() * rhs.coeff(it.index(),col);
    res.coeffRef(i,col) += alpha * tmp;
  }
};
// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
// Declares the result type of T1 * Ref<T2> products as the plain object
// of a scalar_multiple2_op expression over T2.
template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
struct scalar_product_traits<T1, Ref<T2/*, _Options, _StrideType*/> >
{
  enum {
    Defined = 1
  };
  typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
};
// Specialization: column-major sparse lhs, rhs/res processed one column
// at a time.  For each lhs column j, scatter it into res scaled by
// alpha * rhs(j, c).  AlphaType is generic here to support the
// scalar_product_traits specialization above.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
  {
    typename evaluator<Lhs>::type lhsEval(lhs);
    for(Index c=0; c<rhs.cols(); ++c)
    {
      for(Index j=0; j<lhs.outerSize(); ++j)
      {
        // typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
        // the product type is resolved via scalar_product_traits
        typename internal::scalar_product_traits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
        for(LhsInnerIterator it(lhsEval,j); it ;++it)
          res.coeffRef(it.index(),c) += it.value() * rhs_j;
      }
    }
  }
};
// Specialization: row-major sparse lhs, rhs processed all columns at
// once (row-major dense rhs with multiple columns).  Accumulates whole
// rows of res: res.row(j) += alpha * lhs(j,k) * rhs.row(k).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    typename evaluator<Lhs>::type lhsEval(lhs);
    for(Index j=0; j<lhs.outerSize(); ++j)
    {
      typename Res::RowXpr res_j(res.row(j));
      for(LhsInnerIterator it(lhsEval,j); it ;++it)
        res_j += (alpha*it.value()) * rhs.row(it.index());
    }
  }
};
// Specialization: column-major sparse lhs, rhs processed all columns at
// once.  For each lhs column j, add alpha * lhs(i,j) * rhs.row(j) to
// res.row(i) for each stored entry (i,j).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    typename evaluator<Lhs>::type lhsEval(lhs);
    for(Index j=0; j<lhs.outerSize(); ++j)
    {
      typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
      for(LhsInnerIterator it(lhsEval,j); it ;++it)
        res.row(it.index()) += (alpha*it.value()) * rhs_j;
    }
  }
};
// Entry point: res += alpha * lhs * rhs, dispatching on the lhs storage
// order and rhs layout to one of the specializations above.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}
} // end namespace internal
namespace internal {
// Product evaluator for sparse * dense: nest both operands and forward
// to sparse_time_dense_product (dst += alpha * lhs * rhs).
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
 : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
  }
};
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
: generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};
// Product evaluator for dense * sparse: implemented by transposing the
// whole product so the sparse operand becomes the lhs —
// (lhs * rhs)^T = rhs^T * lhs^T — and reusing the sparse*dense kernel.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  template<typename Dst>
  static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    // transpose everything
    Transpose<Dst> dstT(dst);
    internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
: generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
{};
// Lazy evaluator for a sparse (x) dense outer product.  Iterating an
// outer index scales the sparse vector's entries by the dense factor at
// that index.  When NeedToTranspose, the operand roles are swapped so
// the sparse side is always iterated.
template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
  typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
  typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
  typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;
  // if the actual left-hand side is a dense vector,
  // then build a sparse-view so that we can seamlessly iterate over it.
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1, SparseView<Lhs1> >::type ActualLhs;
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1 const&, SparseView<Lhs1> >::type LhsArg;
  typedef typename evaluator<ActualLhs>::type LhsEval;
  typedef typename evaluator<ActualRhs>::type RhsEval;
  typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
  typedef typename ProdXprType::Scalar Scalar;
public:
  enum {
    Flags = NeedToTranspose ? RowMajorBit : 0,
    CoeffReadCost = Dynamic
  };
  // Iterates the sparse lhs column, multiplying each value by m_factor,
  // the rhs coefficient for this outer index.
  class InnerIterator : public LhsIterator
  {
  public:
    InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
      : LhsIterator(xprEval.m_lhsXprImpl, 0),
        m_outer(outer),
        m_empty(false),
        m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
    {}
    EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
    EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
    EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }
    EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
    // m_empty short-circuits iteration when the sparse rhs factor is zero
    EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }
  protected:
    // dense rhs: the factor is simply its coefficient at `outer`
    Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
    {
      return rhs.coeff(outer);
    }
    // sparse rhs: the factor is its (single) stored entry, if nonzero;
    // otherwise mark the whole column empty
    Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
    {
      typename RhsEval::InnerIterator it(rhs, outer);
      if (it && it.index()==0 && it.value()!=Scalar(0))
        return it.value();
      m_empty = true;
      return Scalar(0);
    }
    Index m_outer;
    bool m_empty;
    Scalar m_factor;
  };
  sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {}
  // transpose case
  sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {}
protected:
  const LhsArg m_lhs;
  typename evaluator<ActualLhs>::nestedType m_lhsXprImpl;
  typename evaluator<ActualRhs>::nestedType m_rhsXprImpl;
};
// sparse * dense outer product: forwards to the shared evaluator above
// (operands in natural order; transpose iff the sparse lhs is row-major).
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape, typename traits<Lhs>::Scalar, typename traits<Rhs>::Scalar>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;
  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;
  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}
};
// dense * sparse outer product: same evaluator, keyed on the sparse
// rhs's storage order for the transpose flag.
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape, typename traits<Lhs>::Scalar, typename traits<Rhs>::Scalar>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;
  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;
  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEDENSEPRODUCT_H
|
openmp_vector_multiply.c | #include <stdio.h>
#include <omp.h>
#include <math.h>
//gcc -fopenmp 1-1.c
//pgcc vector_multiply.c -acc -Minfo -ta=multicore
/* Dot product of two n-element vectors, computed with an OpenMP
 * reduction and checked against a serial reference.
 * Fixes vs. previous version:
 *  - the check compared the SIGNED difference to 0.001, so any large
 *    negative error passed; use fabs().
 *  - both branches printed "Right"; the failure branch now says "Wrong".
 *  - removed the unused `errs` variable. */
int main()
{
  int n=100000; /* size of the vector */
  double a[n],b[n],result=0.0,expected=0.0; /* input vectors + sums */
  int i;
  for( i = 0; i < n; ++i )
  {
    a[i] = (float)(i*2.0);
    b[i] = (float)(i*5.0);
  }
  double start = omp_get_wtime( );
#pragma omp parallel for private(i) reduction(+:result)
  for( i = 0; i < n; ++i )
  {
    result = result + a[i]*b[i];
  }
  double end = omp_get_wtime( );
  /* compute on the host serially to compare */
  for( i = 0; i < n; ++i )
  {
    expected = expected + a[i]*b[i];
  }
  printf("Programming by Openmp.");
  printf("The vector dimension is %d \n",n);
  /* check the results with an absolute tolerance */
  if (fabs(result - expected) < 0.001)
  {
    printf( "After compared with exception results the result was assert as Right.\n" );
  }
  else {
    printf( "After compared with exception results the result was assert as Wrong. \n" );
  }
  printf("parallel time:%.16g seconds\n",end-start);
  printf("a*b=%.6g \n",result);
  return 0;
}
|
tree-vectorizer.h | /* Vectorizer
Copyright (C) 2003-2020 Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H
typedef class _stmt_vec_info *stmt_vec_info;
#include "tree-data-ref.h"
#include "tree-hash-traits.h"
#include "target.h"
#include <utility>
/* Used for naming of new temporaries.  */
enum vect_var_kind {
  vect_simple_var,
  vect_pointer_var,
  vect_scalar_var,
  vect_mask_var
};
/* Defines type of operation (number of operands).  */
enum operation_type {
  unary_op = 1,
  binary_op,
  ternary_op
};
/* Define type of available alignment support, from no support at all up to
   naturally aligned accesses.  */
enum dr_alignment_support {
  dr_unaligned_unsupported,
  dr_unaligned_supported,
  dr_explicit_realign,
  dr_explicit_realign_optimized,
  dr_aligned
};
/* Define type of def-use cross-iteration cycle.  Ordering matters:
   vect_constant_def/vect_external_def sort before the internal kinds.  */
enum vect_def_type {
  vect_uninitialized_def = 0,
  vect_constant_def = 1,
  vect_external_def,
  vect_internal_def,
  vect_induction_def,
  vect_reduction_def,
  vect_double_reduction_def,
  vect_nested_cycle,
  vect_unknown_def_type
};
/* Define type of reduction.  */
enum vect_reduction_type {
  TREE_CODE_REDUCTION,
  COND_REDUCTION,
  INTEGER_INDUC_COND_REDUCTION,
  CONST_COND_REDUCTION,
  /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop
     to implement:
       for (int i = 0; i < VF; ++i)
         res = cond[i] ? val[i] : res;  */
  EXTRACT_LAST_REDUCTION,
  /* Use a folding reduction within the loop to implement:
       for (int i = 0; i < VF; ++i)
         res = res OP val[i];
     (with no reassociation).  */
  FOLD_LEFT_REDUCTION
};
/* True iff def type D participates in a cross-iteration cycle
   (plain reduction, double reduction or nested cycle).  */
#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def)           \
                                   || ((D) == vect_double_reduction_def) \
                                   || ((D) == vect_nested_cycle))
/* Structure to encapsulate information about a group of like
   instructions to be presented to the target cost model.  */
struct stmt_info_for_cost {
  /* Number of statements of this kind in the group.  */
  int count;
  /* What kind of cost is being recorded.  */
  enum vect_cost_for_stmt kind;
  /* Where in the generated code this cost will be incurred.  */
  enum vect_cost_model_location where;
  /* The statement the cost entry refers to, or NULL.  */
  stmt_vec_info stmt_info;
  /* Recorded misalignment, for memory-access costs.  */
  int misalign;
};
typedef vec<stmt_info_for_cost> stmt_vector_for_cost;
/* Maps base addresses to an innermost_loop_behavior that gives the maximum
   known alignment for that base.  */
typedef hash_map<tree_operand_hash,
                 innermost_loop_behavior *> vec_base_alignments;
/************************************************************************
SLP
************************************************************************/
typedef struct _slp_tree *slp_tree;
/* A computation tree of an SLP instance.  Each node corresponds to a group of
   stmts to be packed in a SIMD stmt.  */
struct _slp_tree {
  /* Nodes that contain def-stmts of this node statements operands.  */
  vec<slp_tree> children;
  /* A group of scalar stmts to be vectorized together.  */
  vec<stmt_vec_info> stmts;
  /* A group of scalar operands to be vectorized together.  */
  vec<tree> ops;
  /* Load permutation relative to the stores, NULL if there is no
     permutation.  */
  vec<unsigned> load_permutation;
  /* Vectorized stmt/s.  */
  vec<stmt_vec_info> vec_stmts;
  /* Number of vector stmts that are created to replace the group of scalar
     stmts. It is calculated during the transformation phase as the number of
     scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
     divided by vector size.  */
  unsigned int vec_stmts_size;
  /* Reference count in the SLP graph.  */
  unsigned int refcnt;
  /* The maximum number of vector elements for the subtree rooted
     at this node.  */
  poly_uint64 max_nunits;
  /* Whether the scalar computations use two different operators.  */
  bool two_operators;
  /* The DEF type of this node.  */
  enum vect_def_type def_type;
};
/* SLP instance is a sequence of stmts in a loop that can be packed into
   SIMD stmts.  */
typedef class _slp_instance {
public:
  /* The root of SLP tree.  */
  slp_tree root;
  /* For vector constructors, the constructor stmt that the SLP tree is built
     from, NULL otherwise.  */
  stmt_vec_info root_stmt;
  /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s.  */
  unsigned int group_size;
  /* The unrolling factor required to vectorized this SLP instance.  */
  poly_uint64 unrolling_factor;
  /* The group of nodes that contain loads of this SLP instance.  */
  vec<slp_tree> loads;
  /* The SLP node containing the reduction PHIs.  */
  slp_tree reduc_phis;
} *slp_instance;
/* Access Functions for SLP instances and SLP tree nodes.  */
#define SLP_INSTANCE_TREE(S) (S)->root
#define SLP_INSTANCE_GROUP_SIZE(S) (S)->group_size
#define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor
#define SLP_INSTANCE_LOADS(S) (S)->loads
#define SLP_INSTANCE_ROOT_STMT(S) (S)->root_stmt
#define SLP_TREE_CHILDREN(S) (S)->children
#define SLP_TREE_SCALAR_STMTS(S) (S)->stmts
#define SLP_TREE_SCALAR_OPS(S) (S)->ops
#define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation
#define SLP_TREE_TWO_OPERATORS(S) (S)->two_operators
#define SLP_TREE_DEF_TYPE(S) (S)->def_type
/* Key for map that records association between
   scalar conditions and corresponding loop mask, and
   is populated by vect_record_loop_mask.  */
struct scalar_cond_masked_key
{
  /* Decompose condition T into (code, op0, op1); NCOPIES_ distinguishes
     otherwise-equal conditions used with different numbers of copies.  */
  scalar_cond_masked_key (tree t, unsigned ncopies_)
    : ncopies (ncopies_)
  {
    get_cond_ops_from_tree (t);
  }
  /* Populate code/op0/op1 from condition tree T.  */
  void get_cond_ops_from_tree (tree);
  unsigned ncopies;
  tree_code code;
  tree op0;
  tree op1;
};
/* Hashing traits so scalar_cond_masked_key can be used in hash_set/hash_map.
   Note: ncopies == 0 is reserved as the "empty" marker, so live keys must
   always use ncopies >= 1; deletion is not supported.  */
template<>
struct default_hash_traits<scalar_cond_masked_key>
{
  typedef scalar_cond_masked_key compare_type;
  typedef scalar_cond_masked_key value_type;
  /* Combine the comparison code, both operands and the copy count.  */
  static inline hashval_t
  hash (value_type v)
  {
    inchash::hash h;
    h.add_int (v.code);
    inchash::add_expr (v.op0, h, 0);
    inchash::add_expr (v.op1, h, 0);
    h.add_int (v.ncopies);
    return h.end ();
  }
  /* Two keys are equal iff all four components match (operands compared
     structurally via operand_equal_p).  */
  static inline bool
  equal (value_type existing, value_type candidate)
  {
    return (existing.ncopies == candidate.ncopies
            && existing.code == candidate.code
            && operand_equal_p (existing.op0, candidate.op0, 0)
            && operand_equal_p (existing.op1, candidate.op1, 0));
  }
  static const bool empty_zero_p = true;
  static inline void
  mark_empty (value_type &v)
  {
    v.ncopies = 0;
  }
  static inline bool
  is_empty (value_type v)
  {
    return v.ncopies == 0;
  }
  /* Deleted slots are not used by this table.  */
  static inline void mark_deleted (value_type &) {}
  static inline bool is_deleted (const value_type &)
  {
    return false;
  }
  static inline void remove (value_type &) {}
};
typedef hash_set<scalar_cond_masked_key> scalar_cond_masked_set_type;
/* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE.
UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR. */
class vec_lower_bound {
public:
vec_lower_bound () {}
vec_lower_bound (tree e, bool u, poly_uint64 m)
: expr (e), unsigned_p (u), min_value (m) {}
tree expr;
bool unsigned_p;
poly_uint64 min_value;
};
/* Vectorizer state shared between different analyses like vector sizes
of the same CFG region. */
class vec_info_shared {
public:
vec_info_shared();
~vec_info_shared();
void save_datarefs();
void check_datarefs();
/* All data references. Freed by free_data_refs, so not an auto_vec. */
vec<data_reference_p> datarefs;
vec<data_reference> datarefs_copy;
/* The loop nest in which the data dependences are computed. */
auto_vec<loop_p> loop_nest;
/* All data dependences. Freed by free_dependence_relations, so not
an auto_vec. */
vec<ddr_p> ddrs;
};
/* Vectorizer state common between loop and basic-block vectorization.  */
class vec_info {
public:
  typedef hash_set<int_hash<machine_mode, E_VOIDmode, E_BLKmode> > mode_set;
  enum vec_kind { bb, loop };
  vec_info (vec_kind, void *, vec_info_shared *);
  ~vec_info ();
  /* Bookkeeping for the GIMPLE-stmt <-> stmt_vec_info association.  */
  stmt_vec_info add_stmt (gimple *);
  stmt_vec_info lookup_stmt (gimple *);
  stmt_vec_info lookup_def (tree);
  stmt_vec_info lookup_single_use (tree);
  class dr_vec_info *lookup_dr (data_reference *);
  void move_dr (stmt_vec_info, stmt_vec_info);
  void remove_stmt (stmt_vec_info);
  void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *);
  /* The type of vectorization.  */
  vec_kind kind;
  /* Shared vectorizer state.  */
  vec_info_shared *shared;
  /* The mapping of GIMPLE UID to stmt_vec_info.  */
  vec<stmt_vec_info> stmt_vec_infos;
  /* All SLP instances.  */
  auto_vec<slp_instance> slp_instances;
  /* Maps base addresses to an innermost_loop_behavior that gives the maximum
     known alignment for that base.  */
  vec_base_alignments base_alignments;
  /* All interleaving chains of stores, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> grouped_stores;
  /* Cost data used by the target cost model.  */
  void *target_cost_data;
  /* The set of vector modes used in the vectorized region.  */
  mode_set used_vector_modes;
  /* The argument we should pass to related_vector_mode when looking up
     the vector mode for a scalar mode, or VOIDmode if we haven't yet
     made any decisions about which vector modes to use.  */
  machine_mode vector_mode;
private:
  stmt_vec_info new_stmt_vec_info (gimple *stmt);
  void set_vinfo_for_stmt (gimple *, stmt_vec_info);
  void free_stmt_vec_infos ();
  void free_stmt_vec_info (stmt_vec_info);
};
class _loop_vec_info;
class _bb_vec_info;
/* Allow is_a/dyn_cast from vec_info * to the loop flavor, keyed on KIND.  */
template<>
template<>
inline bool
is_a_helper <_loop_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::loop;
}
/* Allow is_a/dyn_cast from vec_info * to the basic-block flavor.  */
template<>
template<>
inline bool
is_a_helper <_bb_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::bb;
}
/* In general, we can divide the vector statements in a vectorized loop
into related groups ("rgroups") and say that for each rgroup there is
some nS such that the rgroup operates on nS values from one scalar
iteration followed by nS values from the next. That is, if VF is the
vectorization factor of the loop, the rgroup operates on a sequence:
(1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS)
where (i,j) represents a scalar value with index j in a scalar
iteration with index i.
[ We use the term "rgroup" to emphasise that this grouping isn't
necessarily the same as the grouping of statements used elsewhere.
For example, if we implement a group of scalar loads using gather
loads, we'll use a separate gather load for each scalar load, and
thus each gather load will belong to its own rgroup. ]
In general this sequence will occupy nV vectors concatenated
together. If these vectors have nL lanes each, the total number
of scalar values N is given by:
N = nS * VF = nV * nL
None of nS, VF, nV and nL are required to be a power of 2. nS and nV
are compile-time constants but VF and nL can be variable (if the target
supports variable-length vectors).
In classical vectorization, each iteration of the vector loop would
handle exactly VF iterations of the original scalar loop. However,
in a fully-masked loop, a particular iteration of the vector loop
might handle fewer than VF iterations of the scalar loop. The vector
lanes that correspond to iterations of the scalar loop are said to be
"active" and the other lanes are said to be "inactive".
In a fully-masked loop, many rgroups need to be masked to ensure that
they have no effect for the inactive lanes. Each such rgroup needs a
sequence of booleans in the same order as above, but with each (i,j)
replaced by a boolean that indicates whether iteration i is active.
This sequence occupies nV vector masks that again have nL lanes each.
Thus the mask sequence as a whole consists of VF independent booleans
that are each repeated nS times.
We make the simplifying assumption that if a sequence of nV masks is
suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by
VIEW_CONVERTing it. This holds for all current targets that support
fully-masked loops. For example, suppose the scalar loop is:
float *f;
double *d;
for (int i = 0; i < n; ++i)
{
f[i * 2 + 0] += 1.0f;
f[i * 2 + 1] += 2.0f;
d[i] += 3.0;
}
and suppose that vectors have 256 bits. The vectorized f accesses
will belong to one rgroup and the vectorized d access to another:
f rgroup: nS = 2, nV = 1, nL = 8
d rgroup: nS = 1, nV = 1, nL = 4
VF = 4
[ In this simple example the rgroups do correspond to the normal
SLP grouping scheme. ]
If only the first three lanes are active, the masks we need are:
f rgroup: 1 1 | 1 1 | 1 1 | 0 0
d rgroup: 1 | 1 | 1 | 0
Here we can use a mask calculated for f's rgroup for d's, but not
vice versa.
Thus for each value of nV, it is enough to provide nV masks, with the
mask being calculated based on the highest nL (or, equivalently, based
on the highest nS) required by any rgroup with that nV. We therefore
represent the entire collection of masks as a two-level table, with the
first level being indexed by nV - 1 (since nV == 0 doesn't exist) and
the second being indexed by the mask index 0 <= i < nV. */
/* The masks needed by rgroups with nV vectors, according to the
   description above.  */
struct rgroup_masks {
  /* The largest nS for all rgroups that use these masks.  */
  unsigned int max_nscalars_per_iter;
  /* The type of mask to use, based on the highest nS recorded above.  */
  tree mask_type;
  /* A vector of nV masks, in iteration order.  */
  vec<tree> masks;
};
/* Indexed by nV - 1; see the rgroup commentary above.  */
typedef auto_vec<rgroup_masks> vec_loop_masks;
/* Pairs of (data reference, initial address) recorded for re-use.  */
typedef auto_vec<std::pair<data_reference*, tree> > drs_init_vec;
/*-----------------------------------------------------------------*/
/* Info on vectorized loops. */
/*-----------------------------------------------------------------*/
typedef class _loop_vec_info : public vec_info {
public:
  /* Analyze LOOP with the given shared state.  */
  _loop_vec_info (class loop *, vec_info_shared *);
  ~_loop_vec_info ();
  /* The loop to which this info struct refers to.  */
  class loop *loop;
  /* The loop basic blocks.  */
  basic_block *bbs;
  /* Number of latch executions.  */
  tree num_itersm1;
  /* Number of iterations.  */
  tree num_iters;
  /* Number of iterations of the original loop.  */
  tree num_iters_unchanged;
  /* Condition under which this loop is analyzed and versioned.  */
  tree num_iters_assumptions;
  /* Threshold of number of iterations below which vectorization will not be
     performed. It is calculated from MIN_PROFITABLE_ITERS and
     param_min_vect_loop_bound.  */
  unsigned int th;
  /* When applying loop versioning, the vector form should only be used
     if the number of scalar iterations is >= this value, on top of all
     the other requirements.  Ignored when loop versioning is not being
     used.  */
  poly_uint64 versioning_threshold;
  /* Vectorization (unrolling) factor of the main loop.  */
  poly_uint64 vectorization_factor;
  /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
     if there is no particular limit.  */
  unsigned HOST_WIDE_INT max_vectorization_factor;
  /* The masks that a fully-masked loop should use to avoid operating
     on inactive scalars.  */
  vec_loop_masks masks;
  /* Set of scalar conditions that have loop mask applied.  */
  scalar_cond_masked_set_type scalar_cond_masked_set;
  /* If we are using a loop mask to align memory addresses, this variable
     contains the number of vector elements that we should skip in the
     first iteration of the vector loop (i.e. the number of leading
     elements that should be false in the first mask).  */
  tree mask_skip_niters;
  /* Type of the variables to use in the WHILE_ULT call for fully-masked
     loops.  */
  tree mask_compare_type;
  /* For #pragma omp simd if (x) loops the x expression.  If constant 0,
     the loop should not be vectorized, if constant non-zero, simd_if_cond
     shouldn't be set and loop vectorized normally, if SSA_NAME, the loop
     should be versioned on that condition, using scalar loop if the condition
     is false and vectorized loop otherwise.  */
  tree simd_if_cond;
  /* Type of the IV to use in the WHILE_ULT call for fully-masked
     loops.  */
  tree iv_type;
  /* Unknown DRs according to which loop was peeled.  */
  class dr_vec_info *unaligned_dr;
  /* peeling_for_alignment indicates whether peeling for alignment will take
     place, and what the peeling factor should be:
     peeling_for_alignment = X means:
        If X=0: Peeling for alignment will not be applied.
        If X>0: Peel first X iterations.
        If X=-1: Generate a runtime test to calculate the number of iterations
                 to be peeled, using the dataref recorded in the field
                 unaligned_dr.  */
  int peeling_for_alignment;
  /* The mask used to check the alignment of pointers or arrays.  */
  int ptr_mask;
  /* Data Dependence Relations defining address ranges that are candidates
     for a run-time aliasing check.  */
  auto_vec<ddr_p> may_alias_ddrs;
  /* Data Dependence Relations defining address ranges together with segment
     lengths from which the run-time aliasing check is built.  */
  auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs;
  /* Check that the addresses of each pair of objects is unequal.  */
  auto_vec<vec_object_pair> check_unequal_addrs;
  /* List of values that are required to be nonzero.  This is used to check
     whether things like "x[i * n] += 1;" are safe and eventually gets added
     to the checks for lower bounds below.  */
  auto_vec<tree> check_nonzero;
  /* List of values that need to be checked for a minimum value.  */
  auto_vec<vec_lower_bound> lower_bounds;
  /* Statements in the loop that have data references that are candidates for a
     runtime (loop versioning) misalignment check.  */
  auto_vec<stmt_vec_info> may_misalign_stmts;
  /* Reduction cycles detected in the loop. Used in loop-aware SLP.  */
  auto_vec<stmt_vec_info> reductions;
  /* All reduction chains in the loop, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> reduction_chains;
  /* Cost vector for a single scalar iteration.  */
  auto_vec<stmt_info_for_cost> scalar_cost_vec;
  /* Map of IV base/step expressions to inserted name in the preheader.  */
  hash_map<tree_operand_hash, tree> *ivexpr_map;
  /* Map of OpenMP "omp simd array" scan variables to corresponding
     rhs of the store of the initializer.  */
  hash_map<tree, tree> *scan_map;
  /* The unrolling factor needed to SLP the loop. In case of that pure SLP is
     applied to the loop, i.e., no unrolling is needed, this is 1.  */
  poly_uint64 slp_unrolling_factor;
  /* Cost of a single scalar iteration.  */
  int single_scalar_iteration_cost;
  /* The cost of the vector prologue and epilogue, including peeled
     iterations and set-up code.  */
  int vec_outside_cost;
  /* The cost of the vector loop body.  */
  int vec_inside_cost;
  /* Is the loop vectorizable?  */
  bool vectorizable;
  /* Records whether we still have the option of using a fully-masked loop.  */
  bool can_fully_mask_p;
  /* True if have decided to use a fully-masked loop.  */
  bool fully_masked_p;
  /* When we have grouped data accesses with gaps, we may introduce invalid
     memory accesses.  We peel the last iteration of the loop to prevent
     this.  */
  bool peeling_for_gaps;
  /* When the number of iterations is not a multiple of the vector size
     we need to peel off iterations at the end to form an epilogue loop.  */
  bool peeling_for_niter;
  /* True if there are no loop carried data dependencies in the loop.
     If loop->safelen <= 1, then this is always true, either the loop
     didn't have any loop carried data dependencies, or the loop is being
     vectorized guarded with some runtime alias checks, or couldn't
     be vectorized at all, but then this field shouldn't be used.
     For loop->safelen >= 2, the user has asserted that there are no
     backward dependencies, but there still could be loop carried forward
     dependencies in such loops.  This flag will be false if normal
     vectorizer data dependency analysis would fail or require versioning
     for alias, but because of loop->safelen >= 2 it has been vectorized
     even without versioning for alias.  E.g. in:
        #pragma omp simd
        for (int i = 0; i < m; i++)
          a[i] = a[i + k] * c;
     (or #pragma simd or #pragma ivdep) we can vectorize this and it will
     DTRT even for k > 0 && k < m, but without safelen we would not
     vectorize this, so this field would be false.  */
  bool no_data_dependencies;
  /* Mark loops having masked stores.  */
  bool has_mask_store;
  /* Queued scaling factor for the scalar loop.  */
  profile_probability scalar_loop_scaling;
  /* If if-conversion versioned this loop before conversion, this is the
     loop version without if-conversion.  */
  class loop *scalar_loop;
  /* For loops being epilogues of already vectorized loops
     this points to the original vectorized loop.  Otherwise NULL.  */
  _loop_vec_info *orig_loop_info;
  /* Used to store loop_vec_infos of epilogues of this loop during
     analysis.  */
  vec<_loop_vec_info *> epilogue_vinfos;
} *loop_vec_info;
/* Access Functions for loop_vec_info fields.  */
#define LOOP_VINFO_LOOP(L) (L)->loop
#define LOOP_VINFO_BBS(L) (L)->bbs
#define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1
#define LOOP_VINFO_NITERS(L) (L)->num_iters
/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
   prologue peeling retain total unchanged scalar loop iterations for
   cost model.  */
#define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged
#define LOOP_VINFO_NITERS_ASSUMPTIONS(L) (L)->num_iters_assumptions
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
#define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold
#define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable
#define LOOP_VINFO_CAN_FULLY_MASK_P(L) (L)->can_fully_mask_p
#define LOOP_VINFO_FULLY_MASKED_P(L) (L)->fully_masked_p
#define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor
#define LOOP_VINFO_MAX_VECT_FACTOR(L) (L)->max_vectorization_factor
#define LOOP_VINFO_MASKS(L) (L)->masks
#define LOOP_VINFO_MASK_SKIP_NITERS(L) (L)->mask_skip_niters
#define LOOP_VINFO_MASK_COMPARE_TYPE(L) (L)->mask_compare_type
#define LOOP_VINFO_MASK_IV_TYPE(L) (L)->iv_type
#define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask
#define LOOP_VINFO_LOOP_NEST(L) (L)->shared->loop_nest
#define LOOP_VINFO_DATAREFS(L) (L)->shared->datarefs
#define LOOP_VINFO_DDRS(L) (L)->shared->ddrs
#define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts
#define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs
#define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L) (L)->check_unequal_addrs
#define LOOP_VINFO_CHECK_NONZERO(L) (L)->check_nonzero
#define LOOP_VINFO_LOWER_BOUNDS(L) (L)->lower_bounds
#define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L) (L)->reductions
#define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains
#define LOOP_VINFO_TARGET_COST_DATA(L) (L)->target_cost_data
#define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps
#define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop
#define LOOP_VINFO_SCALAR_LOOP_SCALING(L) (L)->scalar_loop_scaling
#define LOOP_VINFO_HAS_MASK_STORE(L) (L)->has_mask_store
#define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
#define LOOP_VINFO_ORIG_LOOP_INFO(L) (L)->orig_loop_info
#define LOOP_VINFO_SIMD_IF_COND(L) (L)->simd_if_cond
/* Predicates telling whether the loop needs to be versioned, and why.  */
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)	\
  ((L)->may_misalign_stmts.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)		\
  ((L)->comp_alias_ddrs.length () > 0			\
   || (L)->check_unequal_addrs.length () > 0		\
   || (L)->lower_bounds.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)		\
  (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
#define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L)	\
  (LOOP_VINFO_SIMD_IF_COND (L))
#define LOOP_REQUIRES_VERSIONING(L)			\
  (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L))
#define LOOP_VINFO_NITERS_KNOWN_P(L)		\
  (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)
#define LOOP_VINFO_EPILOGUE_P(L) \
  (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)
#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \
  (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))
/* Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL
   value signifies success, and a NULL value signifies failure, supporting
   propagating an opt_problem * describing the failure back up the call
   stack.  */
typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;
/* Retrieve the loop_vec_info stashed in LOOP's aux field, or NULL.  */
static inline loop_vec_info
loop_vec_info_for_loop (class loop *loop)
{
  return (loop_vec_info) loop->aux;
}
/* Vectorizer state for basic-block (SLP-only) vectorization of the region
   delimited by the two statement iterators.  */
typedef class _bb_vec_info : public vec_info
{
public:
  _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *);
  ~_bb_vec_info ();
  /* The basic block the region lives in.  */
  basic_block bb;
  /* Half-open range [region_begin, region_end) of stmts being vectorized.  */
  gimple_stmt_iterator region_begin;
  gimple_stmt_iterator region_end;
} *bb_vec_info;
/* Access Functions for bb_vec_info fields.  */
#define BB_VINFO_BB(B) (B)->bb
#define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances
#define BB_VINFO_DATAREFS(B) (B)->shared->datarefs
#define BB_VINFO_DDRS(B) (B)->shared->ddrs
#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data
/* Retrieve the bb_vec_info stashed in BB's aux field, or NULL.  */
static inline bb_vec_info
vec_info_for_bb (basic_block bb)
{
  return (bb_vec_info) bb->aux;
}
/*-----------------------------------------------------------------*/
/* Info on vectorized defs. */
/*-----------------------------------------------------------------*/
/* Classification of a statement by the kind of vectorization it needs
   (which vectorizable_* routine handles it).  */
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  cycle_phi_info_type,
  lc_phi_info_type,
  loop_exit_ctrl_vec_info_type
};
/* Indicates whether/how a variable is used in the scope of loop/basic
   block.  */
enum vect_relevant {
  vect_unused_in_scope = 0,
  /* The def is only used outside the loop.  */
  vect_used_only_live,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt.  */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of reduction).  */
  vect_used_in_outer,
  /* defs that feed computations that end up (only) in a reduction. These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. don't get stored to memory, for example). We use this
     to identify computations that we can change the order in which they are
     computed.  */
  vect_used_by_reduction,
  vect_used_in_scope
};
/* The type of vectorization that can be applied to the stmt: regular loop-based
   vectorization; pure SLP - the stmt is a part of SLP instances and does not
   have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
   a part of SLP instance and also must be loop-based vectorized, since it has
   uses outside SLP sequences.
   In the loop context the meanings of pure and hybrid SLP are slightly
   different. By saying that pure SLP is applied to the loop, we mean that we
   exploit only intra-iteration parallelism in the loop; i.e., the loop can be
   vectorized without doing any conceptual unrolling, cause we don't pack
   together stmts from different iterations, only within a single iteration.
   Loop hybrid SLP means that we exploit both intra-iteration and
   inter-iteration parallelism (e.g., number of elements in the vector is 4
   and the slp-group-size is 2, in which case we don't have enough parallelism
   within an iteration, so we obtain the rest of the parallelism from subsequent
   iterations by unrolling the loop by 2).  */
enum slp_vect_type {
  loop_vect = 0,
  pure_slp,
  hybrid
};
/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};
/* Describes how we're going to vectorize an individual load or store,
   or a group of loads or stores.  */
enum vect_memory_access_type {
  /* An access to an invariant address.  This is used only for loads.  */
  VMAT_INVARIANT,
  /* A simple contiguous access.  */
  VMAT_CONTIGUOUS,
  /* A contiguous access that goes down in memory rather than up,
     with no additional permutation.  This is used only for stores
     of invariants.  */
  VMAT_CONTIGUOUS_DOWN,
  /* A simple contiguous access in which the elements need to be permuted
     after loading or before storing.  Only used for loop vectorization;
     SLP uses separate permutes.  */
  VMAT_CONTIGUOUS_PERMUTE,
  /* A simple contiguous access in which the elements need to be reversed
     after loading or before storing.  */
  VMAT_CONTIGUOUS_REVERSE,
  /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES.  */
  VMAT_LOAD_STORE_LANES,
  /* An access in which each scalar element is loaded or stored
     individually.  */
  VMAT_ELEMENTWISE,
  /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
     SLP accesses.  Each unrolled iteration uses a contiguous load
     or store for the whole group, but the groups from separate iterations
     are combined in the same way as for VMAT_ELEMENTWISE.  */
  VMAT_STRIDED_SLP,
  /* The access uses gather loads or scatter stores.  */
  VMAT_GATHER_SCATTER
};
/* Vectorizer-specific information attached to a data reference.  */
class dr_vec_info {
public:
  /* The data reference itself.  */
  data_reference *dr;
  /* The statement that contains the data reference.  */
  stmt_vec_info stmt;
  /* The misalignment in bytes of the reference, or -1 if not known.  */
  int misalignment;
  /* The byte alignment that we'd ideally like the reference to have,
     and the value that misalignment is measured against.  */
  poly_uint64 target_alignment;
  /* If true the alignment of base_decl needs to be increased.  */
  bool base_misaligned;
  /* The base variable whose alignment may need increasing (see above).  */
  tree base_decl;
  /* Stores current vectorized loop's offset.  To be added to the DR's
     offset to calculate current offset of data reference.  */
  tree offset;
};
typedef struct data_reference *dr_p;
class _stmt_vec_info {
public:
enum stmt_vec_info_type type;
/* Indicates whether this stmts is part of a computation whose result is
used outside the loop. */
bool live;
/* Stmt is part of some pattern (computation idiom) */
bool in_pattern_p;
/* True if the statement was created during pattern recognition as
part of the replacement for RELATED_STMT. This implies that the
statement isn't part of any basic block, although for convenience
its gimple_bb is the same as for RELATED_STMT. */
bool pattern_stmt_p;
/* Is this statement vectorizable or should it be skipped in (partial)
vectorization. */
bool vectorizable;
/* The stmt to which this info struct refers to. */
gimple *stmt;
/* The vec_info with respect to which STMT is vectorized. */
vec_info *vinfo;
/* The vector type to be used for the LHS of this statement. */
tree vectype;
/* The vectorized version of the stmt. */
stmt_vec_info vectorized_stmt;
/* The following is relevant only for stmts that contain a non-scalar
data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
at most one such data-ref. */
dr_vec_info dr_aux;
/* Information about the data-ref relative to this loop
nest (the loop that is being considered for vectorization). */
innermost_loop_behavior dr_wrt_vec_loop;
/* For loop PHI nodes, the base and evolution part of it. This makes sure
this information is still available in vect_update_ivs_after_vectorizer
where we may not be able to re-analyze the PHI nodes evolution as
peeling for the prologue loop can make it unanalyzable. The evolution
part is still correct after peeling, but the base may have changed from
the version here. */
tree loop_phi_evolution_base_unchanged;
tree loop_phi_evolution_part;
/* Used for various bookkeeping purposes, generally holding a pointer to
some other stmt S that is in some way "related" to this stmt.
Current use of this field is:
If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
true): S is the "pattern stmt" that represents (and replaces) the
sequence of stmts that constitutes the pattern. Similarly, the
related_stmt of the "pattern stmt" points back to this stmt (which is
the last stmt in the original sequence of stmts that constitutes the
pattern). */
stmt_vec_info related_stmt;
/* Used to keep a sequence of def stmts of a pattern stmt if such exists.
The sequence is attached to the original statement rather than the
pattern statement. */
gimple_seq pattern_def_seq;
/* List of datarefs that are known to have the same alignment as the dataref
of this stmt. */
vec<dr_p> same_align_refs;
/* Selected SIMD clone's function info. First vector element
is SIMD clone's function decl, followed by a pair of trees (base + step)
for linear arguments (pair of NULLs for other arguments). */
vec<tree> simd_clone_info;
/* Classify the def of this stmt. */
enum vect_def_type def_type;
/* Whether the stmt is SLPed, loop-based vectorized, or both. */
enum slp_vect_type slp_type;
/* Interleaving and reduction chains info. */
/* First element in the group. */
stmt_vec_info first_element;
/* Pointer to the next element in the group. */
stmt_vec_info next_element;
/* The size of the group. */
unsigned int size;
/* For stores, number of stores from this group seen. We vectorize the last
one. */
unsigned int store_count;
/* For loads only, the gap from the previous load. For consecutive loads, GAP
is 1. */
unsigned int gap;
/* The minimum negative dependence distance this stmt participates in
or zero if none. */
unsigned int min_neg_dist;
/* Not all stmts in the loop need to be vectorized. e.g, the increment
of the loop induction variable and computation of array indexes. relevant
indicates whether the stmt needs to be vectorized. */
enum vect_relevant relevant;
/* For loads if this is a gather, for stores if this is a scatter. */
bool gather_scatter_p;
/* True if this is an access with loop-invariant stride. */
bool strided_p;
/* For both loads and stores. */
unsigned simd_lane_access_p : 3;
/* Classifies how the load or store is going to be implemented
for loop vectorization. */
vect_memory_access_type memory_access_type;
/* For INTEGER_INDUC_COND_REDUCTION, the initial value to be used. */
tree induc_cond_initial_val;
/* If not NULL the value to be added to compute final reduction value. */
tree reduc_epilogue_adjustment;
/* On a reduction PHI the reduction type as detected by
vect_is_simple_reduction and vectorizable_reduction. */
enum vect_reduction_type reduc_type;
/* The original reduction code, to be used in the epilogue. */
enum tree_code reduc_code;
/* An internal function we should use in the epilogue. */
internal_fn reduc_fn;
/* On a stmt participating in the reduction the index of the operand
on the reduction SSA cycle. */
int reduc_idx;
/* On a reduction PHI the def returned by vect_force_simple_reduction.
On the def returned by vect_force_simple_reduction the
corresponding PHI. */
stmt_vec_info reduc_def;
/* The vector input type relevant for reduction vectorization. */
tree reduc_vectype_in;
/* The vector type for performing the actual reduction. */
tree reduc_vectype;
/* Whether we force a single cycle PHI during reduction vectorization. */
bool force_single_cycle;
/* Whether on this stmt reduction meta is recorded. */
bool is_reduc_info;
/* The number of scalar stmt references from active SLP instances. */
unsigned int num_slp_uses;
/* If nonzero, the lhs of the statement could be truncated to this
many bits without affecting any users of the result. */
unsigned int min_output_precision;
/* If nonzero, all non-boolean input operands have the same precision,
and they could each be truncated to this many bits without changing
the result. */
unsigned int min_input_precision;
/* If OPERATION_BITS is nonzero, the statement could be performed on
an integer with the sign and number of bits given by OPERATION_SIGN
and OPERATION_BITS without changing the result. */
unsigned int operation_precision;
signop operation_sign;
/* If the statement produces a boolean result, this value describes
how we should choose the associated vector type. The possible
values are:
- an integer precision N if we should use the vector mask type
associated with N-bit integers. This is only used if all relevant
input booleans also want the vector mask type for N-bit integers,
or if we can convert them into that form by pattern-matching.
- ~0U if we considered choosing a vector mask type but decided
to treat the boolean as a normal integer type instead.
- 0 otherwise. This means either that the operation isn't one that
could have a vector mask type (and so should have a normal vector
type instead) or that we simply haven't made a choice either way. */
unsigned int mask_precision;
/* True if this is only suitable for SLP vectorization. */
bool slp_vect_only_p;
};
/* Information about a gather/scatter call.  */
struct gather_scatter_info {
/* The internal function to use for the gather/scatter operation,
or IFN_LAST if a built-in function should be used instead.  */
internal_fn ifn;
/* The FUNCTION_DECL for the built-in gather/scatter function,
or null if an internal function should be used instead.  */
/* Note: per the two comments above, IFN and DECL are alternatives --
at most one of them carries meaningful information.  */
tree decl;
/* The loop-invariant base value.  */
tree base;
/* The original scalar offset, which is a non-loop-invariant SSA_NAME.  */
tree offset;
/* Each offset element should be multiplied by this amount before
being added to the base.  */
int scale;
/* The definition type for the vectorized offset.  */
enum vect_def_type offset_dt;
/* The type of the vectorized offset.  */
tree offset_vectype;
/* The type of the scalar elements after loading or before storing.  */
tree element_type;
/* The type of the scalar elements being loaded or stored.  */
tree memory_type;
};
/* Access Functions. */
#define STMT_VINFO_TYPE(S) (S)->type
#define STMT_VINFO_STMT(S) (S)->stmt
/* Return the loop_vec_info that STMT_VINFO belongs to, or NULL if its
   vec_info is not a loop_vec_info (e.g. basic-block vectorization).  */
inline loop_vec_info
STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
{
  /* dyn_cast already yields a null pointer on a failed downcast, which
     is exactly the value the caller expects.  */
  return dyn_cast <loop_vec_info> (stmt_vinfo->vinfo);
}
/* Return the bb_vec_info that STMT_VINFO belongs to, or NULL if its
   vec_info is not a bb_vec_info (e.g. loop vectorization).  */
inline bb_vec_info
STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
{
  /* A failed dyn_cast is already NULL, so hand it straight back.  */
  return dyn_cast <bb_vec_info> (stmt_vinfo->vinfo);
}
#define STMT_VINFO_RELEVANT(S) (S)->relevant
#define STMT_VINFO_LIVE_P(S) (S)->live
#define STMT_VINFO_VECTYPE(S) (S)->vectype
#define STMT_VINFO_VEC_STMT(S) (S)->vectorized_stmt
#define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable
#define STMT_VINFO_DATA_REF(S) ((S)->dr_aux.dr + 0)
#define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p
#define STMT_VINFO_STRIDED_P(S) (S)->strided_p
#define STMT_VINFO_MEMORY_ACCESS_TYPE(S) (S)->memory_access_type
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p
#define STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL(S) (S)->induc_cond_initial_val
#define STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT(S) (S)->reduc_epilogue_adjustment
#define STMT_VINFO_REDUC_IDX(S) (S)->reduc_idx
#define STMT_VINFO_FORCE_SINGLE_CYCLE(S) (S)->force_single_cycle
#define STMT_VINFO_DR_WRT_VEC_LOOP(S) (S)->dr_wrt_vec_loop
#define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_wrt_vec_loop.base_address
#define STMT_VINFO_DR_INIT(S) (S)->dr_wrt_vec_loop.init
#define STMT_VINFO_DR_OFFSET(S) (S)->dr_wrt_vec_loop.offset
#define STMT_VINFO_DR_STEP(S) (S)->dr_wrt_vec_loop.step
#define STMT_VINFO_DR_BASE_ALIGNMENT(S) (S)->dr_wrt_vec_loop.base_alignment
#define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \
(S)->dr_wrt_vec_loop.base_misalignment
#define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \
(S)->dr_wrt_vec_loop.offset_alignment
#define STMT_VINFO_DR_STEP_ALIGNMENT(S) \
(S)->dr_wrt_vec_loop.step_alignment
#define STMT_VINFO_DR_INFO(S) \
(gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)
#define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt
#define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq
#define STMT_VINFO_SAME_ALIGN_REFS(S) (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S) (S)->def_type
#define STMT_VINFO_GROUPED_ACCESS(S) \
((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist
#define STMT_VINFO_NUM_SLP_USES(S) (S)->num_slp_uses
#define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type
#define STMT_VINFO_REDUC_CODE(S) (S)->reduc_code
#define STMT_VINFO_REDUC_FN(S) (S)->reduc_fn
#define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def
#define STMT_VINFO_REDUC_VECTYPE(S) (S)->reduc_vectype
#define STMT_VINFO_REDUC_VECTYPE_IN(S) (S)->reduc_vectype_in
#define STMT_VINFO_SLP_VECT_ONLY(S) (S)->slp_vect_only_p
#define DR_GROUP_FIRST_ELEMENT(S) \
(gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
#define DR_GROUP_NEXT_ELEMENT(S) \
(gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
#define DR_GROUP_SIZE(S) \
(gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
#define DR_GROUP_STORE_COUNT(S) \
(gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)
#define DR_GROUP_GAP(S) \
(gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)
#define REDUC_GROUP_FIRST_ELEMENT(S) \
(gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element)
#define REDUC_GROUP_NEXT_ELEMENT(S) \
(gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element)
#define REDUC_GROUP_SIZE(S) \
(gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)
#define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope)
#define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S) (S)->slp_type
#define VECT_MAX_COST 1000
/* The maximum number of intermediate steps required in multi-step type
conversion. */
#define MAX_INTERM_CVT_STEPS 3
#define MAX_VECTORIZATION_FACTOR INT_MAX
/* Nonzero if TYPE represents a (scalar) boolean type or type
in the middle-end compatible with it (unsigned precision 1 integral
types). Used to determine which types should be vectorized as
VECTOR_BOOLEAN_TYPE_P. */
#define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == BOOLEAN_TYPE \
|| ((TREE_CODE (TYPE) == INTEGER_TYPE \
|| TREE_CODE (TYPE) == ENUMERAL_TYPE) \
&& TYPE_PRECISION (TYPE) == 1 \
&& TYPE_UNSIGNED (TYPE)))
/* Return true if STMT_INFO lives in the inner loop of LOOP, i.e. LOOP
   has an inner loop and the statement's basic block belongs directly
   to it.  Used when vectorizing an outer loop.  */
static inline bool
nested_in_vect_loop_p (class loop *loop, stmt_vec_info stmt_info)
{
  if (!loop->inner)
    return false;
  return loop->inner == gimple_bb (stmt_info->stmt)->loop_father;
}
/* Return true if STMT_INFO should produce a vector mask type rather than
a normal nonmask type. */
/* Return true if STMT_INFO should produce a vector mask type rather than
   a normal nonmask type.  */
static inline bool
vect_use_mask_type_p (stmt_vec_info stmt_info)
{
  unsigned int prec = stmt_info->mask_precision;
  /* Per the mask_precision encoding: 0 means no choice has been made,
     ~0U means "use a normal integer vector"; anything else selects the
     vector mask type for that integer precision.  */
  return prec != 0 && prec != ~0U;
}
/* Return TRUE if a statement represented by STMT_INFO is a part of a
pattern. */
static inline bool
is_pattern_stmt_p (stmt_vec_info stmt_info)
{
/* Simple flag accessor: true iff this stmt_info was created by pattern
recognition to replace an original scalar statement.  */
return stmt_info->pattern_stmt_p;
}
/* If STMT_INFO is a pattern statement, return the statement that it
replaces, otherwise return STMT_INFO itself. */
/* If STMT_INFO is a pattern statement, return the original statement it
   replaces; otherwise return STMT_INFO unchanged.  */
inline stmt_vec_info
vect_orig_stmt (stmt_vec_info stmt_info)
{
  return (is_pattern_stmt_p (stmt_info)
	  ? STMT_VINFO_RELATED_STMT (stmt_info)
	  : stmt_info);
}
/* Return the later statement between STMT1_INFO and STMT2_INFO. */
/* Return whichever of STMT1_INFO and STMT2_INFO appears later, comparing
   the uids of the original (pre-pattern) statements.  Ties go to
   STMT2_INFO, as in the original formulation.  */
static inline stmt_vec_info
get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
{
  unsigned uid1 = gimple_uid (vect_orig_stmt (stmt1_info)->stmt);
  unsigned uid2 = gimple_uid (vect_orig_stmt (stmt2_info)->stmt);
  return uid1 > uid2 ? stmt1_info : stmt2_info;
}
/* If STMT_INFO has been replaced by a pattern statement, return the
replacement statement, otherwise return STMT_INFO itself. */
/* If STMT_INFO has been replaced by a pattern statement, return that
   replacement; otherwise return STMT_INFO itself.  Dual of
   vect_orig_stmt.  */
inline stmt_vec_info
vect_stmt_to_vectorize (stmt_vec_info stmt_info)
{
  return (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  ? STMT_VINFO_RELATED_STMT (stmt_info)
	  : stmt_info);
}
/* Return true if BB is a loop header. */
/* Return true if BB is the header of its containing loop.  In checking
   builds, assert that a non-header block seen here has exactly one
   predecessor, as the callers expect.  */
static inline bool
is_loop_header_bb_p (basic_block bb)
{
  if (bb != bb->loop_father->header)
    {
      gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
      return false;
    }
  return true;
}
/* Return pow2 (X). */
/* Return pow2 (X), i.e. 2 raised to the power X.
   The original implementation multiplied in an O(X) loop; a single left
   shift is equivalent for every X where the loop itself was well defined
   (0 <= X <= 30 on 32-bit int; larger X overflowed the loop's signed
   multiply too).  X <= 0 returns 1, matching the loop not executing, and
   also keeps the shift count non-negative (shifting by a negative count
   is undefined behavior).  */
static inline int
vect_pow2 (int x)
{
  return x <= 0 ? 1 : 1 << x;
}
/* Alias targetm.vectorize.builtin_vectorization_cost. */
static inline int
builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
tree vectype, int misalign)
{
/* Thin forwarding wrapper around the target hook; VECTYPE may be NULL
and MISALIGN 0 when no vector type / alignment info is relevant
(see vect_get_stmt_cost below).  */
return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
vectype, misalign);
}
/* Get cost by calling cost target builtin. */
/* Convenience form of builtin_vectorization_cost for the common case
where neither a vector type nor misalignment information applies.  */
static inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
return builtin_vectorization_cost (type_of_cost, NULL, 0);
}
/* Alias targetm.vectorize.init_cost. */
static inline void *
init_cost (class loop *loop_info)
{
/* Returns an opaque, target-owned cost-accumulation handle; it must be
released with destroy_cost_data once finish_cost has been called.  */
return targetm.vectorize.init_cost (loop_info);
}
extern void dump_stmt_cost (FILE *, void *, int, enum vect_cost_for_stmt,
stmt_vec_info, int, unsigned,
enum vect_cost_model_location);
/* Alias targetm.vectorize.add_stmt_cost. */
static inline unsigned
add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
stmt_vec_info stmt_info, int misalign,
enum vect_cost_model_location where)
{
/* Record COUNT copies of a stmt of kind KIND in the target's cost
accumulator DATA and return the cost the target assigned.  */
unsigned cost = targetm.vectorize.add_stmt_cost (data, count, kind,
stmt_info, misalign, where);
/* With detailed dumping enabled, log exactly what was recorded so the
dump mirrors the target's accounting.  */
if (dump_file && (dump_flags & TDF_DETAILS))
dump_stmt_cost (dump_file, data, count, kind, stmt_info, misalign,
cost, where);
return cost;
}
/* Alias targetm.vectorize.finish_cost. */
static inline void
finish_cost (void *data, unsigned *prologue_cost,
unsigned *body_cost, unsigned *epilogue_cost)
{
/* Ask the target to total up the costs recorded in DATA, split into
prologue/body/epilogue components written through the out-params.  */
targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost);
}
/* Alias targetm.vectorize.destroy_cost_data. */
static inline void
destroy_cost_data (void *data)
{
/* Release the accumulator obtained from init_cost; DATA must not be
used afterwards.  */
targetm.vectorize.destroy_cost_data (data);
}
/* Replay every entry of COST_VEC into the target cost accumulator DATA
via add_stmt_cost, discarding the individual returned costs.  */
inline void
add_stmt_costs (void *data, stmt_vector_for_cost *cost_vec)
{
stmt_info_for_cost *cost;
unsigned i;
FOR_EACH_VEC_ELT (*cost_vec, i, cost)
add_stmt_cost (data, cost->count, cost->kind, cost->stmt_info,
cost->misalign, cost->where);
}
/*-----------------------------------------------------------------*/
/* Info on data references alignment. */
/*-----------------------------------------------------------------*/
#define DR_MISALIGNMENT_UNKNOWN (-1)
#define DR_MISALIGNMENT_UNINITIALIZED (-2)
/* Record misalignment VAL for DR_INFO.  VAL may also be one of the
sentinels DR_MISALIGNMENT_UNKNOWN / DR_MISALIGNMENT_UNINITIALIZED.  */
inline void
set_dr_misalignment (dr_vec_info *dr_info, int val)
{
dr_info->misalignment = val;
}
/* Return the recorded misalignment of DR_INFO (possibly
DR_MISALIGNMENT_UNKNOWN).  It is a bug to query a dataref whose
misalignment was never computed, hence the assert.  */
inline int
dr_misalignment (dr_vec_info *dr_info)
{
int misalign = dr_info->misalignment;
gcc_assert (misalign != DR_MISALIGNMENT_UNINITIALIZED);
return misalign;
}
/* Reflects actual alignment of first access in the vectorized loop,
taking into account peeling/versioning if applied. */
#define DR_MISALIGNMENT(DR) dr_misalignment (DR)
#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)
/* Only defined once DR_MISALIGNMENT is defined. */
#define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment)
/* Return true if data access DR_INFO is aligned to its target alignment
(which may be less than a full vector). */
/* Return true if data access DR_INFO is aligned to its target alignment
   (which may be less than a full vector), i.e. its misalignment is
   known to be zero.  */
static inline bool
aligned_access_p (dr_vec_info *dr_info)
{
  return DR_MISALIGNMENT (dr_info) == 0;
}
/* Return TRUE if the alignment of the data access is known, and FALSE
otherwise. */
/* Return TRUE when the misalignment of DR_INFO has been computed (it may
   still be nonzero), FALSE when it is DR_MISALIGNMENT_UNKNOWN.  */
static inline bool
known_alignment_for_access_p (dr_vec_info *dr_info)
{
  return DR_MISALIGNMENT (dr_info) != DR_MISALIGNMENT_UNKNOWN;
}
/* Return the minimum alignment in bytes that the vectorized version
of DR_INFO is guaranteed to have. */
/* Return the minimum alignment in bytes that the vectorized version
   of DR_INFO is guaranteed to have.  */
static inline unsigned int
vect_known_alignment_in_bytes (dr_vec_info *dr_info)
{
  int misalign = DR_MISALIGNMENT (dr_info);
  /* Unknown misalignment: fall back to the scalar type's alignment.  */
  if (misalign == DR_MISALIGNMENT_UNKNOWN)
    return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
  /* Perfectly aligned: the full target alignment is guaranteed.  */
  if (misalign == 0)
    return known_alignment (DR_TARGET_ALIGNMENT (dr_info));
  /* Otherwise the guarantee is the lowest set bit of the misalignment.  */
  return misalign & -misalign;
}
/* Return the behavior of DR_INFO with respect to the vectorization context
(which for outer loop vectorization might not be the behavior recorded
in DR_INFO itself). */
static inline innermost_loop_behavior *
vect_dr_behavior (dr_vec_info *dr_info)
{
stmt_vec_info stmt_info = dr_info->stmt;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
/* For BB vectorization, or loop vectorization where the stmt is not in
a nested inner loop, the behavior recorded on the data_reference
itself is the right one.  */
if (loop_vinfo == NULL
|| !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info))
return &DR_INNERMOST (dr_info->dr);
/* Outer-loop vectorization of an inner-loop access: use the behavior
recomputed relative to the loop being vectorized.  */
else
return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
}
/* Return the offset calculated by adding the offset of this DR_INFO to the
corresponding data_reference's offset. If CHECK_OUTER then use
vect_dr_behavior to select the appropriate data_reference to use. */
inline tree
get_dr_vinfo_offset (dr_vec_info *dr_info, bool check_outer = false)
{
innermost_loop_behavior *base;
/* CHECK_OUTER selects between the vectorization-context behavior
(which may differ for outer-loop vectorization) and the dataref's
own recorded innermost behavior.  */
if (check_outer)
base = vect_dr_behavior (dr_info);
else
base = &dr_info->dr->innermost;
tree offset = base->offset;
/* No extra per-dr_info offset: the behavior's offset stands alone.  */
if (!dr_info->offset)
return offset;
/* Otherwise add the two offsets; convert through sizetype so the
addition is done in an offset-compatible type.  */
offset = fold_convert (sizetype, offset);
return fold_build2 (PLUS_EXPR, TREE_TYPE (dr_info->offset), offset,
dr_info->offset);
}
/* Return true if the vect cost model is unlimited. */
/* Return true if the vect cost model in effect is "unlimited".  A loop
   marked force_vectorize consults the SIMD cost model when one was set
   explicitly; everything else uses the general vector cost model.  */
static inline bool
unlimited_cost_model (loop_p loop)
{
  bool use_simd_model = (loop != NULL
			 && loop->force_vectorize
			 && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT);
  if (use_simd_model)
    return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED;
  return flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED;
}
/* Return true if the loop described by LOOP_VINFO is fully-masked and
if the first iteration should use a partial mask in order to achieve
alignment. */
static inline bool
vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
{
/* Both conditions must hold: the loop is fully masked AND alignment
peeling was requested, in which case the peel is realized by masking
off the first partial iteration instead of emitting a prologue.  */
return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
&& LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
}
/* Return the number of vectors of type VECTYPE that are needed to get
NUNITS elements. NUNITS should be based on the vectorization factor,
so it is always a known multiple of the number of elements in VECTYPE. */
static inline unsigned int
vect_get_num_vectors (poly_uint64 nunits, tree vectype)
{
/* exact_div asserts (in checking builds) that NUNITS really is a
multiple of the vector's subparts, per the documented precondition.  */
return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant ();
}
/* Return the number of copies needed for loop vectorization when
a statement operates on vectors of type VECTYPE. This is the
vectorization factor divided by the number of elements in
VECTYPE and is always known at compile time. */
static inline unsigned int
vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype)
{
/* VF / nunits(VECTYPE); always an exact, compile-time constant.  */
return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype);
}
/* Update maximum unit count *MAX_NUNITS so that it accounts for
NUNITS. *MAX_NUNITS can be 1 if we haven't yet recorded anything. */
static inline void
vect_update_max_nunits (poly_uint64 *max_nunits, poly_uint64 nunits)
{
/* All unit counts have the form vec_info::vector_size * X for some
rational X, so two unit sizes must have a common multiple.
Everything is a multiple of the initial value of 1.  */
*max_nunits = force_common_multiple (*max_nunits, nunits);
}
/* Update maximum unit count *MAX_NUNITS so that it accounts for
the number of units in vector type VECTYPE. *MAX_NUNITS can be 1
if we haven't yet recorded any vector types. */
static inline void
vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
{
/* Overload taking a vector type: delegate using its subpart count.  */
vect_update_max_nunits (max_nunits, TYPE_VECTOR_SUBPARTS (vectype));
}
/* Return the vectorization factor that should be used for costing
purposes while vectorizing the loop described by LOOP_VINFO.
Pick a reasonable estimate if the vectorization factor isn't
known at compile time. */
static inline unsigned int
vect_vf_for_cost (loop_vec_info loop_vinfo)
{
/* For variable-length vectors the VF is a poly_int; use the target's
estimated value so costing has a concrete number to work with.  */
return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
}
/* Estimate the number of elements in VEC_TYPE for costing purposes.
Pick a reasonable estimate if the exact number isn't known at
compile time. */
static inline unsigned int
vect_nunits_for_cost (tree vec_type)
{
/* Same estimation as vect_vf_for_cost, applied to the element count
of VEC_TYPE.  */
return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type));
}
/* Return the maximum possible vectorization factor for LOOP_VINFO. */
/* Return the maximum possible vectorization factor for LOOP_VINFO:
   the VF itself when it is a compile-time constant, otherwise the
   generic MAX_VECTORIZATION_FACTOR bound.  */
static inline unsigned HOST_WIDE_INT
vect_max_vf (loop_vec_info loop_vinfo)
{
  unsigned HOST_WIDE_INT vf;
  return (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf)
	  ? vf
	  : MAX_VECTORIZATION_FACTOR);
}
/* Return the size of the value accessed by unvectorized data reference
DR_INFO. This is only valid once STMT_VINFO_VECTYPE has been calculated
for the associated gimple statement, since that guarantees that DR_INFO
accesses either a scalar or a scalar equivalent. ("Scalar equivalent"
here includes things like V1SI, which can be vectorized in the same way
as a plain SI.) */
inline unsigned int
vect_get_scalar_dr_size (dr_vec_info *dr_info)
{
/* Size in bytes of the referenced scalar (or scalar-equivalent) type;
per the comment above, only valid once STMT_VINFO_VECTYPE has been
computed for the associated statement.  */
return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr))));
}
/* Return true if LOOP_VINFO requires a runtime check for whether the
vector loop is profitable. */
inline bool
vect_apply_runtime_profitability_check_p (loop_vec_info loop_vinfo)
{
unsigned int th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
/* A runtime check is needed only when the iteration count is not a
compile-time constant and the profitability threshold is not already
guaranteed by the (estimated) vectorization factor.  */
return (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& th >= vect_vf_for_cost (loop_vinfo));
}
/* Source location + hotness information. */
extern dump_user_location_t vect_location;
/* A macro for calling:
dump_begin_scope (MSG, vect_location);
via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc,
and then calling
dump_end_scope ();
once the object goes out of scope, thus capturing the nesting of
the scopes.
These scopes affect dump messages within them: dump messages at the
top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
in a nested scope implicitly default to MSG_PRIORITY_INTERNALS. */
#define DUMP_VECT_SCOPE(MSG) \
AUTO_DUMP_SCOPE (MSG, vect_location)
/* A sentinel class for ensuring that the "vect_location" global gets
reset at the end of a scope.
The "vect_location" global is used during dumping and contains a
location_t, which could contain references to a tree block via the
ad-hoc data. This data is used for tracking inlining information,
but it's not a GC root; it's simply assumed that such locations never
get accessed if the blocks are optimized away.
Hence we need to ensure that such locations are purged at the end
of any operations using them (e.g. via this class). */
class auto_purge_vect_location
{
public:
/* RAII sentinel: the destructor (defined elsewhere) resets the
"vect_location" global when the object leaves scope, per the
comment block above.  */
~auto_purge_vect_location ();
};
/*-----------------------------------------------------------------*/
/* Function prototypes. */
/*-----------------------------------------------------------------*/
/* Simple loop peeling and versioning utilities for vectorizer's purposes -
in tree-vect-loop-manip.c. */
extern void vect_set_loop_condition (class loop *, loop_vec_info,
tree, tree, tree, bool);
extern bool slpeel_can_duplicate_loop_p (const class loop *, const_edge);
class loop *slpeel_tree_duplicate_loop_to_edge_cfg (class loop *,
class loop *, edge);
class loop *vect_loop_versioning (loop_vec_info, gimple *);
extern class loop *vect_do_peeling (loop_vec_info, tree, tree,
tree *, tree *, tree *, int, bool, bool,
tree *);
extern void vect_prepare_for_masked_peels (loop_vec_info);
extern dump_user_location_t find_loop_location (class loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);
extern void vect_update_inits_of_drs (loop_vec_info, tree, tree_code);
/* In tree-vect-stmts.c. */
extern tree get_related_vectype_for_scalar_type (machine_mode, tree,
poly_uint64 = 0);
extern tree get_vectype_for_scalar_type (vec_info *, tree, unsigned int = 0);
extern tree get_vectype_for_scalar_type (vec_info *, tree, slp_tree);
extern tree get_mask_type_for_scalar_type (vec_info *, tree, unsigned int = 0);
extern tree get_same_sized_vectype (tree, tree);
extern bool vect_chooses_same_modes_p (vec_info *, machine_mode);
extern bool vect_get_loop_mask_type (loop_vec_info);
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
stmt_vec_info * = NULL, gimple ** = NULL);
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
tree *, stmt_vec_info * = NULL,
gimple ** = NULL);
extern bool supportable_widening_operation (enum tree_code, stmt_vec_info,
tree, tree, enum tree_code *,
enum tree_code *, int *,
vec<tree> *);
extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
enum tree_code *, int *,
vec<tree> *);
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
enum vect_cost_for_stmt, stmt_vec_info,
int, enum vect_cost_model_location);
extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *);
extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *,
gimple_stmt_iterator *);
extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info, bool *);
extern tree vect_get_store_rhs (stmt_vec_info);
extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info, enum vect_def_type);
extern tree vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL);
extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec<tree> *,
vec<tree> *, slp_tree);
extern void vect_get_vec_defs_for_stmt_copy (vec_info *,
vec<tree> *, vec<tree> *);
extern tree vect_init_vector (stmt_vec_info, tree, tree,
gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (vec_info *, tree);
extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *,
slp_tree, slp_instance);
extern void vect_remove_stores (stmt_vec_info);
extern bool vect_nop_conversion_p (stmt_vec_info);
extern opt_result vect_analyze_stmt (stmt_vec_info, bool *, slp_tree,
slp_instance, stmt_vector_for_cost *);
extern void vect_get_load_cost (stmt_vec_info, int, bool,
unsigned int *, unsigned int *,
stmt_vector_for_cost *,
stmt_vector_for_cost *, bool);
extern void vect_get_store_cost (stmt_vec_info, int,
unsigned int *, stmt_vector_for_cost *);
extern bool vect_supportable_shift (vec_info *, enum tree_code, tree);
extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &);
extern void optimize_mask_stores (class loop*);
extern gcall *vect_gen_while (tree, tree, tree);
extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *,
tree *, unsigned int = 0);
extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info, unsigned int = 0);
/* In tree-vect-data-refs.c. */
extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);
extern enum dr_alignment_support vect_supportable_dr_alignment
(dr_vec_info *, bool);
extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *,
HOST_WIDE_INT *);
extern opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *);
extern bool vect_slp_analyze_instance_dependence (slp_instance);
extern opt_result vect_enhance_data_refs_alignment (loop_vec_info);
extern opt_result vect_analyze_data_refs_alignment (loop_vec_info);
extern opt_result vect_verify_datarefs_alignment (loop_vec_info);
extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
extern opt_result vect_analyze_data_ref_accesses (vec_info *);
extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info);
extern bool vect_gather_scatter_fn_p (vec_info *, bool, bool, tree, tree,
tree, int, internal_fn *, tree *);
extern bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info,
gather_scatter_info *);
extern opt_result vect_find_stmt_data_reference (loop_p, gimple *,
vec<data_reference_p> *);
extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *, bool *);
extern void vect_record_base_alignments (vec_info *);
extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, class loop *, tree,
tree *, gimple_stmt_iterator *,
gimple **, bool,
tree = NULL_TREE, tree = NULL_TREE);
extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *,
stmt_vec_info, tree);
extern void vect_copy_ref_info (tree, tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern void vect_permute_store_chain (vec<tree> ,unsigned int, stmt_vec_info,
gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *,
tree *, enum dr_alignment_support, tree,
class loop **);
extern void vect_transform_grouped_load (stmt_vec_info, vec<tree> , int,
gimple_stmt_iterator *);
extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
const char * = NULL);
extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *,
tree, tree = NULL_TREE);
/* In tree-vect-loop.c. */
extern widest_int vect_iv_limit_for_full_masking (loop_vec_info loop_vinfo);
/* Used in tree-vect-loop-manip.c */
extern void determine_peel_for_niter (loop_vec_info);
/* Used in gimple-loop-interchange.c and tree-parloops.c. */
extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree,
enum tree_code);
extern bool needs_fold_left_reduction_p (tree, tree_code);
/* Drive for loop analysis stage. */
extern opt_loop_vec_info vect_analyze_loop (class loop *, vec_info_shared *);
extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *,
tree *, bool);
extern tree vect_halve_mask_nunits (tree, machine_mode);
extern tree vect_double_mask_nunits (tree, machine_mode);
extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *,
unsigned int, tree, tree);
extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *,
unsigned int, tree, unsigned int);
extern stmt_vec_info info_for_reduction (stmt_vec_info);
/* Drive for loop transformation stage. */
extern class loop *vect_transform_loop (loop_vec_info, gimple *);
extern opt_loop_vec_info vect_analyze_loop_form (class loop *,
vec_info_shared *);
extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *,
slp_tree, slp_instance, int,
bool, stmt_vector_for_cost *);
extern bool vectorizable_reduction (stmt_vec_info, slp_tree, slp_instance,
stmt_vector_for_cost *);
extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *,
stmt_vec_info *, slp_tree,
stmt_vector_for_cost *);
extern bool vect_transform_reduction (stmt_vec_info, gimple_stmt_iterator *,
stmt_vec_info *, slp_tree);
extern bool vect_transform_cycle_phi (stmt_vec_info, stmt_vec_info *,
slp_tree, slp_instance);
extern bool vectorizable_lc_phi (stmt_vec_info, stmt_vec_info *, slp_tree);
extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code);
extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
stmt_vector_for_cost *,
stmt_vector_for_cost *,
stmt_vector_for_cost *);
extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree);
/* In tree-vect-slp.c. */
extern void vect_free_slp_instance (slp_instance, bool);
extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> ,
gimple_stmt_iterator *, poly_uint64,
slp_instance, bool, unsigned *);
extern bool vect_slp_analyze_operations (vec_info *);
extern void vect_schedule_slp (vec_info *);
extern opt_result vect_analyze_slp (vec_info *, unsigned);
extern bool vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
extern void vect_get_slp_defs (slp_tree, vec<vec<tree> > *, unsigned n = -1U);
extern bool vect_slp_bb (basic_block);
extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree);
extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info);
extern bool can_duplicate_and_interleave_p (vec_info *, unsigned int, tree,
unsigned int * = NULL,
tree * = NULL, tree * = NULL);
extern void duplicate_and_interleave (vec_info *, gimple_seq *, tree,
vec<tree>, unsigned int, vec<tree> &);
extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info);
/* In tree-vect-patterns.c. */
/* Pattern recognition functions.
Additional pattern recognition functions can (and will) be added
in the future. */
void vect_pattern_recog (vec_info *);
/* In tree-vectorizer.c. */
unsigned vectorize_loops (void);
void vect_free_loop_info_assumptions (class loop *);
gimple *vect_loop_vectorized_call (class loop *, gcond **cond = NULL);
#endif /* GCC_TREE_VECTORIZER_H */
|
main.c | #include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <omp.h>
#include <kmeans.h>
#include <granula.h>
#define min(x, y) (((x) < (y)) ? (x) : (y))
/*
 * Run Lloyd's k-means on a row-major attribute matrix using OpenMP.
 *
 * nclusters   number of cluster centers to fit
 * attributes  nobjects x nattributes matrix, row-major, one object per row
 * nattributes dimensionality of each object
 * nobjects    number of objects (rows)
 * is_random   NOTE(review): currently unused by this function -- the initial
 *             centers are always drawn with rand(); confirm intent
 * niterations fixed number of Lloyd iterations (no convergence test)
 * nthreads    number of OpenMP threads to use
 *
 * Each thread accumulates per-cluster sums/counts into its own
 * partial_new_centers[tid] / partial_new_center_len[tid] buffers; a single
 * thread then reduces them into the shared `clusters` array each iteration.
 */
void kmeans(size_t nclusters,
value_t *attributes,
size_t nattributes,
size_t nobjects,
int is_random,
size_t niterations,
size_t nthreads) {
omp_set_num_threads(nthreads);
value_t **clusters = alloc_rm2(nclusters, nattributes);
// Randomly pick the cluster centers.
// NOTE(review): rand() is never seeded here, so center choice is
// deterministic across runs; indices may also repeat.
for (size_t i = 0; i < nclusters; ++i) {
size_t index = rand() % nobjects;
for (size_t j = 0; j < nattributes; ++j) {
clusters[i][j] = attributes[index * nattributes + j];
}
}
// Per-thread accumulation buffers, allocated in parallel so first-touch
// places each thread's buffer on its own NUMA node.
value_t ***partial_new_centers = malloc(sizeof(value_t **) * nthreads);
size_t **partial_new_center_len = malloc(sizeof(size_t *) * nthreads);
#pragma omp parallel for
for (size_t i = 0; i < nthreads; ++i) {
partial_new_centers[i] = alloc_rm2(nclusters, nattributes);
partial_new_center_len[i] = calloc(nclusters, sizeof(size_t));
}
granula_op_t kmeans_op = {granula_get_uuid(), "kmeans", "Id.Unique", "Job", "Id.Unique"};
char buf[512];
granula_get_opinfo(buf, &kmeans_op, "StartTime", "??");
printf("%s\n", buf);
#pragma omp parallel
{
const size_t tid = omp_get_thread_num();
value_t **local_centers = partial_new_centers[tid];
size_t *local_center_len = partial_new_center_len[tid];
for (size_t iter = 0; iter < niterations; ++iter) {
// Initialize the local storage. Each thread only touches its own
// buffers; the implicit barrier at the end of the previous
// iteration's `single` guarantees the reduction has consumed them.
for (size_t i = 0; i < nclusters; ++i) {
local_center_len[i] = 0;
for (size_t j = 0; j < nattributes; ++j) {
local_centers[i][j] = 0;
}
}
// Assignment step: worksharing loop over objects; the implicit
// barrier at its end makes all partial sums visible before the
// reduction below.
#pragma omp for
for (size_t i = 0; i < nobjects; ++i) {
// Find the index of the nearest cluster centers.
size_t nearest = find_nearest_point(
&attributes[i * nattributes], nattributes, clusters, nclusters);
// Update new cluster centers: sum of all objects located within.
local_center_len[nearest]++;
for (size_t j = 0; j < nattributes; ++j) {
local_centers[nearest][j] += attributes[i * nattributes + j];
}
}
// Perform reduction at the master core. `single` ends with an
// implicit barrier, so no thread starts the next iteration early.
#pragma omp single
for (size_t i = 0; i < nclusters; ++i) {
for (size_t j = 0; j < nattributes; ++j) {
value_t sum = 0;
size_t length = 0;
for (size_t k = 0; k < nthreads; ++k) {
sum += partial_new_centers[k][i][j];
length += partial_new_center_len[k][i];
}
if (length > 0) {
clusters[i][j] = sum / length;
} else {
// Empty cluster: reset its center to the origin.
clusters[i][j] = 0;
}
}
}
}
}
granula_get_opinfo(buf, &kmeans_op, "EndTime", "??");
printf("%s\n", buf);
#pragma omp parallel for
for (size_t i = 0; i < nthreads; ++i) {
free_rm2(partial_new_centers[i]);
free(partial_new_center_len[i]);
}
free_rm2(clusters);
free(partial_new_centers);
free(partial_new_center_len);
}
/*
 * Entry point: parse command-line options, build a deterministic synthetic
 * data set (loading from file is not implemented) and run the OpenMP
 * k-means kernel.
 *
 *   -f file   input file (not implemented; aborts)
 *   -n num    number of chunks (parsed but currently unused)
 *   -k num    number of clusters
 *   -d num    dimensionality (attributes per object)
 *   -c num    cardinality (number of objects)
 *   -i num    number of iterations
 *   -t num    number of OpenMP threads
 *
 * Returns 0 on success, 1 on usage error.
 */
int main(int argc, char *argv[]) {
    const char *filename = NULL;
    size_t nclusters = 16;
    size_t nattributes = 32;
    size_t nobjects = 4096;
    size_t niterations = 100;
    size_t nthreads = 2;
    size_t nchunks = 1;
    int opt;
    while ((opt = getopt(argc, argv, "f:n:k:d:c:i:p:t:h")) != -1) {
        switch (opt) {
        case 'f':
            filename = optarg;
            break;
        /* strtoul instead of atoi: atoi silently truncates values that do
         * not fit in int and offers no error reporting for size_t options. */
        case 'n':
            nchunks = strtoul(optarg, NULL, 10);
            break;
        case 'k':
            nclusters = strtoul(optarg, NULL, 10);
            break;
        case 'd':
            nattributes = strtoul(optarg, NULL, 10);
            break;
        case 'c':
            nobjects = strtoul(optarg, NULL, 10);
            break;
        case 'i':
            niterations = strtoul(optarg, NULL, 10);
            break;
        case 'p':
            fprintf(stderr, "error: kmeans++ is not supported\n");
            return 1;
        case 't':
            nthreads = strtoul(optarg, NULL, 10);
            break;
        case 'h':
        default:
            fprintf(stderr, "usage: %s [-f filename] [-k clusters] "
                "[-d dimensionality] [-c cardinality] [-i iterations] "
                "[-t threads]\n", argv[0]);
            return 1;
        }
    }
    /* %zu is the correct conversion for size_t; the previous %lu is
     * undefined behavior on platforms where size_t != unsigned long. */
    fprintf(stderr, "Determining %zu clusters on %zu rows with %zu attributes..\n"
            "Running %zu iterations with %zu threads..\n",
            nclusters, nobjects, nattributes, niterations, nthreads);
    value_t *attributes = alloc_rm2(nobjects, nattributes);
    // Simple NUMA optimization based on the first-touch policy.
    #pragma omp parallel for
    for (size_t i = 0; i < nobjects; ++i) {
        for (size_t j = 0; j < nattributes; ++j) {
            attributes[i * nattributes + j] = 0;
        }
    }
    if (filename) {
        fprintf(stderr, "Loading from file not implemented!\n");
        exit(-1);
    } else {
        // Deterministically initialize the attributes for validation.
        for (size_t i = 0; i < nobjects; i++) {
            for (size_t j = 0; j < nattributes; j++) {
                attributes[i * nattributes + j] = i % 1000;
            }
        }
    }
    kmeans(nclusters, attributes, nattributes, nobjects, filename == NULL,
           niterations, nthreads);
    free_rm2(attributes);
    return 0;
}
|
tree.h | #ifndef LIGHTGBM_TREE_H_
#define LIGHTGBM_TREE_H_
#include <LightGBM/meta.h>
#include <LightGBM/dataset.h>
#include <string>
#include <vector>
#include <memory>
namespace LightGBM {
#define kMaxTreeOutput (100)
/*!
* \brief Tree model
*/
class Tree {
public:
/*!
* \brief Constructor
* \param max_leaves The number of max leaves
*/
explicit Tree(int max_leaves);
/*!
* \brief Constructor, from a string
* \param str Model string
*/
explicit Tree(const std::string& str);
~Tree();
/*!
* \brief Performing a split on tree leaves.
* \param leaf Index of leaf to be split
* \param feature Index of feature; the converted index after removing useless features
* \param bin_type type of this feature, numerical or categorical
* \param threshold Threshold(bin) of split
* \param real_feature Index of feature, the original index on data
* \param threshold_double Threshold on feature value
* \param left_value Model Left child output
* \param right_value Model Right child output
* \param left_cnt Count of left child
* \param right_cnt Count of right child
* \param gain Split gain
* \return The index of new leaf.
*/
int Split(int leaf, int feature, BinType bin_type, uint32_t threshold, int real_feature,
double threshold_double, double left_value,
double right_value, data_size_t left_cnt, data_size_t right_cnt, double gain);
/*! \brief Get the output of one leaf */
inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; }
/*! \brief Set the output of one leaf */
inline void SetLeafOutput(int leaf, double output) {
leaf_value_[leaf] = output;
}
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param num_data Number of total data
* \param score Will add prediction to score
*/
void AddPredictionToScore(const Dataset* data,
data_size_t num_data,
double* score) const;
/*!
* \brief Adding prediction value of this tree model to scores
* \param data The dataset
* \param used_data_indices Indices of used data
* \param num_data Number of total data
* \param score Will add prediction to score
*/
void AddPredictionToScore(const Dataset* data,
const data_size_t* used_data_indices,
data_size_t num_data, double* score) const;
/*!
* \brief Prediction on one record
* \param feature_values Feature value of this record
* \return Prediction result
*/
inline double Predict(const double* feature_values) const;
/*! \brief Like Predict, but returns the index of the leaf a record falls into */
inline int PredictLeafIndex(const double* feature_values) const;
/*! \brief Get Number of leaves*/
inline int num_leaves() const { return num_leaves_; }
/*! \brief Get depth of specific leaf*/
inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; }
/*! \brief Get feature of specific split*/
inline int split_feature(int split_idx) const { return split_feature_[split_idx]; }
/*!
* \brief Shrinkage for the tree's output
* shrinkage rate (a.k.a learning rate) is used to tune the training process
* \param rate The factor of shrinkage
*/
inline void Shrinkage(double rate) {
// Scaled leaf outputs are clamped to [-kMaxTreeOutput, kMaxTreeOutput].
#pragma omp parallel for schedule(static, 512) if (num_leaves_ >= 1024)
for (int i = 0; i < num_leaves_; ++i) {
leaf_value_[i] *= rate;
if (leaf_value_[i] > kMaxTreeOutput) { leaf_value_[i] = kMaxTreeOutput; }
else if (leaf_value_[i] < -kMaxTreeOutput) { leaf_value_[i] = -kMaxTreeOutput; }
}
shrinkage_ *= rate;
}
/*! \brief Re-index split features through feature_mapper into mapped_feature_ */
inline void ReMapFeature(const std::vector<int>& feature_mapper) {
mapped_feature_ = split_feature_;
// A tree with num_leaves_ leaves has num_leaves_ - 1 internal split nodes.
for (int i = 0; i < num_leaves_ - 1; ++i) {
mapped_feature_[i] = feature_mapper[split_feature_[i]];
}
}
/*! \brief Serialize this object to string*/
std::string ToString();
/*! \brief Serialize this object to json*/
std::string ToJSON();
/*! \brief Categorical split rule: true iff fval equals threshold (integer compare) */
template<typename T>
static bool CategoricalDecision(T fval, T threshold) {
if (static_cast<int>(fval) == static_cast<int>(threshold)) {
return true;
} else {
return false;
}
}
/*! \brief Numerical split rule: true iff fval <= threshold (go left in GetLeaf) */
template<typename T>
static bool NumericalDecision(T fval, T threshold) {
if (fval <= threshold) {
return true;
} else {
return false;
}
}
/*! \brief Human-readable name of a decision type: 0 -> "no_greater", otherwise "is" */
static const char* GetDecisionTypeName(int8_t type) {
if (type == 0) {
return "no_greater";
} else {
return "is";
}
}
/*! \brief Decision-function tables indexed by decision_type_ (see decision_type_ below) */
static std::vector<bool(*)(uint32_t, uint32_t)> inner_decision_funs;
static std::vector<bool(*)(double, double)> decision_funs;
private:
/*!
* \brief Find leaf index of which record belongs by features
* \param feature_values Feature value of this record
* \return Leaf index
*/
inline int GetLeaf(const double* feature_values) const;
/*! \brief Serialize one node to json*/
inline std::string NodeToJSON(int index);
/*! \brief Number of max leaves*/
int max_leaves_;
/*! \brief Number of current leaves*/
int num_leaves_;
// following values used for non-leaf node
/*! \brief A non-leaf node's left child */
std::vector<int> left_child_;
/*! \brief A non-leaf node's right child */
std::vector<int> right_child_;
/*! \brief A non-leaf node's split feature */
std::vector<int> split_feature_inner;
/*! \brief A non-leaf node's split feature, the original index */
std::vector<int> split_feature_;
/*! \brief A non-leaf node's split threshold in bin */
std::vector<uint32_t> threshold_in_bin_;
/*! \brief A non-leaf node's split threshold in feature value */
std::vector<double> threshold_;
/*! \brief Decision type, 0 for '<='(numerical feature), 1 for 'is'(categorical feature) */
std::vector<int8_t> decision_type_;
/*! \brief A non-leaf node's split gain */
std::vector<double> split_gain_;
// used for leaf node
/*! \brief The parent of leaf */
std::vector<int> leaf_parent_;
/*! \brief Output of leaves */
std::vector<double> leaf_value_;
/*! \brief DataCount of leaves */
std::vector<data_size_t> leaf_count_;
/*! \brief Output of non-leaf nodes */
std::vector<double> internal_value_;
/*! \brief DataCount of non-leaf nodes */
std::vector<data_size_t> internal_count_;
/*! \brief Depth for leaves */
std::vector<int> leaf_depth_;
/*! \brief Accumulated shrinkage factor applied via Shrinkage() */
double shrinkage_;
/*! \brief Whether any split is categorical; selects the dispatch path in GetLeaf */
bool has_categorical_;
/*! \brief buffer of mapped split_feature_ */
std::vector<int> mapped_feature_;
};
// Predict one record: descend to its leaf and return that leaf's output.
// A tree with at most one leaf has no splits; its prediction is zero.
inline double Tree::Predict(const double* feature_values) const {
  if (num_leaves_ <= 1) {
    return 0.0f;
  }
  return LeafOutput(GetLeaf(feature_values));
}
// Return the index of the leaf this record falls into.
// Trees with at most one leaf have no internal nodes, so the answer is 0.
inline int Tree::PredictLeafIndex(const double* feature_values) const {
  return num_leaves_ > 1 ? GetLeaf(feature_values) : 0;
}
/*
 * Walk from the root to a leaf. Non-negative node indices are internal
 * nodes; child entries for leaves appear to be stored as bitwise
 * complements (negative values), hence the loop guard and the final
 * ~node. -- inferred from this code; confirm against Tree::Split.
 */
inline int Tree::GetLeaf(const double* feature_values) const {
int node = 0;
if (has_categorical_) {
// Mixed splits: dispatch per node through decision_funs, selected by
// decision_type_[node] (0 numerical '<=', 1 categorical 'is').
while (node >= 0) {
if (decision_funs[decision_type_[node]](
feature_values[mapped_feature_[node]],
threshold_[node])) {
node = left_child_[node];
} else {
node = right_child_[node];
}
}
} else {
// Fast path: every split is numerical, call NumericalDecision directly.
while (node >= 0) {
if (NumericalDecision<double>(
feature_values[mapped_feature_[node]],
threshold_[node])) {
node = left_child_[node];
} else {
node = right_child_[node];
}
}
}
return ~node;
}
} // namespace LightGBM
#endif // LightGBM_TREE_H_
|
axcrypt_fmt_plug.c | /* AxCrypt 1.x encrypted files cracker patch for JtR
* 2016 by Fist0urs <eddy.maaalou at gmail.com>.
*
* This software is Copyright (c) 2016, Fist0urs <eddy.maaalou at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_axcrypt;
#elif FMT_REGISTERS_H
john_register_one(&fmt_axcrypt);
#else
#include <string.h>
#include "stdint.h"
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "dyna_salt.h"
#include "sha.h"
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "axcrypt"
#define FORMAT_NAME "AxCrypt"
#define FORMAT_TAG "$axcrypt$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA1 AES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125 /* actual max is 250 */
#define BINARY_SIZE 0
#define SALT_SIZE sizeof(struct custom_salt *)
#define BINARY_ALIGN MEM_ALIGN_NONE
#define SALT_ALIGN sizeof(struct custom_salt *)
/* constant value recommended by FIPS */
#define AES_WRAPPING_IV "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6"
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define PUT_64BITS_XOR_MSB(cp, value) ( \
(cp)[0] ^= (unsigned char)((value)), \
(cp)[1] ^= (unsigned char)((value) >> 8), \
(cp)[2] ^= (unsigned char)((value) >> 16), \
(cp)[3] ^= (unsigned char)((value) >> 24 ) )
static struct fmt_tests axcrypt_tests[] = {
/*
formats can be:
$axcrypt$*version*iterations*salt*wrappedkey
$axcrypt$*version*iterations*salt*wrappedkey*key-file
*/
{"$axcrypt$*1*1337*0fd9e7e2f907f480f8af162564f8f94b*af10c88878ba4e2c89b12586f93b7802453121ee702bc362", "Bab00nmoNCo|\\|2$inge"},
{"$axcrypt$*1*60000*7522aa07694d441e47f8faad8a8cb984*95e02b7ccbdc27c227a80d1307505d8b769e87b32f312aa1", "nuNuche<3rewshauv"},
{"$axcrypt$*1*31014*3408ae91dddc0b1750ed4223fd843364*1cc0f8fa8d89f44d284d0562ac7e93848c86ce9605907129", "tr0pO$phere5apointzero"},
/* axcrypt created key-file */
{"$axcrypt$*1*38574*ce4f58c1e85df1ea921df6d6c05439b4*3278c3c730f7887b1008e852e59997e2196710a5c6bc1813*66664a6b2074434a4520374d73592055626979204a6b755520736d6b4b20394e694a205548444320524578562065674b33202f42593d", "0v3rgo2|<fc!"},
/* custom key-file */
{"$axcrypt$*1*130885*8eb4d745f7ac3f7505bcf14e8ce7e3b4*5221a6e8277e90b0b4f16f7871fca02986fca55c0dec5e59*22486520646f65736e2774206c696b652047656f726765204d69636861656c3a20426f6f6f6f6f6f220d0a0d0a49206665656c20736f20756e737572650d0a417320492074616b6520796f75722068616e6420616e64206c65616420796f7520746f207468652062616e6365666c6f6f720d0a417320746865206d75736963207374617274732c20736f6d657468696e6720696e20796f757220657965730d0a43616c6c7320746f206d696e642074686520676f6c64656e2073637265656e0d0a416e6420616c6c206974277320736169642069732068690d0a0d0a49276d206e6576657220676f6e6e612064616e636520616761696e0d0a4775696c74792066656574206861766520676f74206e6f2072687974686d0d0a54686f7567682069742773206561737920746f2070726574656e640d0a49206b6e6f7720796f277265206e6f74206120666f6f6c0d0a0d0a53686f756c64277665206b6e6f776e20626574746572207468616e20746f206368656174206120667269656e640d0a416e6420776173746520746865206368616e636520746861742049277665206265656e20676976656e0d0a536f2049276d206e6576657220676f6e6e612064616e636520616761696e0d0a5468652077617920492064616e636564207769746820796f750d0a0d0a54696d652063616e206e65766572206d656e640d0a54686520636172656c657373207768697370657273206f66206120676f6f6420667269656e640d0a546f2074686520686561727420616e64206d696e640d0a49676e6f72616e6365206973206b696e640d0a54686572652773206e6f20636f6d666f727420696e207468652074727574680d0a5061696e20697320616c6c20796f75276c6c2066696e640d0a0d0a49276d206e6576657220676f6e6e612064616e636520616761696e0d0a4775696c74792066656574206861766520676f74206e6f2072687974686d0d0a54686f7567682069742773206561737920746f2070726574656e640d0a49206b6e6f7720796f75277265206e6f74206120666f6f6c0d0a0d0a492073686f756c64277665206b6e6f776e20626574746572207468616e20746f206368656174206120667269656e640d0a416e6420776173746520746865206368616e636520746861742049277665206265656e20676976656e0d0a536f2049276d206e6576657220676f6e6e612064616e636520616761696e0d0a5468652077617920492064616e636564207769746820796f750d0a0d0a4e6576657220776974686f757420796f7572206c6f76650d0a0d0
a546f6e6967687420746865206d75736963207365656d7320736f206c6f75640d0a492077697368207468617420776520636f756c64206c6f736520746869732063726f77640d0a4d617962652069742773206265747465722074686973207761790d0a5765276420687572742065616368206f74686572207769746820746865207468696e677320776527642077616e7420746f207361790d0a0d0a576520636f756c642068617665206265656e20736f20676f6f6420746f6765746865720d0a576520636f756c642068617665206c6976656420746869732064616e636520666f72657665720d0a427574206e6f772077686f277320676f6e6e612064616e63652077697468206d650d0a506c6561736520737461790d0a0d0a416e642049276d206e6576657220676f6e6e612064616e636520616761696e0d0a4775696c74792066656574206861766520676f74206e6f2072687974686d0d0a54686f7567682069742773206561737920746f2070726574656e640d0a49206b6e6f7720796f75277265206e6f74206120666f6f6c0d0a0d0a53686f756c64277665206b6e6f776e20626574746572207468616e20746f206368656174206120667269656e640d0a416e6420776173746520746865206368616e636520746861742049277665206265656e20676976656e0d0a536f2049276d206e6576657220676f6e6e612064616e636520616761696e0d0a5468652077617920492064616e636564207769746820796f750d0a0d0a284e6f77207468617420796f7527726520676f6e6529204e6f77207468617420796f7527726520676f6e650d0a284e6f77207468617420796f7527726520676f6e65292057686174204920646964277320736f2077726f6e672c20736f2077726f6e670d0a5468617420796f752068616420746f206c65617665206d6520616c6f6e65", "careless whisper"},
{NULL}
};
/* Candidate passwords, one NUL-terminated slot per index (set by set_key). */
static char (*saved_key) [PLAINTEXT_LENGTH + 1];
/* any_cracked: "at least one hit this crypt_all call"; cracked[]: per-index hit flags. */
static int any_cracked, *cracked;
static size_t cracked_size;
/* Parsed ciphertext fields; the dyna_salt header comes first (see dyna_salt.h). */
static struct custom_salt {
dyna_salt dsalt;
int version;
uint32_t key_wrapping_rounds;
unsigned char salt[16];
unsigned char wrappedkey[24];
char* keyfile;
} *cur_salt;
/*
 * One-time format setup: scale the per-call key counts for OpenMP and
 * allocate the candidate-key and result buffers.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc(cracked_size, 1);
	any_cracked = 0;
}
/* Release the buffers allocated in init(). */
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
/*
 * Syntactic check of one candidate hash line:
 *   $axcrypt$*version*iterations*salt*wrappedkey[*key-file]
 * Returns 1 when the mandatory fields are well formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *copy, *base, *tok;
	int ok = 0;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;

	base = copy = strdup(ciphertext);
	copy += FORMAT_TAG_LEN;	/* skip over "$axcrypt$*" */

	/* version: decimal and non-zero */
	tok = strtokm(copy, "*");
	if (!tok || !isdec(tok) || !atoi(tok))
		goto out;
	/* iterations: decimal and non-zero */
	tok = strtokm(NULL, "*");
	if (!tok || !isdec(tok) || !atoi(tok))
		goto out;
	/* salt: 16 bytes as lowercase hex */
	tok = strtokm(NULL, "*");
	if (!tok || strlen(tok) != 32 || !ishexlc(tok))
		goto out;
	/* wrapped key: 24 bytes as lowercase hex */
	tok = strtokm(NULL, "*");
	if (!tok || strlen(tok) != 48 || !ishexlc(tok))
		goto out;
	/* an optional key-file field may follow; it is not validated here */
	ok = 1;
out:
	MEM_FREE(base);
	return ok;
}
/*
 * Parse a validated ciphertext line into a dyna_salt-backed custom_salt.
 * Returns a pointer to a static pointer slot, as JtR's dyna_salt convention
 * expects (set_salt() dereferences it).
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;
	static void *ptr;

	cs.keyfile = NULL;
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$axcrypt$*" */
	p = strtokm(ctcopy, "*");
	cs.version = atoi(p);
	p = strtokm(NULL, "*");
	cs.key_wrapping_rounds = (uint32_t) atoi(p);
	/* 16-byte salt, hex-decoded */
	p = strtokm(NULL, "*");
	for (i = 0; i < 16; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	/* 24-byte wrapped key, hex-decoded */
	p = strtokm(NULL, "*");
	for (i = 0; i < 24; i++)
		cs.wrappedkey[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	/* optional key-file blob, hex-encoded and possibly very large */
	if ((p = strtokm(NULL, "*")) != NULL) {
		/* hoist strlen() out of the loop: it was re-evaluated every
		   iteration, making the decode quadratic; size_t also avoids
		   the signed/unsigned comparison with int i */
		size_t j, len = strlen(p) / 2;
		cs.keyfile = (char*) mem_calloc_tiny(len + 1, sizeof(char));
		for (j = 0; j < len; j++)
			cs.keyfile[j] = atoi16[ARCH_INDEX(p[j * 2])] * 16
				+ atoi16[ARCH_INDEX(p[j * 2 + 1])];
	}
	MEM_FREE(keeptr);
	/* salt comparison covers salt..wrappedkey only (keyfile pointer excluded) */
	cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt, salt);
	cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt, salt, wrappedkey, 0);
	cs.dsalt.salt_alloc_needs_free = 0;
	ptr = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
	memcpy(ptr, &cs, sizeof(struct custom_salt));
	return (void *) &ptr;
}
/* Select the current salt; JtR hands back the pointer-to-pointer from get_salt(). */
static void set_salt(void *salt)
{
cur_salt = *(struct custom_salt **) salt;
}
/*
 * For each candidate password: derive the KEK as SHA1(password [+ keyfile])
 * XOR salt, then run the AxCrypt key-unwrap loop (an iterated variant of
 * AES key wrap -- presumably matching AxCrypt's scheme; confirm against the
 * AxCrypt sources). A candidate is a hit when the unwrapped MSB equals the
 * constant AES_WRAPPING_IV.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
/*
NUMBER_AES_BLOCKS = 2
AES_BLOCK_SIZE = 16
*/
unsigned char KEK[20], lsb[24], cipher[16];
AES_KEY akey;
SHA_CTX ctx;
int i, j, nb_iterations = cur_salt->key_wrapping_rounds;
SHA1_Init(&ctx);
SHA1_Update(&ctx, (unsigned char *) saved_key[index],
strlen(saved_key[index]));
/* if key-file provided */
if (cur_salt->keyfile != NULL)
SHA1_Update(&ctx, (unsigned char *) cur_salt->keyfile,
strlen(cur_salt->keyfile));
SHA1_Final( KEK, &ctx );
/* hash XOR salt => KEK (first 16 of the 20 SHA-1 bytes are XORed) */
for (i = 0; i < sizeof(cur_salt->salt); i++)
KEK[i] ^= cur_salt->salt[i];
memcpy(lsb, cur_salt->wrappedkey + 8, 16);
memset(&akey, 0, sizeof(AES_KEY));
AES_set_decrypt_key(KEK, 128, &akey);
/* set msb */
memcpy(cipher, cur_salt->wrappedkey, 8);
/* custom AES un-wrapping loop; note PUT_64BITS_XOR_MSB only XORs the
   low 32 bits of the counter into the first 4 bytes (see the macro),
   which suffices while 2 * rounds fits in 32 bits */
for (j = nb_iterations - 1; j >= 0; j--) {
/* 1st block treatment */
/* MSB XOR (NUMBER_AES_BLOCKS * j + i) */
PUT_64BITS_XOR_MSB(cipher, 2 * j + 2);
/* R[i] */
memcpy(cipher + 8, lsb + 8, 8);
/* AES_ECB(KEK, (MSB XOR (NUMBER_AES_BLOCKS * j + i)) | R[i]) */
AES_decrypt(cipher, cipher, &akey);
memcpy(lsb + 8, cipher + 8, 8);
/* 2nd block treatment */
PUT_64BITS_XOR_MSB(cipher, 2 * j + 1);
memcpy(cipher + 8, lsb, 8);
AES_decrypt(cipher, cipher, &akey);
memcpy(lsb, cipher + 8, 8);
}
if (!memcmp(cipher, AES_WRAPPING_IV, 8)) {
cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
return count;
}
/* crypt_all() records results in cracked[]/any_cracked; these just report them. */
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
/* BINARY_SIZE is 0, so there is no binary to compare; a cracked flag is exact. */
static int cmp_exact(char *source, int index)
{
return cracked[index];
}
static void axcrypt_set_key(char *key, int index)
{
int saved_len = strlen(key);
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored candidate password for this index. */
static char *get_key(int index)
{
return saved_key[index];
}
/* JtR format descriptor: static parameters first, then the method table. */
struct fmt_main fmt_axcrypt =
{
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT,
{ NULL },
{ FORMAT_TAG },
axcrypt_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
},
/* dyna_salt formats hash the salt blob itself */
fmt_default_dyna_salt_hash,
NULL,
set_salt,
axcrypt_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
GB_binop__bxnor_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bxnor_uint8
// A.*B function (eWiseMult): GB_AemultB__bxnor_uint8
// A*D function (colscale): GB_AxD__bxnor_uint8
// D*A function (rowscale): GB_DxB__bxnor_uint8
// C+=B function (dense accum): GB_Cdense_accumB__bxnor_uint8
// C+=b function (dense accum): GB_Cdense_accumb__bxnor_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxnor_uint8
// C=scalar+B GB_bind1st__bxnor_uint8
// C=scalar+B' GB_bind1st_tran__bxnor_uint8
// C=A+scalar GB_bind2nd__bxnor_uint8
// C=A'+scalar GB_bind2nd_tran__bxnor_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = ~((aij) ^ (bij))
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ~((x) ^ (y)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_UINT8 || GxB_NO_BXNOR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Thin wrapper: the loop body lives in the included template, specialized by
// the GB_* macros above for z = ~(x ^ y) on uint8_t. Generated code -- do not
// hand-edit.
GrB_Info GB_Cdense_ewise3_noaccum__bxnor_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bxnor_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bxnor_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable duplicate return -- harmless artifact of the
// code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__bxnor_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__bxnor_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Frees any ek_slice workspaces on exit (free of NULL slices is expected to
// be safe; the template may allocate them).
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__bxnor_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// NULL-initialized slice workspaces; released via GB_FREE_ALL below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bxnor_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bxnor_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the (bitmap) structure of B
if (!GBB (Bb, p)) continue ;
uint8_t bij = Bx [p] ;
Cx [p] = ~((x) ^ (bij)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bxnor_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the (bitmap) structure of A
if (!GBB (Ab, p)) continue ;
uint8_t aij = Ax [p] ;
Cx [p] = ~((aij) ^ (y)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below:
// cij = op (x, aij) = bitwise XNOR of the bound scalar x with aij.
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = Ax [pA] ; \
    Cx [pC] = ~((x) ^ (aij)) ; \
}

// C = op (x, A'): transpose A and apply bxnor with x bound as the 1st operand.
GrB_Info GB_bind1st_tran__bxnor_uint8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this kernel
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below:
// cij = op (aij, y) = bitwise XNOR of aij with the bound scalar y.
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = Ax [pA] ; \
    Cx [pC] = ~((aij) ^ (y)) ; \
}

// C = op (A', y): transpose A and apply bxnor with y bound as the 2nd operand.
GrB_Info GB_bind2nd_tran__bxnor_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
openmp-ex05.c | #include <stdio.h>
#include <unistd.h>
#include <omp.h>
int main(void)
{
    int outer_rank, outer_size;

    /* Outside any parallel region exactly one thread exists, so this
     * reports thread 0 of 1. */
    outer_size = omp_get_num_threads();
    outer_rank = omp_get_thread_num();
    printf ("\"You're all individuals!\" said %d of %d.\n", outer_rank, outer_size);

    #pragma omp parallel
    {
        /* Variables declared outside a parallel region are shared by
         * default, so concurrent writes to them race.  Variables declared
         * inside the region, like these two, are private: each thread gets
         * its own copy. */
        int rank, size;

        size = omp_get_num_threads();
        rank = omp_get_thread_num();
        sleep(1);
        printf("\"Yes, we're all individuals!\" replied %d of %d, sleepily.\n", rank, size);
    }

    /* Back to a single thread after the parallel region ends. */
    outer_size = omp_get_num_threads();
    outer_rank = omp_get_thread_num();
    printf ("\"I'm not,\" said %d of %d.\n", outer_rank, outer_size);
    return 0;
}
|
GB_binop__isge_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_int8)
// A*D function (colscale): GB (_AxD__isge_int8)
// D*A function (rowscale): GB (_DxB__isge_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_int8)
// C=scalar+B GB (_bind1st__isge_int8)
// C=scalar+B' GB (_bind1st_tran__isge_int8)
// C=A+scalar GB (_bind2nd__isge_int8)
// C=A'+scalar GB (_bind2nd_tran__isge_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
// Configuration macros read by the shared template files included from the
// kernels below; together they instantiate the ISGE operator on int8_t.
#define GB_ATYPE \
    int8_t
#define GB_BTYPE \
    int8_t
#define GB_CTYPE \
    int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// NOTE(review): the trailing backslash after the "0" below splices the next
// source line (a comment) into this macro definition; comments are removed
// after line splicing, so the macro still expands to plain 0.  Benign, but
// the "// bij = Bx [pB]" comment textually belongs to this #define.
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// NOTE(review): same stray line-continuation as GB_A_IS_PATTERN above.
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGE || GxB_NO_INT8 || GxB_NO_ISGE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Dead code: this dense eWise3 accumulation kernel is only generated when the
// operator is one of MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGE does not qualify, so the whole definition is compiled out.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the loop body lives in the shared
// template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__isge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C via the subassign-23 template.
// Returns GrB_NO_VALUE when this kernel was compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__isge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (passed via p_bwork) into dense C using
// the subassign-22 template.  Returns GrB_NO_VALUE when this kernel was
// compiled out (GB_DISABLE).
// Cleanup: the generated code had a second, unreachable
// "return (GrB_SUCCESS)" after the block below (which always returns);
// that dead statement has been removed.
GrB_Info GB (_Cdense_accumb__isge_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__isge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as A; only its values are computed here
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__isge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as B; only its values are computed here
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (optionally masked by M or !M), set-union pattern.
GrB_Info GB (_AaddB__isge_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B; released by GB_FREE_WORKSPACE
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes alpha/beta for entries missing in A or B
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__isge_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  The flipxy handling below is resolved at compile time via
// GB_BINOP_FLIP (0 for ISGE, so only the non-flipped branch is compiled).
GrB_Info GB (_AemultB_02__isge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and A, B bitmap/full.
GrB_Info GB (_AemultB_04__isge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__isge_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isge_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = (x >= Bx [k]) for each entry present in B's bitmap.
    int8_t *result = (int8_t *) Cx_output ;
    const int8_t scalar = (*((int8_t *) x_input)) ;
    int8_t *Bvals = (int8_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, k)) continue ;
        int8_t bval = GBX (Bvals, k, false) ;
        result [k] = (scalar >= bval) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isge_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = (Ax [k] >= y) for each entry present in A's bitmap.
    int8_t *result = (int8_t *) Cx_output ;
    int8_t *Avals = (int8_t *) Ax_input ;
    const int8_t scalar = (*((int8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, k)) continue ;
        int8_t aval = GBX (Avals, k, false) ;
        result [k] = (aval >= scalar) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below:
// cij = op (x, aij) = (x >= aij), with x bound as the 1st operand.
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x >= aij) ; \
}

// C = op (x, A'): transpose A and apply ISGE with the bound scalar x.
GrB_Info GB (_bind1st_tran__isge_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this kernel
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
// GB_CAST_OP is consumed by the GB_unop_transpose.c template included below:
// cij = op (aij, y) = (aij >= y), with y bound as the 2nd operand.
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij >= y) ; \
}

// C = op (A', y): transpose A and apply ISGE with the bound scalar y.
GrB_Info GB (_bind2nd_tran__isge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
paint.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP AAA IIIII N N TTTTT %
% P P A A I NN N T %
% PPPP AAAAA I N N N T %
% P A A I N NN T %
% P A A IIIII N N T %
% %
% %
% Methods to Paint on an Image %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o o d f i l l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FloodfillPaintImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. The fuzz member of
% image defines how much tolerance is acceptable to consider two colors as
% the same. For example, set fuzz to 10 and the color red at intensities of
% 100 and 102 respectively are now interpreted as the same color for the
% purposes of the floodfill.
%
% The format of the FloodfillPaintImage method is:
%
% MagickBooleanType FloodfillPaintImage(Image *image,
% const DrawInfo *draw_info,const PixelInfo target,
% const ssize_t x_offset,const ssize_t y_offset,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x_offset,y_offset: the starting location of the operation.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset,
  const ssize_t y_offset,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
/*
  Scanline flood fill.  A stack of horizontal segments seeds the search from
  (x_offset,y_offset); matching pixels are marked in a separate one-channel
  "floodplane" scratch image, which is then used in a second pass as a mask
  to tile the fill color onto the target image.  When 'invert' is
  MagickTrue, pixels that do NOT match 'target' are filled instead.
*/
#define MaxStacksize 524288UL
/*
  Push segment [left,right] on row (up), to be examined next at row
  (up)+(delta); segments whose destination row falls outside the image are
  silently dropped.  Overflowing the fixed-size stack raises DrawError.
*/
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  Image
    *floodplane_image;

  MagickBooleanType
    skip,
    status;

  MemoryInfo
    *segment_info;

  PixelInfo
    fill_color,
    pixel;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /* add an alpha channel if the fill color has one but the image does not */
  if ((image->alpha_trait == UndefinedPixelTrait) &&
      (draw_info->fill.alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(image,OpaqueAlpha,exception);
  /*
    Set floodfill state.
  */
  /* The floodplane is a same-sized grayscale scratch image: 0 = unvisited,
     QuantumRange = marked for filling. */
  floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  floodplane_image->alpha_trait=UndefinedPixelTrait;
  floodplane_image->colorspace=GRAYColorspace;
  (void) QueryColorCompliance("#000",AllCompliance,
    &floodplane_image->background_color,exception);
  (void) SetImageBackgroundColor(floodplane_image,exception);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack.
  */
  status=MagickTrue;
  start=0;
  s=segment_stack;
  PushSegmentStack(y_offset,x_offset,x_offset,1);
  PushSegmentStack(y_offset+1,x_offset,x_offset,-1);
  GetPixelInfo(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  while (s > segment_stack)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels.
    */
    /* Scan left from x1 marking matching pixels in the floodplane. */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    p+=x1*GetPixelChannels(image);
    q+=x1*GetPixelChannels(floodplane_image);
    for (x=x1; x >= 0; x--)
    {
      /* stop at a pixel already visited or one that fails the fuzzy match */
      if (GetPixelGray(floodplane_image,q) != 0)
        break;
      GetPixelInfoPixel(image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
        break;
      SetPixelGray(floodplane_image,QuantumRange,q);
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(floodplane_image);
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    /* skip is set when even the starting pixel x1 did not match */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          /* Scan right, marking matching pixels until a non-match. */
          if (x < (ssize_t) image->columns)
            {
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns-
                x,1,exception);
              if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
                break;
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelGray(floodplane_image,q) != 0)
                  break;
                GetPixelInfoPixel(image,p,&pixel);
                if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
                  break;
                SetPixelGray(floodplane_image,QuantumRange,q);
                p+=GetPixelChannels(image);
                q+=GetPixelChannels(floodplane_image);
              }
              status=SyncCacheViewAuthenticPixels(floodplane_view,exception);
              if (status == MagickFalse)
                break;
            }
          /* Propagate the completed run to the rows above and below. */
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /* Advance past non-matching pixels to find the next run. */
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
            exception);
          if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (GetPixelGray(floodplane_image,q) != 0)
              break;
            GetPixelInfoPixel(image,p,&pixel);
            if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
              break;
            p+=GetPixelChannels(image);
            q+=GetPixelChannels(floodplane_image);
          }
        }
      start=x;
    } while (x <= x2);
  }
  /* Second pass: tile the fill color wherever the floodplane is marked. */
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Tile fill color onto floodplane.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelGray(floodplane_image,p) != 0)
        {
          GetFillColor(draw_info,x,y,&fill_color,exception);
          SetPixelViaPixelInfo(image,&fill_color,q);
        }
      p+=GetPixelChannels(floodplane_image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GradientImage() applies a continuously smooth color transitions along a
% vector from one color to another.
%
% Note, the interface of this method will change in the future to support
% more than one transistion.
%
% The format of the GradientImage method is:
%
% MagickBooleanType GradientImage(Image *image,const GradientType type,
% const SpreadMethod method,const PixelInfo *start_color,
% const PixelInfo *stop_color,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the gradient type: linear or radial.
%
% o spread: the gradient spread meathod: pad, reflect, or repeat.
%
% o start_color: the start color.
%
% o stop_color: the stop color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,const StopInfo *stops,
  const size_t number_stops,ExceptionInfo *exception)
{
  /*
    Configure a gradient from image artifacts (gradient:direction,
    gradient:angle, gradient:vector, gradient:center, gradient:extent,
    gradient:radii) and render it via DrawGradientImage().
    Bug fix: the SouthGravity (top-to-bottom) vector previously set y2 from
    image->columns; it must use image->rows, matching NorthGravity.
    Also fixed: draw_info leaked when the stops allocation failed.
  */
  const char
    *artifact;

  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(stops != (const StopInfo *) NULL);
  assert(number_stops > 0);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  artifact=GetImageArtifact(image,"gradient:bounding-box");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
  /* default vector: top-left corner to bottom-right corner */
  gradient->gradient_vector.x2=(double) image->columns-1;
  gradient->gradient_vector.y2=(double) image->rows-1;
  artifact=GetImageArtifact(image,"gradient:direction");
  if (artifact != (const char *) NULL)
    {
      GravityType
        direction;

      /* gravity names describe the direction the gradient flows toward */
      direction=(GravityType) ParseCommandOption(MagickGravityOptions,
        MagickFalse,artifact);
      switch (direction)
      {
        case NorthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case WestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case EastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case SouthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          /* fix: vertical extent must come from rows, not columns */
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        default:
          break;
      }
    }
  artifact=GetImageArtifact(image,"gradient:angle");
  if (artifact != (const char *) NULL)
    gradient->angle=StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"gradient:vector");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
      &gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
      &gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
  /* with no explicit artifacts, a linear gradient defaults to vertical */
  if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) (image->columns-1.0)*cosine)+
        fabs((double) (image->rows-1.0)*sine);
      gradient->gradient_vector.x1=0.5*((image->columns-1.0)-distance*cosine);
      gradient->gradient_vector.y1=0.5*((image->rows-1.0)-distance*sine);
      gradient->gradient_vector.x2=0.5*((image->columns-1.0)+distance*cosine);
      gradient->gradient_vector.y2=0.5*((image->rows-1.0)+distance*sine);
    }
  /* default radial extent: Maximum (largest image dimension) */
  gradient->radii.x=(double) MagickMax((image->columns-1.0),(image->rows-1.0))/
    2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1.0),
            (image->rows-1.0))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt((image->columns-1.0)*
            (image->columns-1.0)+(image->rows-1.0)*(image->rows-1.0)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) (image->columns-1.0)/2.0;
          gradient->radii.y=(double) (image->rows-1.0)/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1.0),
            (image->rows-1.0))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) (MagickMin((image->columns-1.0),
            (image->rows-1.0)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=number_stops;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    {
      /* fix: release draw_info before throwing to avoid a leak */
      draw_info=DestroyDrawInfo(draw_info);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) CopyMagickMemory(gradient->stops,stops,(size_t) number_stops*
    sizeof(*stops));
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O i l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OilPaintImage() applies a special effect filter that simulates an oil
% painting. Each pixel is replaced by the most frequent color occurring
% in a circular region defined by radius.
%
% The format of the OilPaintImage method is:
%
% Image *OilPaintImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the circular neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
register ssize_t
i;
assert(histogram != (size_t **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (histogram[i] != (size_t *) NULL)
histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]);
histogram=(size_t **) RelinquishMagickMemory(histogram);
return(histogram);
}
static size_t **AcquireHistogramThreadSet(const size_t count)
{
register ssize_t
i;
size_t
**histogram,
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram));
if (histogram == (size_t **) NULL)
return((size_t **) NULL);
(void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram));
for (i=0; i < (ssize_t) number_threads; i++)
{
histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram));
if (histogram[i] == (size_t *) NULL)
return(DestroyHistogramThreadSet(histogram));
}
return(histogram);
}
/*
  OilPaintImage() replaces each pixel by the most frequent intensity found
  in the circular neighborhood defined by radius/sigma, simulating an oil
  painting.  Returns a new image, or NULL on failure (error details in
  'exception').  The caller owns the returned image.

  Fix: on partial clone failure the result of DestroyImage(paint_image) was
  assigned to linear_image instead of paint_image, leaving paint_image a
  dangling non-NULL pointer variable.
*/
MagickExport Image *OilPaintImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define NumberPaintBins  256
#define OilPaintImageTag  "OilPaint/Image"

  CacheView
    *image_view,
    *paint_view;

  Image
    *linear_image,
    *paint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    **histograms,
    width;

  ssize_t
    center,
    y;

  /*
    Initialize painted image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  /* paint_image only needs the geometry; every pixel is overwritten below */
  paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (paint_image != (Image *) NULL)
        paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  histograms=AcquireHistogramThreadSet(NumberPaintBins);
  if (histograms == (size_t **) NULL)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Oil paint image.
  */
  status=MagickTrue;
  progress=0;
  /*
    Offset (in Quantum units) of the neighborhood's center pixel inside the
    (columns+width) x width virtual window fetched per row.
  */
  center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)*
    (width/2L)+GetPixelChannels(linear_image)*(width/2L);
  image_view=AcquireVirtualCacheView(linear_image,exception);
  paint_view=AcquireAuthenticCacheView(paint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(linear_image,paint_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register size_t
      *histogram;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),linear_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* each OpenMP thread owns a private histogram */
    histogram=histograms[GetOpenMPThreadId()];
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i,
        u;

      size_t
        count;

      ssize_t
        j,
        k,
        n,
        v;

      /*
        Assign most frequent color: j tracks the pixel offset (in pixels,
        relative to p) of the winning intensity bin.
      */
      k=0;
      j=0;
      count=0;
      (void) ResetMagickMemory(histogram,0,NumberPaintBins*sizeof(*histogram));
      for (v=0; v < (ssize_t) width; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(
            linear_image,p+GetPixelChannels(linear_image)*(u+k))));
          histogram[n]++;
          if (histogram[n] > count)
            {
              j=k+u;
              count=histogram[n];
            }
        }
        k+=(ssize_t) (linear_image->columns+width);
      }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(linear_image,i);
        PixelTrait traits = GetPixelChannelTraits(linear_image,channel);
        PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (paint_traits == UndefinedPixelTrait))
          continue;
        /* copy-only channels and masked pixels keep their source value */
        if (((paint_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(linear_image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(paint_image,channel,p[center+i],q);
            continue;
          }
        SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+
          i],q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(paint_image);
    }
    if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (linear_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* serialize progress updates across threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OilPaintImage)
#endif
        proceed=SetImageProgress(linear_image,OilPaintImageTag,progress++,
          linear_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  paint_view=DestroyCacheView(paint_view);
  image_view=DestroyCacheView(image_view);
  histograms=DestroyHistogramThreadSet(histograms);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    paint_image=DestroyImage(paint_image);
  return(paint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaquePaintImage() changes any pixel that matches color with the color
% defined by fill argument.
%
% By default color must match a particular pixel color exactly. However, in
% many cases two colors may differ by a small amount. Fuzz defines how much
% tolerance is acceptable to consider two colors as the same. For example,
% set fuzz to 10 and the color red at intensities of 100 and 102 respectively
% are now interpreted as the same color.
%
% The format of the OpaquePaintImage method is:
%
% MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target,
% const PixelInfo *fill,const MagickBooleanType invert,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  OpaquePaintImage(): replace every pixel that fuzzily matches 'target'
  (or, when 'invert' is true, every pixel that does NOT match) with the
  'fill' color.  Modifies 'image' in place; returns MagickTrue on success.
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert,
ExceptionInfo *exception)
{
#define OpaquePaintImageTag "Opaque/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
conform_fill,
conform_target,
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(target != (PixelInfo *) NULL);
assert(fill != (PixelInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/* adapt fill/target to this image's colorspace and channel layout */
ConformPixelInfo(image,fill,&conform_fill,exception);
ConformPixelInfo(image,target,&conform_target,exception);
/*
Make image color opaque.
*/
status=MagickTrue;
progress=0;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
/* honor the write mask: masked pixels are skipped untouched */
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(image);
continue;
}
GetPixelInfoPixel(image,q,&pixel);
/* != invert implements both the normal and the inverted match */
if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert)
{
PixelTrait
traits;
/* only overwrite channels flagged as updatable */
traits=GetPixelChannelTraits(image,RedPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelRed(image,conform_fill.red,q);
traits=GetPixelChannelTraits(image,GreenPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelGreen(image,conform_fill.green,q);
traits=GetPixelChannelTraits(image,BluePixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelBlue(image,conform_fill.blue,q);
traits=GetPixelChannelTraits(image,BlackPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelBlack(image,conform_fill.black,q);
traits=GetPixelChannelTraits(image,AlphaPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelAlpha(image,conform_fill.alpha,q);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* serialize progress updates across OpenMP threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_OpaquePaintImage)
#endif
proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However, in
% many cases two colors may differ by a small amount. Fuzz defines how much
% tolerance is acceptable to consider two colors as the same. For example,
% set fuzz to 10 and the color red at intensities of 100 and 102 respectively
% are now interpreted as the same color.
%
% The format of the TransparentPaintImage method is:
%
% MagickBooleanType TransparentPaintImage(Image *image,
% const PixelInfo *target,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  TransparentPaintImage(): set the alpha of every pixel that fuzzily
  matches 'target' (or does not match, when 'invert') to 'opacity'.
  An alpha channel is enabled first if the image lacks one.  Modifies
  'image' in place; returns MagickTrue on success.
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(target != (PixelInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/* ensure an alpha channel exists before writing opacities */
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
/*
Make image color transparent.
*/
status=MagickTrue;
progress=0;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
/* honor the write mask: masked pixels are skipped untouched */
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(image);
continue;
}
GetPixelInfoPixel(image,q,&pixel);
/* != invert implements both the normal and the inverted match */
if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
SetPixelAlpha(image,opacity,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* serialize progress updates across OpenMP threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransparentPaintImage)
#endif
proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e C h r o m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImageChroma() changes the opacity value associated with any
% pixel that matches color to the value defined by opacity.
%
% As there is one fuzz value for the all the channels, TransparentPaintImage()
% is not suitable for the operations like chroma, where the tolerance for
% similarity of two color component (RGB) can be different. Thus we define
% this method to take two target pixels (one low and one high) and all the
% pixels of an image which are lying between these two pixels are made
% transparent.
%
% The format of the TransparentPaintImageChroma method is:
%
% MagickBooleanType TransparentPaintImageChroma(Image *image,
% const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low: the low target color.
%
% o high: the high target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  TransparentPaintImageChroma(): set the alpha of every pixel whose RGB
  components all fall inside the [low, high] box (or outside it, when
  'invert') to 'opacity'.  Unlike TransparentPaintImage() this gives each
  component its own tolerance, which suits chroma-key work.  Modifies
  'image' in place; returns MagickTrue on success.
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
const MagickBooleanType invert,ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(high != (PixelInfo *) NULL);
assert(low != (PixelInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/* ensure an alpha channel exists before writing opacities */
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
/*
Make image color transparent.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
match;
PixelInfo
pixel;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* honor the write mask: masked pixels are skipped untouched */
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(image);
continue;
}
GetPixelInfoPixel(image,q,&pixel);
/* pixel matches when each of R, G, B lies within its own range */
match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
(pixel.green >= low->green) && (pixel.green <= high->green) &&
(pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
MagickFalse;
if (match != invert)
SetPixelAlpha(image,opacity,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* serialize progress updates across OpenMP threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
user_basis_core.h | #ifndef _user_basis_core_H
#define _user_basis_core_H
#include <complex>
#include <vector>
#include <stdio.h>
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "benes_perm.h"
#include "openmp.h"
namespace basis_general {
// Accumulator threaded through the user-supplied operator callback:
// 'm' holds the running (complex) matrix element, 'r' the basis state
// it currently maps to.
template<class I>
struct op_results
{
std::complex<double> m;
I r;
op_results(std::complex<double> _m,I _r): m(_m),r(_r)
{}
};
// Basis core whose symmetry maps, particle counting, next-state
// enumeration and operator action are all supplied by the user as raw
// function pointers (the addresses arrive as size_t/void* and are cast
// to typed callbacks in the constructor).
template<class I,class P=signed char>
class user_basis_core : public general_basis_core<I,P>
{
// signatures of the user-supplied callbacks
typedef I (*map_type)(I,int,P*,I*);
typedef I (*next_state_type)(I,I,I,I*);
typedef int (*op_func_type)(op_results<I>*,char,int,int,I*);
typedef void (*count_particles_type)(I,int*,I*);
typedef bool (*check_state_nosymm_type)(I,I,I*);
public:
map_type * map_funcs; // one symmetry-map callback per transformation
next_state_type next_state_func;
op_func_type op_func;
count_particles_type count_particles_func;
check_state_nosymm_type pre_check_state; // optional fast-reject test; may be null
const int n_sectors,sps; // number of particle sectors; states per site
I *ns_args,*precs_args,*op_args,*count_particles_args; // opaque per-callback argument blocks
I **maps_args;
std::vector<I> M; // M[i] = sps^i: positional place values for site extraction
// Constructor: casts the integer-encoded callback addresses to their
// typed signatures and precomputes the place-value table M.
user_basis_core(const int _N,const int _sps,const int _nt,
void * _map_funcs, const int _pers[], const int _qs[], I** _maps_args,
const int _n_sectors,size_t _next_state,I *_ns_args,size_t _pre_check_state,
I* _precs_args, const bool pre_check_state_parallel,
size_t _count_particles,I *_count_particles_args,size_t _op_func,I *_op_args) : \
general_basis_core<I,P>::general_basis_core(_N,_nt,NULL,_pers,_qs,true,pre_check_state_parallel), n_sectors(_n_sectors), sps(_sps)
{
map_funcs = (map_type*)_map_funcs;
maps_args = _maps_args;
next_state_func = (next_state_type)_next_state;
count_particles_func = (count_particles_type)_count_particles;
op_func = (op_func_type)_op_func;
op_args = _op_args;
ns_args = _ns_args;
pre_check_state = (check_state_nosymm_type)_pre_check_state;
precs_args = _precs_args;
count_particles_args = _count_particles_args;
// M[0] = 1, M[i] = M[i-1]*sps for i = 1..N
M.push_back((I)1);
for(int i=1;i<_N+1;i++){
M.push_back(M[i-1] * (I)_sps);
}
}
~user_basis_core() {}
// Integer label formed by the first N_p sites of state s (binary shift
// when sps == 2, division by the place value otherwise).
npy_intp get_prefix(const I s,const int N_p){
if(sps>2){
return integer_cast<npy_intp,I>(s / M[general_basis_core<I,P>::N - N_p]);
}
else{
return integer_cast<npy_intp,I>(s >> (general_basis_core<I,P>::N - N_p));
}
}
// Apply symmetry map n_map to state s, accumulating the map's phase
// into 'phase'.  No-op when the basis has no transformations.
I map_state(I s,int n_map,P &phase){
if(general_basis_core<I,P>::nt<=0){
return s;
}
P temp_phase = 1;
s = (*map_funcs[n_map])(s, general_basis_core<I,P>::N, &temp_phase, maps_args[n_map]);
phase *= temp_phase;
return s;
}
// Bulk variant over M states; meant to run inside an OpenMP parallel
// region (worksharing 'omp for' only, no 'parallel' here).
// NOTE(review): parameter M shadows the member place-value table M.
void map_state(I s[],npy_intp M,int n_map,P phase[]){
if(general_basis_core<I,P>::nt<=0){
return;
}
map_type func = map_funcs[n_map];
I * args = maps_args[n_map];
#pragma omp for schedule(static)
for(npy_intp i=0;i<M;i++){
P temp_phase = 1;
s[i] = (*func)(s[i], general_basis_core<I,P>::N, &temp_phase, args);
phase[i] *= temp_phase;
}
}
// Per-sector particle counts of state s, via the user callback.
std::vector<int> count_particles(const I s){
std::vector<int> v(n_sectors);
(*count_particles_func)(s,&v[0],count_particles_args);
return v;
}
// Next particle-conserving state after s, via the user callback.
I inline next_state_pcon(const I s,const I nns){
return (*next_state_func)(s,nns,(I)general_basis_core<I,P>::N, ns_args);
}
// Symmetry-check the state; returns NaN when the optional user
// pre-check rejects it (caller treats NaN as "not a representative").
double check_state(I s){
bool ns_check=true;
if(pre_check_state){
ns_check = (*pre_check_state)(s,(I)general_basis_core<I,P>::N, precs_args);
}
if(ns_check){
return check_state_core_unrolled<I>(this,s,general_basis_core<I,P>::nt);
}
else{
return std::numeric_limits<double>::quiet_NaN();
}
}
// Apply the operator string (right-to-left) to state r, accumulating
// the matrix element into m.  A zero matrix element short-circuits and
// restores the original state; nonzero return = user-reported error.
int op(I &r,std::complex<double> &m,const int n_op,const char opstr[],const int indx[]){
I s = r;
op_results<I> res(m,r);
for(int j=n_op-1;j>=0;j--){
int err = (*op_func)(&res,opstr[j],indx[j],general_basis_core<I,P>::N,op_args);
if(err!=0){
return err;
}
if(res.m.real()==0 && res.m.imag()==0){
res.r = s;
break;
}
}
m = res.m; r = res.r;
return 0;
}
};
}
#endif
|
new2.c | #include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <string.h>
#include <math.h>
#include <time.h>
// #include <mpi.h>
#include <omp.h>
#include "constants.h"
#include "functions.h"
// void checkNumProcs(int size, int n);
/*
 * Brute-force key search: for each candidate key, decode the input file,
 * split it into words and look for dictionary words; a hit means the key
 * was found.
 *
 * Fixes: 'cond' and 'cmpRes' were shared across the OpenMP threads and
 * written concurrently (a data race / UB) -- 'cond' is now combined with
 * a bitwise-or reduction and the comparison result is thread-local;
 * 'keyString' leaked on every loop iteration -- it is now freed before
 * each new candidate key; argv[1]/argv[2] were dereferenced without an
 * argc check; unused locals (c, size, rank and the dead MPI scaffolding)
 * removed.
 */
int main(int argc, char **argv)
{
    FILE *input = stdin, *output = stdout, *knowenWordsFile;
    unsigned int keyInt = MIN_VALUE;
    char *keyString = NULL; /* freed at the top of each iteration; survivor freed by clean() */
    int numBytesInKey;
    char *inputfileText, *decodedText, **knowenWords, **decodedSplitArray;
    int knowenWordsCounter, decodedWordsCounter, givenLen, maxNum;
    int i, j;
    int cond = 0;
    time_t start, end;

    start = time(NULL);
    /* argv[1] = key-length spec, argv[2] = crypted file, argv[3] = optional word list */
    if (argc < 3)
    {
        fprintf(stderr, "Usage: %s <key-spec> <crypted-file> [words-file]\n", argv[0]);
        return 1;
    }
    /* open crypted file */
    maxNum = determineMaxNum(argv[1], &givenLen);
    input = fopen(argv[2], "r");
    if (!input)
    {
        fprintf(stderr, "Error opening words file\n");
        return 0;
    }
    /* open words file (default dictionary when none given) */
    if (argc > 3)
        knowenWordsFile = fopen(argv[3], "r");
    else
        knowenWordsFile = fopen("linux_words.txt", "r");
    if (!knowenWordsFile)
    {
        fprintf(stderr, "Error opening file words\n");
        fclose(input);
        return 0;
    }
    /* first token of the dictionary file is its word count */
    fscanf(knowenWordsFile, "%d", &knowenWordsCounter);
    /* read and split the dictionary into one string per word */
    inputfileText = inputString(knowenWordsFile, ALLOCATION_SIZE);
    knowenWords = splitStringByDelimiter(ALLOCATION_SIZE, inputfileText, "\n", &decodedWordsCounter);
    /* same conversion the original comparison performed implicitly */
    while (keyInt <= (unsigned int) maxNum)
    {
        free(keyString); /* release the previous candidate key (no-op on first pass) */
        keyString = createKey(keyInt, 2 * givenLen);
        numBytesInKey = processKey(keyString);
        /* decode the text with the candidate key */
        decodedText = encodeToString(numBytesInKey, input);
        /* split the decoded text into words on spaces.
           NOTE(review): the strdup() buffer is presumably retained inside
           decodedSplitArray's elements -- confirm splitStringByDelimiter's
           ownership contract before freeing it here. */
        decodedSplitArray = splitStringByDelimiter(ALLOCATION_SIZE, strdup(decodedText), " ", &decodedWordsCounter);
        /* match decoded words against the dictionary; 'cond' is combined
           with a bitwise-or reduction so parallel writes cannot race */
        #pragma omp parallel for collapse(2) private(j) reduction(|:cond) num_threads(8)
        for (i = 0; i < decodedWordsCounter; i++)
        {
            for (j = 0; j < knowenWordsCounter; j++)
            {
                /* skip very short dictionary words: too many false positives */
                if (strlen(knowenWords[j]) > 2 &&
                    strcmp(decodedSplitArray[i], knowenWords[j]) == 0)
                    cond |= 1; /* dictionary hit: this key decodes real words */
            }
        }
        if (cond)
            break; /* keep keyString/decodedText for the success report */
        /* free current iteration */
        free(decodedSplitArray);
        free(decodedText);
        /* rewind the crypted file for the next candidate key */
        fseek(input, 0, SEEK_SET);
        /* jump the gap between the 2-byte and 4-byte key spaces */
        if (keyInt == 0xFFFF)
            keyInt = 0x01000000;
        else
            keyInt++;
    }
    if (cond)
    {
        fprintf(stderr, "\nSuccsess!\nKey is: 0x%s\nDecoded text is:\n%s\n\n", keyString, decodedText);
        free(decodedSplitArray);
        free(decodedText);
    }
    else
    {
        fprintf(stderr, "\nFailure! No valid key was found\n");
    }
    /* clean all (frees knowenWords/keyString/inputfileText, closes files) */
    clean(knowenWords, keyString, inputfileText, input, output, knowenWordsFile);
    end = time(NULL);
    fprintf(stderr, "Time taken to calculate the key is %.2f seconds\n", difftime(end, start));
    return 0;
} // * main
// void checkNumProcs(int size, int n)
// {
// if (size != n)
// {
// fprintf(stderr, "Run with two processes only\n");
// MPI_Abort(MPI_COMM_WORLD, __LINE__);
// }
// } |
GB_unop__log_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log_fp32_fp32
// op(A') function: GB_unop_tran__log_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = logf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = logf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = logf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = logf (Ax [p]) for every entry of A (auto-generated; comments
// only -- regenerate rather than hand-edit the logic).
GrB_Info GB_unop_apply__log_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = logf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // skip entries absent from the bitmap
float aij = Ax [p] ;
float z = aij ;
Cx [p] = logf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = logf (A'): the shared transpose template (GB_unop_transpose.c)
// supplies the loop; the GB_* macros above define the operator.
GrB_Info GB_unop_tran__log_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
tree-vectorizer.h | /* Vectorizer
Copyright (C) 2003-2015 Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H
#include "tree-data-ref.h"
#include "target.h"
#include "hash-table.h"
/* Used for naming of new temporaries. */
enum vect_var_kind {
vect_simple_var,
vect_pointer_var,
vect_scalar_var
};
/* Defines type of operation. */
enum operation_type {
unary_op = 1,
binary_op,
ternary_op
};
/* Define type of available alignment support. */
enum dr_alignment_support {
dr_unaligned_unsupported,
dr_unaligned_supported,
dr_explicit_realign,
dr_explicit_realign_optimized,
dr_aligned
};
/* Define type of def-use cross-iteration cycle. */
enum vect_def_type {
vect_uninitialized_def = 0,
vect_constant_def = 1,
vect_external_def,
vect_internal_def,
vect_induction_def,
vect_reduction_def,
vect_double_reduction_def,
vect_nested_cycle,
vect_unknown_def_type
};
#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \
|| ((D) == vect_double_reduction_def) \
|| ((D) == vect_nested_cycle))
/* Structure to encapsulate information about a group of like
instructions to be presented to the target cost model. */
typedef struct _stmt_info_for_cost {
int count;
enum vect_cost_for_stmt kind;
gimple stmt;
int misalign;
} stmt_info_for_cost;
typedef vec<stmt_info_for_cost> stmt_vector_for_cost;
/* Append one (count, kind, stmt, misalign) cost record to
   STMT_COST_VEC for later presentation to the target cost model.  */
static inline void
add_stmt_info_to_vec (stmt_vector_for_cost *stmt_cost_vec, int count,
		      enum vect_cost_for_stmt kind, gimple stmt, int misalign)
{
  stmt_info_for_cost entry = { count, kind, stmt, misalign };
  stmt_cost_vec->safe_push (entry);
}
/************************************************************************
SLP
************************************************************************/
typedef struct _slp_tree *slp_tree;
/* A computation tree of an SLP instance. Each node corresponds to a group of
stmts to be packed in a SIMD stmt. */
struct _slp_tree {
/* Nodes that contain def-stmts of this node statements operands. */
vec<slp_tree> children;
/* A group of scalar stmts to be vectorized together. */
vec<gimple> stmts;
/* Load permutation relative to the stores, NULL if there is no
permutation. */
vec<unsigned> load_permutation;
/* Vectorized stmt/s. */
vec<gimple> vec_stmts;
/* Number of vector stmts that are created to replace the group of scalar
stmts. It is calculated during the transformation phase as the number of
scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
divided by vector size. */
unsigned int vec_stmts_size;
};
/* SLP instance is a sequence of stmts in a loop that can be packed into
SIMD stmts. */
typedef struct _slp_instance {
/* The root of SLP tree. */
slp_tree root;
/* Size of groups of scalar stmts that will be replaced by SIMD stmt/s. */
unsigned int group_size;
/* The unrolling factor required to vectorized this SLP instance. */
unsigned int unrolling_factor;
/* Vectorization costs associated with SLP instance. */
stmt_vector_for_cost body_cost_vec;
/* The group of nodes that contain loads of this SLP instance. */
vec<slp_tree> loads;
/* The first scalar load of the instance. The created vector loads will be
inserted before this statement. */
gimple first_load;
} *slp_instance;
/* Access Functions. */
#define SLP_INSTANCE_TREE(S) (S)->root
#define SLP_INSTANCE_GROUP_SIZE(S) (S)->group_size
#define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor
#define SLP_INSTANCE_BODY_COST_VEC(S) (S)->body_cost_vec
#define SLP_INSTANCE_LOADS(S) (S)->loads
#define SLP_INSTANCE_FIRST_LOAD_STMT(S) (S)->first_load
#define SLP_TREE_CHILDREN(S) (S)->children
#define SLP_TREE_SCALAR_STMTS(S) (S)->stmts
#define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation
/* This structure is used in creation of an SLP tree. Each instance
corresponds to the same operand in a group of scalar stmts in an SLP
node. */
typedef struct _slp_oprnd_info
{
/* Def-stmts for the operands. */
vec<gimple> def_stmts;
/* Information about the first statement, its vector def-type, type, the
operand itself in case it's constant, and an indication if it's a pattern
stmt. */
/* Vector def-type of the first statement's operand. */
enum vect_def_type first_dt;
/* Type of the operand in the first statement. */
tree first_op_type;
/* Whether the first def stmt is a pattern stmt. */
bool first_pattern;
} *slp_oprnd_info;
/* This struct is used to store the information of a data reference,
including the data ref itself, the access offset (calculated by summing its
offset and init) and the segment length for aliasing checks.
This is used to merge alias checks. */
struct dr_with_seg_len
{
dr_with_seg_len (data_reference_p d, tree len)
: dr (d),
offset (size_binop (PLUS_EXPR, DR_OFFSET (d), DR_INIT (d))),
seg_len (len) {}
/* The data reference itself. */
data_reference_p dr;
/* DR_OFFSET + DR_INIT, precomputed by the constructor. */
tree offset;
/* Segment length used for the aliasing check. */
tree seg_len;
};
/* This struct contains two dr_with_seg_len objects with aliasing data
refs. Two comparisons are generated from them. */
struct dr_with_seg_len_pair_t
{
dr_with_seg_len_pair_t (const dr_with_seg_len& d1,
const dr_with_seg_len& d2)
: first (d1), second (d2) {}
/* The two data references whose segments are checked against each other. */
dr_with_seg_len first;
dr_with_seg_len second;
};
/* A candidate prologue-peeling choice considered by the cost model. */
typedef struct _vect_peel_info
{
/* Number of prologue iterations to peel for this candidate. */
int npeel;
/* The data reference this peeling candidate is based on. */
struct data_reference *dr;
/* Presumably the number of datarefs favoring this peeling option —
TODO confirm against the uses via LOOP_VINFO_PEELING_HTAB. */
unsigned int count;
} *vect_peel_info;
/* A _vect_peel_info together with the cost-model numbers used when
choosing the best peeling option. */
typedef struct _vect_peel_extended_info
{
struct _vect_peel_info peel_info;
/* Estimated cost inside the loop body under this peeling. */
unsigned int inside_cost;
/* Estimated cost outside the loop body under this peeling. */
unsigned int outside_cost;
stmt_vector_for_cost body_cost_vec;
} *vect_peel_extended_info;
/* Peeling hashtable helpers. */
struct peel_info_hasher : typed_free_remove <_vect_peel_info>
{
typedef _vect_peel_info value_type;
typedef _vect_peel_info compare_type;
static inline hashval_t hash (const value_type *);
static inline bool equal (const value_type *, const compare_type *);
};
/* Hash a peeling candidate by its peel count. */
inline hashval_t
peel_info_hasher::hash (const value_type *peel_info)
{
return (hashval_t) peel_info->npeel;
}
/* Two peeling candidates are equal iff they peel the same number of
iterations. */
inline bool
peel_info_hasher::equal (const value_type *a, const compare_type *b)
{
return (a->npeel == b->npeel);
}
/*-----------------------------------------------------------------*/
/* Info on vectorized loops. */
/*-----------------------------------------------------------------*/
typedef struct _loop_vec_info {
/* The loop to which this info struct refers to. */
struct loop *loop;
/* The loop basic blocks. */
basic_block *bbs;
/* Number of latch executions. */
tree num_itersm1;
/* Number of iterations. */
tree num_iters;
/* Number of iterations of the original loop. */
tree num_iters_unchanged;
/* Minimum number of iterations below which vectorization is expected to
not be profitable (as estimated by the cost model).
-1 indicates that vectorization will not be profitable.
FORNOW: This field is an int. Will be a tree in the future, to represent
values unknown at compile time. */
int min_profitable_iters;
/* Threshold of number of iterations below which vectorization will not be
performed. It is calculated from MIN_PROFITABLE_ITERS and
PARAM_MIN_VECT_LOOP_BOUND. */
unsigned int th;
/* Is the loop vectorizable? */
bool vectorizable;
/* Unrolling factor, i.e. the vectorization factor (see
LOOP_VINFO_VECT_FACTOR). */
int vectorization_factor;
/* Unknown DRs according to which loop was peeled. */
struct data_reference *unaligned_dr;
/* peeling_for_alignment indicates whether peeling for alignment will take
place, and what the peeling factor should be:
peeling_for_alignment = X means:
If X=0: Peeling for alignment will not be applied.
If X>0: Peel first X iterations.
If X=-1: Generate a runtime test to calculate the number of iterations
to be peeled, using the dataref recorded in the field
unaligned_dr. */
int peeling_for_alignment;
/* The mask used to check the alignment of pointers or arrays. */
int ptr_mask;
/* The loop nest in which the data dependences are computed. */
vec<loop_p> loop_nest;
/* All data references in the loop. */
vec<data_reference_p> datarefs;
/* All data dependences in the loop. */
vec<ddr_p> ddrs;
/* Data Dependence Relations defining address ranges that are candidates
for a run-time aliasing check. */
vec<ddr_p> may_alias_ddrs;
/* Data Dependence Relations defining address ranges together with segment
lengths from which the run-time aliasing check is built. */
vec<dr_with_seg_len_pair_t> comp_alias_ddrs;
/* Statements in the loop that have data references that are candidates for a
runtime (loop versioning) misalignment check. */
vec<gimple> may_misalign_stmts;
/* All interleaving chains of stores in the loop, represented by the first
stmt in the chain. */
vec<gimple> grouped_stores;
/* All SLP instances in the loop. This is a subset of the set of GROUP_STORES
of the loop. */
vec<slp_instance> slp_instances;
/* The unrolling factor needed to SLP the loop. In case of that pure SLP is
applied to the loop, i.e., no unrolling is needed, this is 1. */
unsigned slp_unrolling_factor;
/* Reduction cycles detected in the loop. Used in loop-aware SLP. */
vec<gimple> reductions;
/* All reduction chains in the loop, represented by the first
stmt in the chain. */
vec<gimple> reduction_chains;
/* Hash table used to choose the best peeling option. */
hash_table<peel_info_hasher> *peeling_htab;
/* Cost data used by the target cost model. */
void *target_cost_data;
/* When we have grouped data accesses with gaps, we may introduce invalid
memory accesses. We peel the last iteration of the loop to prevent
this. */
bool peeling_for_gaps;
/* When the number of iterations is not a multiple of the vector size
we need to peel off iterations at the end to form an epilogue loop. */
bool peeling_for_niter;
/* Reductions are canonicalized so that the last operand is the reduction
operand. If this places a constant into RHS1, this decanonicalizes
GIMPLE for other phases, so we must track when this has occurred and
fix it up. */
bool operands_swapped;
/* True if there are no loop carried data dependencies in the loop.
If loop->safelen <= 1, then this is always true, either the loop
didn't have any loop carried data dependencies, or the loop is being
vectorized guarded with some runtime alias checks, or couldn't
be vectorized at all, but then this field shouldn't be used.
For loop->safelen >= 2, the user has asserted that there are no
backward dependencies, but there still could be loop carried forward
dependencies in such loops. This flag will be false if normal
vectorizer data dependency analysis would fail or require versioning
for alias, but because of loop->safelen >= 2 it has been vectorized
even without versioning for alias. E.g. in:
#pragma omp simd
for (int i = 0; i < m; i++)
a[i] = a[i + k] * c;
(or #pragma simd or #pragma ivdep) we can vectorize this and it will
DTRT even for k > 0 && k < m, but without safelen we would not
vectorize this, so this field would be false. */
bool no_data_dependencies;
/* If if-conversion versioned this loop before conversion, this is the
loop version without if-conversion. */
struct loop *scalar_loop;
} *loop_vec_info;
/* Access Functions. */
#define LOOP_VINFO_LOOP(L) (L)->loop
#define LOOP_VINFO_BBS(L) (L)->bbs
#define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1
#define LOOP_VINFO_NITERS(L) (L)->num_iters
/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
prologue peeling retain total unchanged scalar loop iterations for
cost model. */
#define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged
#define LOOP_VINFO_COST_MODEL_MIN_ITERS(L) (L)->min_profitable_iters
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
#define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable
#define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor
#define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask
#define LOOP_VINFO_LOOP_NEST(L) (L)->loop_nest
#define LOOP_VINFO_DATAREFS(L) (L)->datarefs
#define LOOP_VINFO_DDRS(L) (L)->ddrs
#define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts
#define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs
#define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs
#define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L) (L)->reductions
#define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains
#define LOOP_VINFO_PEELING_HTAB(L) (L)->peeling_htab
#define LOOP_VINFO_TARGET_COST_DATA(L) (L)->target_cost_data
#define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps
#define LOOP_VINFO_OPERANDS_SWAPPED(L) (L)->operands_swapped
#define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop
/* True if any statement needs loop versioning for alignment. */
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \
((L)->may_misalign_stmts.length () > 0)
/* True if any runtime alias check candidates were recorded. */
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \
((L)->may_alias_ddrs.length () > 0)
/* True if the iteration count is a compile-time constant greater than 0. */
#define LOOP_VINFO_NITERS_KNOWN_P(L) \
(tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)
/* Retrieve the loop_vec_info hanging off LOOP's aux field. */
static inline loop_vec_info
loop_vec_info_for_loop (struct loop *loop)
{
  return static_cast<loop_vec_info> (loop->aux);
}
/* Return true if LOOP has an inner loop and STMT's basic block belongs
   to that inner loop. */
static inline bool
nested_in_vect_loop_p (struct loop *loop, gimple stmt)
{
  if (!loop->inner)
    return false;
  return loop->inner == gimple_bb (stmt)->loop_father;
}
/* Vectorization information for a single basic block. */
typedef struct _bb_vec_info {
basic_block bb;
/* All interleaving chains of stores in the basic block, represented by the
first stmt in the chain. */
vec<gimple> grouped_stores;
/* All SLP instances in the basic block. This is a subset of the set of
GROUP_STORES of the basic block. */
vec<slp_instance> slp_instances;
/* All data references in the basic block. */
vec<data_reference_p> datarefs;
/* All data dependences in the basic block. */
vec<ddr_p> ddrs;
/* Cost data used by the target cost model. */
void *target_cost_data;
} *bb_vec_info;
/* Accessors for _bb_vec_info. */
#define BB_VINFO_BB(B) (B)->bb
#define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances
#define BB_VINFO_DATAREFS(B) (B)->datarefs
#define BB_VINFO_DDRS(B) (B)->ddrs
#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data
/* Retrieve the bb_vec_info hanging off BB's aux field. */
static inline bb_vec_info
vec_info_for_bb (basic_block bb)
{
  return static_cast<bb_vec_info> (bb->aux);
}
/*-----------------------------------------------------------------*/
/* Info on vectorized defs. */
/*-----------------------------------------------------------------*/
/* Classification of a statement for the vectorizer; stored in
STMT_VINFO_TYPE. */
enum stmt_vec_info_type {
undef_vec_info_type = 0,
load_vec_info_type,
store_vec_info_type,
shift_vec_info_type,
op_vec_info_type,
call_vec_info_type,
call_simd_clone_vec_info_type,
assignment_vec_info_type,
condition_vec_info_type,
reduc_vec_info_type,
induc_vec_info_type,
type_promotion_vec_info_type,
type_demotion_vec_info_type,
type_conversion_vec_info_type,
loop_exit_ctrl_vec_info_type
};
/* Indicates whether/how a variable is used in the scope of loop/basic
block. */
enum vect_relevant {
vect_unused_in_scope = 0,
/* The def is in the inner loop, and the use is in the outer loop, and the
use is a reduction stmt. */
vect_used_in_outer_by_reduction,
/* The def is in the inner loop, and the use is in the outer loop (and is
not part of reduction). */
vect_used_in_outer,
/* defs that feed computations that end up (only) in a reduction. These
defs may be used by non-reduction stmts, but eventually, any
computations/values that are affected by these defs are used to compute
a reduction (i.e. don't get stored to memory, for example). We use this
to identify computations that we can change the order in which they are
computed. */
vect_used_by_reduction,
/* Used directly somewhere in the scope being vectorized. */
vect_used_in_scope
};
/* The type of vectorization that can be applied to the stmt: regular loop-based
vectorization; pure SLP - the stmt is a part of SLP instances and does not
have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
a part of SLP instance and also must be loop-based vectorized, since it has
uses outside SLP sequences.
In the loop context the meanings of pure and hybrid SLP are slightly
different. By saying that pure SLP is applied to the loop, we mean that we
exploit only intra-iteration parallelism in the loop; i.e., the loop can be
vectorized without doing any conceptual unrolling, cause we don't pack
together stmts from different iterations, only within a single iteration.
Loop hybrid SLP means that we exploit both intra-iteration and
inter-iteration parallelism (e.g., number of elements in the vector is 4
and the slp-group-size is 2, in which case we don't have enough parallelism
within an iteration, so we obtain the rest of the parallelism from subsequent
iterations by unrolling the loop by 2). */
enum slp_vect_type {
/* Regular loop-based vectorization only. */
loop_vect = 0,
/* Stmt belongs solely to SLP instances. */
pure_slp,
/* Stmt is in an SLP instance but also has uses outside SLP sequences. */
hybrid
};
/* Shorthand for a data_reference pointer, used in vec<dr_p> below. */
typedef struct data_reference *dr_p;
typedef struct _stmt_vec_info {
/* Kind of operation this stmt performs, for vectorization purposes. */
enum stmt_vec_info_type type;
/* Indicates whether this stmts is part of a computation whose result is
used outside the loop. */
bool live;
/* Stmt is part of some pattern (computation idiom) */
bool in_pattern_p;
/* The stmt to which this info struct refers to. */
gimple stmt;
/* The loop_vec_info with respect to which STMT is vectorized. */
loop_vec_info loop_vinfo;
/* The vector type to be used for the LHS of this statement. */
tree vectype;
/* The vectorized version of the stmt. */
gimple vectorized_stmt;
/** The following is relevant only for stmts that contain a non-scalar
data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
at most one such data-ref. **/
/* Information about the data-ref (access function, etc),
relative to the inner-most containing loop. */
struct data_reference *data_ref_info;
/* Information about the data-ref relative to this loop
nest (the loop that is being considered for vectorization). */
tree dr_base_address;
tree dr_init;
tree dr_offset;
tree dr_step;
tree dr_aligned_to;
/* For loop PHI nodes, the evolution part of it. This makes sure
this information is still available in vect_update_ivs_after_vectorizer
where we may not be able to re-analyze the PHI nodes evolution as
peeling for the prologue loop can make it unanalyzable. The evolution
part is still correct though. */
tree loop_phi_evolution_part;
/* Used for various bookkeeping purposes, generally holding a pointer to
some other stmt S that is in some way "related" to this stmt.
Current use of this field is:
If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
true): S is the "pattern stmt" that represents (and replaces) the
sequence of stmts that constitutes the pattern. Similarly, the
related_stmt of the "pattern stmt" points back to this stmt (which is
the last stmt in the original sequence of stmts that constitutes the
pattern). */
gimple related_stmt;
/* Used to keep a sequence of def stmts of a pattern stmt if such exists. */
gimple_seq pattern_def_seq;
/* List of datarefs that are known to have the same alignment as the dataref
of this stmt. */
vec<dr_p> same_align_refs;
/* Selected SIMD clone's function info. First vector element
is SIMD clone's function decl, followed by a pair of trees (base + step)
for linear arguments (pair of NULLs for other arguments). */
vec<tree> simd_clone_info;
/* Classify the def of this stmt. */
enum vect_def_type def_type;
/* Whether the stmt is SLPed, loop-based vectorized, or both. */
enum slp_vect_type slp_type;
/* Interleaving and reduction chains info. */
/* First element in the group. */
gimple first_element;
/* Pointer to the next element in the group. */
gimple next_element;
/* For data-refs, in case that two or more stmts share data-ref, this is the
pointer to the previously detected stmt with the same dr. */
gimple same_dr_stmt;
/* The size of the group. */
unsigned int size;
/* For stores, number of stores from this group seen. We vectorize the last
one. */
unsigned int store_count;
/* For loads only, the gap from the previous load. For consecutive loads, GAP
is 1. */
unsigned int gap;
/* The minimum negative dependence distance this stmt participates in
or zero if none. */
unsigned int min_neg_dist;
/* Not all stmts in the loop need to be vectorized. e.g, the increment
of the loop induction variable and computation of array indexes. relevant
indicates whether the stmt needs to be vectorized. */
enum vect_relevant relevant;
/* The bb_vec_info with respect to which STMT is vectorized. */
bb_vec_info bb_vinfo;
/* Is this statement vectorizable or should it be skipped in (partial)
vectorization. */
bool vectorizable;
/* For loads only, true if this is a gather load. */
bool gather_p;
/* For loads only — presumably true for strided loads; TODO confirm
against the setters of STMT_VINFO_STRIDE_LOAD_P. */
bool stride_load_p;
/* For both loads and stores. */
bool simd_lane_access_p;
} *stmt_vec_info;
/* Access Functions. */
#define STMT_VINFO_TYPE(S) (S)->type
#define STMT_VINFO_STMT(S) (S)->stmt
#define STMT_VINFO_LOOP_VINFO(S) (S)->loop_vinfo
#define STMT_VINFO_BB_VINFO(S) (S)->bb_vinfo
#define STMT_VINFO_RELEVANT(S) (S)->relevant
#define STMT_VINFO_LIVE_P(S) (S)->live
#define STMT_VINFO_VECTYPE(S) (S)->vectype
#define STMT_VINFO_VEC_STMT(S) (S)->vectorized_stmt
#define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable
#define STMT_VINFO_DATA_REF(S) (S)->data_ref_info
#define STMT_VINFO_GATHER_P(S) (S)->gather_p
#define STMT_VINFO_STRIDE_LOAD_P(S) (S)->stride_load_p
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p
#define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_base_address
#define STMT_VINFO_DR_INIT(S) (S)->dr_init
#define STMT_VINFO_DR_OFFSET(S) (S)->dr_offset
#define STMT_VINFO_DR_STEP(S) (S)->dr_step
#define STMT_VINFO_DR_ALIGNED_TO(S) (S)->dr_aligned_to
#define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt
#define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq
#define STMT_VINFO_SAME_ALIGN_REFS(S) (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S) (S)->def_type
#define STMT_VINFO_GROUP_FIRST_ELEMENT(S) (S)->first_element
#define STMT_VINFO_GROUP_NEXT_ELEMENT(S) (S)->next_element
#define STMT_VINFO_GROUP_SIZE(S) (S)->size
#define STMT_VINFO_GROUP_STORE_COUNT(S) (S)->store_count
#define STMT_VINFO_GROUP_GAP(S) (S)->gap
#define STMT_VINFO_GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt
#define STMT_VINFO_GROUPED_ACCESS(S) ((S)->first_element != NULL && (S)->data_ref_info)
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist
/* Shorthand aliases for the STMT_VINFO_GROUP_* accessors above. */
#define GROUP_FIRST_ELEMENT(S) (S)->first_element
#define GROUP_NEXT_ELEMENT(S) (S)->next_element
#define GROUP_SIZE(S) (S)->size
#define GROUP_STORE_COUNT(S) (S)->store_count
#define GROUP_GAP(S) (S)->gap
#define GROUP_SAME_DR_STMT(S) (S)->same_dr_stmt
#define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope)
#define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S) (S)->slp_type
/* Per-data_reference auxiliary info hung off DR->aux; see DR_VECT_AUX. */
struct dataref_aux {
/* Misalignment of the dataref; -1 appears to mean "unknown" (see
known_alignment_for_access_p). */
int misalignment;
/* If true the alignment of base_decl needs to be increased. */
bool base_misaligned;
/* If true we know the base is at least vector element alignment aligned. */
bool base_element_aligned;
tree base_decl;
};
#define DR_VECT_AUX(dr) ((dataref_aux *)(dr)->aux)
/* Cost-model cap — presumably a saturation value; confirm at use sites. */
#define VECT_MAX_COST 1000
/* The maximum number of intermediate steps required in multi-step type
conversion. */
#define MAX_INTERM_CVT_STEPS 3
/* The maximum vectorization factor supported by any target (V64QI). */
#define MAX_VECTORIZATION_FACTOR 64
/* Avoid GTY(()) on stmt_vec_info. */
typedef void *vec_void_p;
/* Global vector mapping gimple uid - 1 to its stmt_vec_info; see
vinfo_for_stmt / set_vinfo_for_stmt below. */
extern vec<vec_void_p> stmt_vec_info_vec;
void init_stmt_vec_info_vec (void);
void free_stmt_vec_info_vec (void);
/* Return the stmt_vec_info attached to STMT, or NULL if none has been
   set (a gimple uid of 0 means "no entry"). */
static inline stmt_vec_info
vinfo_for_stmt (gimple stmt)
{
  unsigned int uid = gimple_uid (stmt);
  return uid ? (stmt_vec_info) stmt_vec_info_vec[uid - 1] : NULL;
}
/* Set vectorizer information INFO for STMT.  On first use a new slot is
   allocated in stmt_vec_info_vec and its 1-based index is recorded as
   STMT's uid; later calls overwrite the existing slot. */
static inline void
set_vinfo_for_stmt (gimple stmt, stmt_vec_info info)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid != 0)
    {
      stmt_vec_info_vec[uid - 1] = (vec_void_p) info;
      return;
    }
  /* First time this stmt gets info: must not be clearing a NULL entry.  */
  gcc_checking_assert (info);
  gimple_set_uid (stmt, stmt_vec_info_vec.length () + 1);
  stmt_vec_info_vec.safe_push ((vec_void_p) info);
}
/* Return the earlier statement between STMT1 and STMT2, judged by their
   gimple uids.  A NULL argument yields the other statement; a zero uid
   (no stmt_vec_info yet) yields NULL. */
static inline gimple
get_earlier_stmt (gimple stmt1, gimple stmt2)
{
  if (stmt1 == NULL)
    return stmt2;
  if (stmt2 == NULL)
    return stmt1;
  unsigned int uid1 = gimple_uid (stmt1);
  unsigned int uid2 = gimple_uid (stmt2);
  if (uid1 == 0 || uid2 == 0)
    return NULL;
  gcc_checking_assert (uid1 <= stmt_vec_info_vec.length ()
		       && uid2 <= stmt_vec_info_vec.length ());
  return uid1 < uid2 ? stmt1 : stmt2;
}
/* Return the later statement between STMT1 and STMT2, judged by their
   gimple uids.  A NULL argument yields the other statement; a zero uid
   (no stmt_vec_info yet) yields NULL. */
static inline gimple
get_later_stmt (gimple stmt1, gimple stmt2)
{
  unsigned int uid1, uid2;
  if (stmt1 == NULL)
    return stmt2;
  if (stmt2 == NULL)
    return stmt1;
  uid1 = gimple_uid (stmt1);
  uid2 = gimple_uid (stmt2);
  if (uid1 == 0 || uid2 == 0)
    return NULL;
  /* Match get_earlier_stmt: a single checking-only assert instead of two
     unconditional gcc_asserts, so release builds pay no cost here.  */
  gcc_checking_assert (uid1 <= stmt_vec_info_vec.length ()
		       && uid2 <= stmt_vec_info_vec.length ());
  if (uid1 > uid2)
    return stmt1;
  else
    return stmt2;
}
/* Return TRUE if the statement represented by STMT_INFO belongs to a
   recognized pattern, i.e. its related stmt carries IN_PATTERN_P. */
static inline bool
is_pattern_stmt_p (stmt_vec_info stmt_info)
{
  gimple related = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!related)
    return false;
  stmt_vec_info related_info = vinfo_for_stmt (related);
  return related_info && STMT_VINFO_IN_PATTERN_P (related_info);
}
/* Return true if BB is the header of its containing loop. */
static inline bool
is_loop_header_bb_p (basic_block bb)
{
  if (bb != bb->loop_father->header)
    {
      /* Non-header blocks seen here are expected to have exactly one
	 predecessor.  */
      gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
      return false;
    }
  return true;
}
/* Return 2 raised to the power X (X >= 0), computed by repeated
   doubling. */
static inline int
vect_pow2 (int x)
{
  int result = 1;
  while (x-- > 0)
    result *= 2;
  return result;
}
/* Thin wrappers around the targetm.vectorize cost-model hooks, so the
vectorizer never calls the target hooks directly. */
/* Alias targetm.vectorize.builtin_vectorization_cost. */
static inline int
builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
tree vectype, int misalign)
{
return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
vectype, misalign);
}
/* Get cost by calling cost target builtin. */
static inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
/* No vectype and no misalignment information for this query. */
return builtin_vectorization_cost (type_of_cost, NULL, 0);
}
/* Alias targetm.vectorize.init_cost. */
static inline void *
init_cost (struct loop *loop_info)
{
return targetm.vectorize.init_cost (loop_info);
}
/* Alias targetm.vectorize.add_stmt_cost. */
static inline unsigned
add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
stmt_vec_info stmt_info, int misalign,
enum vect_cost_model_location where)
{
return targetm.vectorize.add_stmt_cost (data, count, kind,
stmt_info, misalign, where);
}
/* Alias targetm.vectorize.finish_cost. */
static inline void
finish_cost (void *data, unsigned *prologue_cost,
unsigned *body_cost, unsigned *epilogue_cost)
{
targetm.vectorize.finish_cost (data, prologue_cost, body_cost, epilogue_cost);
}
/* Alias targetm.vectorize.destroy_cost_data. */
static inline void
destroy_cost_data (void *data)
{
targetm.vectorize.destroy_cost_data (data);
}
/*-----------------------------------------------------------------*/
/* Info on data references alignment. */
/*-----------------------------------------------------------------*/
inline void
set_dr_misalignment (struct data_reference *dr, int val)
{
dataref_aux *data_aux = DR_VECT_AUX (dr);
if (!data_aux)
{
data_aux = XCNEW (dataref_aux);
dr->aux = data_aux;
}
data_aux->misalignment = val;
}
/* Return the recorded misalignment of DR.
NOTE(review): assumes DR->aux has already been set (set_dr_misalignment
allocates it lazily); there is no NULL check here. */
inline int
dr_misalignment (struct data_reference *dr)
{
return DR_VECT_AUX (dr)->misalignment;
}
/* Reflects actual alignment of first access in the vectorized loop,
taking into account peeling/versioning if applied. */
#define DR_MISALIGNMENT(DR) dr_misalignment (DR)
#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)
/* Return TRUE if the data access is aligned, and FALSE otherwise. */
static inline bool
aligned_access_p (struct data_reference *data_ref_info)
{
return (DR_MISALIGNMENT (data_ref_info) == 0);
}
/* Return TRUE if the alignment of the data access is known, and FALSE
otherwise.  -1 is the "unknown" sentinel. */
static inline bool
known_alignment_for_access_p (struct data_reference *data_ref_info)
{
return (DR_MISALIGNMENT (data_ref_info) != -1);
}
/* Return true if the vectorizer cost model is unlimited (i.e. cost
   checks are skipped).  Loops marked force_vectorize consult the simd
   cost-model flag first, unless it is left at its default. */
static inline bool
unlimited_cost_model (loop_p loop)
{
  bool simd_override = loop != NULL && loop->force_vectorize
		       && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT;
  if (simd_override)
    return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED;
  return flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED;
}
/* Source location */
extern source_location vect_location;
/*-----------------------------------------------------------------*/
/* Function prototypes. */
/*-----------------------------------------------------------------*/
/* Simple loop peeling and versioning utilities for vectorizer's purposes -
in tree-vect-loop-manip.c. */
extern void slpeel_make_loop_iterate_ntimes (struct loop *, tree);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
struct loop *, edge);
extern void vect_loop_versioning (loop_vec_info, unsigned int, bool);
extern void vect_do_peeling_for_loop_bound (loop_vec_info, tree, tree,
unsigned int, bool);
extern void vect_do_peeling_for_alignment (loop_vec_info, tree,
unsigned int, bool);
extern source_location find_loop_location (struct loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);
/* In tree-vect-stmts.c. */
extern unsigned int current_vector_size;
extern tree get_vectype_for_scalar_type (tree);
extern tree get_same_sized_vectype (tree, tree);
extern bool vect_is_simple_use (tree, gimple, loop_vec_info,
bb_vec_info, gimple *,
tree *, enum vect_def_type *);
extern bool vect_is_simple_use_1 (tree, gimple, loop_vec_info,
bb_vec_info, gimple *,
tree *, enum vect_def_type *, tree *);
extern bool supportable_widening_operation (enum tree_code, gimple, tree, tree,
enum tree_code *, enum tree_code *,
int *, vec<tree> *);
extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
enum tree_code *,
int *, vec<tree> *);
extern stmt_vec_info new_stmt_vec_info (gimple stmt, loop_vec_info,
bb_vec_info);
extern void free_stmt_vec_info (gimple stmt);
extern tree vectorizable_function (gcall *, tree, tree);
extern void vect_model_simple_cost (stmt_vec_info, int, enum vect_def_type *,
stmt_vector_for_cost *,
stmt_vector_for_cost *);
extern void vect_model_store_cost (stmt_vec_info, int, bool,
enum vect_def_type, slp_tree,
stmt_vector_for_cost *,
stmt_vector_for_cost *);
extern void vect_model_load_cost (stmt_vec_info, int, bool, slp_tree,
stmt_vector_for_cost *,
stmt_vector_for_cost *);
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
enum vect_cost_for_stmt, stmt_vec_info,
int, enum vect_cost_model_location);
extern void vect_finish_stmt_generation (gimple, gimple,
gimple_stmt_iterator *);
extern bool vect_mark_stmts_to_be_vectorized (loop_vec_info);
extern tree vect_get_vec_def_for_operand (tree, gimple, tree *);
extern tree vect_init_vector (gimple, tree, tree,
gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (enum vect_def_type, tree);
extern bool vect_transform_stmt (gimple, gimple_stmt_iterator *,
bool *, slp_tree, slp_instance);
extern void vect_remove_stores (gimple);
extern bool vect_analyze_stmt (gimple, bool *, slp_tree);
extern bool vectorizable_condition (gimple, gimple_stmt_iterator *, gimple *,
tree, int, slp_tree);
extern void vect_get_load_cost (struct data_reference *, int, bool,
unsigned int *, unsigned int *,
stmt_vector_for_cost *,
stmt_vector_for_cost *, bool);
extern void vect_get_store_cost (struct data_reference *, int,
unsigned int *, stmt_vector_for_cost *);
extern bool vect_supportable_shift (enum tree_code, tree);
extern void vect_get_vec_defs (tree, tree, gimple, vec<tree> *,
vec<tree> *, slp_tree, int);
extern tree vect_gen_perm_mask_any (tree, const unsigned char *);
extern tree vect_gen_perm_mask_checked (tree, const unsigned char *);
/* In tree-vect-data-refs.c. */
extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int);
extern enum dr_alignment_support vect_supportable_dr_alignment
(struct data_reference *, bool);
extern tree vect_get_smallest_scalar_type (gimple, HOST_WIDE_INT *,
HOST_WIDE_INT *);
extern bool vect_analyze_data_ref_dependences (loop_vec_info, int *);
extern bool vect_slp_analyze_data_ref_dependences (bb_vec_info);
extern bool vect_enhance_data_refs_alignment (loop_vec_info);
extern bool vect_analyze_data_refs_alignment (loop_vec_info, bb_vec_info);
extern bool vect_verify_datarefs_alignment (loop_vec_info, bb_vec_info);
extern bool vect_analyze_data_ref_accesses (loop_vec_info, bb_vec_info);
extern bool vect_prune_runtime_alias_test_list (loop_vec_info);
extern tree vect_check_gather (gimple, loop_vec_info, tree *, tree *,
int *);
extern bool vect_analyze_data_refs (loop_vec_info, bb_vec_info, int *,
unsigned *);
extern tree vect_create_data_ref_ptr (gimple, tree, struct loop *, tree,
tree *, gimple_stmt_iterator *,
gimple *, bool, bool *,
tree = NULL_TREE);
extern tree bump_vector_ptr (tree, gimple, gimple_stmt_iterator *, gimple, tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_grouped_load_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT);
extern void vect_permute_store_chain (vec<tree> ,unsigned int, gimple,
gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (gimple, gimple_stmt_iterator *, tree *,
enum dr_alignment_support, tree,
struct loop **);
extern void vect_transform_grouped_load (gimple, vec<tree> , int,
gimple_stmt_iterator *);
extern void vect_record_grouped_load_vectors (gimple, vec<tree> );
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_create_addr_base_for_vector_ref (gimple, gimple_seq *,
tree, struct loop *,
tree = NULL_TREE);
/* In tree-vect-loop.c. */
/* FORNOW: Used in tree-parloops.c. */
extern void destroy_loop_vec_info (loop_vec_info, bool);
extern gimple vect_force_simple_reduction (loop_vec_info, gimple, bool, bool *);
/* Drive for loop analysis stage. */
extern loop_vec_info vect_analyze_loop (struct loop *);
/* Drive for loop transformation stage. */
extern void vect_transform_loop (loop_vec_info);
extern loop_vec_info vect_analyze_loop_form (struct loop *);
extern bool vectorizable_live_operation (gimple, gimple_stmt_iterator *,
gimple *);
extern bool vectorizable_reduction (gimple, gimple_stmt_iterator *, gimple *,
slp_tree);
extern bool vectorizable_induction (gimple, gimple_stmt_iterator *, gimple *);
extern tree get_initial_def_for_reduction (gimple, tree, tree *);
extern int vect_min_worthwhile_factor (enum tree_code);
extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
stmt_vector_for_cost *,
stmt_vector_for_cost *,
stmt_vector_for_cost *);
extern int vect_get_single_scalar_iteration_cost (loop_vec_info,
stmt_vector_for_cost *);
/* In tree-vect-slp.c. */
extern void vect_free_slp_instance (slp_instance);
extern bool vect_transform_slp_perm_load (slp_tree, vec<tree> ,
gimple_stmt_iterator *, int,
slp_instance, bool);
extern bool vect_schedule_slp (loop_vec_info, bb_vec_info);
extern void vect_update_slp_costs_according_to_vf (loop_vec_info);
extern bool vect_analyze_slp (loop_vec_info, bb_vec_info, unsigned);
extern bool vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
extern void vect_get_slp_defs (vec<tree> , slp_tree,
vec<vec<tree> > *, int);
extern source_location find_bb_location (basic_block);
extern bb_vec_info vect_slp_analyze_bb (basic_block);
extern void vect_slp_transform_bb (basic_block);
/* In tree-vect-patterns.c. */
/* Pattern recognition functions.
Additional pattern recognition functions can (and will) be added
in the future. */
typedef gimple (* vect_recog_func_ptr) (vec<gimple> *, tree *, tree *);
#define NUM_PATTERNS 12
void vect_pattern_recog (loop_vec_info, bb_vec_info);
/* In tree-vectorizer.c. */
unsigned vectorize_loops (void);
void vect_destroy_datarefs (loop_vec_info, bb_vec_info);
#endif /* GCC_TREE_VECTORIZER_H */
|
pr56883.c | /* PR middle-end/56883 */
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp" } */
/* Nested parallelism, default schedule: an outer parallel-for over 'i',
   an inner parallel region with a worksharing 'for' over the explicitly
   shared 'j', and an innermost parallel-for over 'k'.  */
void
f1 (int ***x)
{
  int i, j, k;
#pragma omp parallel for
  for (i = 0; i < 10; ++i)
    {
#pragma omp parallel shared(j)
#pragma omp for
      for (j = 0; j < 10; ++j)
	{
#pragma omp parallel for
	  for (k = 0; k < 10; ++k)
	    x[i][j][k] = k;
	}
    }
}
/* Same nesting as f1, but every loop uses schedule(static,1), i.e.
   round-robin distribution with chunk size 1.  */
void
f2 (int ***x)
{
  int i, j, k;
#pragma omp parallel for schedule(static,1)
  for (i = 0; i < 10; ++i)
    {
#pragma omp parallel shared(j)
#pragma omp for schedule(static,1)
      for (j = 0; j < 10; ++j)
	{
#pragma omp parallel for schedule(static,1)
	  for (k = 0; k < 10; ++k)
	    x[i][j][k] = k;
	}
    }
}
/* Same nesting as f1, but every loop uses schedule(runtime), so the
   schedule is taken from the OMP_SCHEDULE environment / runtime setting.  */
void
f3 (int ***x)
{
  int i, j, k;
#pragma omp parallel for schedule(runtime)
  for (i = 0; i < 10; ++i)
    {
#pragma omp parallel shared(j)
#pragma omp for schedule(runtime)
      for (j = 0; j < 10; ++j)
	{
#pragma omp parallel for schedule(runtime)
	  for (k = 0; k < 10; ++k)
	    x[i][j][k] = k;
	}
    }
}
|
depthwise_conv2d.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_DEPTHWISE_CONV2D_H_
#define MACE_KERNELS_DEPTHWISE_CONV2D_H_
#if defined(MACE_ENABLE_NEON) && defined(__aarch64__)
#include <arm_neon.h>
#endif
#include <algorithm>
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/kernels/conv_pool_2d_util.h"
#include "mace/kernels/activation.h"
#include "mace/kernels/arm/depthwise_conv2d_neon.h"
#include "mace/public/mace.h"
#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif // MACE_ENABLE_OPENCL
namespace mace {
namespace kernels {
// Shared configuration for every depthwise conv2d functor specialization:
// stride/padding/dilation parameters plus the fused activation settings.
struct DepthwiseConv2dFunctorBase {
  DepthwiseConv2dFunctorBase(const int *strides,
                             const Padding padding_type,
                             const std::vector<int> &paddings,
                             const int *dilations,
                             const ActivationType activation,
                             const float relux_max_limit)
      : strides_(strides),
        padding_type_(padding_type),
        paddings_(paddings),
        dilations_(dilations),
        activation_(activation),
        relux_max_limit_(relux_max_limit) {}

  const int *strides_;          // [stride_h, stride_w]
  const Padding padding_type_;  // consulted only when paddings_ is empty
  std::vector<int> paddings_;   // explicit [pad_h, pad_w]; empty => derive from padding_type_
  const int *dilations_;        // [dilation_h, dilation_w]
  const ActivationType activation_;   // fused activation applied after the conv
  const float relux_max_limit_;       // upper clamp used by the RELUX-style activation
};
// Primary template; only the specializations below are defined.
template<DeviceType D, typename T>
struct DepthwiseConv2dFunctor;

// CPU/float specialization: computes the depthwise convolution directly on
// mapped tensor data (NCHW layout), using NEON-accelerated kernels for the
// common 3x3 stride-1/stride-2 cases and a scalar fallback otherwise.
template<>
struct DepthwiseConv2dFunctor<DeviceType::CPU, float>
    : public DepthwiseConv2dFunctorBase {
  DepthwiseConv2dFunctor(const int *strides,
                         const Padding padding_type,
                         const std::vector<int> &paddings,
                         const int *dilations,
                         const ActivationType activation,
                         const float relux_max_limit)
      : DepthwiseConv2dFunctorBase(strides,
                                   padding_type,
                                   paddings,
                                   dilations,
                                   activation,
                                   relux_max_limit) {}

  // Scalar reference kernel.  in_shape/out_shape are NCHW; filter_shape is
  // [out_channels, in_channels, filter_height, filter_width] with
  // out_channels == in_channels * multiplier.  Output channel m reads only
  // its source input channel c = m / multiplier.  Taps falling outside the
  // input (after padding/dilation) are skipped, i.e. treated as zero.
  void DepthwiseConv2dGeneral(const float *input,
                              const float *filter,
                              const index_t *in_shape,
                              const index_t *out_shape,
                              const index_t *filter_shape,
                              const int *stride_hw,
                              const int *dilation_hw,
                              const int *pad_hw,
                              float *output) {
    const index_t multiplier = filter_shape[0] / filter_shape[1];
#pragma omp parallel for collapse(2)
    for (index_t b = 0; b < in_shape[0]; ++b) {
      for (index_t m = 0; m < filter_shape[0]; ++m) {
        for (index_t h = 0; h < out_shape[2]; ++h) {
          for (index_t w = 0; w < out_shape[3]; ++w) {
            const index_t out_channels = filter_shape[0];
            const index_t in_channels = filter_shape[1];
            const index_t filter_height = filter_shape[2];
            const index_t filter_width = filter_shape[3];
            const index_t in_height = in_shape[2];
            const index_t in_width = in_shape[3];
            const index_t out_height = out_shape[2];
            const index_t out_width = out_shape[3];
            index_t out_offset =
                ((b * out_channels + m) * out_height + h) * out_width + w;
            index_t c = m / multiplier;  // source input channel
            index_t o = m % multiplier;  // filter slot within that channel
            float sum = 0;
            for (index_t kh = 0; kh < filter_height; ++kh) {
              for (index_t kw = 0; kw < filter_width; ++kw) {
                index_t ih = h * stride_hw[0] + kh * dilation_hw[0] - pad_hw[0];
                index_t iw = w * stride_hw[1] + kw * dilation_hw[1] - pad_hw[1];
                if (ih >= 0 && ih < in_height && iw >= 0 && iw < in_width) {
                  index_t in_offset =
                      ((b * in_channels + c) * in_height + ih) * in_width + iw;
                  index_t filter_offset =
                      (((o * in_channels) + c) * filter_height + kh)
                          * filter_width
                          + kw;
                  sum += input[in_offset] * filter[filter_offset];
                }
              }
            }
            output[out_offset] = sum;
          }
        }
      }
    }
  }

  // Resizes and zero-fills the output, runs the convolution (NEON fast
  // paths or the scalar fallback), adds the optional per-channel bias and
  // applies the fused activation in place.
  MaceStatus operator()(const Tensor *input,   // NCHW
                        const Tensor *filter,  // [multiplier, in_channels, kH, kW]
                        const Tensor *bias,    // per-output-channel, may be null
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    MACE_CHECK_NOTNULL(input);
    MACE_CHECK_NOTNULL(filter);
    MACE_CHECK_NOTNULL(output);

    std::vector<index_t> output_shape(4);
    std::vector<int> paddings(2);
    // View the depthwise filter as an ordinary conv filter with
    // out_channels = multiplier * in_channels.
    std::vector<index_t> filter_shape
        {filter->dim(0) * filter->dim(1), filter->dim(1), filter->dim(2),
         filter->dim(3)};

    if (paddings_.empty()) {
      // Derive paddings and output size from the padding policy.
      CalcNCHWPaddingAndOutputSize(input->shape().data(),
                                   filter_shape.data(),
                                   dilations_,
                                   strides_,
                                   padding_type_,
                                   output_shape.data(),
                                   paddings.data());
    } else {
      // Explicit paddings were supplied; only the output size is computed.
      paddings = paddings_;
      CalcNCHWOutputSize(input->shape().data(),
                         filter_shape.data(),
                         paddings_.data(),
                         dilations_,
                         strides_,
                         RoundType::FLOOR,
                         output_shape.data());
    }
    MACE_RETURN_IF_ERROR(output->Resize(output_shape));
    output->Clear();

    index_t batch = output->dim(0);
    index_t channels = output->dim(1);
    index_t height = output->dim(2);
    index_t width = output->dim(3);

    index_t input_batch = input->dim(0);
    index_t input_channels = input->dim(1);
    index_t input_height = input->dim(2);
    index_t input_width = input->dim(3);

    index_t filter_h = filter_shape[2];
    index_t filter_w = filter_shape[3];
    MACE_CHECK(filter_shape[0] == channels, filter_shape[0], " != ", channels);
    MACE_CHECK(filter_shape[1] == input_channels, filter_shape[1], " != ",
               input_channels);

    index_t stride_h = strides_[0];
    index_t stride_w = strides_[1];

    index_t dilation_h = dilations_[0];
    index_t dilation_w = dilations_[1];

    MACE_CHECK(batch == input_batch, "Input/Output batch size mismatch");

    // Split total padding into (possibly asymmetric) top/bottom, left/right.
    int pad_top = paddings[0] >> 1;
    int pad_bottom = paddings[0] - pad_top;
    int pad_left = paddings[1] >> 1;
    int pad_right = paddings[1] - pad_left;

    // Output rows/cols not affected by the zero-padded border; passed to
    // the NEON kernels.  NOTE(review): exact border semantics live in
    // depthwise_conv2d_neon.h — confirm there.
    index_t valid_h_start = pad_top == 0 ? 0 : (pad_top - 1) / stride_h + 1;
    index_t valid_h_stop = pad_bottom == 0
                           ? height
                           : height - ((pad_bottom - 1) / stride_h + 1);
    index_t valid_w_start = pad_left == 0 ? 0 : (pad_left - 1) / stride_w + 1;
    index_t valid_w_stop = pad_right == 0
                           ? width
                           : width - ((pad_right - 1) / stride_w + 1);

    std::function<void(const float *input, float *output)> conv_func;

    // Keep tensor memory mapped for the whole computation.
    Tensor::MappingGuard input_guard(input);
    Tensor::MappingGuard filter_guard(filter);
    Tensor::MappingGuard bias_guard(bias);
    Tensor::MappingGuard output_guard(output);
    auto input_data = input->data<float>();
    auto filter_data = filter->data<float>();
    auto bias_data = bias == nullptr ? nullptr : bias->data<float>();
    auto output_data = output->mutable_data<float>();

    const int pad_hw[2] = {pad_top, pad_left};
    const index_t input_shape[4] =
        {batch, input_channels, input_height, input_width};

    // make host compiler happy
    MACE_UNUSED(pad_hw);
    MACE_UNUSED(input_shape);

    if (filter_h == 3 && filter_w == 3 && stride_h == 1 && stride_w == 1
        && dilation_h == 1 && dilation_w == 1) {
      // 3x3, stride 1, no dilation: NEON fast path.
      conv_func = [=](const float *input, float *output) {
        DepthwiseConv2dNeonK3x3S1(input,
                                  filter_data,
                                  input_shape,
                                  output_shape.data(),
                                  pad_hw,
                                  valid_h_start,
                                  valid_h_stop,
                                  valid_w_start,
                                  valid_w_stop,
                                  output);
      };
    } else if (filter_h == 3 && filter_w == 3 && stride_h == 2 && stride_w == 2
        && dilation_h == 1 && dilation_w == 1) {
      // 3x3, stride 2, no dilation: NEON fast path.
      conv_func = [=](const float *input, float *output) {
        DepthwiseConv2dNeonK3x3S2(input,
                                  filter_data,
                                  input_shape,
                                  output_shape.data(),
                                  pad_hw,
                                  valid_h_start,
                                  valid_h_stop,
                                  valid_w_start,
                                  valid_w_stop,
                                  output);
      };
    } else {
      // Every other configuration: scalar reference kernel.
      conv_func = [=](const float *input, float *output) {
        DepthwiseConv2dGeneral(input,
                               filter_data,
                               input_shape,
                               output_shape.data(),
                               filter_shape.data(),
                               strides_,
                               dilations_,
                               pad_hw,
                               output);
      };
    }

    conv_func(input_data, output_data);

    if (bias_data != nullptr) {
      // Add the per-output-channel bias.
#pragma omp parallel for collapse(2)
      for (index_t b = 0; b < batch; ++b) {
        for (index_t c = 0; c < channels; ++c) {
          for (index_t i = 0; i < height * width; ++i) {
            output_data[(b * channels + c) * height * width + i] +=
                bias_data[c];
          }
        }
      }
    }

    // Fused activation applied in place over the whole output.
    DoActivation(output_data, output_data, output->size(), activation_,
                 relux_max_limit_);

    return MACE_SUCCESS;
  }
};
#ifdef MACE_ENABLE_OPENCL
// GPU specialization: stores the configuration and cached OpenCL launch
// state; operator() is implemented in a separate OpenCL source file.
template<typename T>
struct DepthwiseConv2dFunctor<DeviceType::GPU, T>
    : DepthwiseConv2dFunctorBase {
  DepthwiseConv2dFunctor(const int *strides,
                         const Padding padding_type,
                         const std::vector<int> &paddings,
                         const int *dilations,
                         const ActivationType activation,
                         const float relux_max_limit)
      : DepthwiseConv2dFunctorBase(strides,
                                   padding_type,
                                   paddings,
                                   dilations,
                                   activation,
                                   relux_max_limit) {}

  MaceStatus operator()(const Tensor *input,
                        const Tensor *filter,
                        const Tensor *bias,
                        Tensor *output,
                        StatsFuture *future);

  cl::Kernel kernel_;  // cached compiled OpenCL kernel
  // NOTE(review): presumably the max kernel workgroup size — confirm in the
  // OpenCL implementation file.
  uint32_t kwg_size_;
  // NOTE(review): presumably a device-side error-reporting buffer.
  std::unique_ptr<BufferBase> kernel_error_;
  // NOTE(review): presumably the last-seen input shape, used to decide when
  // the kernel must be re-prepared.
  std::vector<index_t> input_shape_;
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_DEPTHWISE_CONV2D_H_
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Compute RESULT = X - Y for two `struct timeval' values.
 *
 * Y is used as scratch space and may be modified by the call.
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microsecond field so the microsecond
     subtraction below cannot go negative. */
  if (x->tv_usec < y->tv_usec)
    {
      int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * borrow;
      y->tv_sec += borrow;
    }
  /* Symmetric normalization when x leads y by more than a second in the
     microsecond field. */
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int carry = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_usec += 1000000 * carry;
      y->tv_sec -= carry;
    }

  /* tv_usec is now guaranteed non-negative in the result. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative iff x's (normalized) seconds fall short of y's. */
  return x->tv_sec < y->tv_sec;
}
/* Driver: allocates the two time levels of the field plus the coefficient
 * array, fills them with pseudo-random data, runs the 25-point stencil
 * TESTS times and reports the best wall-clock time.
 */
int main(int argc, char *argv[])
{
	int t, i, j, k, test;
	/* Fix: these were uninitialized (UB) when fewer than three/four
	 * command-line arguments were supplied.  Default: 32^3 interior
	 * points (+ 2*4 halo layers per dimension), 10 time steps. */
	int Nx = 32 + 8, Ny = 32 + 8, Nz = 32 + 8, Nt = 10;
	if (argc > 3) {
		Nx = atoi(argv[1])+8;
		Ny = atoi(argv[2])+8;
		Nz = atoi(argv[3])+8;
	}
	if (argc > 4)
		Nt = atoi(argv[4]);

	/* A[0]/A[1] are the two (ping-pong) time levels; roc2 holds the
	 * per-point coefficient.  Fix: roc2 was malloc'ed once with a bogus
	 * size and immediately leaked by the reassignment below. */
	double ****A = (double ****) malloc(sizeof(double***)*2);
	double ***roc2;
	A[0] = (double ***) malloc(sizeof(double**)*Nz);
	A[1] = (double ***) malloc(sizeof(double**)*Nz);
	roc2 = (double ***) malloc(sizeof(double**)*Nz);
	for(i=0; i<Nz; i++){
		A[0][i] = (double**) malloc(sizeof(double*)*Ny);
		A[1][i] = (double**) malloc(sizeof(double*)*Ny);
		roc2[i] = (double**) malloc(sizeof(double*)*Ny);
		for(j=0;j<Ny;j++){
			A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
			A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
			roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
		}
	}

	// tile size information, including extra element to decide the list length
	int *tile_size = (int*) malloc(sizeof(int));
	tile_size[0] = -1;
	// The list is modified here before source-to-source transformations
	tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
	tile_size[0] = 8;
	tile_size[1] = 8;
	tile_size[2] = 16;
	tile_size[3] = 512;
	tile_size[4] = -1;

	// for timekeeping
	int ts_return = -1;
	struct timeval start, end, result;
	double tdiff = 0.0, min_tdiff=1.e100;

	const int BASE = 1024;

	/* Initialize every point.  Fixes: the loops started at index 1,
	 * leaving plane 0 uninitialized although the stencil reads it
	 * (i-4 == 0 at i == 4); A[1] was never initialized although the
	 * first time step reads A[(t+1)%2]. */
	srand(42);
	for (i = 0; i < Nz; i++) {
		for (j = 0; j < Ny; j++) {
			for (k = 0; k < Nx; k++) {
				A[0][i][j][k] = 1.0 * (rand() % BASE);
				A[1][i][j][k] = 0.0;
				roc2[i][j][k] = 2.0 * (rand() % BASE);
			}
		}
	}

#ifdef LIKWID_PERFMON
	LIKWID_MARKER_INIT;
#pragma omp parallel
	{
		LIKWID_MARKER_THREADINIT;
#pragma omp barrier
		LIKWID_MARKER_START("calc");
	}
#endif

	int num_threads = 1;
#if defined(_OPENMP)
	num_threads = omp_get_max_threads();
#endif

	/* 25-point stencil coefficients: center plus four shells of six
	 * axis-aligned neighbors each. */
	const double coef0 = -0.28472;
	const double coef1 = 0.16000;
	const double coef2 = -0.02000;
	const double coef3 = 0.00254;
	const double coef4 = -0.00018;

	for(test=0; test<TESTS; test++){
		gettimeofday(&start, 0);
		// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
		for (t = 0; t < Nt; t++) {
			for (i = 4; i < Nz-4; i++) {
				for (j = 4; j < Ny-4; j++) {
					for (k = 4; k < Nx-4; k++) {
						A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
							coef0* A[t%2][i  ][j  ][k  ] +
							coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
							       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
							       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
							coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
							       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
							       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
							coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
							       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
							       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
							coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
							       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
							       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
					}
				}
			}
		}
#pragma endscop
		gettimeofday(&end, 0);
		ts_return = timeval_subtract(&result, &end, &start);
		(void) ts_return;  /* difference is never negative here */
		tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
		min_tdiff = MIN(min_tdiff, tdiff);
		printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
	}

	PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
	{
		LIKWID_MARKER_STOP("calc");
	}
	LIKWID_MARKER_CLOSE;
#endif

	/* Free allocated arrays.  Fix: also release tile_size and the
	 * top-level A pointer, which were previously leaked. */
	for(i=0; i<Nz; i++){
		for(j=0;j<Ny;j++){
			free(A[0][i][j]);
			free(A[1][i][j]);
			free(roc2[i][j]);
		}
		free(A[0][i]);
		free(A[1][i]);
		free(roc2[i]);
	}
	free(A[0]);
	free(A[1]);
	free(A);
	free(roc2);
	free(tile_size);
	return 0;
}
|
in_parallel.c | #include <stdio.h>
#include <omp.h>
/* Demonstrates omp_in_parallel(): per the OpenMP API it returns zero when
 * called from outside an active parallel region and nonzero inside one. */
int main( )
{
	/* Request four threads for subsequent parallel regions. */
	omp_set_num_threads(4);
	/* Outside any parallel region: expected to print 0. */
	printf("%d\n", omp_in_parallel( ));
	/* Inside a parallel region; 'master' restricts the print to one thread. */
#pragma omp parallel
#pragma omp master
	{
		printf("%d\n", omp_in_parallel( ));
	}
}
|
parallel-quicksort.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include "omp.h"
void quicksort(int *array, int start, int end, int threads);
int ordena(int *array, int start, int end, int pivot, int* lvec);
void merge(int* array, int start, int end, int* lvec, int u, int* copia);
/* Demo driver: prints the input vector, sorts it with the task-based
 * quicksort using p threads, then prints the result. */
int main()
{
	const int n = 9;
	const int p = 3;
	omp_set_num_threads(p);

	int a[] = { 7, 12, 1, -2, 0, 15, 4, 11, 9};

	printf("\n\nVector desordenado: ");
	for (int idx = 0; idx < n; ++idx)
		printf(" %d ", a[idx]);
	printf("\n");

	/* A single thread issues the root call; the work inside is spread
	 * across the team via OpenMP tasks. */
#pragma omp parallel
#pragma omp single
	quicksort( a, 0, n, p);

	printf("\n\nVector ordenado: ");
	for (int idx = 0; idx < n; ++idx)
		printf(" %d ", a[idx]);
	printf("\n");

	return 0;
}
/* Task-parallel quicksort partition step.
 *
 * Partitions array[start..end) around a randomly chosen pivot using
 * 'threads' OpenMP tasks, then merges the per-task partitions into a
 * contiguous (<= pivot | > pivot) arrangement.  The recursive descent is
 * currently disabled (see the commented-out calls at the bottom).
 *
 * NOTE(review): ordena() and merge() index 'lvec' with absolute array
 * positions, so the calloc(end - start) buffer is only large enough when
 * start == 0 (the top-level call).  Confirm before re-enabling recursion.
 */
void quicksort(int *array, int start, int end, int threads)
{
	/* Leave the parallel recursion when this slice has only one thread
	 * assigned or fewer than two elements. */
	if(threads <= 1 || end - start < 2)
		return;

	int pivot, i, u = 0; /* u: total number of elements <= pivot */
	/* lvec[i] == 1 marks positions holding elements <= pivot, 0 marks
	 * greater ones; filled while the slices are partitioned in parallel. */
	int* lvec = calloc(end - start, sizeof(int));
	/* Scratch vector so the merge does not clobber the original data. */
	int* copia = malloc(sizeof(int) * (end - start));
	if (lvec == NULL || copia == NULL) { /* fix: bail out cleanly on OOM */
		free(lvec);
		free(copia);
		return;
	}

	srand(time(NULL));
	pivot = array[rand() % (end - start) + start];
	printf("pivot: %d\n", pivot);

	/* One partitioning task per thread, splitting the index range evenly;
	 * the last task also takes the remainder. */
	for(i = 0; i < threads; i++)
	{
		if (i+1 == threads)
		{
			#pragma omp task
			ordena(array, start + (i*((end - start) / threads)), end, pivot, lvec);
		}
		else
		{
			#pragma omp task
			ordena(array, start + (i*((end - start) / threads)), start + ((i + 1)*((end - start) / threads)), pivot, lvec);
		}
	}
	#pragma omp taskwait /* wait until every slice is partitioned */

	/* Count how many elements ended up <= pivot. */
	for (i = start; i < end; i++)
		if(lvec[i] == 1)
			u++;

	/* Merge: elements <= pivot go to the left of 'copia', the rest to the
	 * right, again one task per thread. */
	for(i = 0; i < threads; i++)
		if (i+1 == threads)
			#pragma omp task
			merge(array, start + (i*((end - start) / threads)), end, lvec, u, copia);
		else
			#pragma omp task
			merge(array, start + (i*((end - start) / threads)), start + ((i + 1)*((end - start) / threads)), lvec, u, copia);
	#pragma omp taskwait
	/* 'copia' now holds the data ordered around the pivot. */

	/* Split the available threads between the two halves proportionally to
	 * their sizes (fix: 'rpeartohilos2' typo renamed). */
	int repartohilos = (int) (((float) u / (end - start) ) * threads + 0.5 );
	int repartohilos2 = (int) (((float) ((end - start) - u) / (end - start) ) * threads + 0.5 );
	(void) repartohilos;
	(void) repartohilos2;

	/* Copy the merged result back; ideally done once at the very end by
	 * threading 'copia' through the recursion instead. */
	for(i = start; i < end; i++)
		array[i] = copia[i];

	/*
	#pragma omp task
	quicksort(array, start, start + u, repartohilos);
	#pragma omp task
	quicksort(array, start + u, end, repartohilos2);*/

	/* Fix: both temporaries were previously leaked on every call. */
	free(lvec);
	free(copia);
}
/* Scatter array[start..end) into 'copia': elements flagged with 1 in
 * 'lvec' (<= pivot) fill the low region, the rest fill the high region
 * starting at offset 'u' (the total count of flagged elements). */
void merge(int* array, int start, int end, int* lvec, int u, int* copia)
{
	int lower = 0;
	int i;

	/* Positions before 'start' belong to other slices; advance both write
	 * cursors past whatever they will occupy. */
	for (i = 0; i < start; ++i) {
		if (lvec[i] == 1)
			++lower;
		else
			++u;
	}

	/* Copy this slice out, routing each element by its flag. */
	for (i = start; i < end; ++i) {
		if (lvec[i] == 1)
			copia[lower++] = array[i];
		else
			copia[u++] = array[i];
	}
}
/* Partition array[start..end) in place: elements <= pivot are swapped to
 * the front of the slice and their final positions are flagged with 1 in
 * 'lvec'.  Returns how many elements were <= pivot. */
int ordena(int *array, int start, int end, int pivot, int* lvec)
{
	int count = 0;
	int i;

	for (i = start; i < end; ++i) {
		if (array[i] > pivot)
			continue;
		/* Swap the small element to the slice front and flag its slot. */
		int tmp = array[start];
		array[start] = array[i];
		array[i] = tmp;
		lvec[start] = 1;
		++start;
		++count;
	}

	return count;
}
|
OpenMPClause.h | //===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <utility>
namespace clang {
class ASTContext;
//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//
/// This is a basic class for representing single OpenMP clause.
class OMPClause {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;

  /// Ending location of the clause.
  SourceLocation EndLoc;

  /// Kind of the clause.
  OpenMPClauseKind Kind;

protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
      : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}

public:
  /// Returns the starting location of the clause.
  SourceLocation getLocStart() const { return StartLoc; }

  /// Returns the ending location of the clause.
  SourceLocation getLocEnd() const { return EndLoc; }

  /// Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }

  /// Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }

  /// Returns true for an implicit clause, i.e. one that was not written in
  /// the source (its start location is invalid).
  bool isImplicit() const { return StartLoc.isInvalid(); }

  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;
  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();
  /// Const traversal is implemented on top of the non-const overload.
  const_child_range children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *) { return true; }
};
/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Pre-initialization statement for the clause.
  Stmt *PreInit = nullptr;

  /// Region that captures the associated stmt.
  OpenMPDirectiveKind CaptureRegion = OMPD_unknown;

protected:
  OMPClauseWithPreInit(const OMPClause *This) {
    assert(get(This) && "get is not tuned for pre-init.");
  }

  /// Set pre-initialization statement for the clause.
  void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = OMPD_unknown) {
    PreInit = S;
    CaptureRegion = ThisRegion;
  }

public:
  /// Get pre-initialization statement for the clause.
  const Stmt *getPreInitStmt() const { return PreInit; }

  /// Get pre-initialization statement for the clause.
  Stmt *getPreInitStmt() { return PreInit; }

  /// Get capture region for the stmt in the clause.
  OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; }

  static OMPClauseWithPreInit *get(OMPClause *C);
  static const OMPClauseWithPreInit *get(const OMPClause *C);
};
/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Post-update expression for the clause.
  Expr *PostUpdate = nullptr;

protected:
  OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
    assert(get(This) && "get is not tuned for post-update.");
  }

  /// Set post-update expression for the clause.
  void setPostUpdateExpr(Expr *S) { PostUpdate = S; }

public:
  /// Get post-update expression for the clause.
  const Expr *getPostUpdateExpr() const { return PostUpdate; }

  /// Get post-update expression for the clause.
  Expr *getPostUpdateExpr() { return PostUpdate; }

  static OMPClauseWithPostUpdate *get(OMPClause *C);
  static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};
/// This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of variables in the list.
  unsigned NumVars;

protected:
  /// Build a clause with \a N variables
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
      : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}

  /// Fetches list of variables associated with this clause.
  /// The expressions are stored as trailing objects of the derived clause
  /// \a T.
  MutableArrayRef<Expr *> getVarRefs() {
    return MutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars);
  }

  /// Sets the list of variables for this clause.
  void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(VL.begin(), VL.end(),
              static_cast<T *>(this)->template getTrailingObjects<Expr *>());
  }

public:
  using varlist_iterator = MutableArrayRef<Expr *>::iterator;
  using varlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using varlist_range = llvm::iterator_range<varlist_iterator>;
  using varlist_const_range = llvm::iterator_range<varlist_const_iterator>;

  /// Number of variables in the clause's list.
  unsigned varlist_size() const { return NumVars; }
  /// True when the clause's variable list is empty.
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }

  varlist_iterator varlist_begin() { return getVarRefs().begin(); }
  varlist_iterator varlist_end() { return getVarRefs().end(); }
  varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
  varlist_const_iterator varlist_end() const { return getVarRefs().end(); }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Fetches list of all variables in the clause.
  ArrayRef<const Expr *> getVarRefs() const {
    return llvm::makeArrayRef(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }
};
/// This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'if' clause.
  Stmt *Condition = nullptr;

  /// Location of ':' (if any).
  SourceLocation ColonLoc;

  /// Directive name modifier for the clause.
  OpenMPDirectiveKind NameModifier = OMPD_unknown;

  /// Name modifier location.
  SourceLocation NameModifierLoc;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// Set directive name modifier for the clause.
  void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }

  /// Set location of directive name modifier for the clause.
  void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }

  /// Set location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Build 'if' clause with condition \a Cond.
  ///
  /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param NameModifierLoc Location of directive name modifier.
  /// \param ColonLoc [OpenMP 4.1] Location of ':'.
  /// \param EndLoc Ending location of the clause.
  OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond,
              OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
              SourceLocation LParenLoc, SourceLocation NameModifierLoc,
              SourceLocation ColonLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc),
        NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) {
    // The helper condition is kept in the OMPClauseWithPreInit base.
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPIfClause()
      : OMPClause(OMPC_if, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  /// Return directive name modifier associated with the clause.
  OpenMPDirectiveKind getNameModifier() const { return NameModifier; }

  /// Return the location of directive name modifier.
  SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  // Only the condition is exposed for AST traversal.
  child_range children() { return child_range(&Condition, &Condition + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_if;
  }
};
/// This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'final' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

public:
  /// Build 'final' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc)
      : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Condition(Cond) {}

  /// Build an empty clause.
  OMPFinalClause()
      : OMPClause(OMPC_final, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_final;
  }
};
/// This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression evaluating to the number of threads for the construct.
  Stmt *NumThreads = nullptr;

  /// Set the number-of-threads expression.
  void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }

public:
  /// Build 'num_threads' clause with the expression \a NumThreads.
  ///
  /// \param NumThreads Number of threads for the construct.
  /// \param HelperNumThreads Helper Number of threads for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
                      OpenMPDirectiveKind CaptureRegion,
                      SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation EndLoc)
      : OMPClause(OMPC_num_threads, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
        NumThreads(NumThreads) {
    setPreInitStmt(HelperNumThreads, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumThreadsClause()
      : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of threads.
  Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }

  child_range children() { return child_range(&NumThreads, &NumThreads + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_threads;
  }
};
/// Represents the 'safelen' clause of an OpenMP directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// Here '#pragma omp simd' carries a 'safelen' clause with the single
/// expression '4'. When present, no two iterations executed concurrently
/// with SIMD instructions may be farther apart in the logical iteration
/// space than this value; the argument must be a constant positive integer
/// expression.
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration-space distance expression.
  Stmt *Safelen = nullptr;

  /// Record the safelen expression (used by the clause reader).
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// Build a 'safelen' clause holding expression \a Len.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Safelen(Len) {}

  /// Build an empty clause with invalid locations.
  explicit OMPSafelenClause()
      : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the safe iteration-space distance expression.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }

  /// The only child is the safelen expression.
  child_range children() { return child_range(&Safelen, &Safelen + 1); }
};
/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Preferred number of iterations to execute concurrently.
  /// (Not a safety bound — that is 'safelen'.)
  Stmt *Simdlen = nullptr;

  /// Set the simdlen expression (used by the clause reader).
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the preferred concurrent iteration count expression.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  /// The only child is the simdlen expression.
  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simdlen;
  }
};
/// Represents the 'collapse' clause of an OpenMP directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// Here '#pragma omp simd' carries a 'collapse' clause with the single
/// expression '3'. The argument must be a constant positive integer
/// expression giving the number of nested loops collapsed into one
/// iteration space.
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression for the number of associated for-loops.
  Stmt *NumForLoops = nullptr;

  /// Record the loop-count expression (used by the clause reader).
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build a 'collapse' clause holding expression \a Num.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num) {}

  /// Build an empty clause with invalid locations.
  explicit OMPCollapseClause()
      : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_collapse;
  }

  /// The only child is the loop-count expression.
  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
};
/// Represents the 'default' clause of an OpenMP directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// Here '#pragma omp parallel' carries a simple 'default' clause with
/// kind 'shared'. The clause stores only the kind and source locations;
/// it has no child expressions.
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Kind of the 'default' clause ('none' or 'shared').
  OpenMPDefaultClauseKind Kind = OMPC_DEFAULT_unknown;

  /// Start location of the kind keyword in source code.
  SourceLocation KindKwLoc;

  /// Record the clause kind (used by the clause reader).
  ///
  /// \param K Argument of clause.
  void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }

  /// Record the kind keyword location (used by the clause reader).
  ///
  /// \param KLoc Argument location.
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause with unknown kind.
  OMPDefaultClause()
      : OMPClause(OMPC_default, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_default;
  }

  /// No child statements: the kind is a plain enumerator.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// Represents the 'proc_bind' clause of an OpenMP directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// Here '#pragma omp parallel' carries a simple 'proc_bind' clause with
/// kind 'master'. Like 'default', this clause stores only an enumerated
/// kind plus source locations and has no child expressions.
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Kind of the 'proc_bind' clause ('master', 'close' or 'spread').
  OpenMPProcBindClauseKind Kind = OMPC_PROC_BIND_unknown;

  /// Start location of the kind keyword in source code.
  SourceLocation KindKwLoc;

  /// Record the clause kind (used by the clause reader).
  ///
  /// \param K Kind of clause.
  void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; }

  /// Record the kind keyword location (used by the clause reader).
  ///
  /// \param KLoc Kind location.
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause with unknown kind.
  OMPProcBindClause()
      : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPProcBindClauseKind getProcBindKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_proc_bind;
  }

  /// No child statements: the kind is a plain enumerator.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;

  /// Modifiers for 'schedule' clause. Up to two modifiers may be present;
  /// FIRST/SECOND index the fixed-size arrays below.
  enum {FIRST, SECOND, NUM_MODIFIERS};
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];

  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }

  /// Set the first schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[FIRST] = M;
  }

  /// Set the second schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[SECOND] = M;
  }

  /// Set location of the first schedule modifier.
  void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }

  /// Set location of the second schedule modifier.
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }

  /// Set the next free schedule modifier slot: fills FIRST if still
  /// unknown, otherwise asserts SECOND is free and fills it.
  /// NOTE(review): the name is misspelled upstream ("Modifer"); it cannot
  /// be renamed here without breaking external callers of this private API
  /// (e.g. the parser via friendship) — fix at the declaration and all
  /// call sites together.
  ///
  /// \param M Schedule modifier.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Schedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  /// \param M1 The first modifier applied to 'schedule' clause.
  /// \param M1Loc Location of the first modifier
  /// \param M2 The second modifier applied to 'schedule' clause.
  /// \param M2Loc Location of the second modifier
  OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation KLoc, SourceLocation CommaLoc,
                    SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
                    Expr *ChunkSize, Stmt *HelperChunkSize,
                    OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
                    OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
      : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc),
        ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
    Modifiers[FIRST] = M1;
    Modifiers[SECOND] = M2;
    ModifiersLoc[FIRST] = M1Loc;
    ModifiersLoc[SECOND] = M2Loc;
  }

  /// Build an empty clause. Modifier slots are reset to unknown so that
  /// setScheduleModifer() can fill them in order during deserialization.
  explicit OMPScheduleClause()
      : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {
    Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
    Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
  }

  /// Get kind of the clause.
  OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }

  /// Get the first modifier of the clause.
  OpenMPScheduleClauseModifier getFirstScheduleModifier() const {
    return Modifiers[FIRST];
  }

  /// Get the second modifier of the clause.
  OpenMPScheduleClauseModifier getSecondScheduleModifier() const {
    return Modifiers[SECOND];
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getScheduleKindLoc() { return KindLoc; }

  /// Get the first modifier location.
  SourceLocation getFirstScheduleModifierLoc() const {
    return ModifiersLoc[FIRST];
  }

  /// Get the second modifier location.
  SourceLocation getSecondScheduleModifierLoc() const {
    return ModifiersLoc[SECOND];
  }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  /// The only child exposed is the chunk-size expression; the cast turns
  /// the Expr* slot into the Stmt** range children() must return.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_schedule;
  }
};
/// Represents the 'ordered' clause of an OpenMP directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// Here '#pragma omp for' carries an 'ordered' clause with parameter 2.
/// Unlike 'collapse', the expression is optional, so the stored pointer
/// may legitimately be null.
class OMPOrderedClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression for the number of associated for-loops, if any.
  Stmt *NumForLoops = nullptr;

  /// Record the loop-count expression (used by the clause reader).
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build an 'ordered' clause, possibly holding expression \a Num.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderedClause(Expr *Num, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num) {}

  /// Build an empty clause with invalid locations.
  explicit OMPOrderedClause()
      : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number of associated for-loops, or null if absent.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_ordered;
  }

  /// The only (possibly null) child is the loop-count expression.
  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
};
/// Represents the argument-less 'nowait' clause of an OpenMP directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// Only the clause's source range is stored; there are no children.
class OMPNowaitClause : public OMPClause {
public:
  /// Build a 'nowait' clause covering [\p StartLoc, \p EndLoc].
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nowait, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid locations.
  OMPNowaitClause()
      : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {}

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nowait;
  }

  /// No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// Represents the argument-less 'untied' clause of an OpenMP directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// Only the clause's source range is stored; there are no children.
class OMPUntiedClause : public OMPClause {
public:
  /// Build an 'untied' clause covering [\p StartLoc, \p EndLoc].
  OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_untied, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid locations.
  OMPUntiedClause()
      : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {}

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_untied;
  }

  /// No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// Represents the argument-less 'mergeable' clause of an OpenMP directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// Only the clause's source range is stored; there are no children.
class OMPMergeableClause : public OMPClause {
public:
  /// Build a 'mergeable' clause covering [\p StartLoc, \p EndLoc].
  OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid locations.
  OMPMergeableClause()
      : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {}

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_mergeable;
  }

  /// No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// Represents the argument-less 'read' clause of '#pragma omp atomic'.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// Only the clause's source range is stored; there are no children.
class OMPReadClause : public OMPClause {
public:
  /// Build a 'read' clause covering [\p StartLoc, \p EndLoc].
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_read, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid locations.
  OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {}

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_read;
  }

  /// No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// Represents the argument-less 'write' clause of '#pragma omp atomic'.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// Only the clause's source range is stored; there are no children.
class OMPWriteClause : public OMPClause {
public:
  /// Build a 'write' clause covering [\p StartLoc, \p EndLoc].
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_write, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid locations.
  OMPWriteClause()
      : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {}

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_write;
  }

  /// No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// Represents the argument-less 'update' clause of '#pragma omp atomic'.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// Only the clause's source range is stored; there are no children.
class OMPUpdateClause : public OMPClause {
public:
  /// Build an 'update' clause covering [\p StartLoc, \p EndLoc].
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_update, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid locations.
  OMPUpdateClause()
      : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {}

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_update;
  }

  /// No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// Represents the argument-less 'capture' clause of '#pragma omp atomic'.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// Only the clause's source range is stored; there are no children.
class OMPCaptureClause : public OMPClause {
public:
  /// Build a 'capture' clause covering [\p StartLoc, \p EndLoc].
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_capture, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid locations.
  OMPCaptureClause()
      : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {}

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_capture;
  }

  /// No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// Represents the argument-less 'seq_cst' clause of '#pragma omp atomic'.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// Only the clause's source range is stored; there are no children.
class OMPSeqCstClause : public OMPClause {
public:
  /// Build a 'seq_cst' clause covering [\p StartLoc, \p EndLoc].
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid locations.
  OMPSeqCstClause()
      : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {}

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_seq_cst;
  }

  /// No child statements.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
///
/// Storage layout: the variable list (inherited from OMPVarListClause) is
/// followed, in the same trailing-object allocation, by one Expr* per
/// variable referencing the private copy with its initializer. That is why
/// getPrivateCopies() below starts at varlist_end().
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc,
                                           EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           N) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables. The copies live immediately after the variable
  /// list in trailing storage (one per variable).
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  /// Iterate over the private-copy expressions (one per variable).
  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  /// Children are the original variable references only; the private
  /// copies are not exposed through children().
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_private;
  }
};
/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
///
/// Storage layout: the variable list is followed in trailing storage by
/// (1) one private-copy expression per variable, then (2) one initializer
/// expression per variable — see getPrivateCopies() and getInits(), which
/// compute their starting positions from the previous array's end.
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc,
                                                LParenLoc, EndLoc, N),
        OMPClauseWithPreInit(this) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFirstprivateClause(unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(
            OMPC_firstprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPreInit(this) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables. Copies start right after the variable list.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new
  /// private variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new
  /// private variables. Inits start right after the private copies.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  /// \param PrivateVL List of references to private copies with initializers.
  /// \param InitVL List of references to auto generated variables used for
  /// initialization of a single array element. Used if firstprivate variable is
  /// of array type.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  static OMPFirstprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
         ArrayRef<Expr *> InitVL, Stmt *PreInit);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  /// Iterate over the private-copy expressions (one per variable).
  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  /// Iterate over the per-variable initializer expressions.
  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  /// Children are the original variable references only; copies and inits
  /// are not exposed through children().
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_firstprivate;
  }
};
/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  //
  // Each tail array has varlist_size() elements and is laid out contiguously
  // after the previous one; the getters below chain off each other's end(),
  // so their order must match the order the arrays are written in.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables.
  /// Tail array 1: starts immediately after the variable list.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  /// Tail array 2: starts where the private copies end.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  /// Tail array 3: starts where the source expressions end.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  /// Tail array 4: starts where the destination expressions end.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  /// Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }
  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }
  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  // Only the variable list itself is exposed as Stmt children; the helper
  // expression arrays are reachable through the accessors above.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_lastprivate;
  }
};
/// This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
class OMPSharedClause final
    : public OMPVarListClause<OMPSharedClause>,
      private llvm::TrailingObjects<OMPSharedClause, Expr *> {
  // Unlike most var-list clauses, 'shared' needs no helper expressions:
  // the only tail-allocated data is the variable list itself.
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPSharedClause(unsigned N)
      : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_shared;
  }
};
/// This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  // Tail-allocated storage layout (each array has varlist_size() elements,
  // laid out back-to-back): [variables][privates][LHS exprs][RHS exprs]
  // [reduction ops]. The getters below chain off each other's end(), so
  // their order must match the order the arrays are written in.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
                                             LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
                                             SourceLocation(), SourceLocation(),
                                             N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CutomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  // Only the variable list itself is exposed as Stmt children; the helper
  // expression arrays are reachable through the accessors above.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reduction;
  }
};
/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  // Tail-allocated storage layout (each array has varlist_size() elements,
  // laid out back-to-back): [variables][privates][LHS exprs][RHS exprs]
  // [reduction ops]. The getters below chain off each other's end(), so
  // their order must match the order the arrays are written in.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc,
                                                 LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            OMPC_task_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent private copy of the reduction variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CutomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  // Only the variable list itself is exposed as Stmt children; the helper
  // expression arrays are reachable through the accessors above.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_task_reduction;
  }
};
/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  // Tail-allocated storage layout (each array has varlist_size() elements,
  // laid out back-to-back): [variables][privates][LHS exprs][RHS exprs]
  // [reduction ops][taskgroup descriptors]. The getters below chain off each
  // other's end(), so their order must match the order the arrays are
  // written in. Note the extra fifth helper array relative to
  // OMPReductionClause/OMPTaskReductionClause: the taskgroup descriptors.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(OMPC_in_reduction, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent private copy of the reduction variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction taskgroup descriptors.
  void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction taskgroup descriptors.
  MutableArrayRef<Expr *> getTaskgroupDescriptors() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getTaskgroupDescriptors() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CutomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPInReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }
  helper_expr_const_range taskgroup_descriptors() const {
    return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                   getTaskgroupDescriptors().end());
  }
  helper_expr_range taskgroup_descriptors() {
    return helper_expr_range(getTaskgroupDescriptors().begin(),
                             getTaskgroupDescriptors().end());
  }

  // Only the variable list itself is exposed as Stmt children; the helper
  // expression arrays are reachable through the accessors above.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_in_reduction;
  }
};
/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;
  /// Modifier of 'linear' clause.
  OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;
  /// Location of linear modifier if any.
  SourceLocation ModifierLoc;
  /// Location of ':'.
  SourceLocation ColonLoc;
  /// Sets the linear step for clause.
  /// NOTE: the step occupies the first of the 2 helper slots allocated
  /// immediately after the finals list (see the storage-layout comment at
  /// getPrivates()), so dereferencing one past getFinals().end() stays
  /// inside the tail-allocated storage.
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }
  /// Sets the expression to calculate linear step for clause.
  /// Stored in the second helper slot following the finals list.
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }
  /// Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
                  SourceLocation ColonLoc, SourceLocation EndLoc,
                  unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
                                          EndLoc, NumVars),
        OMPClauseWithPostUpdate(this), Modifier(Modifier),
        ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}
  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPLinearClause(unsigned NumVars)
      : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          NumVars),
        OMPClauseWithPostUpdate(this) {}
  /// Gets the list of initial values for linear variables.
  ///
  /// There are NumVars expressions with initial values allocated after the
  /// varlist, they are followed by NumVars update expressions (used to update
  /// the linear variable's value on current iteration) and they are followed by
  /// NumVars final expressions (used to calculate the linear variable's
  /// value after the loop body). After these lists, there are 2 helper
  /// expressions - linear step and a helper to calculate it before the
  /// loop body (used when the linear step is not constant):
  ///
  /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
  ///   Finals[]; Step; CalcStep; }
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }
  // Each list below starts where the previous one ends; all have
  // varlist_size() elements, matching the layout documented above.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }
  /// Sets the list of update expressions for linear variables.
  MutableArrayRef<Expr *> getUpdates() {
    return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
  }
  ArrayRef<const Expr *> getUpdates() const {
    return llvm::makeArrayRef(getInits().end(), varlist_size());
  }
  /// Sets the list of final update expressions for linear variables.
  MutableArrayRef<Expr *> getFinals() {
    return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getFinals() const {
    return llvm::makeArrayRef(getUpdates().end(), varlist_size());
  }
  /// Sets the list of the copies of original linear variables.
  /// \param PL List of expressions.
  void setPrivates(ArrayRef<Expr *> PL);
  /// Sets the list of the initial values for linear variables.
  /// \param IL List of expressions.
  void setInits(ArrayRef<Expr *> IL);
public:
  /// Creates clause with a list of variables \a VL and a linear step
  /// \a Step.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Modifier Modifier of 'linear' clause.
  /// \param ModifierLoc Modifier location.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PL List of private copies of original variables.
  /// \param IL List of initial values for the variables.
  /// \param Step Linear step.
  /// \param CalcStep Calculation of the linear step.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLinearClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
         Stmt *PreInit, Expr *PostUpdate);
  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
  /// Set modifier.
  void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }
  /// Return modifier.
  OpenMPLinearClauseKind getModifier() const { return Modifier; }
  /// Set modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
  /// Return modifier location.
  SourceLocation getModifierLoc() const { return ModifierLoc; }
  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }
  /// Returns linear step.
  /// Read from the first helper slot past the finals list (see setStep).
  Expr *getStep() { return *(getFinals().end()); }
  /// Returns linear step.
  const Expr *getStep() const { return *(getFinals().end()); }
  /// Returns expression to calculate linear step.
  Expr *getCalcStep() { return *(getFinals().end() + 1); }
  /// Returns expression to calculate linear step.
  const Expr *getCalcStep() const { return *(getFinals().end() + 1); }
  /// Sets the list of update expressions for linear variables.
  /// \param UL List of expressions.
  void setUpdates(ArrayRef<Expr *> UL);
  /// Sets the list of final update expressions for linear variables.
  /// \param FL List of expressions.
  void setFinals(ArrayRef<Expr *> FL);
  /// Iterator/range types over the tail-allocated helper expression lists.
  using privates_iterator = MutableArrayRef<Expr *>::iterator;
  using privates_const_iterator = ArrayRef<const Expr *>::iterator;
  using privates_range = llvm::iterator_range<privates_iterator>;
  using privates_const_range = llvm::iterator_range<privates_const_iterator>;
  privates_range privates() {
    return privates_range(getPrivates().begin(), getPrivates().end());
  }
  privates_const_range privates() const {
    return privates_const_range(getPrivates().begin(), getPrivates().end());
  }
  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;
  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }
  using updates_iterator = MutableArrayRef<Expr *>::iterator;
  using updates_const_iterator = ArrayRef<const Expr *>::iterator;
  using updates_range = llvm::iterator_range<updates_iterator>;
  using updates_const_range = llvm::iterator_range<updates_const_iterator>;
  updates_range updates() {
    return updates_range(getUpdates().begin(), getUpdates().end());
  }
  updates_const_range updates() const {
    return updates_const_range(getUpdates().begin(), getUpdates().end());
  }
  using finals_iterator = MutableArrayRef<Expr *>::iterator;
  using finals_const_iterator = ArrayRef<const Expr *>::iterator;
  using finals_range = llvm::iterator_range<finals_iterator>;
  using finals_const_range = llvm::iterator_range<finals_const_iterator>;
  finals_range finals() {
    return finals_range(getFinals().begin(), getFinals().end());
  }
  finals_const_range finals() const {
    return finals_const_range(getFinals().begin(), getFinals().end());
  }
  /// Children of the clause: the expressions in the variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  /// LLVM-style RTTI: true iff \a T is a 'linear' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_linear;
  }
};
/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;
  /// Location of ':'.
  SourceLocation ColonLoc;
  /// Sets the alignment for clause.
  /// The alignment expression occupies the single tail-allocated slot that
  /// immediately follows the variable list, hence writing to *varlist_end().
  void setAlignment(Expr *A) { *varlist_end() = A; }
  /// Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
                                           EndLoc, NumVars),
        ColonLoc(ColonLoc) {}
  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           NumVars) {}
public:
  /// Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
  static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation ColonLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  Expr *A);
  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }
  /// Returns alignment.
  /// Read from the single trailing slot past the variable list (see
  /// setAlignment).
  Expr *getAlignment() { return *varlist_end(); }
  /// Returns alignment.
  const Expr *getAlignment() const { return *varlist_end(); }
  /// Children of the clause: the expressions in the variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  /// LLVM-style RTTI: true iff \a T is an 'aligned' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_aligned;
  }
};
/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays:
  // 1. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;
  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
                                          EndLoc, N) {}
  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}
  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);
  /// Get the list of helper source expressions.
  /// The three helper lists are laid out back-to-back after the variable
  /// list (sources, then destinations, then assignment ops), each of
  /// varlist_size() elements.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }
  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);
  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }
  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }
public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);
  /// Iterator/range types over the tail-allocated helper expression lists.
  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;
  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }
  /// Children of the clause: the expressions in the variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  /// LLVM-style RTTI: true iff \a T is a 'copyin' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyin;
  }
};
/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;
  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
                                               LParenLoc, EndLoc, N) {}
  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}
  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);
  /// Get the list of helper source expressions.
  /// Three helper lists (sources, destinations, assignment ops) follow the
  /// variable list back-to-back, each of varlist_size() elements.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }
  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);
  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }
  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }
public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
  /// Iterator/range types over the tail-allocated helper expression lists.
  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;
  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }
  /// Children of the clause: the expressions in the variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  /// LLVM-style RTTI: true iff \a T is a 'copyprivate' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyprivate;
  }
};
/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;
  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
                                         EndLoc, N) {}
  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
                                         SourceLocation(), SourceLocation(),
                                         N) {}
public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc, SourceLocation EndLoc,
                                ArrayRef<Expr *> VL);
  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);
  /// Children of the clause: the expressions in the variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  /// LLVM-style RTTI: true iff \a T is a 'flush' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_flush;
  }
};
/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'depend' with the
/// variables 'a' and 'b' and dependency 'in'.
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;
  /// Dependency type (one of in, out, inout).
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
  /// Dependency type location.
  SourceLocation DepLoc;
  /// Colon location.
  SourceLocation ColonLoc;
  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
                                          EndLoc, N) {}
  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPDependClause(unsigned N)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}
  /// Set dependency kind.
  void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }
  /// Set dependency kind and its location.
  void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }
  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param DepKind Dependency type.
  /// \param DepLoc Location of the dependency type.
  /// \param ColonLoc Colon location.
  /// \param VL List of references to the variables.
  static OMPDependClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, OpenMPDependClauseKind DepKind,
         SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL);
  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N);
  /// Get dependency type.
  OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
  /// Get dependency type location.
  SourceLocation getDependencyLoc() const { return DepLoc; }
  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }
  /// Set the loop counter value for the depend clauses with 'sink|source' kind
  /// of dependency. Required for codegen.
  /// Defined out-of-line (see the corresponding .cpp file).
  void setCounterValue(Expr *V);
  /// Get the loop counter value.
  Expr *getCounterValue();
  /// Get the loop counter value.
  const Expr *getCounterValue() const;
  /// Children of the clause: the expressions in the variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  /// LLVM-style RTTI: true iff \a T is a 'depend' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_depend;
  }
};
/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Device number.
  /// Stored as a Stmt* so children() can expose it; accessors cast it back
  /// to Expr*.
  Stmt *Device = nullptr;
  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }
public:
  /// Build 'device' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                  SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Device(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }
  /// Build an empty clause.
  OMPDeviceClause()
      : OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }
  /// Return device number.
  /// NOTE(review): const overload returns a non-const Expr* — verify this
  /// matches the conventions used by sibling single-expression clauses.
  Expr *getDevice() const { return cast<Expr>(Device); }
  /// The single child is the device-number expression.
  child_range children() { return child_range(&Device, &Device + 1); }
  /// LLVM-style RTTI: true iff \a T is a 'device' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_device;
  }
};
/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
  /// Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_threads, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPThreadsClause()
      : OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}
  /// The clause carries no expressions, so the children range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  /// LLVM-style RTTI: true iff \a T is a 'threads' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_threads;
  }
};
/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
  /// Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simd, StartLoc, EndLoc) {}
  /// Build an empty clause.
  OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}
  /// The clause carries no expressions, so the children range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  /// LLVM-style RTTI: true iff \a T is a 'simd' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simd;
  }
};
/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  /// One component of a mappable expression. E.g. for an expression S.a,
  /// the first component is a declaration reference expression associated
  /// with 'S' and the second is a member expression associated with the
  /// field declaration 'a'. Array subscripts/sections may have no
  /// associated declaration; in that case it is null.
  class MappableComponent {
    /// Expression associated with the component.
    Expr *AssociatedExpression = nullptr;

    /// Declaration associated with the component, always stored in its
    /// canonical form; null for components without a declaration (e.g.
    /// array subscripts or sections).
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() = default;
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration)
        : AssociatedExpression(AssociatedExpression) {
      // Canonicalize the declaration so that different references to the
      // same entity compare equal.
      if (AssociatedDeclaration)
        this->AssociatedDeclaration =
            cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl());
    }

    Expr *getAssociatedExpression() const { return AssociatedExpression; }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // List of components of an expression. The first one is the whole
  // expression and the last one is the base expression.
  using MappableExprComponentList = SmallVector<MappableComponent, 8>;
  using MappableExprComponentListRef = ArrayRef<MappableComponent>;

  // All component lists that share the same base declaration. E.g. if both
  // 'S.a' and 'S.b' are mappable expressions, each has its own component
  // list but the same base declaration 'S'.
  using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
  using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
  // Total number of elements across a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // Total number of distinct declarations in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};
/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from'
/// in '#pragma omp target update...' directives.
///
/// The clause data lives in the trailing objects of the derived clause \a T,
/// as four consecutive arrays: the unique base declarations, the number of
/// component lists per declaration, the cumulative component-list sizes, and
/// the flattened components themselves (see the get*Ref accessors below).
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
public OMPClauseMappableExprCommon {
friend class OMPClauseReader;
/// Number of unique declarations in this clause.
unsigned NumUniqueDeclarations;
/// Number of component lists in this clause.
unsigned NumComponentLists;
/// Total number of components in this clause.
unsigned NumComponents;
protected:
/// Build a clause for \a NumUniqueDeclarations declarations, \a
/// NumComponentLists total component lists, and \a NumComponents total
/// components.
///
/// \param K Kind of the clause.
/// \param StartLoc Starting location of the clause (the clause keyword).
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause - one
/// list for each expression in the clause.
/// \param NumComponents Total number of expression components in the clause.
OMPMappableExprListClause(OpenMPClauseKind K, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPVarListClause<T>(K, StartLoc, LParenLoc, EndLoc, NumVars),
NumUniqueDeclarations(NumUniqueDeclarations),
NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
/// Get the unique declarations that are in the trailing objects of the
/// class.
MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
return MutableArrayRef<ValueDecl *>(
static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// Get the unique declarations that are in the trailing objects of the
/// class.
ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
return ArrayRef<ValueDecl *>(
static_cast<const T *>(this)
->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// Set the unique declarations that are in the trailing objects of the
/// class.
void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
assert(UDs.size() == NumUniqueDeclarations &&
"Unexpected amount of unique declarations.");
std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
}
/// Get the number of lists per declaration that are in the trailing
/// objects of the class.
MutableArrayRef<unsigned> getDeclNumListsRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// Get the number of lists per declaration that are in the trailing
/// objects of the class.
ArrayRef<unsigned> getDeclNumListsRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// Set the number of lists per declaration that are in the trailing
/// objects of the class.
void setDeclNumLists(ArrayRef<unsigned> DNLs) {
assert(DNLs.size() == NumUniqueDeclarations &&
"Unexpected amount of list numbers.");
std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
}
/// Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists
/// (both live in the same trailing 'unsigned' array).
MutableArrayRef<unsigned> getComponentListSizesRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
ArrayRef<unsigned> getComponentListSizesRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// Set the cumulative component lists sizes that are in the trailing
/// objects of the class.
void setComponentListSizes(ArrayRef<unsigned> CLSs) {
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of component lists.");
std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
}
/// Get the components that are in the trailing objects of the class.
MutableArrayRef<MappableComponent> getComponentsRef() {
return MutableArrayRef<MappableComponent>(
static_cast<T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// Get the components that are in the trailing objects of the class.
ArrayRef<MappableComponent> getComponentsRef() const {
return ArrayRef<MappableComponent>(
static_cast<const T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// Set the components that are in the trailing objects of the class.
/// This requires the list sizes so that it can also fill the original
/// expressions, which are the first component of each list.
void setComponents(ArrayRef<MappableComponent> Components,
ArrayRef<unsigned> CLSs) {
assert(Components.size() == NumComponents &&
"Unexpected amount of component lists.");
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of list sizes.");
std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
}
/// Fill the clause information from the list of declarations and
/// associated component lists. Populates all four trailing arrays in one
/// pass, grouping the component lists by base declaration.
void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists) {
// Perform some checks to make sure the data sizes are consistent with the
// information available when the clause was created.
assert(getUniqueDeclarationsTotalNumber(Declarations) ==
NumUniqueDeclarations &&
"Unexpected number of mappable expression info entries!");
assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
"Unexpected total number of components!");
assert(Declarations.size() == ComponentLists.size() &&
"Declaration and component lists size is not consistent!");
assert(Declarations.size() == NumComponentLists &&
"Unexpected declaration and component lists size!");
// Organize the components by declaration and retrieve the original
// expression. Original expressions are always the first component of the
// mappable component list.
// MapVector preserves insertion order, so the trailing storage below is
// filled in the order declarations were first seen.
llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
ComponentListMap;
{
auto CI = ComponentLists.begin();
for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
++DI, ++CI) {
assert(!CI->empty() && "Invalid component list!");
ComponentListMap[*DI].push_back(*CI);
}
}
// Iterators of the target storage.
auto UniqueDeclarations = getUniqueDeclsRef();
auto UDI = UniqueDeclarations.begin();
auto DeclNumLists = getDeclNumListsRef();
auto DNLI = DeclNumLists.begin();
auto ComponentListSizes = getComponentListSizesRef();
auto CLSI = ComponentListSizes.begin();
auto Components = getComponentsRef();
auto CI = Components.begin();
// Variable to compute the accumulation of the number of components.
unsigned PrevSize = 0u;
// Scan all the declarations and associated component lists.
for (auto &M : ComponentListMap) {
// The declaration.
auto *D = M.first;
// The component lists.
auto CL = M.second;
// Initialize the entry.
*UDI = D;
++UDI;
*DNLI = CL.size();
++DNLI;
// Obtain the cumulative sizes and concatenate all the components in the
// reserved storage.
for (auto C : CL) {
// Accumulate with the previous size.
PrevSize += C.size();
// Save the size.
*CLSI = PrevSize;
++CLSI;
// Append components after the current components iterator.
CI = std::copy(C.begin(), C.end(), CI);
}
}
}
public:
/// Return the number of unique base declarations in this clause.
unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }
/// Return the number of lists derived from the clause expressions.
unsigned getTotalComponentListNum() const { return NumComponentLists; }
/// Return the total number of components in all lists derived from the
/// clause.
unsigned getTotalComponentsNum() const { return NumComponents; }
/// Iterator that browse the components by lists. It also allows
/// browsing components of a single declaration.
/// The inherited base iterator (this->I) points into the flattened
/// component storage; list boundaries are recovered from the cumulative
/// list sizes.
class const_component_lists_iterator
: public llvm::iterator_adaptor_base<
const_component_lists_iterator,
MappableExprComponentListRef::const_iterator,
std::forward_iterator_tag, MappableComponent, ptrdiff_t,
MappableComponent, MappableComponent> {
// The declaration the iterator currently refers to.
ArrayRef<ValueDecl *>::iterator DeclCur;
// The list number associated with the current declaration.
ArrayRef<unsigned>::iterator NumListsCur;
// Remaining lists for the current declaration.
unsigned RemainingLists = 0;
// The cumulative size of the previous list, or zero if there is no previous
// list.
unsigned PrevListSize = 0;
// The cumulative sizes of the current list - it will delimit the remaining
// range of interest.
ArrayRef<unsigned>::const_iterator ListSizeCur;
ArrayRef<unsigned>::const_iterator ListSizeEnd;
// Iterator to the end of the components storage.
MappableExprComponentListRef::const_iterator End;
public:
/// Construct an iterator that scans all lists.
explicit const_component_lists_iterator(
ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
ArrayRef<unsigned> CumulativeListSizes,
MappableExprComponentListRef Components)
: const_component_lists_iterator::iterator_adaptor_base(
Components.begin()),
DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
ListSizeCur(CumulativeListSizes.begin()),
ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
assert(UniqueDecls.size() == DeclsListNum.size() &&
"Inconsistent number of declarations and list sizes!");
if (!DeclsListNum.empty())
RemainingLists = *NumListsCur;
}
/// Construct an iterator that scan lists for a given declaration \a
/// Declaration.
explicit const_component_lists_iterator(
const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
MappableExprComponentListRef Components)
: const_component_lists_iterator(UniqueDecls, DeclsListNum,
CumulativeListSizes, Components) {
// Look for the desired declaration. While we are looking for it, we
// update the state so that we know the component where a given list
// starts.
for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
if (*DeclCur == Declaration)
break;
assert(*NumListsCur > 0 && "No lists associated with declaration??");
// Skip the lists associated with the current declaration, but save the
// last list size that was skipped.
std::advance(ListSizeCur, *NumListsCur - 1);
PrevListSize = *ListSizeCur;
++ListSizeCur;
}
// If we didn't find any declaration, advance the iterator to after the
// last component and set remaining lists to zero.
if (ListSizeCur == CumulativeListSizes.end()) {
this->I = End;
RemainingLists = 0u;
return;
}
// Set the remaining lists with the total number of lists of the current
// declaration.
RemainingLists = *NumListsCur;
// Adjust the list size end iterator to the end of the relevant range.
ListSizeEnd = ListSizeCur;
std::advance(ListSizeEnd, RemainingLists);
// Given that the list sizes are cumulative, the index of the component
// that start the list is the size of the previous list.
std::advance(this->I, PrevListSize);
}
// Return the array with the current list. The sizes are cumulative, so the
// array size is the difference between the current size and previous one.
std::pair<const ValueDecl *, MappableExprComponentListRef>
operator*() const {
assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
return std::make_pair(
*DeclCur,
MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
}
std::pair<const ValueDecl *, MappableExprComponentListRef>
operator->() const {
return **this;
}
// Skip the components of the current list.
const_component_lists_iterator &operator++() {
assert(ListSizeCur != ListSizeEnd && RemainingLists &&
"Invalid iterator!");
// If we don't have more lists just skip all the components. Otherwise,
// advance the iterator by the number of components in the current list.
if (std::next(ListSizeCur) == ListSizeEnd) {
this->I = End;
RemainingLists = 0;
} else {
std::advance(this->I, *ListSizeCur - PrevListSize);
PrevListSize = *ListSizeCur;
// We are done with a declaration, move to the next one.
if (!(--RemainingLists)) {
++DeclCur;
++NumListsCur;
RemainingLists = *NumListsCur;
assert(RemainingLists && "No lists in the following declaration??");
}
}
++ListSizeCur;
return *this;
}
};
using const_component_lists_range =
llvm::iterator_range<const_component_lists_iterator>;
/// Iterators for all component lists.
const_component_lists_iterator component_lists_begin() const {
return const_component_lists_iterator(
getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
getComponentsRef());
}
const_component_lists_iterator component_lists_end() const {
return const_component_lists_iterator(
ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
MappableExprComponentListRef(getComponentsRef().end(),
getComponentsRef().end()));
}
const_component_lists_range component_lists() const {
return {component_lists_begin(), component_lists_end()};
}
/// Iterators for component lists associated with the provided
/// declaration.
const_component_lists_iterator
decl_component_lists_begin(const ValueDecl *VD) const {
return const_component_lists_iterator(
VD, getUniqueDeclsRef(), getDeclNumListsRef(),
getComponentListSizesRef(), getComponentsRef());
}
const_component_lists_iterator decl_component_lists_end() const {
return component_lists_end();
}
const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
return {decl_component_lists_begin(VD), decl_component_lists_end()};
}
/// Iterators to access all the declarations, number of lists, list sizes, and
/// components.
using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;
const_all_decls_range all_decls() const {
auto A = getUniqueDeclsRef();
return const_all_decls_range(A.begin(), A.end());
}
using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
using const_all_num_lists_range =
llvm::iterator_range<const_all_num_lists_iterator>;
const_all_num_lists_range all_num_lists() const {
auto A = getDeclNumListsRef();
return const_all_num_lists_range(A.begin(), A.end());
}
using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
using const_all_lists_sizes_range =
llvm::iterator_range<const_all_lists_sizes_iterator>;
const_all_lists_sizes_range all_lists_sizes() const {
auto A = getComponentListSizesRef();
return const_all_lists_sizes_range(A.begin(), A.end());
}
using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
using const_all_components_range =
llvm::iterator_range<const_all_components_iterator>;
const_all_components_range all_components() const {
auto A = getComponentsRef();
return const_all_components_range(A.begin(), A.end());
}
};
/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
private llvm::TrailingObjects<
OMPMapClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly. The overloads follow
/// the template argument order above (Expr *, ValueDecl *, unsigned); the
/// size of the final MappableComponent array is never needed.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
// The single 'unsigned' array holds both the per-declaration list counts
// and the cumulative component-list sizes, back to back.
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
/// Map type modifier for the 'map' clause.
OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
/// Map type for the 'map' clause.
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
/// Is this an implicit map type or not.
bool MapTypeIsImplicit = false;
/// Location of the map type.
SourceLocation MapLoc;
/// Colon location.
SourceLocation ColonLoc;
/// Build a clause for \a NumVars listed expressions, \a
/// NumUniqueDeclarations declarations, \a NumComponentLists total component
/// lists, and \a NumComponents total expression components.
///
/// \param MapTypeModifier Map type modifier.
/// \param MapType Map type.
/// \param MapTypeIsImplicit Map type is inferred implicitly.
/// \param MapLoc Location of the map type.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPMapClause(OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
SourceLocation MapLoc, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(OMPC_map, StartLoc, LParenLoc, EndLoc,
NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents),
MapTypeModifier(MapTypeModifier), MapType(MapType),
MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {}
/// Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPMapClause(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(
OMPC_map, SourceLocation(), SourceLocation(), SourceLocation(),
NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {}
/// Set type modifier for the clause.
///
/// \param T Type Modifier for the clause.
void setMapTypeModifier(OpenMPMapClauseKind T) { MapTypeModifier = T; }
/// Set type for the clause.
///
/// \param T Type for the clause.
void setMapType(OpenMPMapClauseKind T) { MapType = T; }
/// Set type location.
///
/// \param TLoc Type location.
void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
/// \param TypeModifier Map type modifier.
/// \param Type Map type.
/// \param TypeIsImplicit Map type is inferred implicitly.
/// \param TypeLoc Location of the map type.
static OMPMapClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists,
OpenMPMapClauseKind TypeModifier,
OpenMPMapClauseKind Type, bool TypeIsImplicit,
SourceLocation TypeLoc);
/// Creates an empty clause with the place for \a NumVars original
/// expressions, \a NumUniqueDeclarations declarations, \a NumComponentLists
/// lists, and \a NumComponents expression components.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this
/// clause.
/// \param NumComponents Total number of expression components in the clause.
static OMPMapClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
/// Fetches mapping kind for the clause.
OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }
/// Is this an implicit map type?
/// We have to capture 'IsMapTypeImplicit' from the parser for more
/// informative error messages. It helps distinguish map(r) from
/// map(tofrom: r), which is important to print more helpful error
/// messages for some target directives.
bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }
/// Fetches the map type modifier for the clause.
OpenMPMapClauseKind getMapTypeModifier() const LLVM_READONLY {
return MapTypeModifier;
}
/// Fetches location of clause mapping kind.
SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }
/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Children are the original list expressions stored in the trailing
/// Expr * array (reinterpreted as Stmt * for the generic child interface).
child_range children() {
return child_range(
reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_map;
}
};
/// Representation of the OpenMP 'num_teams' clause.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// Here the '#pragma omp teams' directive carries a 'num_teams' clause whose
/// single expression is 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression holding the requested number of teams.
  Stmt *NumTeams = nullptr;

  /// Record the num_teams expression.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause (used when deserializing).
  OMPNumTeamsClause()
      : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }

  /// Return NumTeams number.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_teams;
  }

  /// The single child is the num_teams expression.
  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }
};
/// Representation of the OpenMP 'thread_limit' clause.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// Here the '#pragma omp teams' directive carries a 'thread_limit' clause
/// whose single expression is 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression holding the requested thread limit.
  Stmt *ThreadLimit = nullptr;

  /// Record the thread_limit expression.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPThreadLimitClause(Expr *E, Stmt *HelperE,
                       OpenMPDirectiveKind CaptureRegion,
                       SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc)
      : OMPClause(OMPC_thread_limit, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause (used when deserializing).
  OMPThreadLimitClause()
      : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_thread_limit;
  }

  /// The single child is the thread_limit expression.
  child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); }
};
/// Representation of the OpenMP 'priority' clause.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// Here the '#pragma omp task' directive carries a 'priority' clause whose
/// single expression is 'n'.
class OMPPriorityClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression holding the requested priority.
  Stmt *Priority = nullptr;

  /// Record the priority expression.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }

public:
  /// Build 'priority' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_priority, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Priority(E) {}

  /// Build an empty clause (used when deserializing).
  OMPPriorityClause()
      : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_priority;
  }

  /// The single child is the priority expression.
  child_range children() { return child_range(&Priority, &Priority + 1); }
};
/// Representation of the OpenMP 'grainsize' clause.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// Here the '#pragma omp taskloop' directive carries a 'grainsize' clause
/// whose single expression is '4'.
class OMPGrainsizeClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression giving the requested grain size.
  Stmt *Grainsize = nullptr;

  /// Record the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                     SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_grainsize, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Grainsize(Size) {}

  /// Build an empty clause (used when deserializing).
  explicit OMPGrainsizeClause()
      : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the grainsize expression, or null if it was not set.
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_grainsize;
  }

  /// The single child is the grainsize expression.
  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }
};
/// Representation of the OpenMP 'nogroup' clause.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// Here the '#pragma omp taskloop' directive carries the simple
/// (argument-less) 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build a 'nogroup' clause spanning [\p StartLoc, \p EndLoc].
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}

  /// Build an empty clause (used when deserializing).
  OMPNogroupClause()
      : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nogroup;
  }

  /// The clause carries no expressions, so there are no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Expression of the 'num_tasks' clause.
Stmt *NumTasks = nullptr;
/// Set the number-of-tasks expression.
void setNumTasks(Expr *Size) { NumTasks = Size; }
public:
/// Build 'num_tasks' clause.
///
/// \param Size Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPNumTasksClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_num_tasks, StartLoc, EndLoc), LParenLoc(LParenLoc),
NumTasks(Size) {}
/// Build an empty clause (all locations invalid, no expression).
explicit OMPNumTasksClause()
: OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the number-of-tasks expression, or null for an empty clause.
Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }
child_range children() { return child_range(&NumTasks, &NumTasks + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_num_tasks;
}
};
/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Hint expression of the 'hint' clause.
Stmt *Hint = nullptr;
/// Set hint expression.
void setHint(Expr *H) { Hint = H; }
public:
/// Build 'hint' clause with expression \a Hint.
///
/// \param Hint Hint expression.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
Hint(Hint) {}
/// Build an empty clause (all locations invalid, no expression).
OMPHintClause() : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns the hint expression, or null for an empty clause.
Expr *getHint() const { return cast_or_null<Expr>(Hint); }
child_range children() { return child_range(&Hint, &Hint + 1); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_hint;
}
};
/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'dist_schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  /// Const-qualified for consistency with the other clause classes
  /// (e.g. OMPGrainsizeClause::getLParenLoc).
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() const { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() const { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dist_schedule;
  }
};
/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of kind
/// 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Modifiers for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// Locations of modifiers.
  SourceLocation ModifierLoc;

  /// A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) {
    ModifierLoc = Loc;
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}

  /// Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  /// Const-qualified for consistency with getDefaultmapModifierLoc() and
  /// the accessors of the other clause classes.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() const { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const {
    return ModifierLoc;
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_defaultmap;
  }
};
/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
private llvm::TrailingObjects<
OMPToClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPToClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(OMPC_to, StartLoc, LParenLoc, EndLoc, NumVars,
NumUniqueDeclarations, NumComponentLists,
NumComponents) {}
/// Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPToClause(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(
OMPC_to, SourceLocation(), SourceLocation(), SourceLocation(),
NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
/// One 'unsigned' per unique declaration plus one per component list —
/// NOTE(review): presumably per-declaration and per-list counts; confirm
/// against OMPMappableExprListClause's storage layout.
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPToClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
static OMPToClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_to;
}
};
/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
: public OMPMappableExprListClause<OMPFromClause>,
private llvm::TrailingObjects<
OMPFromClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPFromClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(OMPC_from, StartLoc, LParenLoc, EndLoc,
NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents) {}
/// Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPFromClause(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(
OMPC_from, SourceLocation(), SourceLocation(), SourceLocation(),
NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
/// One 'unsigned' per unique declaration plus one per component list —
/// NOTE(review): presumably per-declaration and per-list counts; confirm
/// against OMPMappableExprListClause's storage layout.
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPFromClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
static OMPFromClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_from;
}
};
/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
: public OMPMappableExprListClause<OMPUseDevicePtrClause>,
private llvm::TrailingObjects<
OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPUseDevicePtrClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents)
: OMPMappableExprListClause(OMPC_use_device_ptr, StartLoc, LParenLoc,
EndLoc, NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents) {}
/// Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPUseDevicePtrClause(unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents)
: OMPMappableExprListClause(OMPC_use_device_ptr, SourceLocation(),
SourceLocation(), SourceLocation(), NumVars,
NumUniqueDeclarations, NumComponentLists,
NumComponents) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
/// Three consecutive Expr* arrays of varlist_size() elements each are
/// stored: the original variable list, the private copies, and their
/// initializers (see getPrivateCopies() and getInits() below), hence the
/// factor of 3.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return 3 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
/// Sets the list of references to private copies with initializers for new
/// private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for new
/// private variables. Stored immediately after the variable list.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Sets the list of references to initializer variables for new private
/// variables.
/// \param VL List of references.
void setInits(ArrayRef<Expr *> VL);
/// Gets the list of references to initializer variables for new private
/// variables. Stored immediately after the private copies.
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param PrivateVars Expressions referring to private copies.
/// \param Inits Expressions referring to private copy initializers.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPUseDevicePtrClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> Vars,
ArrayRef<Expr *> PrivateVars, ArrayRef<Expr *> Inits,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
static OMPUseDevicePtrClause *CreateEmpty(const ASTContext &C,
unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_use_device_ptr;
}
};
/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
: public OMPMappableExprListClause<OMPIsDevicePtrClause>,
private llvm::TrailingObjects<
OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPIsDevicePtrClause(SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents)
: OMPMappableExprListClause(OMPC_is_device_ptr, StartLoc, LParenLoc,
EndLoc, NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents) {}
/// Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
explicit OMPIsDevicePtrClause(unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents)
: OMPMappableExprListClause(OMPC_is_device_ptr, SourceLocation(),
SourceLocation(), SourceLocation(), NumVars,
NumUniqueDeclarations, NumComponentLists,
NumComponents) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPIsDevicePtrClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
static OMPIsDevicePtrClause *CreateEmpty(const ASTContext &C,
unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_is_device_ptr;
}
};
} // namespace clang
#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
|
mpncecat.c | /* $Header$ */
/* ncecat -- netCDF ensemble concatenator */
/* Purpose: Join variables across files into a new record variable */
/* Copyright (C) 1995--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License.
You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits
libraries and to distribute the resulting executables under the terms
of the BSD, but in addition obeying the extra stipulations of the
HDF, netCDF, OPeNDAP, and UDUnits licenses.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the 3-Clause BSD License for more details.
The original author of this software, Charlie Zender, seeks to improve
it with your suggestions, contributions, bug-reports, and patches.
Please contact the NCO project at http://nco.sf.net or write to
Charlie Zender
Department of Earth System Science
University of California, Irvine
Irvine, CA 92697-3100 */
#ifdef HAVE_CONFIG_H
# include <config.h> /* Autotools tokens */
#endif /* !HAVE_CONFIG_H */
/* Standard header files */
#include <math.h> /* sin cos cos sin 3.14159 */
#include <stdio.h> /* stderr, FILE, NULL, etc. */
#include <stdlib.h> /* atof, atoi, malloc, getopt */
#include <string.h> /* strcmp() */
#include <sys/stat.h> /* stat() */
#include <time.h> /* machine time */
#include <unistd.h> /* POSIX stuff */
#ifndef HAVE_GETOPT_LONG
# include "nco_getopt.h"
#else /* HAVE_GETOPT_LONG */
# ifdef HAVE_GETOPT_H
# include <getopt.h>
# endif /* !HAVE_GETOPT_H */
#endif /* HAVE_GETOPT_LONG */
/* 3rd party vendors */
#include <netcdf.h> /* netCDF definitions and C library */
#ifdef ENABLE_MPI
# include <mpi.h> /* MPI definitions */
# include "nco_mpi.h" /* MPI utilities */
#endif /* !ENABLE_MPI */
/* Personal headers */
/* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */
#define MAIN_PROGRAM_FILE
#include "libnco.h" /* netCDF Operator (NCO) library */
int
main(int argc,char **argv)
{
char **fl_lst_abb=NULL; /* Option a */
char **fl_lst_in;
char **gaa_arg=NULL; /* [sng] Global attribute arguments */
char **var_lst_in=NULL_CEWI;
char *aux_arg[NC_MAX_DIMS];
char *cmd_ln;
char *cnk_arg[NC_MAX_DIMS];
char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */
char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */
char *fl_in=NULL;
char *fl_out=NULL; /* Option o */
char *fl_out_tmp=NULL; /* MPI CEWI */
char *fl_pth=NULL; /* Option p */
char *fl_pth_lcl=NULL; /* Option l */
char *lmt_arg[NC_MAX_DIMS];
char *opt_crr=NULL; /* [sng] String representation of current long-option name */
char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */
char *rec_dmn_nm=NULL; /* [sng] New record dimension name */
char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
const char * const CVS_Id="$Id$";
const char * const CVS_Revision="$Revision$";
const char * const opt_sht_lst="34567ACcD:d:FHhL:l:n:Oo:p:rRSt:u:v:X:x-:";
cnk_dmn_sct **cnk_dmn=NULL_CEWI;
dmn_sct *rec_dmn;
dmn_sct **dim;
dmn_sct **dmn_out;
extern char *optarg;
extern int optind;
/* Using naked stdin/stdout/stderr in parallel region generates warning
Copy appropriate filehandle to variable scoped shared in parallel clause */
FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */
FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */
int *in_id_arr;
int abb_arg_nbr=0;
int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */
int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */
int cnk_nbr=0; /* [nbr] Number of chunk sizes */
int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int fl_idx;
int fl_nbr=0;
int fl_in_fmt; /* [enm] Input file format */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int gaa_nbr=0; /* [nbr] Number of global attributes to add */
int idx;
int jdx;
int in_id;
int lmt_nbr=0; /* Option d. NB: lmt_nbr gets incremented */
int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */
int md_open; /* [enm] Mode flag for nc_open() call */
int nbr_dmn_fl;
int nbr_dmn_xtr;
int nbr_var_fix; /* nbr_var_fix gets incremented */
int nbr_var_fl;
int nbr_var_prc; /* nbr_var_prc gets incremented */
int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */
int opt;
int out_id;
int rcd=NC_NOERR; /* [rcd] Return code */
int rec_dmn_id=NCO_REC_DMN_UNDEFINED;
int thr_idx; /* [idx] Index of current thread */
int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */
int var_lst_in_nbr=0;
lmt_sct **aux=NULL_CEWI; /* Auxiliary coordinate limits */
lmt_sct **lmt;
lmt_all_sct **lmt_all_lst; /* List of *lmt_all structures */
long idx_rec_out=0L; /* idx_rec_out gets incremented */
cnv_sct *cnv; /* [sct] Convention structure */
nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */
nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */
nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */
nco_bool FL_RTR_RMT_LCN;
nco_bool FL_LST_IN_APPEND=True; /* Option H */
nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */
nco_bool FORCE_APPEND=False; /* Option A */
nco_bool FORCE_OVERWRITE=False; /* Option O */
nco_bool FORTRAN_IDX_CNV=False; /* Option F */
nco_bool HISTORY_APPEND=True; /* Option h */
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */
nco_bool flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
nm_id_sct *dmn_lst;
nm_id_sct *xtr_lst=NULL; /* xtr_lst may be alloc()'d from NULL with -c option */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */
size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */
size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */
size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */
size_t hdr_pad=0UL; /* [B] Pad at end of header section */
var_sct **var;
var_sct **var_fix;
var_sct **var_fix_out;
var_sct **var_out;
var_sct **var_prc;
var_sct **var_prc_out;
#ifdef ENABLE_MPI
/* Declare all MPI-specific variables here */
MPI_Comm mpi_cmm=MPI_COMM_WORLD; /* [prc] Communicator */
MPI_Info mpi_nfo=MPI_INFO_NULL; /* [sct] File geometry hints */
MPI_Status mpi_stt; /* [enm] Status check to decode msg_tag_typ */
nco_bool TKN_WRT_FREE=True; /* [flg] Write-access to output file is available */
int fl_nm_lng; /* [nbr] Output file name length */
int msg_bfr[msg_bfr_lng]; /* [bfr] Buffer containing var, idx, tkn_wrt_rsp */
int msg_tag_typ; /* [enm] MPI message tag type */
int prc_rnk; /* [idx] Process rank */
int prc_nbr=0; /* [nbr] Number of MPI processes */
int tkn_wrt_rsp; /* [enm] Response to request for write token */
int var_wrt_nbr=0; /* [nbr] Variables written to output file until now */
int rnk_wrk; /* [idx] Worker rank */
int wrk_id_bfr[wrk_id_bfr_lng]; /* [bfr] Buffer for rnk_wrk */
#endif /* !ENABLE_MPI */
static struct option opt_lng[]={ /* Structure ordered by short option key if possible */
/* Long options with no argument, no short option counterpart */
{"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"msa_usr_rdr",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
{"msa_user_order",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
{"ram_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */
{"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */
{"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */
{"diskless_all",no_argument,0,0}, /* [flg] Open (netCDF3) and create file(s) in RAM */
{"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */
{"version",no_argument,0,0},
{"vrs",no_argument,0,0},
/* Long options with argument, no short option counterpart */
{"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */
{"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */
{"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */
{"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */
{"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */
{"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */
{"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */
{"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */
{"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"fl_fmt",required_argument,0,0},
{"file_format",required_argument,0,0},
{"gaa",required_argument,0,0}, /* [sng] Global attribute add */
{"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */
{"hdr_pad",required_argument,0,0},
{"header_pad",required_argument,0,0},
{"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
/* Long options with short counterparts */
{"3",no_argument,0,'3'},
{"4",no_argument,0,'4'},
{"netcdf4",no_argument,0,'4'},
{"5",no_argument,0,'5'},
{"64bit_data",no_argument,0,'5'},
{"cdf5",no_argument,0,'5'},
{"pnetcdf",no_argument,0,'5'},
{"64bit_offset",no_argument,0,'6'},
{"7",no_argument,0,'7'},
{"append",no_argument,0,'A'},
{"coords",no_argument,0,'c'},
{"crd",no_argument,0,'c'},
{"xtr_ass_var",no_argument,0,'c'},
{"xcl_ass_var",no_argument,0,'C'},
{"no_coords",no_argument,0,'C'},
{"no_crd",no_argument,0,'C'},
{"debug",required_argument,0,'D'},
{"nco_dbg_lvl",required_argument,0,'D'},
{"dimension",required_argument,0,'d'},
{"dmn",required_argument,0,'d'},
{"fortran",no_argument,0,'F'},
{"ftn",no_argument,0,'F'},
{"fl_lst_in",no_argument,0,'H'},
{"file_list",no_argument,0,'H'},
{"history",no_argument,0,'h'},
{"hst",no_argument,0,'h'},
{"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */
{"deflate",required_argument,0,'L'}, /* [enm] Deflate level */
{"local",required_argument,0,'l'},
{"lcl",required_argument,0,'l'},
{"nintap",required_argument,0,'n'},
{"overwrite",no_argument,0,'O'},
{"ovr",no_argument,0,'O'},
{"output",required_argument,0,'o'},
{"fl_out",required_argument,0,'o'},
{"path",required_argument,0,'p'},
{"retain",no_argument,0,'R'},
{"rtn",no_argument,0,'R'},
{"revision",no_argument,0,'r'},
{"suspend", no_argument,0,'S'},
{"thr_nbr",required_argument,0,'t'},
{"threads",required_argument,0,'t'},
{"omp_num_threads",required_argument,0,'t'},
{"ulm_nm",required_argument,0,'u'},
{"rcd_nm",required_argument,0,'u'},
{"variable",required_argument,0,'v'},
{"auxiliary",required_argument,0,'X'},
{"exclude",no_argument,0,'x'},
{"xcl",no_argument,0,'x'},
{"help",no_argument,0,'?'},
{"hlp",no_argument,0,'?'},
{0,0,0,0}
}; /* end opt_lng */
int opt_idx=0; /* Index of current long option into opt_lng array */
#ifdef ENABLE_MPI
/* MPI Initialization */
MPI_Init(&argc,&argv);
MPI_Comm_size(mpi_cmm,&prc_nbr);
MPI_Comm_rank(mpi_cmm,&prc_rnk);
#endif /* !ENABLE_MPI */
/* Start clock and save command line */
cmd_ln=nco_cmd_ln_sng(argc,argv);
/* Get program name and set program enum (e.g., nco_prg_id=ncra) */
nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id);
/* Parse command line arguments */
while(1){
/* getopt_long() requires the double-dash prefix for long options; switch to getopt_long_only() to also allow a single dash */
opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx);
/* NB: access to opt_crr is only valid when long_opt is detected */
if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */
opt_crr=(char *)strdup(opt_lng[opt_idx].name);
/* Process long options without short option counterparts */
if(opt == 0){
if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){
bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif bfr_sz_hnt */
if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){
cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_byt */
if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){
cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_min */
if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){
/* Copy limit argument for later processing */
cnk_arg[cnk_nbr]=(char *)strdup(optarg);
cnk_nbr++;
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){
cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){
/* Chunking map */
cnk_map_sng=(char *)strdup(optarg);
cnk_map=nco_cnk_map_get(cnk_map_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){
/* Chunking policy */
cnk_plc_sng=(char *)strdup(optarg);
cnk_plc=nco_cnk_plc_get(cnk_plc_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt);
if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){
gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *));
gaa_arg[gaa_nbr++]=(char *)strdup(optarg);
} /* endif gaa */
if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){
hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif "hdr_pad" */
if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){
log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
nc_set_log_level(log_lvl);
} /* !log_lvl */
if(!strcmp(opt_crr,"msa_usr_rdr") || !strcmp(opt_crr,"msa_user_order")) MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create file in RAM */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */
if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
nco_exit(EXIT_SUCCESS);
} /* endif "vrs" */
if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True;
if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False;
} /* opt != 0 */
/* Process short options */
switch(opt){
case 0: /* Long options have already been processed above, break to next option */
break;
case '3': /* Request netCDF3 output storage format */
fl_out_fmt=NC_FORMAT_CLASSIC;
break;
case '4': /* Request netCDF4 output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4;
break;
case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */
fl_out_fmt=NC_FORMAT_CDF5;
break;
case '6': /* Request netCDF3 64-bit offset output storage format */
fl_out_fmt=NC_FORMAT_64BIT_OFFSET;
break;
case '7': /* Request netCDF4-classic output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC;
break;
case 'A': /* Toggle FORCE_APPEND */
FORCE_APPEND=!FORCE_APPEND;
break;
case 'C': /* Extract all coordinates associated with extracted variables? */
EXTRACT_ASSOCIATED_COORDINATES=False;
break;
case 'c':
EXTRACT_ALL_COORDINATES=True;
break;
case 'D': /* Debugging level. Default is 0. */
nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
break;
case 'd': /* Copy limit argument for later processing */
lmt_arg[lmt_nbr]=(char *)strdup(optarg);
lmt_nbr++;
break;
case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */
FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV;
break;
case 'H': /* Toggle writing input file list attribute */
FL_LST_IN_APPEND=!FL_LST_IN_APPEND;
break;
case 'h': /* Toggle appending to history global attribute */
HISTORY_APPEND=!HISTORY_APPEND;
break;
case 'L': /* [enm] Deflate level. Default is 0. */
dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'l': /* Local path prefix for files retrieved from remote file system */
fl_pth_lcl=(char *)strdup(optarg);
break;
case 'n': /* NINTAP-style abbreviation of files to process */
fl_lst_abb=nco_lst_prs_2D(optarg,",",&abb_arg_nbr);
if(abb_arg_nbr < 1 || abb_arg_nbr > 6){
(void)fprintf(stdout,"%s: ERROR Incorrect abbreviation for file list\n",nco_prg_nm);
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
} /* end if */
break;
case 'O': /* Toggle FORCE_OVERWRITE */
FORCE_OVERWRITE=!FORCE_OVERWRITE;
break;
case 'o': /* Name of output file */
fl_out=(char *)strdup(optarg);
break;
case 'p': /* Common file path */
fl_pth=(char *)strdup(optarg);
break;
case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */
RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC;
break;
case 'r': /* Print CVS program information and copyright notice */
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
(void)nco_lbr_vrs_prn();
(void)nco_cpy_prn();
(void)nco_cnf_prn();
nco_exit(EXIT_SUCCESS);
break;
#ifdef ENABLE_MPI
case 'S': /* Suspend with signal handler to facilitate debugging */
if(signal(SIGUSR1,nco_cnt_run) == SIG_ERR) (void)fprintf(fp_stdout,"%s: ERROR Could not install suspend handler.\n",nco_prg_nm);
while(!nco_spn_lck_brk) usleep(nco_spn_lck_us); /* Spinlock. fxm: should probably insert a sched_yield */
break;
#endif /* !ENABLE_MPI */
case 't': /* Thread number */
thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'u': /* New record dimension name */
rec_dmn_nm=(char *)strdup(optarg);
break;
case 'v': /* Variables to extract/exclude */
/* Replace commas with hashes when within braces (convert back later) */
optarg_lcl=(char *)strdup(optarg);
(void)nco_rx_comma2hash(optarg_lcl);
var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr);
optarg_lcl=(char *)nco_free(optarg_lcl);
xtr_nbr=var_lst_in_nbr;
break;
case 'X': /* Copy auxiliary coordinate argument for later processing */
aux_arg[aux_nbr]=(char *)strdup(optarg);
aux_nbr++;
MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */
break;
case 'x': /* Exclude rather than extract variables specified with -v */
EXCLUDE_INPUT_LIST=True;
break;
case '?': /* Print proper usage */
(void)nco_usg_prn();
nco_exit(EXIT_SUCCESS);
break;
case '-': /* Long options are not allowed */
(void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
break;
default: /* Print proper usage */
(void)fprintf(stdout,"%s ERROR in command-line syntax/options. Please reformulate command accordingly.\n",nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
break;
} /* end switch */
if(opt_crr) opt_crr=(char *)nco_free(opt_crr);
} /* end while loop */
/* Process positional arguments and fill-in filenames */
fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE);
/* Make uniform list of user-specified chunksizes */
if(cnk_nbr > 0) cnk_dmn=nco_cnk_prs(cnk_nbr,cnk_arg);
/* Make uniform list of user-specified dimension limits */
lmt=nco_lmt_prs(lmt_nbr,lmt_arg);
/* Initialize thread information */
thr_nbr=nco_openmp_ini(thr_nbr);
in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int));
/* Parse filename */
fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
/* Make sure file is on local system and is readable or die trying */
fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id);
/* Parse auxiliary coordinates */
if(aux_nbr > 0){
int aux_idx_nbr;
aux=nco_aux_evl(in_id,aux_nbr,aux_arg,&aux_idx_nbr);
if(aux_idx_nbr > 0){
lmt=(lmt_sct **)nco_realloc(lmt,(lmt_nbr+aux_idx_nbr)*sizeof(lmt_sct *));
int lmt_nbr_new=lmt_nbr+aux_idx_nbr;
int aux_idx=0;
for(int lmt_idx=lmt_nbr;lmt_idx<lmt_nbr_new;lmt_idx++) lmt[lmt_idx]=aux[aux_idx++];
lmt_nbr=lmt_nbr_new;
} /* endif aux */
} /* endif aux_nbr */
/* Get number of variables, dimensions, and record dimension ID of input file */
(void)nco_inq(in_id,&nbr_dmn_fl,&nbr_var_fl,(int *)NULL,&rec_dmn_id);
(void)nco_inq_format(in_id,&fl_in_fmt);
/* Form initial extraction list which may include extended regular expressions */
xtr_lst=nco_var_lst_mk(in_id,nbr_var_fl,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr);
/* Change included variables to excluded variables */
if(EXCLUDE_INPUT_LIST) xtr_lst=nco_var_lst_xcl(in_id,nbr_var_fl,xtr_lst,&xtr_nbr);
/* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */
cnv=nco_cnv_ini(in_id);
/* Add all coordinate variables to extraction list */
if(EXTRACT_ALL_COORDINATES) xtr_lst=nco_var_lst_crd_add(in_id,nbr_dmn_fl,nbr_var_fl,xtr_lst,&xtr_nbr,cnv);
/* Extract coordinates associated with extracted variables */
if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst=nco_var_lst_crd_ass_add(in_id,xtr_lst,&xtr_nbr,cnv);
/* Sort extraction list by variable ID for fastest I/O */
if(xtr_nbr > 1) xtr_lst=nco_lst_srt_nm_id(xtr_lst,xtr_nbr,False);
/* We now have final list of variables to extract. Phew. */
/* Find coordinate/dimension values associated with user-specified limits
NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
for(idx=0;idx<lmt_nbr;idx++) (void)nco_lmt_evl(in_id,lmt[idx],0L,FORTRAN_IDX_CNV);
/* Place all dimensions in lmt_all_lst */
lmt_all_lst=(lmt_all_sct **)nco_malloc(nbr_dmn_fl*sizeof(lmt_all_sct *));
/* Initialize lmt_all_sct's */
(void)nco_msa_lmt_all_ntl(in_id,MSA_USR_RDR,lmt_all_lst,nbr_dmn_fl,lmt,lmt_nbr);
/* Find dimensions associated with variables to be extracted */
dmn_lst=nco_dmn_lst_ass_var(in_id,xtr_lst,xtr_nbr,&nbr_dmn_xtr);
/* Fill-in dimension structure for all extracted dimensions */
dim=(dmn_sct **)nco_malloc(nbr_dmn_xtr*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_xtr;idx++) dim[idx]=nco_dmn_fll(in_id,dmn_lst[idx].id,dmn_lst[idx].nm);
/* Dimension list no longer needed */
dmn_lst=nco_nm_id_lst_free(dmn_lst,nbr_dmn_xtr);
/* Merge hyperslab limit information into dimension structures */
if(lmt_nbr > 0) (void)nco_dmn_lmt_mrg(dim,nbr_dmn_xtr,lmt,lmt_nbr);
/* Duplicate input dimension structures for output dimension structures */
dmn_out=(dmn_sct **)nco_malloc(nbr_dmn_xtr*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_xtr;idx++){
dmn_out[idx]=nco_dmn_dpl(dim[idx]);
(void)nco_dmn_xrf(dim[idx],dmn_out[idx]);
} /* end loop over idx */
/* Fill-in variable structure list for all extracted variables */
var=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
for(idx=0;idx<xtr_nbr;idx++){
var[idx]=nco_var_fll(in_id,xtr_lst[idx].id,xtr_lst[idx].nm,dim,nbr_dmn_xtr);
var_out[idx]=nco_var_dpl(var[idx]);
(void)nco_xrf_var(var[idx],var_out[idx]);
(void)nco_xrf_dmn(var_out[idx]);
} /* end loop over idx */
/* Extraction list no longer needed */
xtr_lst=nco_nm_id_lst_free(xtr_lst,xtr_nbr);
/* Divide variable lists into lists of fixed variables and variables to be processed */
(void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,(dmn_sct **)NULL,0,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc);
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
#endif /* !ENABLE_MPI */
/* Make output and input files consanguinous */
if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt;
/* Verify output file format supports requested actions */
(void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl);
/* Open output file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,WRT_TMP_FL,&out_id);
/* Copy global attributes */
(void)nco_att_cpy(in_id,out_id,NC_GLOBAL,NC_GLOBAL,(nco_bool)True);
/* Catenate time-stamped command line to "history" global attribute */
if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln);
if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id);
if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr);
if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id);
if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr);
#ifdef ENABLE_MPI
/* Initialize MPI task information */
if(prc_nbr > 0 && HISTORY_APPEND) (void)nco_mpi_att_cat(out_id,prc_nbr);
#endif /* !ENABLE_MPI */
/* Add input file list global attribute */
if(FL_LST_IN_APPEND && HISTORY_APPEND && FL_LST_IN_FROM_STDIN) (void)nco_fl_lst_att_cat(out_id,fl_lst_in,fl_nbr);
#ifdef ENABLE_MPI
} /* prc_rnk != rnk_mgr */
#endif /* !ENABLE_MPI */
/* ncecat-specific operations */
if(True){
/* Always construct new "record" dimension from scratch */
rec_dmn=(dmn_sct *)nco_malloc(sizeof(dmn_sct));
if(rec_dmn_nm == NULL) rec_dmn->nm=rec_dmn_nm=(char *)strdup("record"); else rec_dmn->nm=rec_dmn_nm;
rec_dmn->id=-1;
rec_dmn->nc_id=-1;
rec_dmn->xrf=NULL;
rec_dmn->val.vp=NULL;
rec_dmn->is_crd_dmn=False;
rec_dmn->is_rec_dmn=True;
rec_dmn->sz=0L;
rec_dmn->cnt=0L;
rec_dmn->srd=0L;
rec_dmn->srt=0L;
rec_dmn->end=rec_dmn->sz-1L;
/* Change existing record dimension, if any, to regular dimension */
for(idx=0;idx<nbr_dmn_xtr;idx++){
/* Is any input dimension a record dimension? */
if(dmn_out[idx]->is_rec_dmn){
dmn_out[idx]->is_rec_dmn=False;
break;
} /* end if */
} /* end loop over idx */
/* Add record dimension to end of dimension list */
nbr_dmn_xtr++;
dmn_out=(dmn_sct **)nco_realloc(dmn_out,nbr_dmn_xtr*sizeof(dmn_sct **));
dmn_out[nbr_dmn_xtr-1]=rec_dmn;
} /* end if */
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
#endif /* !ENABLE_MPI */
/* Define dimensions in output file */
(void)nco_dmn_dfn(fl_out,out_id,dmn_out,nbr_dmn_xtr);
#ifdef ENABLE_MPI
} /* prc_rnk != rnk_mgr */
#endif /* !ENABLE_MPI */
if(True){
/* Prepend record dimension to beginning of all vectors for processed variables */
for(idx=0;idx<nbr_var_prc;idx++){
var_prc_out[idx]->nbr_dim++;
var_prc_out[idx]->is_rec_var=True;
var_prc_out[idx]->sz_rec=var_prc_out[idx]->sz;
/* Allocate space to hold dimension IDs */
var_prc_out[idx]->dim=(dmn_sct **)nco_realloc(var_prc_out[idx]->dim,var_prc_out[idx]->nbr_dim*sizeof(dmn_sct *));
var_prc_out[idx]->dmn_id=(int *)nco_realloc(var_prc_out[idx]->dmn_id,var_prc_out[idx]->nbr_dim*sizeof(int));
var_prc_out[idx]->cnt=(long *)nco_realloc(var_prc_out[idx]->cnt,var_prc_out[idx]->nbr_dim*sizeof(long int));
var_prc_out[idx]->end=(long *)nco_realloc(var_prc_out[idx]->end,var_prc_out[idx]->nbr_dim*sizeof(long int));
var_prc_out[idx]->srd=(long *)nco_realloc(var_prc_out[idx]->srd,var_prc_out[idx]->nbr_dim*sizeof(long int));
var_prc_out[idx]->srt=(long *)nco_realloc(var_prc_out[idx]->srt,var_prc_out[idx]->nbr_dim*sizeof(long int));
/* Move current array by one to make room for new record dimension info */
(void)memmove((void *)(var_prc_out[idx]->dim+1),(void *)(var_prc_out[idx]->dim),(var_prc_out[idx]->nbr_dim-1)*sizeof(dmn_sct *));
(void)memmove((void *)(var_prc_out[idx]->dmn_id+1),(void *)(var_prc_out[idx]->dmn_id),(var_prc_out[idx]->nbr_dim-1)*sizeof(int));
(void)memmove((void *)(var_prc_out[idx]->cnt+1),(void *)(var_prc_out[idx]->cnt),(var_prc_out[idx]->nbr_dim-1)*sizeof(long int));
(void)memmove((void *)(var_prc_out[idx]->end+1),(void *)(var_prc_out[idx]->end),(var_prc_out[idx]->nbr_dim-1)*sizeof(long int));
(void)memmove((void *)(var_prc_out[idx]->srd+1),(void *)(var_prc_out[idx]->srd),(var_prc_out[idx]->nbr_dim-1)*sizeof(long int));
(void)memmove((void *)(var_prc_out[idx]->srt+1),(void *)(var_prc_out[idx]->srt),(var_prc_out[idx]->nbr_dim-1)*sizeof(long int));
/* Insert value for new record dimension */
var_prc_out[idx]->dim[0]=rec_dmn;
var_prc_out[idx]->dmn_id[0]=rec_dmn->id;
var_prc_out[idx]->cnt[0]=1L;
var_prc_out[idx]->end[0]=-1L;
var_prc_out[idx]->srd[0]=-1L;
var_prc_out[idx]->srt[0]=-1L;
} /* end loop over idx */
} /* end if */
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
#endif /* !ENABLE_MPI */
/* Define variables in output file, copy their attributes */
(void)nco_var_dfn(in_id,fl_out,out_id,var_out,xtr_nbr,(dmn_sct **)NULL,(int)0,nco_pck_plc_nil,nco_pck_map_nil,dfl_lvl);
#ifdef ENABLE_MPI
} /* prc_rnk != rnk_mgr */
#endif /* !ENABLE_MPI */
/* Assign zero to start and unity to stride vectors in output variables */
(void)nco_var_srd_srt_set(var_out,xtr_nbr);
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
#endif /* !ENABLE_MPI */
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Take output file out of define mode */
if(hdr_pad == 0UL){
(void)nco_enddef(out_id);
}else{
(void)nco__enddef(out_id,hdr_pad);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad);
} /* hdr_pad */
#ifdef ENABLE_MPI
} /* prc_rnk != rnk_mgr */
/* Manager obtains output filename and broadcasts to workers */
if(prc_rnk == rnk_mgr) fl_nm_lng=(int)strlen(fl_out_tmp);
MPI_Bcast(&fl_nm_lng,1,MPI_INT,0,mpi_cmm);
if(prc_rnk != rnk_mgr) fl_out_tmp=(char *)nco_malloc((fl_nm_lng+1)*sizeof(char));
MPI_Bcast(fl_out_tmp,fl_nm_lng+1,MPI_CHAR,0,mpi_cmm);
if(prc_rnk == rnk_mgr){ /* MPI manager code */
TKN_WRT_FREE=False;
#endif /* !ENABLE_MPI */
/* Copy variable data for non-processed variables */
(void)nco_var_val_cpy(in_id,out_id,var_fix,nbr_var_fix);
#ifdef ENABLE_MPI
/* Close output file so workers can open it */
nco_close(out_id);
TKN_WRT_FREE=True;
} /* prc_rnk != rnk_mgr */
#endif /* !ENABLE_MPI */
/* Close first input netCDF file */
(void)nco_close(in_id);
/* Loop over input files */
for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){
#ifdef ENABLE_MPI
MPI_Barrier(mpi_cmm);
#endif /* !ENABLE_MPI */
/* Parse filename */
if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,(int *)NULL,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stderr,"\nInput file %d is %s; ",fl_idx,fl_in);
/* Make sure file is on local system and is readable or die trying */
if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stderr,"local file %s:\n",fl_in);
/* Open file once per thread to improve caching */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx);
#if 0
/* fxm: netCDF4: Change to independent variable reads? */
#ifdef ENABLE_NETCDF4
rcd=nco_open_par(fl_in,NC_MPIIO|NC_NETCDF4,mpi_cmm,mpi_nfo,&in_id);
#endif /* !ENABLE_NETCDF4 */
#endif /* !0 */
/* Perform various error-checks on input file */
if(False) (void)nco_fl_cmp_err_chk();
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
/* Compensate for incrementing on each worker's first message */
var_wrt_nbr=-prc_nbr+1;
idx=0;
/* While variables remain to be processed or written... */
while(var_wrt_nbr < nbr_var_prc){
/* Receive message from any worker */
MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,mpi_cmm,&mpi_stt);
/* Obtain MPI message tag type */
msg_tag_typ=mpi_stt.MPI_TAG;
/* Get sender's prc_rnk */
rnk_wrk=wrk_id_bfr[0];
/* Allocate next variable, if any, to worker */
if(msg_tag_typ == msg_tag_wrk_rqs){
var_wrt_nbr++; /* [nbr] Number of variables written */
/* Worker closed output file before sending msg_tag_wrk_rqs */
TKN_WRT_FREE=True;
if(idx > nbr_var_prc-1){
msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */
msg_bfr[1]=out_id; /* Output file ID */
}else{
/* Tell requesting worker to allocate space for next variable */
msg_bfr[0]=idx; /* [idx] Variable to be processed */
msg_bfr[1]=out_id; /* Output file ID */
msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */
/* Point to next variable on list */
idx++;
} /* endif idx */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,mpi_cmm);
/* msg_tag_typ != msg_tag_wrk_rqs */
}else if(msg_tag_typ == msg_tag_tkn_wrt_rqs){
/* Allocate token if free, else ask worker to try later */
if(TKN_WRT_FREE){
TKN_WRT_FREE=False;
msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */
}else{
msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */
} /* !TKN_WRT_FREE */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,mpi_cmm);
} /* msg_tag_typ != msg_tag_tkn_wrt_rqs */
} /* end while var_wrt_nbr < nbr_var_prc */
}else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */
wrk_id_bfr[0]=prc_rnk;
while(1){ /* While work remains... */
/* Send msg_tag_wrk_rqs */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,mpi_cmm);
/* Receive msg_tag_wrk_rsp */
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,0,msg_tag_wrk_rsp,mpi_cmm,&mpi_stt);
idx=msg_bfr[0];
out_id=msg_bfr[1];
if(idx == idx_all_wrk_ass) break;
else{
var_prc_out[idx]->id=msg_bfr[2];
/* Process this variable same as UP code */
#else /* !ENABLE_MPI */
/* OpenMP with threading over variables, not files */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(idx,in_id) shared(nco_dbg_lvl,fl_nbr,idx_rec_out,in_id_arr,nbr_var_prc,out_id,var_prc,var_prc_out,lmt_all_lst,nbr_dmn_fl,jdx)
#endif /* !_OPENMP */
/* Process all variables in current file */
for(idx=0;idx<nbr_var_prc;idx++){
#endif /* !ENABLE_MPI */
/* Common code for UP and MPI */ /* fxm: requires C99 as is? */
in_id=in_id_arr[omp_get_thread_num()];
if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(fp_stderr,"%s, ",var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Variables may have different ID, missing_value, type, in each file */
(void)nco_var_mtd_refresh(in_id,var_prc[idx]);
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
/* Size of record dimension is 1 in output file */
var_prc_out[idx]->cnt[0]=1L;
var_prc_out[idx]->srt[0]=idx_rec_out;
#ifdef ENABLE_MPI
/* Obtain token and prepare to write */
while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,mpi_cmm);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,mpi_cmm,&mpi_stt);
tkn_wrt_rsp=msg_bfr[0];
/* Wait then re-send request */
if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break;
} /* end while loop waiting for write token */
/* Worker has token---prepare to write */
if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){
if(RAM_OPEN) md_open=NC_WRITE|NC_SHARE|NC_DISKLESS; else md_open=NC_WRITE|NC_SHARE;
rcd=nco_fl_open(fl_out_tmp,md_open,&bfr_sz_hnt,&out_id);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
#endif /* !ENABLE_MPI */
{ /* begin OpenMP critical */
/* Write variable into current record in output file */
if(var_prc[idx]->nbr_dim == 0){
(void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc[idx]->val.vp,var_prc[idx]->type);
}else{ /* end if variable is scalar */
(void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc[idx]->val.vp,var_prc[idx]->type);
} /* end if variable is array */
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* end OpenMP critical */
#ifdef ENABLE_MPI
/* Close output file and increment written counter */
nco_close(out_id);
var_wrt_nbr++;
} /* endif tkn_wrt_rqs_xcp */
} /* end else !idx_all_wrk_ass */
} /* end while loop requesting work/token */
} /* endif Worker */
#else /* !ENABLE_MPI */
} /* end (OpenMP parallel for) loop over idx */
#endif /* !ENABLE_MPI */
idx_rec_out++; /* [idx] Index of current record in output file (0 is first, ...) */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,"\n");
/* Close input netCDF file */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]);
/* Remove local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in);
#ifdef ENABLE_MPI
MPI_Barrier(mpi_cmm);
#endif /* !ENABLE_MPI */
} /* end loop over fl_idx */
#ifdef ENABLE_MPI
/* Manager moves output file (closed by workers) from temporary to permanent location */
if(prc_rnk == rnk_mgr) (void)nco_fl_mv(fl_out_tmp,fl_out);
#else /* !ENABLE_MPI */
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
#endif /* end !ENABLE_MPI */
/* Clean memory unless dirty memory allowed */
if(flg_mmr_cln){
/* ncecat-specific memory cleanup */
if(rec_dmn_nm) rec_dmn_nm=(char *)nco_free(rec_dmn_nm);
/* NB: free lmt[] is now referenced within lmt_all_lst[idx] */
for(idx=0;idx<nbr_dmn_fl;idx++)
for(jdx=0;jdx<lmt_all_lst[idx]->lmt_dmn_nbr;jdx++)
lmt_all_lst[idx]->lmt_dmn[jdx]=nco_lmt_free(lmt_all_lst[idx]->lmt_dmn[jdx]);
lmt=(lmt_sct**)nco_free(lmt);
if(nbr_dmn_fl > 0) lmt_all_lst=nco_lmt_all_lst_free(lmt_all_lst,nbr_dmn_fl);
/* NCO-generic clean-up */
/* Free individual strings/arrays */
if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln);
if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng);
if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng);
if(fl_in) fl_in=(char *)nco_free(fl_in);
if(fl_out) fl_out=(char *)nco_free(fl_out);
if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp);
if(fl_pth) fl_pth=(char *)nco_free(fl_pth);
if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl);
if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr);
/* Free lists of strings */
if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr);
if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1);
if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr);
if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr);
if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr);
/* Free limits */
for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]);
for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]);
if(aux_nbr > 0) aux=(lmt_sct **)nco_free(aux);
/* Free chunking information */
for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]);
if(cnk_nbr > 0) cnk_dmn=nco_cnk_lst_free(cnk_dmn,cnk_nbr);
/* Free dimension lists */
if(nbr_dmn_xtr > 0) dim=nco_dmn_lst_free(dim,nbr_dmn_xtr-1); /* NB: ncecat has one fewer input than output dimension */
if(nbr_dmn_xtr > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_xtr);
/* Free variable lists */
if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr);
if(xtr_nbr > 0) var_out=nco_var_lst_free(var_out,xtr_nbr);
var_prc=(var_sct **)nco_free(var_prc);
var_prc_out=(var_sct **)nco_free(var_prc_out);
var_fix=(var_sct **)nco_free(var_fix);
var_fix_out=(var_sct **)nco_free(var_fix_out);
} /* !flg_mmr_cln */
#ifdef ENABLE_MPI
MPI_Finalize();
#endif /* !ENABLE_MPI */
if(rcd != NC_NOERR) nco_err_exit(rcd,"main");
nco_exit_gracefully();
return EXIT_SUCCESS;
} /* end main() */
|
util.c |
#include "kt.h"
/* Return the largest value in array[0..N). N must be > 0.
 * The scan is parallelized with an OpenMP max reduction. */
int32_t max_elem(
    int32_t const * const restrict array,
    int64_t N)
{
  assert(N > 0);
  int32_t best = array[0];
  #pragma omp parallel for schedule(static) reduction(max: best)
  for(int64_t idx=0; idx < N; ++idx) {
    if(array[idx] > best) {
      best = array[idx];
    }
  }
  return best;
}
/* Return the largest out-degree over all vertices of G (0 for a graph
 * with no vertices). Parallel OpenMP max reduction over the CSR xadj. */
ssize_t graph_max_degree(
    gk_graph_t const * const G)
{
  ssize_t max_deg = 0;
  #pragma omp parallel for schedule(static) reduction(max: max_deg)
  for(int32_t u=0; u < G->nvtxs; ++u) {
    ssize_t const deg = G->xadj[u+1] - G->xadj[u];
    if(deg > max_deg) {
      max_deg = deg;
    }
  }
  return max_deg;
}
/* Build and return a newly allocated transpose of G: for every edge (v,u)
 * in G, T contains edge (u,v). The caller owns the returned graph.
 * NOTE(review): correctness of the parallel fill phase relies on G having
 * no multi-edges (see comment below) -- confirm this invariant at call sites. */
gk_graph_t * transpose_graph(
gk_graph_t const * const G)
{
int32_t const nvtxs = G->nvtxs;
int64_t const nedges = G->xadj[G->nvtxs];
gk_graph_t * T = gk_graph_Create();
T->nvtxs = nvtxs;
T->xadj = gk_zmalloc(nvtxs+1, "T->xadj");
T->adjncy = gk_i32malloc(nedges, "T->adjncy");
/* get counts for xadj: histogram of in-degrees. The atomic is required
 * because many edges may target the same destination vertex concurrently. */
par_memset(T->xadj, 0, (nvtxs+1) * sizeof(*T->xadj));
#pragma omp parallel for schedule(static)
for(int64_t e=0; e < nedges; ++e) {
#pragma omp atomic
T->xadj[G->adjncy[e]]++;
}
{
int32_t v;
/* prefix-sum the per-vertex counts into CSR start offsets */
MAKECSR(v, nvtxs, T->xadj);
}
/* fill in data */
#pragma omp parallel
for(int32_t v=0; v < nvtxs; ++v) {
/* parallel because there should be no multi edges, thus each thread will
 * have unique u vertices */
/* (the implicit barrier at the end of each `omp for` also orders the
 *  T->xadj[u]++ updates across successive values of v) */
#pragma omp for schedule(static)
for(ssize_t e = G->xadj[v]; e < G->xadj[v+1]; ++e) {
int32_t const u = G->adjncy[e];
T->adjncy[T->xadj[u]++] = v;
}
}
{
int32_t v;
/* undo the increments performed during the fill: restore start offsets */
SHIFTCSR(v, nvtxs, T->xadj);
}
#ifndef NDEBUG
/* make sure each adjncy is sorted (strictly increasing => no multi-edges) */
for(int32_t v=0; v < nvtxs; ++v) {
for(ssize_t e = T->xadj[v]+1; e < T->xadj[v+1]; ++e) {
assert(T->adjncy[e] > T->adjncy[e-1]);
}
}
#endif
return T;
}
/* Prepend an "island" vertex (a new vertex 0 with no edges) to `graph`,
 * modifying it in place. All existing vertex IDs shift up by one. */
void graph_add_island(
    gk_graph_t * graph)
{
  int32_t const nvtxs = graph->nvtxs + 1;
  /* ssize_t matches the element type of xadj[] and avoids truncating the
   * edge count for graphs with more than INT32_MAX edges (was int32_t). */
  ssize_t const nedges = graph->xadj[nvtxs-1];
  ssize_t * xadj = gk_zmalloc(nvtxs + 1, "island->xadj");
  /* copy offsets; the new vertex 0 gets an empty adjacency range [0,0) */
  xadj[0] = 0;
  for(int32_t v=0; v < nvtxs; ++v) {
    xadj[1+v] = graph->xadj[v];
  }
  /* shift adjncy values by 1 so old vertex v is now vertex v+1 */
  #pragma omp parallel for schedule(static)
  for(ssize_t e=0; e < nedges; ++e) {
    ++(graph->adjncy[e]);
  }
  graph->nvtxs++;
  /* BUG FIX: gk_free() takes the ADDRESS of each pointer to free (and NULLs
   * it). The previous code cast graph->xadj itself to (void **), which made
   * gk_free() free a garbage address read out of xadj[0] and leaked the
   * real offset array. */
  gk_free((void **) &graph->xadj, LTERM);
  graph->xadj = xadj;
}
/* memset() over `bytes` bytes of `s`, split into one contiguous chunk per
 * OpenMP thread (useful for NUMA first-touch placement). Returns `s`. */
void * par_memset(
    void * const s,
    int const c,
    size_t const bytes)
{
  #pragma omp parallel
  {
    size_t const nth = (size_t) omp_get_num_threads();
    size_t const tid = (size_t) omp_get_thread_num();
    /* ceil(bytes / nth) bytes per thread, clamped to the buffer end */
    size_t const chunk = (bytes + nth - 1) / nth;
    size_t start = chunk * tid;
    if(start > bytes) {
      start = bytes;
    }
    size_t stop = start + chunk;
    if(stop > bytes) {
      stop = bytes;
    }
    memset((char *) s + start, c, stop - start);
  }
  return s;
}
/* memcpy() of `bytes` bytes from `src` to `dest`, block-distributed over
 * OpenMP threads. The regions must not overlap (same contract as memcpy,
 * reinforced here by the restrict qualifiers). */
void par_memcpy(
    void * const dest,
    void const * const src,
    size_t const bytes)
{
  #pragma omp parallel
  {
    int const nthreads = omp_get_num_threads();
    int const tid = omp_get_thread_num();
    /* block distribution: ceil(bytes/nthreads) per thread, clamped */
    size_t const n_per_thread = (bytes + nthreads - 1)/nthreads;
    size_t const n_begin = gk_min(n_per_thread * tid, bytes);
    size_t const n_end = gk_min(n_begin + n_per_thread, bytes);
    char * const restrict dest_data = (char *) dest;
    /* FIX: keep the const qualifier -- the old cast to plain (char *)
     * silently discarded the const-ness of `src`. */
    char const * const restrict src_data = (char const *) src;
    memcpy(dest_data + n_begin, src_data + n_begin, n_end - n_begin);
  }
}
/* Return how many entries of supports[0..N) are strictly positive. */
int64_t count_nnz(
    int64_t const N,
    int32_t const * const supports)
{
  int64_t total = 0;
  #pragma omp parallel for schedule(static) reduction(+:total)
  for(int64_t idx = 0; idx < N; ++idx) {
    total += (supports[idx] > 0) ? 1 : 0;
  }
  return total;
}
/* Return the sum of the strictly-positive entries of supports[0..N);
 * zero and negative entries contribute nothing. */
int64_t count_support(
    int64_t const N,
    int32_t const * const supports)
{
  int64_t sum = 0;
  #pragma omp parallel for schedule(static) reduction(+:sum)
  for(int64_t idx = 0; idx < N; ++idx) {
    int32_t const val = supports[idx];
    sum += (val > 0) ? val : 0;
  }
  return sum;
}
/* Sort tris[0..ntriangles) in place by (u, v, w) ascending, using GKlib's
 * inlined quicksort macro. The sort is timed, but the report is compiled
 * out (see the `#if 0` block at the bottom). */
void sort_triangles(
triangle_t * const restrict tris,
int32_t ntriangles)
{
double sort_timer = 0;
gk_startwctimer(sort_timer);
/* tie breakers are u -> v -> w */
#define tri_lt(a,b) ( \
((a)->u < (b)->u) \
|| \
(((a)->u == (b)->u) && ((a)->v < (b)->v)) \
|| \
(((a)->u == (b)->u) && ((a)->v == (b)->v) && ((a)->w < (b)->w)) \
)
GK_MKQSORT(triangle_t, tris, ntriangles, tri_lt);
/* undef immediately: tri_lt exists only for the macro expansion above */
#undef tri_lt
gk_stopwctimer(sort_timer);
#if 0
printf("SORTING-SIZE: %d (%0.3fs)\n", ntriangles, sort_timer);
#endif
}
/* Sort `n` epair_t records in place by ascending ->key. */
void gk_epairsorti(size_t n, epair_t * base)
{
/* comparator macro scoped to this single GK_MKQSORT expansion */
#define epair_key_lt(a, b) ((a)->key < (b)->key)
  GK_MKQSORT(epair_t, base, n, epair_key_lt);
#undef epair_key_lt
}
|
convolutiondepthwise_3x3_pack16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, for the AVX-512 pack-16 layout
// (every "pixel" is 16 consecutive floats, one per packed channel).
// Each group/channel is processed independently; output rows are produced
// two at a time (sharing the input rows r1/r2), with the inner loop
// unrolled 4/2/1 output columns wide. All arithmetic is _mm512_fmadd_ps.
// Assumes bottom_blob rows are padded so that w == outw + 2.
static void convdw3x3s1_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
// per-group bias vector (zeros when no bias term is supplied)
__m512 _bias0 = bias ? _mm512_loadu_ps((const float*)bias + g * 16) : _mm512_setzero_ps();
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
float* outptr1 = out.row(1);
const Mat img0 = bottom_blob.channel(g);
// four consecutive input rows feed two output rows per iteration
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
// the 9 kernel taps, each broadcast as a 16-lane vector
__m512 _k00 = _mm512_load_ps(k0);
__m512 _k01 = _mm512_load_ps(k0 + 16);
__m512 _k02 = _mm512_load_ps(k0 + 32);
__m512 _k10 = _mm512_load_ps(k0 + 48);
__m512 _k11 = _mm512_load_ps(k0 + 64);
__m512 _k12 = _mm512_load_ps(k0 + 80);
__m512 _k20 = _mm512_load_ps(k0 + 96);
__m512 _k21 = _mm512_load_ps(k0 + 112);
__m512 _k22 = _mm512_load_ps(k0 + 128);
int i = 0;
// main loop: two output rows per iteration
for (; i + 1 < outh; i += 2)
{
int j = 0;
// 4 output columns at a time
for (; j + 3 < outw; j += 4)
{
__m512 _sum00 = _bias0;
__m512 _sum01 = _bias0;
__m512 _sum02 = _bias0;
__m512 _sum03 = _bias0;
__m512 _sum10 = _bias0;
__m512 _sum11 = _bias0;
__m512 _sum12 = _bias0;
__m512 _sum13 = _bias0;
// row r0 contributes only to output row 0 (taps k0x)
__m512 _r00 = _mm512_load_ps(r0);
__m512 _r01 = _mm512_load_ps(r0 + 16);
__m512 _r02 = _mm512_load_ps(r0 + 32);
__m512 _r03 = _mm512_load_ps(r0 + 48);
__m512 _r04 = _mm512_load_ps(r0 + 64);
__m512 _r05 = _mm512_load_ps(r0 + 80);
_sum00 = _mm512_fmadd_ps(_k00, _r00, _sum00);
_sum01 = _mm512_fmadd_ps(_k00, _r01, _sum01);
_sum02 = _mm512_fmadd_ps(_k00, _r02, _sum02);
_sum03 = _mm512_fmadd_ps(_k00, _r03, _sum03);
_sum00 = _mm512_fmadd_ps(_k01, _r01, _sum00);
_sum01 = _mm512_fmadd_ps(_k01, _r02, _sum01);
_sum02 = _mm512_fmadd_ps(_k01, _r03, _sum02);
_sum03 = _mm512_fmadd_ps(_k01, _r04, _sum03);
_sum00 = _mm512_fmadd_ps(_k02, _r02, _sum00);
_sum01 = _mm512_fmadd_ps(_k02, _r03, _sum01);
_sum02 = _mm512_fmadd_ps(_k02, _r04, _sum02);
_sum03 = _mm512_fmadd_ps(_k02, _r05, _sum03);
// row r1: taps k1x for output row 0, taps k0x for output row 1
__m512 _r10 = _mm512_load_ps(r1);
__m512 _r11 = _mm512_load_ps(r1 + 16);
__m512 _r12 = _mm512_load_ps(r1 + 32);
__m512 _r13 = _mm512_load_ps(r1 + 48);
__m512 _r14 = _mm512_load_ps(r1 + 64);
__m512 _r15 = _mm512_load_ps(r1 + 80);
_sum10 = _mm512_fmadd_ps(_k00, _r10, _sum10);
_sum11 = _mm512_fmadd_ps(_k00, _r11, _sum11);
_sum12 = _mm512_fmadd_ps(_k00, _r12, _sum12);
_sum13 = _mm512_fmadd_ps(_k00, _r13, _sum13);
_sum00 = _mm512_fmadd_ps(_k10, _r10, _sum00);
_sum01 = _mm512_fmadd_ps(_k10, _r11, _sum01);
_sum02 = _mm512_fmadd_ps(_k10, _r12, _sum02);
_sum03 = _mm512_fmadd_ps(_k10, _r13, _sum03);
_sum10 = _mm512_fmadd_ps(_k01, _r11, _sum10);
_sum11 = _mm512_fmadd_ps(_k01, _r12, _sum11);
_sum12 = _mm512_fmadd_ps(_k01, _r13, _sum12);
_sum13 = _mm512_fmadd_ps(_k01, _r14, _sum13);
_sum00 = _mm512_fmadd_ps(_k11, _r11, _sum00);
_sum01 = _mm512_fmadd_ps(_k11, _r12, _sum01);
_sum02 = _mm512_fmadd_ps(_k11, _r13, _sum02);
_sum03 = _mm512_fmadd_ps(_k11, _r14, _sum03);
_sum10 = _mm512_fmadd_ps(_k02, _r12, _sum10);
_sum11 = _mm512_fmadd_ps(_k02, _r13, _sum11);
_sum12 = _mm512_fmadd_ps(_k02, _r14, _sum12);
_sum13 = _mm512_fmadd_ps(_k02, _r15, _sum13);
_sum00 = _mm512_fmadd_ps(_k12, _r12, _sum00);
_sum01 = _mm512_fmadd_ps(_k12, _r13, _sum01);
_sum02 = _mm512_fmadd_ps(_k12, _r14, _sum02);
_sum03 = _mm512_fmadd_ps(_k12, _r15, _sum03);
// row r2: taps k2x for output row 0, taps k1x for output row 1
__m512 _r20 = _mm512_load_ps(r2);
__m512 _r21 = _mm512_load_ps(r2 + 16);
__m512 _r22 = _mm512_load_ps(r2 + 32);
__m512 _r23 = _mm512_load_ps(r2 + 48);
__m512 _r24 = _mm512_load_ps(r2 + 64);
__m512 _r25 = _mm512_load_ps(r2 + 80);
_sum10 = _mm512_fmadd_ps(_k10, _r20, _sum10);
_sum11 = _mm512_fmadd_ps(_k10, _r21, _sum11);
_sum12 = _mm512_fmadd_ps(_k10, _r22, _sum12);
_sum13 = _mm512_fmadd_ps(_k10, _r23, _sum13);
_sum00 = _mm512_fmadd_ps(_k20, _r20, _sum00);
_sum01 = _mm512_fmadd_ps(_k20, _r21, _sum01);
_sum02 = _mm512_fmadd_ps(_k20, _r22, _sum02);
_sum03 = _mm512_fmadd_ps(_k20, _r23, _sum03);
_sum10 = _mm512_fmadd_ps(_k11, _r21, _sum10);
_sum11 = _mm512_fmadd_ps(_k11, _r22, _sum11);
_sum12 = _mm512_fmadd_ps(_k11, _r23, _sum12);
_sum13 = _mm512_fmadd_ps(_k11, _r24, _sum13);
_sum00 = _mm512_fmadd_ps(_k21, _r21, _sum00);
_sum01 = _mm512_fmadd_ps(_k21, _r22, _sum01);
_sum02 = _mm512_fmadd_ps(_k21, _r23, _sum02);
_sum03 = _mm512_fmadd_ps(_k21, _r24, _sum03);
_sum10 = _mm512_fmadd_ps(_k12, _r22, _sum10);
_sum11 = _mm512_fmadd_ps(_k12, _r23, _sum11);
_sum12 = _mm512_fmadd_ps(_k12, _r24, _sum12);
_sum13 = _mm512_fmadd_ps(_k12, _r25, _sum13);
_sum00 = _mm512_fmadd_ps(_k22, _r22, _sum00);
_sum01 = _mm512_fmadd_ps(_k22, _r23, _sum01);
_sum02 = _mm512_fmadd_ps(_k22, _r24, _sum02);
_sum03 = _mm512_fmadd_ps(_k22, _r25, _sum03);
// row r3 contributes only to output row 1 (taps k2x)
__m512 _r30 = _mm512_load_ps(r3);
__m512 _r31 = _mm512_load_ps(r3 + 16);
__m512 _r32 = _mm512_load_ps(r3 + 32);
__m512 _r33 = _mm512_load_ps(r3 + 48);
__m512 _r34 = _mm512_load_ps(r3 + 64);
__m512 _r35 = _mm512_load_ps(r3 + 80);
_sum10 = _mm512_fmadd_ps(_k20, _r30, _sum10);
_sum11 = _mm512_fmadd_ps(_k20, _r31, _sum11);
_sum12 = _mm512_fmadd_ps(_k20, _r32, _sum12);
_sum13 = _mm512_fmadd_ps(_k20, _r33, _sum13);
_sum10 = _mm512_fmadd_ps(_k21, _r31, _sum10);
_sum11 = _mm512_fmadd_ps(_k21, _r32, _sum11);
_sum12 = _mm512_fmadd_ps(_k21, _r33, _sum12);
_sum13 = _mm512_fmadd_ps(_k21, _r34, _sum13);
_sum10 = _mm512_fmadd_ps(_k22, _r32, _sum10);
_sum11 = _mm512_fmadd_ps(_k22, _r33, _sum11);
_sum12 = _mm512_fmadd_ps(_k22, _r34, _sum12);
_sum13 = _mm512_fmadd_ps(_k22, _r35, _sum13);
_mm512_store_ps(outptr0, _sum00);
_mm512_store_ps(outptr0 + 16, _sum01);
_mm512_store_ps(outptr0 + 32, _sum02);
_mm512_store_ps(outptr0 + 48, _sum03);
_mm512_store_ps(outptr1, _sum10);
_mm512_store_ps(outptr1 + 16, _sum11);
_mm512_store_ps(outptr1 + 32, _sum12);
_mm512_store_ps(outptr1 + 48, _sum13);
r0 += 64;
r1 += 64;
r2 += 64;
r3 += 64;
outptr0 += 64;
outptr1 += 64;
}
// 2 output columns at a time
for (; j + 1 < outw; j += 2)
{
__m512 _sum00 = _bias0;
__m512 _sum01 = _bias0;
__m512 _sum10 = _bias0;
__m512 _sum11 = _bias0;
__m512 _r00 = _mm512_load_ps(r0);
__m512 _r01 = _mm512_load_ps(r0 + 16);
__m512 _r02 = _mm512_load_ps(r0 + 32);
__m512 _r03 = _mm512_load_ps(r0 + 48);
_sum00 = _mm512_fmadd_ps(_k00, _r00, _sum00);
_sum01 = _mm512_fmadd_ps(_k00, _r01, _sum01);
_sum00 = _mm512_fmadd_ps(_k01, _r01, _sum00);
_sum01 = _mm512_fmadd_ps(_k01, _r02, _sum01);
_sum00 = _mm512_fmadd_ps(_k02, _r02, _sum00);
_sum01 = _mm512_fmadd_ps(_k02, _r03, _sum01);
__m512 _r10 = _mm512_load_ps(r1);
__m512 _r11 = _mm512_load_ps(r1 + 16);
__m512 _r12 = _mm512_load_ps(r1 + 32);
__m512 _r13 = _mm512_load_ps(r1 + 48);
_sum00 = _mm512_fmadd_ps(_k10, _r10, _sum00);
_sum01 = _mm512_fmadd_ps(_k10, _r11, _sum01);
_sum10 = _mm512_fmadd_ps(_k00, _r10, _sum10);
_sum11 = _mm512_fmadd_ps(_k00, _r11, _sum11);
_sum00 = _mm512_fmadd_ps(_k11, _r11, _sum00);
_sum01 = _mm512_fmadd_ps(_k11, _r12, _sum01);
_sum10 = _mm512_fmadd_ps(_k01, _r11, _sum10);
_sum11 = _mm512_fmadd_ps(_k01, _r12, _sum11);
_sum00 = _mm512_fmadd_ps(_k12, _r12, _sum00);
_sum01 = _mm512_fmadd_ps(_k12, _r13, _sum01);
_sum10 = _mm512_fmadd_ps(_k02, _r12, _sum10);
_sum11 = _mm512_fmadd_ps(_k02, _r13, _sum11);
__m512 _r20 = _mm512_load_ps(r2);
__m512 _r21 = _mm512_load_ps(r2 + 16);
__m512 _r22 = _mm512_load_ps(r2 + 32);
__m512 _r23 = _mm512_load_ps(r2 + 48);
_sum00 = _mm512_fmadd_ps(_k20, _r20, _sum00);
_sum01 = _mm512_fmadd_ps(_k20, _r21, _sum01);
_sum10 = _mm512_fmadd_ps(_k10, _r20, _sum10);
_sum11 = _mm512_fmadd_ps(_k10, _r21, _sum11);
_sum00 = _mm512_fmadd_ps(_k21, _r21, _sum00);
_sum01 = _mm512_fmadd_ps(_k21, _r22, _sum01);
_sum10 = _mm512_fmadd_ps(_k11, _r21, _sum10);
_sum11 = _mm512_fmadd_ps(_k11, _r22, _sum11);
_sum00 = _mm512_fmadd_ps(_k22, _r22, _sum00);
_sum01 = _mm512_fmadd_ps(_k22, _r23, _sum01);
_sum10 = _mm512_fmadd_ps(_k12, _r22, _sum10);
_sum11 = _mm512_fmadd_ps(_k12, _r23, _sum11);
__m512 _r30 = _mm512_load_ps(r3);
__m512 _r31 = _mm512_load_ps(r3 + 16);
__m512 _r32 = _mm512_load_ps(r3 + 32);
__m512 _r33 = _mm512_load_ps(r3 + 48);
_sum10 = _mm512_fmadd_ps(_k20, _r30, _sum10);
_sum11 = _mm512_fmadd_ps(_k20, _r31, _sum11);
_sum10 = _mm512_fmadd_ps(_k21, _r31, _sum10);
_sum11 = _mm512_fmadd_ps(_k21, _r32, _sum11);
_sum10 = _mm512_fmadd_ps(_k22, _r32, _sum10);
_sum11 = _mm512_fmadd_ps(_k22, _r33, _sum11);
_mm512_store_ps(outptr0, _sum00);
_mm512_store_ps(outptr0 + 16, _sum01);
_mm512_store_ps(outptr1, _sum10);
_mm512_store_ps(outptr1 + 16, _sum11);
r0 += 32;
r1 += 32;
r2 += 32;
r3 += 32;
outptr0 += 32;
outptr1 += 32;
}
// remaining single output column
for (; j < outw; j++)
{
__m512 _sum0 = _bias0;
__m512 _sum1 = _bias0;
__m512 _r00 = _mm512_load_ps(r0);
__m512 _r01 = _mm512_load_ps(r0 + 16);
__m512 _r02 = _mm512_load_ps(r0 + 32);
_sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0);
__m512 _r10 = _mm512_load_ps(r1);
__m512 _r11 = _mm512_load_ps(r1 + 16);
__m512 _r12 = _mm512_load_ps(r1 + 32);
_sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0);
_sum1 = _mm512_fmadd_ps(_k00, _r10, _sum1);
_sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0);
_sum1 = _mm512_fmadd_ps(_k01, _r11, _sum1);
_sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0);
_sum1 = _mm512_fmadd_ps(_k02, _r12, _sum1);
__m512 _r20 = _mm512_load_ps(r2);
__m512 _r21 = _mm512_load_ps(r2 + 16);
__m512 _r22 = _mm512_load_ps(r2 + 32);
_sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0);
_sum1 = _mm512_fmadd_ps(_k10, _r20, _sum1);
_sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0);
_sum1 = _mm512_fmadd_ps(_k11, _r21, _sum1);
_sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0);
_sum1 = _mm512_fmadd_ps(_k12, _r22, _sum1);
__m512 _r30 = _mm512_load_ps(r3);
__m512 _r31 = _mm512_load_ps(r3 + 16);
__m512 _r32 = _mm512_load_ps(r3 + 32);
_sum1 = _mm512_fmadd_ps(_k20, _r30, _sum1);
_sum1 = _mm512_fmadd_ps(_k21, _r31, _sum1);
_sum1 = _mm512_fmadd_ps(_k22, _r32, _sum1);
_mm512_store_ps(outptr0, _sum0);
_mm512_store_ps(outptr1, _sum1);
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
outptr0 += 16;
outptr1 += 16;
}
// skip the 2-pixel right border, plus one whole input row because this
// iteration consumed two output rows (w == outw + 2 for 3x3 stride 1)
r0 += 2 * 16 + w * 16;
r1 += 2 * 16 + w * 16;
r2 += 2 * 16 + w * 16;
r3 += 2 * 16 + w * 16;
// outptr0/outptr1 each advance past the row that the other produced
outptr0 += outw * 16;
outptr1 += outw * 16;
}
// tail: single remaining output row (uses r0..r2 only; r3 is unused here)
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
__m512 _sum0 = _bias0;
__m512 _sum1 = _bias0;
__m512 _sum2 = _bias0;
__m512 _sum3 = _bias0;
__m512 _r00 = _mm512_load_ps(r0);
__m512 _r01 = _mm512_load_ps(r0 + 16);
__m512 _r02 = _mm512_load_ps(r0 + 32);
__m512 _r03 = _mm512_load_ps(r0 + 48);
__m512 _r04 = _mm512_load_ps(r0 + 64);
__m512 _r05 = _mm512_load_ps(r0 + 80);
_sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0);
_sum1 = _mm512_fmadd_ps(_k00, _r01, _sum1);
_sum2 = _mm512_fmadd_ps(_k00, _r02, _sum2);
_sum3 = _mm512_fmadd_ps(_k00, _r03, _sum3);
_sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0);
_sum1 = _mm512_fmadd_ps(_k01, _r02, _sum1);
_sum2 = _mm512_fmadd_ps(_k01, _r03, _sum2);
_sum3 = _mm512_fmadd_ps(_k01, _r04, _sum3);
_sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0);
_sum1 = _mm512_fmadd_ps(_k02, _r03, _sum1);
_sum2 = _mm512_fmadd_ps(_k02, _r04, _sum2);
_sum3 = _mm512_fmadd_ps(_k02, _r05, _sum3);
__m512 _r10 = _mm512_load_ps(r1);
__m512 _r11 = _mm512_load_ps(r1 + 16);
__m512 _r12 = _mm512_load_ps(r1 + 32);
__m512 _r13 = _mm512_load_ps(r1 + 48);
__m512 _r14 = _mm512_load_ps(r1 + 64);
__m512 _r15 = _mm512_load_ps(r1 + 80);
_sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0);
_sum1 = _mm512_fmadd_ps(_k10, _r11, _sum1);
_sum2 = _mm512_fmadd_ps(_k10, _r12, _sum2);
_sum3 = _mm512_fmadd_ps(_k10, _r13, _sum3);
_sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0);
_sum1 = _mm512_fmadd_ps(_k11, _r12, _sum1);
_sum2 = _mm512_fmadd_ps(_k11, _r13, _sum2);
_sum3 = _mm512_fmadd_ps(_k11, _r14, _sum3);
_sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0);
_sum1 = _mm512_fmadd_ps(_k12, _r13, _sum1);
_sum2 = _mm512_fmadd_ps(_k12, _r14, _sum2);
_sum3 = _mm512_fmadd_ps(_k12, _r15, _sum3);
__m512 _r20 = _mm512_load_ps(r2);
__m512 _r21 = _mm512_load_ps(r2 + 16);
__m512 _r22 = _mm512_load_ps(r2 + 32);
__m512 _r23 = _mm512_load_ps(r2 + 48);
__m512 _r24 = _mm512_load_ps(r2 + 64);
__m512 _r25 = _mm512_load_ps(r2 + 80);
_sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0);
_sum1 = _mm512_fmadd_ps(_k20, _r21, _sum1);
_sum2 = _mm512_fmadd_ps(_k20, _r22, _sum2);
_sum3 = _mm512_fmadd_ps(_k20, _r23, _sum3);
_sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0);
_sum1 = _mm512_fmadd_ps(_k21, _r22, _sum1);
_sum2 = _mm512_fmadd_ps(_k21, _r23, _sum2);
_sum3 = _mm512_fmadd_ps(_k21, _r24, _sum3);
_sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0);
_sum1 = _mm512_fmadd_ps(_k22, _r23, _sum1);
_sum2 = _mm512_fmadd_ps(_k22, _r24, _sum2);
_sum3 = _mm512_fmadd_ps(_k22, _r25, _sum3);
_mm512_store_ps(outptr0, _sum0);
_mm512_store_ps(outptr0 + 16, _sum1);
_mm512_store_ps(outptr0 + 32, _sum2);
_mm512_store_ps(outptr0 + 48, _sum3);
r0 += 64;
r1 += 64;
r2 += 64;
outptr0 += 64;
}
for (; j + 1 < outw; j += 2)
{
__m512 _sum0 = _bias0;
__m512 _sum1 = _bias0;
__m512 _r00 = _mm512_load_ps(r0);
__m512 _r01 = _mm512_load_ps(r0 + 16);
__m512 _r02 = _mm512_load_ps(r0 + 32);
__m512 _r03 = _mm512_load_ps(r0 + 48);
_sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0);
_sum1 = _mm512_fmadd_ps(_k00, _r01, _sum1);
_sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0);
_sum1 = _mm512_fmadd_ps(_k01, _r02, _sum1);
_sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0);
_sum1 = _mm512_fmadd_ps(_k02, _r03, _sum1);
__m512 _r10 = _mm512_load_ps(r1);
__m512 _r11 = _mm512_load_ps(r1 + 16);
__m512 _r12 = _mm512_load_ps(r1 + 32);
__m512 _r13 = _mm512_load_ps(r1 + 48);
_sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0);
_sum1 = _mm512_fmadd_ps(_k10, _r11, _sum1);
_sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0);
_sum1 = _mm512_fmadd_ps(_k11, _r12, _sum1);
_sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0);
_sum1 = _mm512_fmadd_ps(_k12, _r13, _sum1);
__m512 _r20 = _mm512_load_ps(r2);
__m512 _r21 = _mm512_load_ps(r2 + 16);
__m512 _r22 = _mm512_load_ps(r2 + 32);
__m512 _r23 = _mm512_load_ps(r2 + 48);
_sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0);
_sum1 = _mm512_fmadd_ps(_k20, _r21, _sum1);
_sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0);
_sum1 = _mm512_fmadd_ps(_k21, _r22, _sum1);
_sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0);
_sum1 = _mm512_fmadd_ps(_k22, _r23, _sum1);
_mm512_store_ps(outptr0, _sum0);
_mm512_store_ps(outptr0 + 16, _sum1);
r0 += 32;
r1 += 32;
r2 += 32;
outptr0 += 32;
}
for (; j < outw; j++)
{
__m512 _sum0 = _bias0;
__m512 _r00 = _mm512_load_ps(r0);
__m512 _r01 = _mm512_load_ps(r0 + 16);
__m512 _r02 = _mm512_load_ps(r0 + 32);
_sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0);
__m512 _r10 = _mm512_load_ps(r1);
__m512 _r11 = _mm512_load_ps(r1 + 16);
__m512 _r12 = _mm512_load_ps(r1 + 32);
_sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0);
__m512 _r20 = _mm512_load_ps(r2);
__m512 _r21 = _mm512_load_ps(r2 + 16);
__m512 _r22 = _mm512_load_ps(r2 + 32);
_sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0);
_mm512_store_ps(outptr0, _sum0);
r0 += 16;
r1 += 16;
r2 += 16;
outptr0 += 16;
}
// skip the 2-pixel right border only (one output row consumed)
r0 += 2 * 16;
r1 += 2 * 16;
r2 += 2 * 16;
}
}
}
// Depthwise 3x3 convolution, stride 2, for the AVX-512 pack-16 layout.
// One output row per iteration; the inner loop is unrolled 4/2/1 output
// columns wide, consuming input at twice the output stride.
static void convdw3x3s2_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
// bytes-per-row remainder after consuming outw stride-2 steps, plus the
// extra input row skipped between consecutive output rows
const int tailstep = (w - 2 * outw + w) * 16;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
__m512 _bias0 = bias ? _mm512_loadu_ps((const float*)bias + g * 16) : _mm512_setzero_ps();
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
// the 9 kernel taps, each a 16-lane vector
__m512 _k00 = _mm512_load_ps(k0);
__m512 _k01 = _mm512_load_ps(k0 + 16);
__m512 _k02 = _mm512_load_ps(k0 + 32);
__m512 _k10 = _mm512_load_ps(k0 + 48);
__m512 _k11 = _mm512_load_ps(k0 + 64);
__m512 _k12 = _mm512_load_ps(k0 + 80);
__m512 _k20 = _mm512_load_ps(k0 + 96);
__m512 _k21 = _mm512_load_ps(k0 + 112);
__m512 _k22 = _mm512_load_ps(k0 + 128);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// 4 output columns per iteration: needs 9 input pixels per row
// (even-indexed pixels feed k?0/k?2, odd-indexed feed k?1)
for (; j + 3 < outw; j += 4)
{
__m512 _sum0 = _bias0;
__m512 _sum1 = _bias0;
__m512 _sum2 = _bias0;
__m512 _sum3 = _bias0;
__m512 _r00 = _mm512_load_ps(r0);
__m512 _r01 = _mm512_load_ps(r0 + 16);
__m512 _r02 = _mm512_load_ps(r0 + 32);
__m512 _r03 = _mm512_load_ps(r0 + 48);
__m512 _r04 = _mm512_load_ps(r0 + 64);
__m512 _r05 = _mm512_load_ps(r0 + 80);
__m512 _r06 = _mm512_load_ps(r0 + 96);
__m512 _r07 = _mm512_load_ps(r0 + 112);
__m512 _r08 = _mm512_load_ps(r0 + 128);
_sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0);
_sum1 = _mm512_fmadd_ps(_k00, _r02, _sum1);
_sum2 = _mm512_fmadd_ps(_k00, _r04, _sum2);
_sum3 = _mm512_fmadd_ps(_k00, _r06, _sum3);
_sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0);
_sum1 = _mm512_fmadd_ps(_k01, _r03, _sum1);
_sum2 = _mm512_fmadd_ps(_k01, _r05, _sum2);
_sum3 = _mm512_fmadd_ps(_k01, _r07, _sum3);
_sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0);
_sum1 = _mm512_fmadd_ps(_k02, _r04, _sum1);
_sum2 = _mm512_fmadd_ps(_k02, _r06, _sum2);
_sum3 = _mm512_fmadd_ps(_k02, _r08, _sum3);
__m512 _r10 = _mm512_load_ps(r1);
__m512 _r11 = _mm512_load_ps(r1 + 16);
__m512 _r12 = _mm512_load_ps(r1 + 32);
__m512 _r13 = _mm512_load_ps(r1 + 48);
__m512 _r14 = _mm512_load_ps(r1 + 64);
__m512 _r15 = _mm512_load_ps(r1 + 80);
__m512 _r16 = _mm512_load_ps(r1 + 96);
__m512 _r17 = _mm512_load_ps(r1 + 112);
__m512 _r18 = _mm512_load_ps(r1 + 128);
_sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0);
_sum1 = _mm512_fmadd_ps(_k10, _r12, _sum1);
_sum2 = _mm512_fmadd_ps(_k10, _r14, _sum2);
_sum3 = _mm512_fmadd_ps(_k10, _r16, _sum3);
_sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0);
_sum1 = _mm512_fmadd_ps(_k11, _r13, _sum1);
_sum2 = _mm512_fmadd_ps(_k11, _r15, _sum2);
_sum3 = _mm512_fmadd_ps(_k11, _r17, _sum3);
_sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0);
_sum1 = _mm512_fmadd_ps(_k12, _r14, _sum1);
_sum2 = _mm512_fmadd_ps(_k12, _r16, _sum2);
_sum3 = _mm512_fmadd_ps(_k12, _r18, _sum3);
__m512 _r20 = _mm512_load_ps(r2);
__m512 _r21 = _mm512_load_ps(r2 + 16);
__m512 _r22 = _mm512_load_ps(r2 + 32);
__m512 _r23 = _mm512_load_ps(r2 + 48);
__m512 _r24 = _mm512_load_ps(r2 + 64);
__m512 _r25 = _mm512_load_ps(r2 + 80);
__m512 _r26 = _mm512_load_ps(r2 + 96);
__m512 _r27 = _mm512_load_ps(r2 + 112);
__m512 _r28 = _mm512_load_ps(r2 + 128);
_sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0);
_sum1 = _mm512_fmadd_ps(_k20, _r22, _sum1);
_sum2 = _mm512_fmadd_ps(_k20, _r24, _sum2);
_sum3 = _mm512_fmadd_ps(_k20, _r26, _sum3);
_sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0);
_sum1 = _mm512_fmadd_ps(_k21, _r23, _sum1);
_sum2 = _mm512_fmadd_ps(_k21, _r25, _sum2);
_sum3 = _mm512_fmadd_ps(_k21, _r27, _sum3);
_sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0);
_sum1 = _mm512_fmadd_ps(_k22, _r24, _sum1);
_sum2 = _mm512_fmadd_ps(_k22, _r26, _sum2);
_sum3 = _mm512_fmadd_ps(_k22, _r28, _sum3);
_mm512_store_ps(outptr0, _sum0);
_mm512_store_ps(outptr0 + 16, _sum1);
_mm512_store_ps(outptr0 + 32, _sum2);
_mm512_store_ps(outptr0 + 48, _sum3);
// stride 2: input advances twice as fast as output
r0 += 2 * 64;
r1 += 2 * 64;
r2 += 2 * 64;
outptr0 += 64;
}
for (; j + 1 < outw; j += 2)
{
__m512 _sum0 = _bias0;
__m512 _sum1 = _bias0;
__m512 _r00 = _mm512_load_ps(r0);
__m512 _r01 = _mm512_load_ps(r0 + 16);
__m512 _r02 = _mm512_load_ps(r0 + 32);
__m512 _r03 = _mm512_load_ps(r0 + 48);
__m512 _r04 = _mm512_load_ps(r0 + 64);
_sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0);
_sum1 = _mm512_fmadd_ps(_k00, _r02, _sum1);
_sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0);
_sum1 = _mm512_fmadd_ps(_k01, _r03, _sum1);
_sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0);
_sum1 = _mm512_fmadd_ps(_k02, _r04, _sum1);
__m512 _r10 = _mm512_load_ps(r1);
__m512 _r11 = _mm512_load_ps(r1 + 16);
__m512 _r12 = _mm512_load_ps(r1 + 32);
__m512 _r13 = _mm512_load_ps(r1 + 48);
__m512 _r14 = _mm512_load_ps(r1 + 64);
_sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0);
_sum1 = _mm512_fmadd_ps(_k10, _r12, _sum1);
_sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0);
_sum1 = _mm512_fmadd_ps(_k11, _r13, _sum1);
_sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0);
_sum1 = _mm512_fmadd_ps(_k12, _r14, _sum1);
__m512 _r20 = _mm512_load_ps(r2);
__m512 _r21 = _mm512_load_ps(r2 + 16);
__m512 _r22 = _mm512_load_ps(r2 + 32);
__m512 _r23 = _mm512_load_ps(r2 + 48);
__m512 _r24 = _mm512_load_ps(r2 + 64);
_sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0);
_sum1 = _mm512_fmadd_ps(_k20, _r22, _sum1);
_sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0);
_sum1 = _mm512_fmadd_ps(_k21, _r23, _sum1);
_sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0);
_sum1 = _mm512_fmadd_ps(_k22, _r24, _sum1);
_mm512_store_ps(outptr0, _sum0);
_mm512_store_ps(outptr0 + 16, _sum1);
r0 += 2 * 32;
r1 += 2 * 32;
r2 += 2 * 32;
outptr0 += 32;
}
for (; j < outw; j++)
{
__m512 _sum0 = _bias0;
__m512 _r00 = _mm512_load_ps(r0);
__m512 _r01 = _mm512_load_ps(r0 + 16);
__m512 _r02 = _mm512_load_ps(r0 + 32);
_sum0 = _mm512_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm512_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm512_fmadd_ps(_k02, _r02, _sum0);
__m512 _r10 = _mm512_load_ps(r1);
__m512 _r11 = _mm512_load_ps(r1 + 16);
__m512 _r12 = _mm512_load_ps(r1 + 32);
_sum0 = _mm512_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm512_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm512_fmadd_ps(_k12, _r12, _sum0);
__m512 _r20 = _mm512_load_ps(r2);
__m512 _r21 = _mm512_load_ps(r2 + 16);
__m512 _r22 = _mm512_load_ps(r2 + 32);
_sum0 = _mm512_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm512_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm512_fmadd_ps(_k22, _r22, _sum0);
_mm512_store_ps(outptr0, _sum0);
r0 += 2 * 16;
r1 += 2 * 16;
r2 += 2 * 16;
outptr0 += 16;
}
// advance to the start of the next pair of input rows
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
DRB061-matrixvector1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Matrix-vector multiplication: outer-level loop parallelization
*/
#include "omprace.h"
#include <omp.h>
#define N 100
double a[N][N], v[N], v_out[N];

/* Matrix-vector multiplication v_out = a * v.
   The outer loop is parallelized; each iteration writes a distinct
   v_out[i] and reads only shared, read-only data, so there is no race.
   Returns 0. */
int mv()
{
  int i, j;
#pragma omp parallel for private (i,j)
  for (i = 0; i < N; i++)
  {
    /* Accumulate in double: a[][] and v[] are double, and summing their
       products into a float would silently discard precision. */
    double sum = 0.0;
    for (j = 0; j < N; j++)
    {
      sum += a[i][j]*v[j];
    }
    v_out[i] = sum;
  }
  return 0;
}
/* Entry point: initialize the race-checking harness (omprace), run the
   matrix-vector kernel once, then finalize the harness. */
int main()
{
  omprace_init();
  mv();
  omprace_fini();
  return 0;
}
|
Cycle.c | /*
* The MIT License
*
* Copyright 2020 The OpenNARS authors.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "Cycle.h"
//doing inference within the matched concept, returning whether decisionMaking should continue
//Do inference within the matched concept; the returned decision tells the
//caller whether decision making should continue.
static Decision Cycle_ActivateSensorimotorConcept(Concept *c, Event *e, long currentTime)
{
    Decision result = {0};
    if(e->truth.confidence <= MIN_CONFIDENCE)
    {
        return result; //event too weak to act on
    }
    c->usage = Usage_use(c->usage, currentTime, false);
    //add event as spike to the concept:
    if(e->type == EVENT_TYPE_BELIEF)
    {
        c->belief_spike = *e;
    }
    else
    {
        //pass spike if the concept doesn't have a satisfying motor command
        result = Decision_Suggest(c, e, currentTime);
    }
    return result;
}
//Process an event, by creating a concept, or activating an existing
//Process an event: create (or find) its concept, then activate every concept
//that unifies with it, collecting the best suggested decision.
//Returns the best decision found across all matching concepts.
static Decision Cycle_ProcessSensorimotorEvent(Event *e, long currentTime)
{
    Decision best_decision = {0};
    //add a new concept for e if not yet existing
    Memory_Conceptualize(&e->term, currentTime);
    e->processed = true;
    Event_SetTerm(e, e->term); // TODO make sure that hash needs to be calculated once instead already
    IN_DEBUG( puts("Event was selected:"); Event_Print(e); )
    //determine the concept it is related to
    bool e_hasVariable = Variable_hasVariable(&e->term, true, true, true);
    #pragma omp parallel for
    for(int concept_i=0; concept_i<concepts.itemsAmount; concept_i++)
    {
        Event ecp = *e;
        Concept *c = concepts.items[concept_i].address;
        if(!e_hasVariable) //concept matched to the event which doesn't have variables
        {
            Substitution subs = Variable_Unify(&c->term, &e->term); //concept with variables,
            if(subs.success)
            {
                ecp.term = e->term;
                Decision decision = Cycle_ActivateSensorimotorConcept(c, &ecp, currentTime);
                //best_decision is shared by all threads of the parallel loop, so the
                //compare-and-assign must be serialized to avoid a data race
                #pragma omp critical(best_decision_update)
                {
                    if(decision.execute && decision.desire >= best_decision.desire && (!best_decision.specialized || decision.specialized))
                    {
                        best_decision = decision;
                    }
                }
            }
        }
        else
        {
            Substitution subs = Variable_Unify(&e->term, &c->term); //event with variable matched to concept
            if(subs.success)
            {
                bool success;
                ecp.term = Variable_ApplySubstitute(e->term, subs, &success);
                if(success)
                {
                    Decision decision = Cycle_ActivateSensorimotorConcept(c, &ecp, currentTime);
                    //same shared-state update as above: serialize it
                    #pragma omp critical(best_decision_update)
                    {
                        if(decision.execute && decision.desire >= best_decision.desire && (!best_decision.specialized || decision.specialized))
                        {
                            best_decision = decision;
                        }
                    }
                }
            }
        }
    }
    return best_decision;
}
//Pop up to cnt highest-priority events from queue into selectionArray and
//selectionPriority. Events are copied out because the queue's storage can be
//recycled while the selected batch is still being processed.
//selectedCnt receives the number of events actually popped.
void Cycle_PopEvents(Event *selectionArray, double *selectionPriority, int *selectedCnt, PriorityQueue *queue, int cnt)
{
    int taken = 0;
    while(taken < cnt)
    {
        Event *popped;
        double prio = 0;
        if(!PriorityQueue_PopMax(queue, (void**) &popped, &prio))
        {
            assert(queue->itemsAmount == 0, "No item was popped, only acceptable reason is when it's empty");
            IN_DEBUG( puts("Selecting event failed, maybe there is no event left."); )
            break;
        }
        selectionPriority[taken] = prio;
        selectionArray[taken] = *popped; //copy: recycled queue pointers would be invalid to keep
        taken++;
    }
    *selectedCnt = taken;
}
//Propagate subgoals, leading to decisions
//Propagate subgoals, leading to decisions.
//For each selected goal: process it sensorimotorically, then revise it into
//every concept whose term unifies with it, and derive new subgoals from that
//concept's precondition implications. Returns the best decision found.
static Decision Cycle_PropagateSubgoals(long currentTime)
{
    Decision best_decision = {0};
    //pass goal spikes on to the next
    for(int i=0; i<goalsSelectedCnt; i++)
    {
        Event *goal = &selectedGoals[i];
        IN_DEBUG( fputs("selected goal ", stdout); Narsese_PrintTerm(&goal->term); puts(""); )
        Decision decision = Cycle_ProcessSensorimotorEvent(goal, currentTime);
        //keep the strongest (and most specialized) executable decision
        if(decision.execute && decision.desire > best_decision.desire && (!best_decision.specialized || decision.specialized))
        {
            best_decision = decision;
        }
        #pragma omp parallel for
        for(int concept_i=0; concept_i<concepts.itemsAmount; concept_i++)
        {
            Concept *c = concepts.items[concept_i].address;
            if(Variable_Unify(&c->term, &goal->term).success) //could be <a --> M>! matching to some <... =/> <$1 --> M>>.
            {
                bool revised;
                //revise the goal into the concept's goal spike
                c->goal_spike = Inference_RevisionAndChoice(&c->goal_spike, goal, currentTime, &revised);
                //NOTE(review): selectedGoals[i] can be written by multiple threads of
                //this parallel loop (one per matching concept) — confirm this
                //last-writer-wins update is intended.
                selectedGoals[i] = c->goal_spike;
                for(int opi=0; opi<OPERATIONS_MAX; opi++)
                {
                    for(int j=0; j<c->precondition_beliefs[opi].itemsAmount; j++)
                    {
                        Implication *imp = &c->precondition_beliefs[opi].array[j];
                        //drop implications that are no longer valid in memory
                        if(!Memory_ImplicationValid(imp))
                        {
                            Table_Remove(&c->precondition_beliefs[opi], j);
                            j--;
                            continue;
                        }
                        //deduce the subgoal (the implication's precondition) and add it
                        Event newGoal = Inference_GoalDeduction(&c->goal_spike, imp);
                        Event newGoalUpdated = Inference_EventUpdate(&newGoal, currentTime);
                        IN_DEBUG( fputs("derived goal ", stdout); Narsese_PrintTerm(&newGoalUpdated.term); puts(""); )
                        Memory_AddEvent(&newGoalUpdated, currentTime, selectedGoalsPriority[i] * Truth_Expectation(newGoalUpdated.truth), 0, false, true, false, false, false);
                    }
                }
            }
        }
    }
    return best_decision;
}
//Reinforce link between concept a and b (creating it if non-existent)
//Reinforce the link between the concepts of events a and b (creating it if
//non-existent) via temporal induction, forming or revising a precondition
//implication in concept B. Both events must be beliefs with evidentially
//independent (non-overlapping) stamps.
//NOTE(review): reads file-global currentTime — confirm it is up to date here.
static void Cycle_ReinforceLink(Event *a, Event *b)
{
    if(a->type != EVENT_TYPE_BELIEF || b->type != EVENT_TYPE_BELIEF)
    {
        return;
    }
    Term a_term_nop = Narsese_GetPreconditionWithoutOp(&a->term);
    Concept *A = Memory_FindConceptByTerm(&a_term_nop);
    Concept *B = Memory_FindConceptByTerm(&b->term);
    if(A != NULL && B != NULL && A != B)
    {
        //temporal induction requires non-overlapping evidence
        if(!Stamp_checkOverlap(&a->stamp, &b->stamp))
        {
            bool success;
            Implication precondition_implication = Inference_BeliefInduction(a, b, &success);
            if(success)
            {
                precondition_implication.sourceConcept = A;
                precondition_implication.sourceConceptId = A->id;
                if(precondition_implication.truth.confidence >= MIN_CONFIDENCE)
                {
                    //generalize with variables; flag renamed from "success", which
                    //shadowed the induction success flag above
                    bool intro_success;
                    Term general_implication_term = IntroduceImplicationVariables(precondition_implication.term, &intro_success);
                    if(intro_success && Variable_hasVariable(&general_implication_term, true, true, false))
                    {
                        NAL_DerivedEvent(general_implication_term, OCCURRENCE_ETERNAL, precondition_implication.truth, precondition_implication.stamp, currentTime, 1, 1, precondition_implication.occurrenceTimeOffset, NULL, 0);
                    }
                    int operationID = Narsese_getOperationID(&a->term);
                    IN_DEBUG( fputs("Formed implication: ", stdout); Narsese_PrintTerm(&precondition_implication.term); Truth_Print(&precondition_implication.truth); puts("\n"); )
                    Implication *revised_precon = Table_AddAndRevise(&B->precondition_beliefs[operationID], &precondition_implication);
                    if(revised_precon != NULL)
                    {
                        revised_precon->creationTime = currentTime; //for evaluation
                        revised_precon->sourceConcept = A;
                        revised_precon->sourceConceptId = A->id;
                        /*IN_DEBUG( fputs("REVISED pre-condition implication: ", stdout); Implication_Print(revised_precon); )*/
                        Memory_printAddedImplication(&revised_precon->term, &revised_precon->truth, false, revised_precon->truth.confidence > precondition_implication.truth.confidence);
                    }
                }
            }
        }
    }
}
void Cycle_PushEvents(long currentTime)
{
for(int i=0; i<beliefsSelectedCnt; i++)
{
Memory_AddEvent(&selectedBeliefs[i], currentTime, selectedBeliefsPriority[i], 0, false, false, true, false, false);
}
for(int i=0; i<goalsSelectedCnt; i++)
{
Memory_AddEvent(&selectedGoals[i], currentTime, selectedGoalsPriority[i], 0, false, false, true, false, false);
}
}
//Process the newest input belief events: form concepts for sequences of each
//length, and mine <(&/,precondition,operation) =/> postcondition> patterns
//from the belief FIFO by reinforcing links between pre- and postconditions.
void Cycle_ProcessInputBeliefEvents(long currentTime)
{
    //1. process newest event
    if(belief_events.itemsAmount > 0)
    {
        //form concepts for the sequences of different length
        for(int len=0; len<MAX_SEQUENCE_LEN; len++)
        {
            Event *toProcess = FIFO_GetNewestSequence(&belief_events, len);
            if(toProcess != NULL && !toProcess->processed && toProcess->type != EVENT_TYPE_DELETED)
            {
                assert(toProcess->type == EVENT_TYPE_BELIEF, "A different event type made it into belief events!");
                Cycle_ProcessSensorimotorEvent(toProcess, currentTime);
                Event postcondition = *toProcess;
                //Mine for <(&/,precondition,operation) =/> postcondition> patterns in the FIFO:
                if(len == 0) //postcondition always len1
                {
                    int op_id = Narsese_getOperationID(&postcondition.term);
                    Decision_AssumptionOfFailure(op_id, currentTime); //collection of negative evidence, new way
                    //build link between internal derivations and external event to explain it:
                    for(int k=0; k<beliefsSelectedCnt; k++)
                    {
                        //only events that happened strictly before the postcondition qualify
                        if(selectedBeliefs[k].occurrenceTime < postcondition.occurrenceTime)
                        {
                            Cycle_ReinforceLink(&selectedBeliefs[k], &postcondition);
                        }
                    }
                    //scan older FIFO entries (k >= 1) as candidate preconditions
                    for(int k=1; k<belief_events.itemsAmount; k++)
                    {
                        for(int len2=0; len2<MAX_SEQUENCE_LEN; len2++)
                        {
                            Event *precondition = FIFO_GetKthNewestSequence(&belief_events, k, len2);
                            if(precondition != NULL && precondition->type != EVENT_TYPE_DELETED)
                            {
                                Term precond = Narsese_GetPreconditionWithoutOp(&precondition->term); //a or (&/,a,op)
                                for(int i=0; i<COMPOUND_TERM_SIZE_MAX; i++)
                                {
                                    if(Narsese_isOperator(precond.atoms[i]))
                                    {
                                        goto NoReinforce; //if there is an op in a, then a longer sequ has also, try different k
                                    }
                                }
                                Cycle_ReinforceLink(precondition, &postcondition);
                                NoReinforce:;
                            }
                        }
                    }
                }
            }
        }
    }
}
void Cycle_ProcessInputGoalEvents(long currentTime)
{
//process goals
Decision decision = {0};
if(goal_events.itemsAmount > 0)
{
Event *goal = FIFO_GetNewestSequence(&goal_events, 0);
if(!goal->processed && goal->type!=EVENT_TYPE_DELETED)
{
assert(goal->type == EVENT_TYPE_GOAL, "A different event type made it into goal events!");
decision = Cycle_ProcessSensorimotorEvent(goal, currentTime);
}
}
//allow reasoning into the future by propagating spikes from goals back to potential current events
if(!decision.execute)
{
decision = Cycle_PropagateSubgoals(currentTime);
}
if(decision.execute && decision.operationID > 0)
{
Decision_Execute(&decision);
//reset cycling goal events after execution to avoid "residue actions"
PriorityQueue_RESET(&cycling_goal_events, cycling_goal_events.items, cycling_goal_events.maxElements);
}
}
//Derive new knowledge: apply the rule table between each selected belief
//event and all semantically (common subterm) or temporally (precondition
//implication) related concepts, under a self-adjusting firing threshold.
void Cycle_Inference(long currentTime)
{
    //Inferences
#if STAGE==2
    for(int i=0; i<beliefsSelectedCnt; i++)
    {
        long countConceptsMatched = 0;
        bool fired[CONCEPTS_MAX] = {0}; //whether a concept already fired
        for(;;)
        {
            long countConceptsMatchedNew = 0;
            //Adjust dynamic firing threshold: (proportional "self"-control)
            double conceptPriorityThresholdCurrent = conceptPriorityThreshold;
            //guard against division by zero on a zero currentTime
            long countConceptsMatchedAverage = currentTime > 0 ? Stats_countConceptsMatchedTotal / currentTime : 0;
            double set_point = BELIEF_CONCEPT_MATCH_TARGET;
            double process_value = countConceptsMatchedAverage;
            double error = process_value - set_point;
            double increment = error*CONCEPT_THRESHOLD_ADAPTATION;
            conceptPriorityThreshold = MIN(1.0, MAX(0.0, conceptPriorityThreshold + increment));
            //IN_DEBUG( printf("conceptPriorityThreshold=%f\n", conceptPriorityThreshold); )
            Event *e = &selectedBeliefs[i];
            Term subterms_of_e[2] = {0}; //subterms up to level 1
            for(int j=0; j<2; j++)
            {
                subterms_of_e[j] = Term_ExtractSubterm(&e->term, j+1);
            }
            double priority = selectedBeliefsPriority[i];
            Term dummy_term = {0};
            Truth dummy_truth = {0};
            RuleTable_Apply(e->term, dummy_term, e->truth, dummy_truth, e->occurrenceTime, e->stamp, currentTime, priority, 1, false, NULL, 0);
            IN_DEBUG( puts("Event was selected:"); Event_Print(e); )
            //Main inference loop:
            #pragma omp parallel for
            for(int j=0; j<concepts.itemsAmount; j++)
            {
                Concept *c = concepts.items[j].address;
                long validation_cid = c->id; //allows for lockfree rule table application (only adding to memory is locked)
                if(fired[j] || c->priority < conceptPriorityThresholdCurrent)
                {
                    continue;
                }
                fired[j] = true;
                //first filter based on common term (semantic relationship)
                bool has_common_term = false;
                for(int k=0; k<2; k++)
                {
                    Term current = Term_ExtractSubterm(&c->term, k+1);
                    for(int h=0; h<2; h++)
                    {
                        if(current.atoms[0] != 0 && subterms_of_e[h].atoms[0] != 0)
                        {
                            if(Term_Equal(&current, &subterms_of_e[h]))
                            {
                                has_common_term = true;
                                goto PROCEED;
                            }
                        }
                    }
                }
                PROCEED:;
                //second filter based on precondition implication (temporal relationship)
                bool is_temporally_related = false;
                for(int k=0; k<c->precondition_beliefs[0].itemsAmount; k++)
                {
                    Implication imp = c->precondition_beliefs[0].array[k];
                    Term subject = Term_ExtractSubterm(&imp.term, 1);
                    if(Variable_Unify(&subject, &e->term).success)
                    {
                        is_temporally_related = true;
                        break;
                    }
                }
                if(has_common_term)
                {
                    #pragma omp critical(stats)
                    {
                        countConceptsMatchedNew++;
                        countConceptsMatched++;
                        Stats_countConceptsMatchedTotal++;
                    }
                }
                if(has_common_term && c->belief.type != EVENT_TYPE_DELETED)
                {
                    //use eternal belief as belief
                    Event* belief = &c->belief;
                    Event future_belief = c->predicted_belief;
                    //but if there is a predicted one in the event's window, use this one
                    if(e->occurrenceTime != OCCURRENCE_ETERNAL && future_belief.type != EVENT_TYPE_DELETED)
                    {
                        //|dt| computed in long arithmetic: abs() takes int and would
                        //truncate large occurrence-time differences
                        long dt = e->occurrenceTime - future_belief.occurrenceTime;
                        if(dt < 0)
                        {
                            dt = -dt;
                        }
                        if(dt < EVENT_BELIEF_DISTANCE) //take event as belief if it's stronger
                        {
                            future_belief.truth = Truth_Projection(future_belief.truth, future_belief.occurrenceTime, e->occurrenceTime);
                            future_belief.occurrenceTime = e->occurrenceTime;
                            belief = &future_belief;
                        }
                    }
                    //unless there is an actual belief which falls into the event's window
                    Event project_belief = c->belief_spike;
                    if(e->occurrenceTime != OCCURRENCE_ETERNAL && project_belief.type != EVENT_TYPE_DELETED)
                    {
                        long dt = e->occurrenceTime - project_belief.occurrenceTime;
                        if(dt < 0)
                        {
                            dt = -dt;
                        }
                        if(dt < EVENT_BELIEF_DISTANCE) //take event as belief if it's stronger
                        {
                            project_belief.truth = Truth_Projection(project_belief.truth, project_belief.occurrenceTime, e->occurrenceTime);
                            project_belief.occurrenceTime = e->occurrenceTime;
                            belief = &project_belief;
                        }
                    }
                    //Check for overlap and apply inference rules
                    if(!Stamp_checkOverlap(&e->stamp, &belief->stamp))
                    {
                        Stamp stamp = Stamp_make(&e->stamp, &belief->stamp);
                        if(PRINT_CONTROL_INFO)
                        {
                            fputs("Apply rule table on ", stdout);
                            Narsese_PrintTerm(&e->term);
                            printf(" Priority=%f\n", priority);
                            fputs(" and ", stdout);
                            Narsese_PrintTerm(&c->term);
                            puts("");
                        }
                        RuleTable_Apply(e->term, c->term, e->truth, belief->truth, e->occurrenceTime, stamp, currentTime, priority, c->priority, true, c, validation_cid);
                    }
                }
                if(is_temporally_related)
                {
                    //loop index renamed from "i": it shadowed the selected-belief index
                    for(int idx=0; idx<c->precondition_beliefs[0].itemsAmount; idx++)
                    {
                        Implication *imp = &c->precondition_beliefs[0].array[idx];
                        assert(Narsese_copulaEquals(imp->term.atoms[0],'$'), "Not a valid implication term!");
                        Term precondition_with_op = Term_ExtractSubterm(&imp->term, 1);
                        Term precondition = Narsese_GetPreconditionWithoutOp(&precondition_with_op);
                        Substitution subs = Variable_Unify(&precondition, &e->term);
                        if(subs.success)
                        {
                            Implication updated_imp = *imp;
                            bool success;
                            updated_imp.term = Variable_ApplySubstitute(updated_imp.term, subs, &success);
                            if(success)
                            {
                                //predict the implication's consequence from the event
                                Event predicted = Inference_BeliefDeduction(e, &updated_imp);
                                NAL_DerivedEvent(predicted.term, predicted.occurrenceTime, predicted.truth, predicted.stamp, currentTime, priority, Truth_Expectation(imp->truth), 0, c, validation_cid);
                            }
                        }
                    }
                }
            }
            if(countConceptsMatched > Stats_countConceptsMatchedMax)
            {
                Stats_countConceptsMatchedMax = countConceptsMatched;
            }
            //stop once enough concepts matched, or no new concept fired this pass
            if(countConceptsMatched >= BELIEF_CONCEPT_MATCH_TARGET || countConceptsMatchedNew == 0)
            {
                break;
            }
        }
    }
#endif
}
//Apply relative forgetting: decay event and concept priorities, refresh the
//usage of user-knowledge concepts, then rebuild the priority queues so their
//ordering reflects the updated priorities.
void Cycle_RelativeForgetting(long currentTime)
{
    //Apply event forgetting:
    for(int k=0; k<cycling_belief_events.itemsAmount; k++)
    {
        cycling_belief_events.items[k].priority *= EVENT_DURABILITY;
    }
    for(int k=0; k<cycling_goal_events.itemsAmount; k++)
    {
        cycling_goal_events.items[k].priority *= EVENT_DURABILITY;
    }
    //Apply concept forgetting:
    for(int k=0; k<concepts.itemsAmount; k++)
    {
        Concept *concept = concepts.items[k].address;
        concept->priority *= CONCEPT_DURABILITY;
        concepts.items[k].priority = Usage_usefulness(concept->usage, currentTime); //how concept memory is sorted by, by concept usefulness
    }
    //BEGIN SPECIAL HANDLING FOR USER KNOWLEDGE
    if(ontology_handling)
    {
        for(int k=0; k<concepts.itemsAmount; k++)
        {
            Concept *concept = concepts.items[k].address;
            if(concept->hasUserKnowledge)
            {
                concept->usage = Usage_use(concept->usage, currentTime, false); //user implication won't be forgotten
            }
        }
    }
    //END SPECIAL HANDLING FOR USER KNOWLEDGE
    //Re-sort queues
    PriorityQueue_Rebuild(&concepts);
    PriorityQueue_Rebuild(&cycling_belief_events);
    PriorityQueue_Rebuild(&cycling_goal_events);
}
//Run one full reasoning cycle: select events, process beliefs and goals,
//derive new knowledge, apply forgetting, and push selected events back.
void Cycle_Perform(long currentTime)
{
    Metric_send("NARNode.Cycle", 1);
    //1. Retrieve BELIEF/GOAL_EVENT_SELECTIONS events from cyclings events priority queue (which includes both input and derivations)
    Cycle_PopEvents(selectedGoals, selectedGoalsPriority, &goalsSelectedCnt, &cycling_goal_events, GOAL_EVENT_SELECTIONS);
    Cycle_PopEvents(selectedBeliefs, selectedBeliefsPriority, &beliefsSelectedCnt, &cycling_belief_events, BELIEF_EVENT_SELECTIONS);
    //2. Process incoming belief events from FIFO, building implications utilizing input sequences and in 1. retrieved events.
    Cycle_ProcessInputBeliefEvents(currentTime);
    //3. Process incoming goal events from FIFO, propagating subgoals according to implications, triggering decisions when above decision threshold
    Cycle_ProcessInputGoalEvents(currentTime);
    //4. Perform inference between in 1. retrieved events and semantically/temporally related, high-priority concepts to derive and process new events
    Cycle_Inference(currentTime);
    //5. Apply relative forgetting for concepts according to CONCEPT_DURABILITY and events according to BELIEF_EVENT_DURABILITY
    Cycle_RelativeForgetting(currentTime);
    //6. Push in 1. selected events back to the queue as well, applying relative forgetting based on BELIEF_EVENT_DURABILITY_ON_USAGE
    Cycle_PushEvents(currentTime);
}
|
prand.c | //------------------------------------------------------------------------------
// GraphBLAS/Demo/Source/prand: parallel random number generator
//------------------------------------------------------------------------------
// A simple thread-safe parallel pseudo-random nuumber generator.
#include "prand.h"
//------------------------------------------------------------------------------
// prand macros
//------------------------------------------------------------------------------
// Generate the next seed, and extract a random 15-bit value from a seed.
#define PRAND_RECURENCE(seed) ((seed) * 1103515245 + 12345)
#define PRAND_15_MAX 32767
#define PRAND_15(seed) (((seed)/65536) % (PRAND_15_MAX + 1))
//------------------------------------------------------------------------------
// global types and operators
//------------------------------------------------------------------------------
// These can be shared by all threads in a user application, and thus are
// safely declared as global objects.
GrB_Type prand_type = NULL ;
GrB_UnaryOp prand_next_op = NULL ;
GrB_UnaryOp prand_iget_op = NULL ;
GrB_UnaryOp prand_xget_op = NULL ;
GrB_BinaryOp prand_dup_op = NULL ;
//------------------------------------------------------------------------------
// prand_next_op: unary operator to construct the next seed
//------------------------------------------------------------------------------
// z = f(x), where x is the old seed and z is the new seed.
// z = f(x): advance each of the five linear-congruential seeds one step,
// where x is the old seed and z is the new seed.
void prand_next_f (prand_t *z, const prand_t *x)
{
    int k = 0 ;
    while (k < 5)
    {
        z->seed [k] = PRAND_RECURENCE (x->seed [k]) ;
        k++ ;
    }
}
//------------------------------------------------------------------------------
// prand_iget: unary operator to construct get a random integer from the seed
//------------------------------------------------------------------------------
// z = f(x), where x is a random seed, and z is an unsigned 64-bit
// pseudo-random number constructed from the seed.
// z = f(x): combine five 15-bit draws from the seed x into one unsigned
// 64-bit pseudo-random value z.
void prand_iget_f (uint64_t *z, const prand_t *x)
{
    uint64_t result = 0 ;
    for (int j = 0 ; j < 5 ; j++)
    {
        result = result * PRAND_15_MAX + PRAND_15 (x->seed [j]) ;
    }
    (*z) = result ;
}
//------------------------------------------------------------------------------
// prand_xget: unary operator to construct get a random double from the seed
//------------------------------------------------------------------------------
// z = f(x), where x is a random seed, and z is a double precision
// pseudo-random number constructed from the seed, in the range 0 to 1.
void prand_xget_f (double *z, prand_t *x)
{
uint64_t i ;
prand_iget_f (&i, x) ;
(*z) = ((double) i) / ((double) UINT64_MAX) ;
}
//------------------------------------------------------------------------------
// prand_dup: binary operator to build a vector
//------------------------------------------------------------------------------
// This is required by GrB_Vector_build, but is never called since no
// duplicates are created. This is the SECOND operator for the prand_type.
#if defined ( __INTEL_COMPILER )
// disable icc warnings
// 869: unused parameters
#pragma warning (disable: 869 )
#elif defined ( __GNUC__ )
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
// Binary "dup" operator required by GrB_Vector_build; it is never actually
// called here because prand_seed builds with unique indices. Copies y to z.
void prand_dup_f (prand_t *z, /* unused: */ const prand_t *x, const prand_t *y)
{
    (*z) = (*y) ;
}
//------------------------------------------------------------------------------
// prand_init: create the random seed type and its operators
//------------------------------------------------------------------------------
#define PRAND_FREE_ALL \
{ \
GrB_free (&prand_type) ; \
GrB_free (&prand_next_op) ; \
GrB_free (&prand_iget_op) ; \
GrB_free (&prand_xget_op) ; \
GrB_free (&prand_dup_op) ; \
}
#undef OK
#define OK(method) \
{ \
GrB_Info info = method ; \
if (info != GrB_SUCCESS) \
{ \
PRAND_FREE_ALL ; \
printf ("GraphBLAS error:\n%s\n", GrB_error ( )) ; \
return (info) ; \
} \
}
// Create the prand seed type and its operators. On any failure the OK macro
// frees everything created so far (via PRAND_FREE_ALL) and returns the error.
GrB_Info prand_init ( )
{
    // start from a clean slate so PRAND_FREE_ALL is safe on partial failure
    prand_type = NULL ;
    prand_next_op = NULL ;
    prand_iget_op = NULL ;
    prand_xget_op = NULL ;
    prand_dup_op = NULL ;
    // user-defined type holding the five seeds
    OK (GrB_Type_new (&prand_type, sizeof (prand_t))) ;
    // unary op: advance the seed
    OK (GrB_UnaryOp_new (&prand_next_op, (GxB_unary_function) prand_next_f,
        prand_type, prand_type)) ;
    // unary op: extract a uint64 random value from the seed
    OK (GrB_UnaryOp_new (&prand_iget_op, (GxB_unary_function) prand_iget_f,
        GrB_UINT64, prand_type)) ;
    // unary op: extract a double in [0,1] from the seed
    OK (GrB_UnaryOp_new (&prand_xget_op, (GxB_unary_function) prand_xget_f,
        GrB_FP64, prand_type)) ;
    // binary dup op required by GrB_Vector_build (never actually called)
    OK (GrB_BinaryOp_new (&prand_dup_op, (GxB_binary_function) prand_dup_f,
        prand_type, prand_type, prand_type)) ;
    return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// prand_finalize: free the random seed type and its operators
//------------------------------------------------------------------------------
// Free the random seed type and all of its operators.
GrB_Info prand_finalize ( )
{
    PRAND_FREE_ALL ;
    return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// prand_next: get the next random numbers
//------------------------------------------------------------------------------
// Advance every seed in the vector: Seed = prand_next_op (Seed), elementwise.
GrB_Info prand_next
(
    GrB_Vector Seed
)
{
    return (GrB_apply (Seed, NULL, NULL, prand_next_op, Seed, NULL)) ;
}
//------------------------------------------------------------------------------
// prand_seed: create a vector of random seeds
//------------------------------------------------------------------------------
// Returns a vector of random seed values.
#define PRAND_FREE_WORK \
{ \
free (I) ; \
free (X) ; \
}
#undef PRAND_FREE_ALL
#define PRAND_FREE_ALL \
{ \
PRAND_FREE_WORK ; \
GrB_free (Seed) ; \
}
// Create a vector of n random seeds derived from the scalar input seed,
// advanced once so the first extracted values are already mixed.
// Returns GrB_SUCCESS, GrB_OUT_OF_MEMORY, or the first GraphBLAS error.
GrB_Info prand_seed
(
    GrB_Vector *Seed, // vector of random number seeds
    int64_t seed, // scalar input seed
    GrB_Index n, // size of Seed to create
    int nthreads // # of threads to use (OpenMP default if <= 0)
)
{
    GrB_Index *I = NULL ;
    prand_t *X = NULL ;
    // allocate the Seed vector
    OK (GrB_Vector_new (Seed, prand_type, n)) ;
    // guard against size_t overflow of (n+1)*sizeof(...) before allocating
    if (n >= SIZE_MAX / sizeof (prand_t) || n >= SIZE_MAX / sizeof (GrB_Index))
    {
        PRAND_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    // allocate the I and X arrays
    I = malloc ((n+1) * sizeof (GrB_Index)) ;
    X = malloc ((n+1) * sizeof (prand_t)) ;
    if (I == NULL || X == NULL)
    {
        PRAND_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    // determine # of threads to use
    int nthreads_max = 1 ;
    #ifdef _OPENMP
    nthreads_max = omp_get_max_threads ( ) ;
    #endif
    if (nthreads <= 0 || nthreads > nthreads_max)
    {
        nthreads = nthreads_max ;
    }
    // construct the tuples for the initial seeds; each entry i gets five
    // distinct seeds derived from the scalar seed and its index
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t i = 0 ; i < (int64_t) n ; i++)
    {
        I [i] = i ;
        for (int k = 0 ; k < 5 ; k++)
        {
            X [i].seed [k] = (100000000*(seed) + 10*i + k + 1) ;
        }
    }
    // build the Seed vector
    OK (GrB_Vector_build_UDT (*Seed, I, X, n, prand_dup_op)) ;
    // free workspace
    PRAND_FREE_WORK ;
    // advance to the first set of random numbers
    OK (prand_next (*Seed)) ;
    return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// prand_print: print the Seed vector
//------------------------------------------------------------------------------
// This is meant for testing, not production use.
#undef PRAND_FREE_ALL
#define PRAND_FREE_ALL ;
// Print the Seed vector (testing only, not production use).
// pr = 0: print nothing; 1: print the first entries; 2: print all.
GrB_Info prand_print
(
    GrB_Vector Seed,
    int pr // 0: print nothing, 1: print some, 2: print all
)
{
    if (pr > 0)
    {
        GrB_Index n ;
        OK (GrB_Vector_nvals (&n, Seed)) ;
        printf ("\nSeed: length %g\n", (double) n) ;
        prand_t x ;
        // sentinel values in case an entry is missing from the vector
        for (int k = 0 ; k < 5 ; k++) x.seed [k] = -1 ;
        for (int64_t i = 0 ; i < (int64_t) n ; i++)
        {
            if (GrB_Vector_extractElement_UDT (&x, Seed, i) == GrB_SUCCESS)
            {
                printf ("%g: ", (double) i) ;
                for (int k = 0 ; k < 5 ; k++)
                {
                    printf (" %.18g", (double) (x.seed [k])) ;
                }
                printf ("\n") ;
            }
            // in "print some" mode, stop after the first few entries
            if (pr == 1 && i > 10) break ;
        }
    }
    return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// prand_iget: return a vector of random uint64 integers
//------------------------------------------------------------------------------
// Fill X with random uint64 values derived from Seed, then advance Seed so
// the next call produces fresh numbers.
GrB_Info prand_iget
(
    GrB_Vector X,
    GrB_Vector Seed
)
{
    OK (GrB_apply (X, NULL, NULL, prand_iget_op, Seed, NULL)) ;
    return (prand_next (Seed)) ;
}
//------------------------------------------------------------------------------
// prand_xget: return a vector of random doubles, in range 0 to 1 inclusive
//------------------------------------------------------------------------------
// Fill X with random doubles in [0,1] derived from Seed, then advance Seed
// so the next call produces fresh numbers.
GrB_Info prand_xget
(
    GrB_Vector X,
    GrB_Vector Seed
)
{
    OK (GrB_apply (X, NULL, NULL, prand_xget_op, Seed, NULL)) ;
    return (prand_next (Seed)) ;
}
|
mbpush2.c | /* C Library for Skeleton 2-1/2D Electromagnetic OpenMP PIC Code */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include <math.h>
#include "mbpush2.h"
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------*/
/* NOTE: all state (seeds r1..r5 and the cached second deviate r0/iflg) is
   static, so this function is not reentrant or thread-safe. Each full pass
   produces two deviates; the second is cached and returned by the next call. */
double ranorm() {
/* this program calculates a random number y from a gaussian distribution
   with zero mean and unit variance, according to the method of
   mueller and box:
      y(k) = (-2*ln(x(k)))**1/2*sin(2*pi*x(k+1))
      y(k+1) = (-2*ln(x(k)))**1/2*cos(2*pi*x(k+1)),
   where x is a random number uniformly distributed on (0,1).
   written for the ibm by viktor k. decyk, ucla
local data */
   static int r1 = 885098780, r2 = 1824280461;
   static int r4 = 1396483093, r5 = 55318673;
   static int iflg = 0;
   static double h1l = 65531.0, h1u = 32767.0, h2l = 65525.0;
   static double r0 = 0.0;
   int isc, i1;
   double ranorm, r3, asc, bsc, temp;
/* return the cached second deviate from the previous call, if any */
   if (iflg==1) {
      ranorm = r0;
      r0 = 0.0;
      iflg = 0;
      return ranorm;
   }
/* first uniform draw: advance the (r1,r2) multi-word seed pair */
   isc = 65536;
   asc = (double) isc;
   bsc = asc*asc;
   i1 = r1 - (r1/isc)*isc;
   r3 = h1l*(double) r1 + asc*h1u*(double) i1;
   i1 = r3/bsc;
   r3 -= ((double) i1)*bsc;
   bsc = 0.5*bsc;
   i1 = r2/isc;
   isc = r2 - i1*isc;
   r0 = h1l*(double) r2 + asc*h1u*(double) isc;
   asc = 1.0/bsc;
   isc = r0*asc;
   r2 = r0 - ((double) isc)*bsc;
   r3 += (double) isc + 2.0*h1u*(double) i1;
   isc = r3*asc;
   r1 = r3 - ((double) isc)*bsc;
/* Box-Mueller radius from the first draw */
   temp = sqrt(-2.0*log((((double) r1) + ((double) r2)*asc)*asc));
/* second uniform draw: advance the (r4,r5) multi-word seed pair */
   isc = 65536;
   asc = (double) isc;
   bsc = asc*asc;
   i1 = r4 - (r4/isc)*isc;
   r3 = h2l*(double) r4 + asc*h1u*(double) i1;
   i1 = r3/bsc;
   r3 -= ((double) i1)*bsc;
   bsc = 0.5*bsc;
   i1 = r5/isc;
   isc = r5 - i1*isc;
   r0 = h2l*(double) r5 + asc*h1u*(double) isc;
   asc = 1.0/bsc;
   isc = r0*asc;
   r5 = r0 - ((double) isc)*bsc;
   r3 += (double) isc + 2.0*h1u*(double) i1;
   isc = r3*asc;
   r4 = r3 - ((double) isc)*bsc;
/* angle from the second draw; return sin deviate now, cache cos deviate */
   r0 = 6.28318530717959*((((double) r4) + ((double) r5)*asc)*asc);
   ranorm = temp*sin(r0);
   r0 = temp*cos(r0);
   iflg = 1;
   return ranorm;
}
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------*/
void cdistr2h(float part[], float vtx, float vty, float vtz, float vdx,
              float vdy, float vdz, int npx, int npy, int idimp, int nop,
              int nx, int ny, int ipbc) {
/* for 2-1/2d code, this subroutine calculates initial particle
   co-ordinates and velocities with uniform density and maxwellian
   velocity with drift
   part[n][0] = position x of particle n
   part[n][1] = position y of particle n
   part[n][2] = velocity vx of particle n
   part[n][3] = velocity vy of particle n
   part[n][4] = velocity vz of particle n
   vtx/vty/vtz = thermal velocity of electrons in x/y/z direction
   vdx/vdy/vdz = drift velocity of beam electrons in x/y/z direction
   npx/npy = initial number of particles distributed in x/y direction
   idimp = size of phase space = 5
   nop = number of particles
   nx/ny = system length in x/y direction
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
   ranorm = gaussian random number with zero mean and unit variance
   NOTE: uses ranorm(), which has static state, so this routine is serial
local data */
   int j, k, k1, npxy;
   float edgelx, edgely, at1, at2, at3, sum1, sum2, sum3;
   double dsum1, dsum2, dsum3;
   npxy = npx*npy;
/* set boundary values: edge offsets and cell spacing depend on ipbc */
   edgelx = 0.0;
   edgely = 0.0;
   at1 = (float) nx/(float) npx;
   at2 = (float) ny/(float) npy;
   if (ipbc==2) {
      edgelx = 1.0;
      edgely = 1.0;
      at1 = (float) (nx-2)/(float) npx;
      at2 = (float) (ny-2)/(float) npy;
   }
   else if (ipbc==3) {
      edgelx = 1.0;
      edgely = 0.0;
      at1 = (float) (nx-2)/(float) npx;
      at2 = (float) ny/(float) npy;
   }
/* uniform density profile: particles at cell centers of an npx-by-npy grid */
   for (k = 0; k < npy; k++) {
      k1 = idimp*npx*k;
      at3 = edgely + at2*(((float) k) + 0.5);
      for (j = 0; j < npx; j++) {
         part[idimp*j+k1] = edgelx + at1*(((float) j) + 0.5);
         part[1+idimp*j+k1] = at3;
      }
   }
/* maxwellian velocity distribution */
   for (j = 0; j < npxy; j++) {
      part[2+idimp*j] = vtx*ranorm();
      part[3+idimp*j] = vty*ranorm();
      part[4+idimp*j] = vtz*ranorm();
   }
/* add correct drift: subtract the sampled mean, then add the desired drift */
/* (sums accumulated in double for accuracy, then narrowed to float) */
   dsum1 = 0.0;
   dsum2 = 0.0;
   dsum3 = 0.0;
   for (j = 0; j < npxy; j++) {
      dsum1 += part[2+idimp*j];
      dsum2 += part[3+idimp*j];
      dsum3 += part[4+idimp*j];
   }
   sum1 = dsum1;
   sum2 = dsum2;
   sum3 = dsum3;
   at1 = 1.0/(float) npxy;
   sum1 = at1*sum1 - vdx;
   sum2 = at1*sum2 - vdy;
   sum3 = at1*sum3 - vdz;
   for (j = 0; j < npxy; j++) {
      part[2+idimp*j] -= sum1;
      part[3+idimp*j] -= sum2;
      part[4+idimp*j] -= sum3;
   }
   return;
}
/*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------*/
void cdblkp2l(float part[], int kpic[], int *nppmx, int idimp, int nop,
              int mx, int my, int mx1, int mxy1, int *irc) {
/* find the maximum number of particles in each tile of size mx by my, to
   size the segmented particle array ppart; linear interpolation
   part[n][0] / part[n][1] = x / y position of particle n
   kpic = output number of particles per tile
   nppmx = returned maximum tile population
   idimp = size of phase space = 4
   nop = number of particles
   mx/my = number of grids in sorting cell in x and y
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   irc = maximum overflow when an error occurs (> 0), or -1 on count mismatch
local data */
   int tile, p, ix, iy, total, biggest, overflow;
   overflow = 0;
/* reset per-tile counters */
   for (tile = 0; tile < mxy1; tile++) {
      kpic[tile] = 0;
   }
/* tally particles into tiles (positions truncate to grid indices) */
   for (p = 0; p < nop; p++) {
      ix = part[idimp*p];
      iy = part[1+idimp*p];
      tile = ix/mx + mx1*(iy/my);
      if (tile < mxy1) {
         kpic[tile] += 1;
      }
      else {
         int over = tile - mxy1 + 1;
         if (over > overflow) {
            overflow = over;
         }
      }
   }
/* largest tile population and grand total */
   total = 0;
   biggest = 0;
   for (tile = 0; tile < mxy1; tile++) {
      if (kpic[tile] > biggest) {
         biggest = kpic[tile];
      }
      total += kpic[tile];
   }
   *nppmx = biggest;
/* report errors: tile overflow, or particles lost in the tally */
   if (overflow > 0) {
      *irc = overflow;
   }
   else if (total != nop) {
      *irc = -1;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppmovin2l(float part[], float ppart[], int kpic[], int nppmx,
                int idimp, int nop, int mx, int my, int mx1, int mxy1,
                int *irc) {
/* copy particles from the linear array part into the segmented tiled
   array ppart, sorting by x,y grid in tiles of mx, my
   linear interpolation
   input: all except ppart, kpic, output: ppart, kpic
   part/ppart = input/output particle arrays
   part[n][0] = position x of particle n in partition
   part[n][1] = position y of particle n in partition
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = velocity vx of particle n in tile m
   ppart[m][n][3] = velocity vy of particle n in tile m
   kpic = output number of particles per tile
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 4
   nop = number of particles
   mx/my = number of grids in sorting cell in x and y
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   irc = maximum overflow, returned only if error occurs, when irc > 0
   local data */
   int i, j, k, ix, iy, tile, ip, over, ierr;
   ierr = 0;
/* zero the per-tile counters */
   for (k = 0; k < mxy1; k++)
      kpic[k] = 0;
/* deposit each particle at the next free slot of its tile */
   for (j = 0; j < nop; j++) {
      ix = part[idimp*j];
      iy = part[1+idimp*j];
      tile = ix/mx + mx1*(iy/my);
      ip = kpic[tile];
      if (ip < nppmx) {
         for (i = 0; i < idimp; i++)
            ppart[i+idimp*(ip+nppmx*tile)] = part[i+idimp*j];
      }
      else {
/* tile is full: remember the worst overflow */
         over = ip - nppmx + 1;
         if (over > ierr)
            ierr = over;
      }
      kpic[tile] = ip + 1;
   }
/* report overflow of the segmented array */
   if (ierr > 0)
      *irc = ierr;
   return;
}
/*--------------------------------------------------------------------*/
void cppcheck2l(float ppart[], int kpic[], int idimp, int nppmx, int nx,
                int ny, int mx, int my, int mx1, int my1,
                int *irc) {
/* sanity check: verify that every particle stored in the segmented
   array ppart, sorted by x,y grid in tiles of mx, my, lies inside the
   spatial bounds of its own tile.
   tiles are assumed to be arranged in 2D linear memory
   input: all except irc
   output: irc
   ppart[k][n][0] = position x of particle n in tile k
   ppart[k][n][1] = position y of particle n in tile k
   kpic[k] = number of reordered output particles in tile k
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   irc = particle error, returned only if error occurs, when irc > 0
   local data */
   int ntiles, row, xlo, ylo, npp, j, k, ngx, ngy, flag;
   float xmin, ymin, xmax, ymax, px, py;
   ntiles = mx1*my1;
/* examine each tile in parallel */
#pragma omp parallel for \
private(j,k,row,xlo,ylo,npp,ngx,ngy,flag,xmin,ymin,xmax,ymax,px,py)
   for (k = 0; k < ntiles; k++) {
      row = k/mx1;
      ylo = my*row;
      xlo = mx*(k - mx1*row);
      npp = kpic[k];
/* clip the tile extent at the global boundary */
      ngx = nx - xlo;
      if (ngx > mx)
         ngx = mx;
      ngy = ny - ylo;
      if (ngy > my)
         ngy = my;
      xmin = xlo;
      xmax = xlo + ngx;
      ymin = ylo;
      ymax = ylo + ngy;
/* test every particle stored in this tile */
      for (j = 0; j < npp; j++) {
         px = ppart[idimp*(j+nppmx*k)];
         py = ppart[1+idimp*(j+nppmx*k)];
/* flag encodes the out-of-bounds direction, 0 means in bounds */
         flag = 0;
         if (px < xmin)
            flag = 1;
         if (px >= xmax)
            flag = 2;
         if (py < ymin)
            flag += 3;
         if (py >= ymax)
            flag += 6;
         if (flag > 0)
            *irc = k + 1;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cgbppush23l(float ppart[], float fxy[], float bxy[], int kpic[],
float qbm, float dt, float dtc, float *ek, int idimp,
int nppmx, int nx, int ny, int mx, int my, int nxv,
int nyv, int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field. Using the Boris Mover.
OpenMP version using guard cells
data deposited in tiles
particles stored segmented array
119 flops/particle, 1 divide, 29 loads, 5 stores
input: all, output: ppart, ek
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt)
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt)
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
omz = (q/m)*bz(x(t),y(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][n][0] = position x of particle n in tile m
ppart[m][n][1] = position y of particle n in tile m
ppart[m][n][2] = x velocity of particle n in tile m
ppart[m][n][3] = y velocity of particle n in tile m
ppart[m][n][4] = z velocity of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
/* MXV/MYV = maximum tile size plus one guard cell; they fix the size
   of the on-stack field buffers sfxy/sbxy, so mx < MXV and my < MYV
   are required (see the commented-out check below) */
#define MXV 33
#define MYV 33
int noff, moff, npoff, npp, mxv3;
int i, j, k, nn, mm, nm;
float qtmh, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float dx, dy, dz, ox, oy, oz, acx, acy, acz, omxt, omyt, omzt, omt;
float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y;
float sfxy[3*MXV*MYV], sbxy[3*MXV*MYV];
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
double sum1, sum2;
/* mxv3 = row stride (in floats) of the local field buffers */
mxv3 = 3*(mx + 1);
qtmh = 0.5*qbm*dt;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0;
edgely = 0.0;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0;
edgely = 1.0;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0;
edgerx = (float) (nx-1);
}
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,nm,x,y,dxp,dyp,amx,amy,dx,dy, \
dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4, \
rot5,rot6,rot7,rot8,rot9,sum1,sfxy,sbxy) \
reduction(+:sum2)
for (k = 0; k < mxy1; k++) {
/* noff/moff = global grid offsets of this tile in x and y */
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = nppmx*k;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxy[3*i+mxv3*j] = fxy[3*(i+noff+nxv*(j+moff))];
sfxy[1+3*i+mxv3*j] = fxy[1+3*(i+noff+nxv*(j+moff))];
sfxy[2+3*i+mxv3*j] = fxy[2+3*(i+noff+nxv*(j+moff))];
}
}
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sbxy[3*i+mxv3*j] = bxy[3*(i+noff+nxv*(j+moff))];
sbxy[1+3*i+mxv3*j] = bxy[1+3*(i+noff+nxv*(j+moff))];
sbxy[2+3*i+mxv3*j] = bxy[2+3*(i+noff+nxv*(j+moff))];
}
}
sum1 = 0.0;
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
/* find interpolation weights */
x = ppart[idimp*(j+npoff)];
y = ppart[1+idimp*(j+npoff)];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
/* nm = offset of the particle's cell within the local field buffers */
nm = 3*(nn - noff) + mxv3*(mm - moff);
amx = 1.0 - dxp;
amy = 1.0 - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += mxv3;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += mxv3;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[2+idimp*(j+npoff)] + dx;
acy = ppart[3+idimp*(j+npoff)] + dy;
acz = ppart[4+idimp*(j+npoff)] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
/* anorm = 2/(1 + (om*dt/2)**2), the Boris rotation normalizer */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0/(1.0 + omt);
omt = 0.5*(1.0 - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
ppart[2+idimp*(j+npoff)] = dx;
ppart[3+idimp*(j+npoff)] = dy;
ppart[4+idimp*(j+npoff)] = dz;
/* new position */
dx = x + dx*dtc;
dy = y + dy*dtc;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[idimp*(j+npoff)];
ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = ppart[1+idimp*(j+npoff)];
ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)];
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[idimp*(j+npoff)];
ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
}
}
/* set new position */
ppart[idimp*(j+npoff)] = dx;
ppart[1+idimp*(j+npoff)] = dy;
}
sum2 += sum1;
}
/* normalize kinetic energy */
*ek += 0.5*sum2;
return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgbppushf23l(float ppart[], float fxy[], float bxy[], int kpic[],
int ncl[], int ihole[], float qbm, float dt,
float dtc, float *ek, int idimp, int nppmx, int nx,
int ny, int mx, int my, int nxv, int nyv, int mx1,
int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field. Using the Boris Mover.
with periodic boundary conditions.
also determines list of particles which are leaving this tile
OpenMP version using guard cells
data deposited in tiles
particles stored segmented array
119 flops/particle, 1 divide, 29 loads, 5 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt)
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt)
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
omz = (q/m)*bz(x(t),y(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][n][0] = position x of particle n in tile m
ppart[m][n][1] = position y of particle n in tile m
ppart[m][n][2] = x velocity of particle n in tile m
ppart[m][n][3] = y velocity of particle n in tile m
ppart[m][n][4] = z velocity of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
/* MXV/MYV = maximum tile size plus one guard cell; they fix the size
   of the on-stack field buffers sfxy/sbxy, so mx < MXV and my < MYV
   are required (see the commented-out check below) */
#define MXV 33
#define MYV 33
int noff, moff, npoff, npp, mxv3;
int i, j, k, ih, nh, nn, mm, nm;
float qtmh, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float anx, any, edgelx, edgely, edgerx, edgery;
float x, y;
float sfxy[3*MXV*MYV], sbxy[3*MXV*MYV];
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
double sum1, sum2;
/* mxv3 = row stride (in floats) of the local field buffers */
mxv3 = 3*(mx + 1);
qtmh = 0.5*qbm*dt;
anx = (float) nx;
any = (float) ny;
sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,nm,ih,nh,x,y,dxp,dyp,amx,amy, \
dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3, \
rot4,rot5,rot6,rot7,rot8,rot9,edgelx,edgely,edgerx,edgery,sum1,sfxy, \
sbxy) \
reduction(+:sum2)
for (k = 0; k < mxy1; k++) {
/* noff/moff = global grid offsets of this tile in x and y */
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = nppmx*k;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
/* ih = number of holes found in this tile, nh = ihole overflow flag */
ih = 0;
nh = 0;
/* extend extents by one guard cell for interpolation */
nn += 1;
mm += 1;
/* load local fields from global array */
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxy[3*i+mxv3*j] = fxy[3*(i+noff+nxv*(j+moff))];
sfxy[1+3*i+mxv3*j] = fxy[1+3*(i+noff+nxv*(j+moff))];
sfxy[2+3*i+mxv3*j] = fxy[2+3*(i+noff+nxv*(j+moff))];
}
}
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sbxy[3*i+mxv3*j] = bxy[3*(i+noff+nxv*(j+moff))];
sbxy[1+3*i+mxv3*j] = bxy[1+3*(i+noff+nxv*(j+moff))];
sbxy[2+3*i+mxv3*j] = bxy[2+3*(i+noff+nxv*(j+moff))];
}
}
/* clear counters */
for (j = 0; j < 8; j++) {
ncl[j+8*k] = 0;
}
sum1 = 0.0;
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
/* find interpolation weights */
x = ppart[idimp*(j+npoff)];
y = ppart[1+idimp*(j+npoff)];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
/* nm = offset of the particle's cell within the local field buffers */
nm = 3*(nn - noff) + mxv3*(mm - moff);
amx = 1.0 - dxp;
amy = 1.0 - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += mxv3;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += mxv3;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[2+idimp*(j+npoff)] + dx;
acy = ppart[3+idimp*(j+npoff)] + dy;
acz = ppart[4+idimp*(j+npoff)] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0/(1.0 + omt);
omt = 0.5*(1.0 - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
ppart[2+idimp*(j+npoff)] = dx;
ppart[3+idimp*(j+npoff)] = dy;
ppart[4+idimp*(j+npoff)] = dz;
/* new position */
dx = x + dx*dtc;
dy = y + dy*dtc;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0;
}
else {
mm += 3;
}
}
/* set new position */
ppart[idimp*(j+npoff)] = dx;
ppart[1+idimp*(j+npoff)] = dy;
/* increment counters */
if (mm > 0) {
ncl[mm+8*k-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh = 1;
}
}
}
sum2 += sum1;
/* set error and end of file flag */
/* ihole overflow */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
/* normalize kinetic energy */
*ek += 0.5*sum2;
return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgrbppush23l(float ppart[], float fxy[], float bxy[], int kpic[],
float qbm, float dt, float dtc, float ci, float *ek,
int idimp, int nppmx, int nx, int ny, int mx, int my,
int nxv, int nyv, int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, for relativistic particles with magnetic field
Using the Boris Mover.
OpenMP version using guard cells
data deposited in tiles
particles stored segmented array
131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
input: all, output: ppart, ek
momentum equations used are:
px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt)
py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt)
pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
omz = (q/m)*bz(x(t),y(t))*gami,
where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
position equations used are:
x(t+dt) = x(t) + px(t+dt/2)*dtg
y(t+dt) = y(t) + py(t+dt/2)*dtg
where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
pz(t+dt/2)*pz(t+dt/2))*ci*ci)
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][n][0] = position x of particle n in tile m
ppart[m][n][1] = position y of particle n in tile m
ppart[m][n][2] = x momentum of particle n in tile m
ppart[m][n][3] = y momentum of particle n in tile m
ppart[m][n][4] = z momentum of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
ci = reciprocal of velocity of light
kinetic energy/mass at time t is also calculated, using
ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
/* MXV/MYV = maximum tile size plus one guard cell; they fix the size
   of the on-stack field buffers sfxy/sbxy, so mx < MXV and my < MYV
   are required (see the commented-out check below) */
#define MXV 33
#define MYV 33
int noff, moff, npoff, npp, mxv3;
int i, j, k, nn, mm, nm;
float qtmh, ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float dx, dy, dz, ox, oy, oz, acx, acy, acz, p2, gami, qtmg, dtg;
float omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y;
float sfxy[3*MXV*MYV], sbxy[3*MXV*MYV];
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
double sum1, sum2;
/* mxv3 = row stride (in floats) of the local field buffers */
mxv3 = 3*(mx + 1);
qtmh = 0.5*qbm*dt;
/* ci2 = (1/c)**2, used to form the relativistic gamma factors */
ci2 = ci*ci;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0;
edgely = 0.0;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0;
edgely = 1.0;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0;
edgerx = (float) (nx-1);
}
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,nm,x,y,dxp,dyp,amx,amy,dx,dy, \
dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4, \
rot5,rot6,rot7,rot8,rot9,p2,gami,qtmg,dtg,sum1,sfxy,sbxy) \
reduction(+:sum2)
for (k = 0; k < mxy1; k++) {
/* noff/moff = global grid offsets of this tile in x and y */
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = nppmx*k;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxy[3*i+mxv3*j] = fxy[3*(i+noff+nxv*(j+moff))];
sfxy[1+3*i+mxv3*j] = fxy[1+3*(i+noff+nxv*(j+moff))];
sfxy[2+3*i+mxv3*j] = fxy[2+3*(i+noff+nxv*(j+moff))];
}
}
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sbxy[3*i+mxv3*j] = bxy[3*(i+noff+nxv*(j+moff))];
sbxy[1+3*i+mxv3*j] = bxy[1+3*(i+noff+nxv*(j+moff))];
sbxy[2+3*i+mxv3*j] = bxy[2+3*(i+noff+nxv*(j+moff))];
}
}
sum1 = 0.0;
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
/* find interpolation weights */
x = ppart[idimp*(j+npoff)];
y = ppart[1+idimp*(j+npoff)];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
/* nm = offset of the particle's cell within the local field buffers */
nm = 3*(nn - noff) + mxv3*(mm - moff);
amx = 1.0 - dxp;
amy = 1.0 - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + 3;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += mxv3;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + 3;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + 3;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += mxv3;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + 3;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[2+idimp*(j+npoff)] + dx;
acy = ppart[3+idimp*(j+npoff)] + dy;
acz = ppart[4+idimp*(j+npoff)] + dz;
/* find inverse gamma */
p2 = acx*acx + acy*acy + acz*acz;
gami = 1.0/sqrtf(1.0 + p2*ci2);
/* renormalize magnetic field */
qtmg = qtmh*gami;
/* time-centered kinetic energy */
sum1 += gami*p2/(1.0 + gami);
/* calculate cyclotron frequency */
omxt = qtmg*ox;
omyt = qtmg*oy;
omzt = qtmg*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0/(1.0 + omt);
omt = 0.5*(1.0 - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
ppart[2+idimp*(j+npoff)] = dx;
ppart[3+idimp*(j+npoff)] = dy;
ppart[4+idimp*(j+npoff)] = dz;
/* update inverse gamma */
p2 = dx*dx + dy*dy + dz*dz;
dtg = dtc/sqrtf(1.0 + p2*ci2);
/* new position */
dx = x + dx*dtg;
dy = y + dy*dtg;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[idimp*(j+npoff)];
ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
}
if ((dy < edgely) || (dy >= edgery)) {
dy = ppart[1+idimp*(j+npoff)];
ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)];
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = ppart[idimp*(j+npoff)];
ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
}
}
/* set new position */
ppart[idimp*(j+npoff)] = dx;
ppart[1+idimp*(j+npoff)] = dy;
}
sum2 += sum1;
}
/* normalize kinetic energy */
*ek += sum2;
return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgrbppushf23l(float ppart[], float fxy[], float bxy[], int kpic[],
                   int ncl[], int ihole[], float qbm, float dt,
                   float dtc, float ci, float *ek, int idimp, int nppmx,
                   int nx, int ny, int mx, int my, int nxv, int nyv,
                   int mx1, int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, for relativistic particles with magnetic field
   with periodic boundary conditions.
   Using the Boris Mover.
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
   input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
   momentum equations used are:
   px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
                rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
                rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
                .5*(q/m)*fx(x(t),y(t))*dt)
   py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
                rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
                rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
                .5*(q/m)*fy(x(t),y(t))*dt)
   pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
                rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
                rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
                .5*(q/m)*fz(x(t),y(t))*dt)
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
   omz = (q/m)*bz(x(t),y(t))*gami,
   where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
   position equations used are:
   x(t+dt) = x(t) + px(t+dt/2)*dtg
   y(t+dt) = y(t) + py(t+dt/2)*dtg
   where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
   pz(t+dt/2)*pz(t+dt/2))*ci*ci)
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
   + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = x momentum of particle n in tile m
   ppart[m][n][3] = y momentum of particle n in tile m
   ppart[m][n][4] = z momentum of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   fxy[k][j][2] = z component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   bxy[k][j][0] = x component of magnetic field at grid (j,k)
   bxy[k][j][1] = y component of magnetic field at grid (j,k)
   bxy[k][j][2] = z component of magnetic field at grid (j,k)
   that is, the convolution of magnetic field over particle shape
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qbm = particle charge/mass ratio
   dt = time interval between successive calculations
   dtc = time interval between successive co-ordinate calculations
   ci = reciprocal of velocity of light
   kinetic energy/mass at time t is also calculated, using
   ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
        (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
        (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami)
   idimp = size of phase space = 5
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of field arrays, must be >= nx+1
   nyv = third dimension of field arrays, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
   local data */
#define MXV 33
#define MYV 33
   int noff, moff, npoff, npp, mxv3;
   int i, j, k, ih, nh, nn, mm, nm;
   float qtmh, ci2, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
   float acx, acy, acz, p2, gami, qtmg, dtg, omxt, omyt, omzt, omt;
   float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
   float anx, any, edgelx, edgely, edgerx, edgery;
   float x, y;
/* per-tile copies of the fields, 3 components per grid point */
   float sfxy[3*MXV*MYV], sbxy[3*MXV*MYV];
/* float sfxy[3*(mx+1)*(my+1)], sbxy[3*(mx+1)*(my+1)]; */
   double sum1, sum2;
/* mxv3 = row stride (in floats) of the local field arrays */
   mxv3 = 3*(mx + 1);
   qtmh = 0.5*qbm*dt;
   ci2 = ci*ci;
   anx = (float) nx;
   any = (float) ny;
   sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,nm,ih,nh,x,y,dxp,dyp,amx,amy, \
dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3, \
rot4,rot5,rot6,rot7,rot8,rot9,edgelx,edgely,edgerx,edgery,p2,gami, \
qtmg,dtg,sum1,sfxy,sbxy) \
reduction(+:sum2)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = global x/y grid offsets of tile k            */
/* (noff temporarily holds the tile row index k/mx1)        */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* nn/mm = actual tile extent, clipped at the system edge */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
/* tile boundaries in grid units, used to detect departing particles */
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* ih = hole count for this tile, nh = overflow flag */
      ih = 0;
      nh = 0;
/* include one guard point in each direction for interpolation */
      nn += 1;
      mm += 1;
/* load local fields from global array */
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[3*i+mxv3*j] = fxy[3*(i+noff+nxv*(j+moff))];
            sfxy[1+3*i+mxv3*j] = fxy[1+3*(i+noff+nxv*(j+moff))];
            sfxy[2+3*i+mxv3*j] = fxy[2+3*(i+noff+nxv*(j+moff))];
         }
      }
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sbxy[3*i+mxv3*j] = bxy[3*(i+noff+nxv*(j+moff))];
            sbxy[1+3*i+mxv3*j] = bxy[1+3*(i+noff+nxv*(j+moff))];
            sbxy[2+3*i+mxv3*j] = bxy[2+3*(i+noff+nxv*(j+moff))];
         }
      }
/* clear counters (8 possible departure directions per tile) */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
/* nn/mm = cell containing the particle; dxp/dyp = offsets within cell */
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
/* nm = base index of the cell in the local field arrays */
         nm = 3*(nn - noff) + mxv3*(mm - moff);
         amx = 1.0 - dxp;
         amy = 1.0 - dyp;
/* find electric field */
         nn = nm;
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx = amy*(dxp*sfxy[mm] + dx);
         dy = amy*(dxp*sfxy[mm+1] + dy);
         dz = amy*(dxp*sfxy[mm+2] + dz);
         nn += mxv3;
         acx = amx*sfxy[nn];
         acy = amx*sfxy[nn+1];
         acz = amx*sfxy[nn+2];
         mm = nn + 3;
         dx += dyp*(dxp*sfxy[mm] + acx);
         dy += dyp*(dxp*sfxy[mm+1] + acy);
         dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
         nn = nm;
         ox = amx*sbxy[nn];
         oy = amx*sbxy[nn+1];
         oz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox = amy*(dxp*sbxy[mm] + ox);
         oy = amy*(dxp*sbxy[mm+1] + oy);
         oz = amy*(dxp*sbxy[mm+2] + oz);
         nn += mxv3;
         acx = amx*sbxy[nn];
         acy = amx*sbxy[nn+1];
         acz = amx*sbxy[nn+2];
         mm = nn + 3;
         ox += dyp*(dxp*sbxy[mm] + acx);
         oy += dyp*(dxp*sbxy[mm+1] + acy);
         oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
         dx *= qtmh;
         dy *= qtmh;
         dz *= qtmh;
/* half acceleration */
         acx = ppart[2+idimp*(j+npoff)] + dx;
         acy = ppart[3+idimp*(j+npoff)] + dy;
         acz = ppart[4+idimp*(j+npoff)] + dz;
/* find inverse gamma */
         p2 = acx*acx + acy*acy + acz*acz;
         gami = 1.0/sqrtf(1.0 + p2*ci2);
/* renormalize magnetic field */
         qtmg = qtmh*gami;
/* time-centered kinetic energy */
         sum1 += gami*p2/(1.0 + gami);
/* calculate cyclotron frequency */
         omxt = qtmg*ox;
         omyt = qtmg*oy;
         omzt = qtmg*oz;
/* calculate rotation matrix (Boris rotation) */
         omt = omxt*omxt + omyt*omyt + omzt*omzt;
         anorm = 2.0/(1.0 + omt);
         omt = 0.5*(1.0 - omt);
         rot4 = omxt*omyt;
         rot7 = omxt*omzt;
         rot8 = omyt*omzt;
         rot1 = omt + omxt*omxt;
         rot5 = omt + omyt*omyt;
         rot9 = omt + omzt*omzt;
         rot2 = omzt + rot4;
         rot4 -= omzt;
         rot3 = -omyt + rot7;
         rot7 += omyt;
         rot6 = omxt + rot8;
         rot8 -= omxt;
/* new momentum */
         dx += (rot1*acx + rot2*acy + rot3*acz)*anorm;
         dy += (rot4*acx + rot5*acy + rot6*acz)*anorm;
         dz += (rot7*acx + rot8*acy + rot9*acz)*anorm;
         ppart[2+idimp*(j+npoff)] = dx;
         ppart[3+idimp*(j+npoff)] = dy;
         ppart[4+idimp*(j+npoff)] = dz;
/* update inverse gamma */
         p2 = dx*dx + dy*dy + dz*dz;
         dtg = dtc/sqrtf(1.0 + p2*ci2);
/* new position */
         dx = x + dx*dtg;
         dy = y + dy*dtg;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going: x adds 1 (left) or 2 (right), */
/* y adds 3 (below) or 6 (above); 0 means staying in this tile */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
/* if wrap produced exactly anx due to roundoff, clamp and stay */
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
/* record 1-based particle index and destination, if room remains */
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
      sum2 += sum1;
/* set error and end of file flag */
/* ihole overflow: negative ih signals the error to the caller */
/* NOTE(review): *irc is written unsynchronized by any overflowing */
/* tile's thread; the reported value comes from one such tile --   */
/* confirm this matches upstream intent                            */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* normalize kinetic energy */
   *ek += sum2;
   return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgppost2l(float ppart[], float q[], int kpic[], float qm,
               int nppmx, int idimp, int mx, int my, int nxv, int nyv,
               int mx1, int mxy1) {
/* for 2d code, this subroutine deposits particle charge density using
   first-order (bilinear) interpolation with periodic boundaries.
   OpenMP version using guard cells; data deposited in tiles;
   particles stored in a segmented array.
   17 flops/particle, 6 loads, 4 stores
   input: all, output: q
   each particle distributes charge qm to its four surrounding points:
   q(n,m)     += qm*(1-dx)*(1-dy)
   q(n+1,m)   += qm*dx*(1-dy)
   q(n,m+1)   += qm*(1-dx)*dy
   q(n+1,m+1) += qm*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   q[k][j] = charge density at grid point j,k
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 4
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of charge array, must be >= nx+1
   nyv = second dimension of charge array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   local data */
#define MXV 33
#define MYV 33
   int xoff, yoff, poff, npart, row, lxv;
   int i, j, k, ix, iy;
   float px, py, wx, wy, cx, cy;
/* per-tile accumulator, one guard point in each direction */
   float sq[MXV*MYV];
/* float sq[(mx+1)*(my+1)]; */
   lxv = mx + 1;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,row,xoff,yoff,npart,poff,ix,iy,px,py,wx,wy,cx,cy,sq)
   for (k = 0; k < mxy1; k++) {
/* xoff/yoff = global grid offsets of tile k */
      row = k/mx1;
      yoff = my*row;
      xoff = mx*(k - mx1*row);
      npart = kpic[k];
      poff = nppmx*k;
/* clear the per-tile accumulator */
      for (j = 0; j < lxv*(my+1); j++) {
         sq[j] = 0.0f;
      }
/* accumulate charge from every particle in this tile */
      for (j = 0; j < npart; j++) {
/* bilinear weights: wx/wy = upper weights (wx pre-scaled by qm), */
/* cx/cy = complementary lower weights                            */
         px = ppart[idimp*(j+poff)];
         py = ppart[1+idimp*(j+poff)];
         ix = px;
         iy = py;
         wx = qm*(px - (float) ix);
         wy = py - (float) iy;
         ix = ix - xoff + lxv*(iy - yoff);
         cx = qm - wx;
         cy = 1.0f - wy;
/* deposit charge within tile to local accumulator */
         sq[ix] += cx*cy;
         sq[ix+1] += wx*cy;
         sq[ix+lxv] += cx*wy;
         sq[ix+lxv+1] += wx*wy;
      }
/* deposit charge to interior points in global array (no races) */
      ix = nxv - xoff;
      iy = nyv - yoff;
      ix = mx < ix ? mx : ix;
      iy = my < iy ? my : iy;
      for (j = 1; j < iy; j++) {
         for (i = 1; i < ix; i++) {
            q[i+xoff+nxv*(j+yoff)] += sq[i+lxv*j];
         }
      }
/* deposit charge to edge points, which neighboring tiles share, */
/* using atomic updates                                          */
      iy = nyv - yoff;
      iy = my+1 < iy ? my+1 : iy;
      for (i = 1; i < ix; i++) {
#pragma omp atomic
         q[i+xoff+nxv*yoff] += sq[i];
         if (iy > my) {
#pragma omp atomic
            q[i+xoff+nxv*(iy+yoff-1)] += sq[i+lxv*(iy-1)];
         }
      }
      ix = nxv - xoff;
      ix = mx+1 < ix ? mx+1 : ix;
      for (j = 0; j < iy; j++) {
#pragma omp atomic
         q[xoff+nxv*(j+yoff)] += sq[lxv*j];
         if (ix > mx) {
#pragma omp atomic
            q[ix+xoff-1+nxv*(j+yoff)] += sq[ix-1+lxv*j];
         }
      }
   }
   return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgjppost2l(float ppart[], float cu[], int kpic[], float qm,
                float dt, int nppmx, int idimp, int nx, int ny, int mx,
                int my, int nxv, int nyv, int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine calculates particle current density
   using first-order linear interpolation
   in addition, particle positions are advanced a half time-step
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   41 flops/particle, 17 loads, 14 stores
   input: all, output: ppart, cu
   current density is approximated by values at the nearest grid points
   cu(i,n,m)=qci*(1.-dx)*(1.-dy)
   cu(i,n+1,m)=qci*dx*(1.-dy)
   cu(i,n,m+1)=qci*(1.-dx)*dy
   cu(i,n+1,m+1)=qci*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   and qci = qm*vi, where i = x,y,z
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = x velocity of particle n in tile m
   ppart[m][n][3] = y velocity of particle n in tile m
   ppart[m][n][4] = z velocity of particle n in tile m
   cu[k][j][i] = ith component of current density at grid point j,k
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of current array, must be >= nx+1
   nyv = second dimension of current array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
   local data */
#define MXV 33
#define MYV 33
   int noff, moff, npoff, npp, mxv3;
   int i, j, k, nn, mm;
   float edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy, vz;
/* per-tile current accumulator, 3 components per grid point */
   float scu[3*MXV*MYV];
/* float scu[3*(mx+1)*(my+1)]; */
/* mxv3 = row stride (in floats) of the local current array */
   mxv3 = 3*(mx + 1);
/* set boundary values */
   edgelx = 0.0;
   edgely = 0.0;
   edgerx = (float) nx;
   edgery = (float) ny;
   if (ipbc==2) {
      edgelx = 1.0;
      edgely = 1.0;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
   }
   else if (ipbc==3) {
      edgelx = 1.0;
      edgerx = (float) (nx-1);
   }
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,x,y,dxp,dyp,amx,amy,dx,dy,vx, \
vy,vz,scu)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = global x/y grid offsets of tile k          */
/* (noff temporarily holds the tile row index k/mx1)      */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* zero out local accumulator */
      for (j = 0; j < mxv3*(my+1); j++) {
         scu[j] = 0.0f;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights (dxp pre-scaled by qm) */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
/* nn = base index of the particle's cell in the local array */
         nn = 3*(nn - noff) + mxv3*(mm - moff);
         amx = qm - dxp;
         amy = 1.0 - dyp;
/* deposit current */
         dx = amx*amy;
         dy = dxp*amy;
         vx = ppart[2+idimp*(j+npoff)];
         vy = ppart[3+idimp*(j+npoff)];
         vz = ppart[4+idimp*(j+npoff)];
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = amx*dyp;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
         dy = dxp*dyp;
         nn += mxv3;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
/* reflecting boundary conditions: restore position, flip velocity */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = ppart[1+idimp*(j+npoff)];
               ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)];
            }
         }
/* mixed reflecting/periodic boundary conditions (reflect in x only) */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
      }
/* deposit current to interior points in global array (no races) */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            cu[3*(i+noff+nxv*(j+moff))] += scu[3*i+mxv3*j];
            cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*i+mxv3*j];
            cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*i+mxv3*j];
         }
      }
/* deposit current to edge points shared with neighboring tiles, */
/* using atomic updates                                          */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         cu[3*(i+noff+nxv*moff)] += scu[3*i];
#pragma omp atomic
         cu[1+3*(i+noff+nxv*moff)] += scu[1+3*i];
#pragma omp atomic
         cu[2+3*(i+noff+nxv*moff)] += scu[2+3*i];
         if (mm > my) {
#pragma omp atomic
            cu[3*(i+noff+nxv*(mm+moff-1))] += scu[3*i+mxv3*(mm-1)];
#pragma omp atomic
            cu[1+3*(i+noff+nxv*(mm+moff-1))] += scu[1+3*i+mxv3*(mm-1)];
#pragma omp atomic
            cu[2+3*(i+noff+nxv*(mm+moff-1))] += scu[2+3*i+mxv3*(mm-1)];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         cu[3*(noff+nxv*(j+moff))] += scu[mxv3*j];
#pragma omp atomic
         cu[1+3*(noff+nxv*(j+moff))] += scu[1+mxv3*j];
#pragma omp atomic
         cu[2+3*(noff+nxv*(j+moff))] += scu[2+mxv3*j];
         if (nn > mx) {
#pragma omp atomic
            cu[3*(nn+noff-1+nxv*(j+moff))] += scu[3*(nn-1)+mxv3*j];
#pragma omp atomic
            cu[1+3*(nn+noff-1+nxv*(j+moff))] += scu[1+3*(nn-1)+mxv3*j];
#pragma omp atomic
            cu[2+3*(nn+noff-1+nxv*(j+moff))] += scu[2+3*(nn-1)+mxv3*j];
         }
      }
   }
   return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgjppostf2l(float ppart[], float cu[], int kpic[], int ncl[],
                 int ihole[], float qm, float dt, int nppmx, int idimp,
                 int nx, int ny, int mx, int my, int nxv, int nyv,
                 int mx1, int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine calculates particle current density
   using first-order linear interpolation
   in addition, particle positions are advanced a half time-step
   with periodic boundary conditions.
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   41 flops/particle, 17 loads, 14 stores
   input: all except ncl, ihole, irc,
   output: ppart, cu, ncl, ihole, irc
   current density is approximated by values at the nearest grid points
   cu(i,n,m)=qci*(1.-dx)*(1.-dy)
   cu(i,n+1,m)=qci*dx*(1.-dy)
   cu(i,n,m+1)=qci*(1.-dx)*dy
   cu(i,n+1,m+1)=qci*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   and qci = qm*vi, where i = x,y,z
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = x velocity of particle n in tile m
   ppart[m][n][3] = y velocity of particle n in tile m
   ppart[m][n][4] = z velocity of particle n in tile m
   cu[k][j][i] = ith component of current density at grid point j,k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of current array, must be >= nx+1
   nyv = second dimension of current array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
   local data */
#define MXV 33
#define MYV 33
   int noff, moff, npoff, npp;
   int i, j, k, ih, nh, nn, mm, mxv3;
   float dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy, vz;
   float anx, any, edgelx, edgely, edgerx, edgery;
/* per-tile current accumulator, 3 components per grid point */
   float scu[3*MXV*MYV];
/* float scu[3*(mx+1)*(my+1)]; */
/* mxv3 = row stride (in floats) of the local current array */
   mxv3 = 3*(mx + 1);
   anx = (float) nx;
   any = (float) ny;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,ih,nh,x,y,dxp,dyp,amx,amy,dx, \
dy,vx,vy,vz,edgelx,edgely,edgerx,edgery,scu)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = global x/y grid offsets of tile k          */
/* (noff temporarily holds the tile row index k/mx1)      */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* nn/mm = actual tile extent, clipped at the system edge */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
/* tile boundaries in grid units, used to detect departing particles */
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* ih = hole count for this tile, nh = overflow flag */
      ih = 0;
      nh = 0;
      nn += 1;
      mm += 1;
/* zero out local accumulator */
      for (j = 0; j < mxv3*(my+1); j++) {
         scu[j] = 0.0f;
      }
/* clear counters (8 possible departure directions per tile) */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights (dxp pre-scaled by qm) */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
/* nn = base index of the particle's cell in the local array */
         nn = 3*(nn - noff) + mxv3*(mm - moff);
         amx = qm - dxp;
         amy = 1.0 - dyp;
/* deposit current */
         dx = amx*amy;
         dy = dxp*amy;
         vx = ppart[2+idimp*(j+npoff)];
         vy = ppart[3+idimp*(j+npoff)];
         vz = ppart[4+idimp*(j+npoff)];
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = amx*dyp;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
         dy = dxp*dyp;
         nn += mxv3;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going: x adds 1 (left) or 2 (right), */
/* y adds 3 (below) or 6 (above); 0 means staying in this tile */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
/* if wrap produced exactly anx due to roundoff, clamp and stay */
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
/* record 1-based particle index and destination, if room remains */
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
/* deposit current to interior points in global array (no races) */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            cu[3*(i+noff+nxv*(j+moff))] += scu[3*i+mxv3*j];
            cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*i+mxv3*j];
            cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*i+mxv3*j];
         }
      }
/* deposit current to edge points shared with neighboring tiles, */
/* using atomic updates                                          */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         cu[3*(i+noff+nxv*moff)] += scu[3*i];
#pragma omp atomic
         cu[1+3*(i+noff+nxv*moff)] += scu[1+3*i];
#pragma omp atomic
         cu[2+3*(i+noff+nxv*moff)] += scu[2+3*i];
         if (mm > my) {
#pragma omp atomic
            cu[3*(i+noff+nxv*(mm+moff-1))] += scu[3*i+mxv3*(mm-1)];
#pragma omp atomic
            cu[1+3*(i+noff+nxv*(mm+moff-1))] += scu[1+3*i+mxv3*(mm-1)];
#pragma omp atomic
            cu[2+3*(i+noff+nxv*(mm+moff-1))] += scu[2+3*i+mxv3*(mm-1)];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         cu[3*(noff+nxv*(j+moff))] += scu[mxv3*j];
#pragma omp atomic
         cu[1+3*(noff+nxv*(j+moff))] += scu[1+mxv3*j];
#pragma omp atomic
         cu[2+3*(noff+nxv*(j+moff))] += scu[2+mxv3*j];
         if (nn > mx) {
#pragma omp atomic
            cu[3*(nn+noff-1+nxv*(j+moff))] += scu[3*(nn-1)+mxv3*j];
#pragma omp atomic
            cu[1+3*(nn+noff-1+nxv*(j+moff))] += scu[1+3*(nn-1)+mxv3*j];
#pragma omp atomic
            cu[2+3*(nn+noff-1+nxv*(j+moff))] += scu[2+3*(nn-1)+mxv3*j];
         }
      }
/* set error and end of file flag */
/* ihole overflow: negative ih signals the error to the caller */
/* NOTE(review): *irc is written unsynchronized by any overflowing */
/* tile's thread; the reported value comes from one such tile --   */
/* confirm this matches upstream intent                            */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
   return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgrjppost2l(float ppart[], float cu[], int kpic[], float qm,
                 float dt, float ci, int nppmx, int idimp, int nx,
                 int ny, int mx, int my, int nxv, int nyv, int mx1,
                 int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine calculates particle current density
   using first-order linear interpolation
   in addition, particle positions are advanced a half time-step
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores
   input: all, output: ppart, cu
   current density is approximated by values at the nearest grid points
   cu(i,n,m)=qci*(1.-dx)*(1.-dy)
   cu(i,n+1,m)=qci*dx*(1.-dy)
   cu(i,n,m+1)=qci*(1.-dx)*dy
   cu(i,n+1,m+1)=qci*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   and qci = qm*pi*gami, where i = x,y,z
   where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = x momentum of particle n in tile m
   ppart[m][n][3] = y momentum of particle n in tile m
   ppart[m][n][4] = z momentum of particle n in tile m
   cu[k][j][i] = ith component of current density at grid point j,k
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   ci = reciprocal of velocity of light
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of current array, must be >= nx+1
   nyv = second dimension of current array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
   local data */
#define MXV 33
#define MYV 33
   int noff, moff, npoff, npp, mxv3;
   int i, j, k, nn, mm;
   float ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy, vz, p2, gami;
/* per-tile current accumulator, 3 components per grid point */
   float scu[3*MXV*MYV];
/* float scu[3*(mx+1)*(my+1)]; */
/* mxv3 = row stride (in floats) of the local current array */
   mxv3 = 3*(mx + 1);
   ci2 = ci*ci;
/* set boundary values */
   edgelx = 0.0;
   edgely = 0.0;
   edgerx = (float) nx;
   edgery = (float) ny;
   if (ipbc==2) {
      edgelx = 1.0;
      edgely = 1.0;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
   }
   else if (ipbc==3) {
      edgelx = 1.0;
      edgerx = (float) (nx-1);
   }
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,x,y,dxp,dyp,amx,amy,dx,dy,vx, \
vy,vz,p2,gami,scu)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = global x/y grid offsets of tile k          */
/* (noff temporarily holds the tile row index k/mx1)      */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* zero out local accumulator */
      for (j = 0; j < mxv3*(my+1); j++) {
         scu[j] = 0.0f;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights (dxp pre-scaled by qm) */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
/* find inverse gamma */
         vx = ppart[2+idimp*(j+npoff)];
         vy = ppart[3+idimp*(j+npoff)];
         vz = ppart[4+idimp*(j+npoff)];
         p2 = vx*vx + vy*vy + vz*vz;
         gami = 1.0/sqrtf(1.0 + p2*ci2);
/* calculate weights */
/* nn = base index of the particle's cell in the local array */
         nn = 3*(nn - noff) + mxv3*(mm - moff);
         amx = qm - dxp;
         amy = 1.0 - dyp;
/* deposit current (momenta converted to velocities via gami) */
         dx = amx*amy;
         dy = dxp*amy;
         vx *= gami;
         vy *= gami;
         vz *= gami;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = amx*dyp;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
         dy = dxp*dyp;
         nn += mxv3;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
/* reflecting boundary conditions: restore position, flip momentum */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = ppart[1+idimp*(j+npoff)];
               ppart[3+idimp*(j+npoff)] = -ppart[3+idimp*(j+npoff)];
            }
         }
/* mixed reflecting/periodic boundary conditions (reflect in x only) */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = ppart[idimp*(j+npoff)];
               ppart[2+idimp*(j+npoff)] = -ppart[2+idimp*(j+npoff)];
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
      }
/* deposit current to interior points in global array (no races) */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            cu[3*(i+noff+nxv*(j+moff))] += scu[3*i+mxv3*j];
            cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*i+mxv3*j];
            cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*i+mxv3*j];
         }
      }
/* deposit current to edge points shared with neighboring tiles, */
/* using atomic updates                                          */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         cu[3*(i+noff+nxv*moff)] += scu[3*i];
#pragma omp atomic
         cu[1+3*(i+noff+nxv*moff)] += scu[1+3*i];
#pragma omp atomic
         cu[2+3*(i+noff+nxv*moff)] += scu[2+3*i];
         if (mm > my) {
#pragma omp atomic
            cu[3*(i+noff+nxv*(mm+moff-1))] += scu[3*i+mxv3*(mm-1)];
#pragma omp atomic
            cu[1+3*(i+noff+nxv*(mm+moff-1))] += scu[1+3*i+mxv3*(mm-1)];
#pragma omp atomic
            cu[2+3*(i+noff+nxv*(mm+moff-1))] += scu[2+3*i+mxv3*(mm-1)];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         cu[3*(noff+nxv*(j+moff))] += scu[mxv3*j];
#pragma omp atomic
         cu[1+3*(noff+nxv*(j+moff))] += scu[1+mxv3*j];
#pragma omp atomic
         cu[2+3*(noff+nxv*(j+moff))] += scu[2+mxv3*j];
         if (nn > mx) {
#pragma omp atomic
            cu[3*(nn+noff-1+nxv*(j+moff))] += scu[3*(nn-1)+mxv3*j];
#pragma omp atomic
            cu[1+3*(nn+noff-1+nxv*(j+moff))] += scu[1+3*(nn-1)+mxv3*j];
#pragma omp atomic
            cu[2+3*(nn+noff-1+nxv*(j+moff))] += scu[2+3*(nn-1)+mxv3*j];
         }
      }
   }
   return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgrjppostf2l(float ppart[], float cu[], int kpic[], int ncl[],
                  int ihole[], float qm, float dt, float ci, int nppmx,
                  int idimp, int nx, int ny, int mx, int my, int nxv,
                  int nyv, int mx1, int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine calculates particle current density
   using first-order linear interpolation for relativistic particles
   in addition, particle positions are advanced a half time-step
   with periodic boundary conditions.
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores
   input: all except ncl, ihole, irc,
   output: ppart, cu, ncl, ihole, irc
   current density is approximated by values at the nearest grid points
   cu(i,n,m)=qci*(1.-dx)*(1.-dy)
   cu(i,n+1,m)=qci*dx*(1.-dy)
   cu(i,n,m+1)=qci*(1.-dx)*dy
   cu(i,n+1,m+1)=qci*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   and qci = qm*pi*gami, where i = x,y,z
   where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
   ppart[m][n][0] = position x of particle n in tile m
   ppart[m][n][1] = position y of particle n in tile m
   ppart[m][n][2] = x momentum of particle n in tile m
   ppart[m][n][3] = y momentum of particle n in tile m
   ppart[m][n][4] = z momentum of particle n in tile m
   cu[k][j][i] = ith component of current density at grid point j,k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   ci = reciprocal of velocity of light
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of current array, must be >= nx+1
   nyv = second dimension of current array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
local data */
#define MXV 33
#define MYV 33
   int noff, moff, npoff, npp;
   int i, j, k, ih, nh, nn, mm, mxv3;
   float ci2, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy, vz, p2, gami;
   float anx, any, edgelx, edgely, edgerx, edgery;
/* scu = per-tile current accumulator; listed private in the pragma
   below, so each thread deposits into its own copy before the final
   (partly atomic) scatter into the shared global array cu */
   float scu[3*MXV*MYV];
/* float scu[3*(mx+1)*(my+1)]; */
   mxv3 = 3*(mx + 1);
   ci2 = ci*ci;
   anx = (float) nx;
   any = (float) ny;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,ih,nh,x,y,dxp,dyp,amx,amy,dx, \
dy,vx,vy,vz,edgelx,edgely,edgerx,edgery,p2,gami,scu)
   for (k = 0; k < mxy1; k++) {
/* noff, moff = lowermost global gridpoint of tile k in x, y */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = nppmx*k;
/* nn, mm = tile extent, clipped at the global grid boundary */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
      ih = 0;
      nh = 0;
      nn += 1;
      mm += 1;
/* zero out local accumulator */
      for (j = 0; j < mxv3*(my+1); j++) {
         scu[j] = 0.0f;
      }
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[idimp*(j+npoff)];
         y = ppart[1+idimp*(j+npoff)];
         nn = x;
         mm = y;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
/* find inverse gamma */
         vx = ppart[2+idimp*(j+npoff)];
         vy = ppart[3+idimp*(j+npoff)];
         vz = ppart[4+idimp*(j+npoff)];
         p2 = vx*vx + vy*vy + vz*vz;
         gami = 1.0/sqrtf(1.0 + p2*ci2);
/* calculate weights */
/* nn is reused here as the flat scu index of the (nn,mm) cell */
         nn = 3*(nn - noff) + mxv3*(mm - moff);
         amx = qm - dxp;
         amy = 1.0 - dyp;
/* deposit current */
         dx = amx*amy;
         dy = dxp*amy;
         vx *= gami;
         vy *= gami;
         vz *= gami;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = amx*dyp;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
         dy = dxp*dyp;
         nn += mxv3;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         mm = nn + 3;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[idimp*(j+npoff)] = dx;
         ppart[1+idimp*(j+npoff)] = dy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
/* deposit current to interior points in global array */
/* interior cells of a tile cannot be touched by other threads, so no
   atomics are needed here */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            cu[3*(i+noff+nxv*(j+moff))] += scu[3*i+mxv3*j];
            cu[1+3*(i+noff+nxv*(j+moff))] += scu[1+3*i+mxv3*j];
            cu[2+3*(i+noff+nxv*(j+moff))] += scu[2+3*i+mxv3*j];
         }
      }
/* deposit current to edge points in global array */
/* edge/guard cells are shared with neighboring tiles, hence atomics */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         cu[3*(i+noff+nxv*moff)] += scu[3*i];
#pragma omp atomic
         cu[1+3*(i+noff+nxv*moff)] += scu[1+3*i];
#pragma omp atomic
         cu[2+3*(i+noff+nxv*moff)] += scu[2+3*i];
         if (mm > my) {
#pragma omp atomic
            cu[3*(i+noff+nxv*(mm+moff-1))] += scu[3*i+mxv3*(mm-1)];
#pragma omp atomic
            cu[1+3*(i+noff+nxv*(mm+moff-1))] += scu[1+3*i+mxv3*(mm-1)];
#pragma omp atomic
            cu[2+3*(i+noff+nxv*(mm+moff-1))] += scu[2+3*i+mxv3*(mm-1)];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         cu[3*(noff+nxv*(j+moff))] += scu[mxv3*j];
#pragma omp atomic
         cu[1+3*(noff+nxv*(j+moff))] += scu[1+mxv3*j];
#pragma omp atomic
         cu[2+3*(noff+nxv*(j+moff))] += scu[2+mxv3*j];
         if (nn > mx) {
#pragma omp atomic
            cu[3*(nn+noff-1+nxv*(j+moff))] += scu[3*(nn-1)+mxv3*j];
#pragma omp atomic
            cu[1+3*(nn+noff-1+nxv*(j+moff))] += scu[1+3*(nn-1)+mxv3*j];
#pragma omp atomic
            cu[2+3*(nn+noff-1+nxv*(j+moff))] += scu[2+3*(nn-1)+mxv3*j];
         }
      }
/* set error and end of file flag */
/* ihole overflow */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
/* store hole count in element 0 (negative signals ihole overflow) */
      ihole[2*(ntmax+1)*k] = ih;
   }
   return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cpporder2l(float ppart[], float ppbuff[], int kpic[], int ncl[],
                int ihole[], int idimp, int nppmx, int nx, int ny,
                int mx, int my, int mx1, int my1, int npbmx, int ntmax,
                int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 2D linear memory
   algorithm has 3 steps. first, one finds particles leaving tile and
   stores their number in each directon, location, and destination in ncl
   and ihole. second, a prefix scan of ncl is performed and departing
   particles are buffered in ppbuff in direction order. finally, we copy
   the incoming particles from other tiles into ppart.
   input: all except ppbuff, ncl, ihole, irc
   output: ppart, ppbuff, kpic, ncl, ihole, irc
   ppart[k][n][0] = position x of particle n in tile k
   ppart[k][n][1] = position y of particle n in tile k
   ppbuff[k][n][i] = i co-ordinate of particle n in tile k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int mxy1, noff, moff, npp, ncoff;
   int i, j, k, ii, kx, ky, ih, nh, ist, nn, mm, isum;
   int ip, j1, j2, kxl, kxr, kk, kl, kr;
   float anx, any, edgelx, edgely, edgerx, edgery, dx, dy;
   int ks[8];
   mxy1 = mx1*my1;
   anx = (float) nx;
   any = (float) ny;
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(j,k,noff,moff,npp,nn,mm,ih,nh,ist,dx,dy,edgelx,edgely,edgerx, \
edgery)
   for (k = 0; k < mxy1; k++) {
/* noff, moff = lowermost global gridpoint of tile k in x, y */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
      ih = 0;
      nh = 0;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
         dx = ppart[idimp*(j+nppmx*k)];
         dy = ppart[1+idimp*(j+nppmx*k)];
/* find particles going out of bounds */
         ist = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
         if (dx >= edgerx) {
            if (dx >= anx)
               ppart[idimp*(j+nppmx*k)] = dx - anx;
            ist = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0) {
               dx += anx;
               if (dx < anx)
                  ist = 1;
               else
                  dx = 0.0;
               ppart[idimp*(j+nppmx*k)] = dx;
            }
            else {
               ist = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               ppart[1+idimp*(j+nppmx*k)] = dy - any;
            ist += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  ist += 3;
               else
                  dy = 0.0;
               ppart[1+idimp*(j+nppmx*k)] = dy;
            }
            else {
               ist += 3;
            }
         }
         if (ist > 0) {
            ncl[ist+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = ist;
            }
            else {
               nh = 1;
            }
         }
      }
/* set error and end of file flag */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
/* store hole count in element 0 (negative signals ihole overflow) */
      ihole[2*(ntmax+1)*k] = ih;
   }
/* ihole overflow */
   if (*irc > 0)
      return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,isum,ist,nh,ip,j1,ii)
   for (k = 0; k < mxy1; k++) {
/* find address offset for ordered ppbuff array */
/* exclusive prefix scan turns per-direction counts into offsets */
      isum = 0;
      for (j = 0; j < 8; j++) {
         ist = ncl[j+8*k];
         ncl[j+8*k] = isum;
         isum += ist;
      }
      nh = ihole[2*(ntmax+1)*k];
      ip = 0;
/* loop over particles leaving tile */
      for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
         ist = ihole[1+2*(j+1+(ntmax+1)*k)];
         ii = ncl[ist+8*k-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[i+idimp*(ii+npbmx*k)]
               = ppart[i+idimp*(j1+nppmx*k)];
            }
         }
         else {
            ip = 1;
         }
         ncl[ist+8*k-1] = ii + 1;
      }
/* set error */
      if (ip > 0)
         *irc = ncl[7+8*k];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,ii,kk,npp,kx,ky,kl,kr,kxl,kxr,ih,nh,ncoff,ist,j1,j2,ip,ks)
   for (k = 0; k < mxy1; k++) {
      npp = kpic[k];
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
/* ks = neighbor tile indices for each of the 8 departure directions */
      ks[0] = kxr + kk;
      ks[1] = kxl + kk;
      ks[2] = kx + kr;
      ks[3] = kxr + kr;
      ks[4] = kxl + kr;
      ks[5] = kx + kl;
      ks[6] = kxr + kl;
      ks[7] = kxl + kl;
/* loop over directions */
/* ih = number of holes filled so far, j1 = last insertion index */
      nh = ihole[2*(ntmax+1)*k];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      for (ii = 0; ii < 8; ii++) {
         if (ii > 0)
            ncoff = ncl[ii-1+8*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+8*ks[ii]] - ncoff;
         for (j = 0; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp;
               npp += 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[i+idimp*(j1+nppmx*k)]
                  = ppbuff[i+idimp*(j+ncoff+npbmx*ks[ii])];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
      if (ih < nh) {
         ip = nh - ih;
         for (j = 0; j < ip; j++) {
            j1 = npp - j - 1;
            j2 = ihole[2*(nh-j+(ntmax+1)*k)] - 1;
            if (j1 > j2) {
/* move particle only if it is below current hole */
               for (i = 0; i < idimp; i++) {
                  ppart[i+idimp*(j2+nppmx*k)]
                  = ppart[i+idimp*(j1+nppmx*k)];
               }
            }
         }
         npp -= ip;
      }
      kpic[k] = npp;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cpporderf2l(float ppart[], float ppbuff[], int kpic[], int ncl[],
                 int ihole[], int idimp, int nppmx, int mx1, int my1,
                 int npbmx, int ntmax, int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 2D linear memory
   the algorithm has 2 steps. first, a prefix scan of ncl is performed
   and departing particles are buffered in ppbuff in direction order.
   then we copy the incoming particles from other tiles into ppart.
   it assumes that the number, location, and destination of particles
   leaving a tile have been previously stored in ncl and ihole by the
   cgppushf2l procedure.
   input: all except ppbuff, irc
   output: ppart, ppbuff, kpic, ncl, irc
   ppart[k][n][0] = position x of particle n in tile k
   ppart[k][n][1] = position y of particle n in tile k
   ppbuff[k][n][i] = i co-ordinate of particle n in tile k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int mxy1, npp, ncoff;
   int i, j, k, ii, kx, ky, ih, nh, ist, isum;
   int ip, j1, j2, kxl, kxr, kk, kl, kr;
   int ks[8];
   mxy1 = mx1*my1;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,isum,ist,nh,ip,j1,ii)
   for (k = 0; k < mxy1; k++) {
/* find address offset for ordered ppbuff array */
/* exclusive prefix scan turns per-direction counts into offsets */
      isum = 0;
      for (j = 0; j < 8; j++) {
         ist = ncl[j+8*k];
         ncl[j+8*k] = isum;
         isum += ist;
      }
      nh = ihole[2*(ntmax+1)*k];
      ip = 0;
/* loop over particles leaving tile */
      for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
         ist = ihole[1+2*(j+1+(ntmax+1)*k)];
         ii = ncl[ist+8*k-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[i+idimp*(ii+npbmx*k)]
               = ppart[i+idimp*(j1+nppmx*k)];
            }
         }
         else {
            ip = 1;
         }
         ncl[ist+8*k-1] = ii + 1;
      }
/* set error */
      if (ip > 0)
         *irc = ncl[7+8*k];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,ii,kk,npp,kx,ky,kl,kr,kxl,kxr,ih,nh,ncoff,ist,j1,j2,ip,ks)
   for (k = 0; k < mxy1; k++) {
      npp = kpic[k];
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
/* ks = neighbor tile indices for each of the 8 departure directions */
      ks[0] = kxr + kk;
      ks[1] = kxl + kk;
      ks[2] = kx + kr;
      ks[3] = kxr + kr;
      ks[4] = kxl + kr;
      ks[5] = kx + kl;
      ks[6] = kxr + kl;
      ks[7] = kxl + kl;
/* loop over directions */
/* ih = number of holes filled so far, j1 = last insertion index */
      nh = ihole[2*(ntmax+1)*k];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      for (ii = 0; ii < 8; ii++) {
         if (ii > 0)
            ncoff = ncl[ii-1+8*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+8*ks[ii]] - ncoff;
         for (j = 0; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp;
               npp += 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[i+idimp*(j1+nppmx*k)]
                  = ppbuff[i+idimp*(j+ncoff+npbmx*ks[ii])];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
      if (ih < nh) {
         ip = nh - ih;
         for (j = 0; j < ip; j++) {
            j1 = npp - j - 1;
            j2 = ihole[2*(nh-j+(ntmax+1)*k)] - 1;
            if (j1 > j2) {
/* move particle only if it is below current hole */
               for (i = 0; i < idimp; i++) {
                  ppart[i+idimp*(j2+nppmx*k)]
                  = ppart[i+idimp*(j1+nppmx*k)];
               }
            }
         }
         npp -= ip;
      }
      kpic[k] = npp;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cbguard2l(float bxy[], int nx, int ny, int nxe, int nye) {
/* replicate extended periodic vector field bxy: copy the periodic
   images of the 3-component field into the guard column at x = nx,
   the guard row at y = ny, and the corner guard cell
   linear interpolation
   nx/ny = system length in x/y direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
local data */
   int jj, kk, c;
/* guard column at x = nx receives the values from column 0 */
   for (kk = 0; kk < ny; kk++) {
      for (c = 0; c < 3; c++) {
         bxy[c+3*nx+3*nxe*kk] = bxy[c+3*nxe*kk];
      }
   }
/* guard row at y = ny receives the values from row 0 */
   for (jj = 0; jj < nx; jj++) {
      for (c = 0; c < 3; c++) {
         bxy[c+3*jj+3*nxe*ny] = bxy[c+3*jj];
      }
   }
/* corner guard cell receives the value at (0,0) */
   for (c = 0; c < 3; c++) {
      bxy[c+3*nx+3*nxe*ny] = bxy[c];
   }
   return;
}
/*--------------------------------------------------------------------*/
void cacguard2l(float cu[], int nx, int ny, int nxe, int nye) {
/* accumulate extended periodic vector field cu: fold the deposits
   that landed in the guard column (x = nx), guard row (y = ny) and
   corner guard cell back into the interior, then zero the guards
   linear interpolation
   nx/ny = system length in x/y direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
local data */
   int jj, kk, c;
/* fold guard column at x = nx into column 0 */
   for (kk = 0; kk < ny; kk++) {
      for (c = 0; c < 3; c++) {
         cu[c+3*nxe*kk] += cu[c+3*nx+3*nxe*kk];
         cu[c+3*nx+3*nxe*kk] = 0.0;
      }
   }
/* fold guard row at y = ny into row 0 */
   for (jj = 0; jj < nx; jj++) {
      for (c = 0; c < 3; c++) {
         cu[c+3*jj] += cu[c+3*jj+3*nxe*ny];
         cu[c+3*jj+3*nxe*ny] = 0.0;
      }
   }
/* fold corner guard cell into (0,0) */
   for (c = 0; c < 3; c++) {
      cu[c] += cu[c+3*nx+3*nxe*ny];
      cu[c+3*nx+3*nxe*ny] = 0.0;
   }
   return;
}
/*--------------------------------------------------------------------*/
void caguard2l(float q[], int nx, int ny, int nxe, int nye) {
/* accumulate extended periodic scalar field q: fold the deposits that
   landed in the guard column (x = nx), guard row (y = ny) and corner
   guard cell back into the interior, then zero the guards
   linear interpolation
   nx/ny = system length in x/y direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
local data */
   int jj, kk;
   float *row;
/* fold guard column at x = nx into column 0, row by row */
   for (kk = 0; kk < ny; kk++) {
      row = &q[nxe*kk];
      row[0] += row[nx];
      row[nx] = 0.0f;
   }
/* fold guard row at y = ny into row 0 */
   for (jj = 0; jj < nx; jj++) {
      q[jj] += q[jj+nxe*ny];
      q[jj+nxe*ny] = 0.0f;
   }
/* fold corner guard cell into (0,0) */
   q[0] += q[nx+nxe*ny];
   q[nx+nxe*ny] = 0.0f;
   return;
}
/*--------------------------------------------------------------------*/
void cmpois23(float complex q[], float complex fxy[], int isign,
float complex ffc[], float ax, float ay, float affp,
float *we, int nx, int ny, int nxvh, int nyv, int nxhd,
int nyhd) {
/* this subroutine solves 2-1/2d poisson's equation in fourier space for
force/charge (or convolution of electric field over particle shape)
with periodic boundary conditions. Zeros out z component.
for isign = 0, input: isign,ax,ay,affp,nx,ny,nxvh,nyhd, output: ffc
for isign /= 0, input: q,ffc,isign,nx,ny,nxvh,nyhd, output: fxy,we
approximate flop count is: 26*nxc*nyc + 12*(nxc + nyc)
where nxc = nx/2 - 1, nyc = ny/2 - 1
equation used is:
fx[ky][kx] = -sqrt(-1)*kx*g[ky][kx]*s[ky][kx]*q[ky][kx],
fy[ky][kx] = -sqrt(-1)*ky*g[ky][kx]*s[ky][kx]*q[ky][kx],
fz[ky][kx] = zero,
where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
g[ky][kx] = (affp/(kx**2+ky**2))*s[ky][kx],
s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for
fx(kx=pi) = fy(kx=pi) = fx(ky=pi) = fy(ky=pi) = 0, and
fx(kx=0,ky=0) = fy(kx=0,ky=0) = 0.
q[k][j] = complex charge density for fourier mode (j,k)
fxy[k][j][0] = x component of complex force/charge,
fxy[k][j][1] = y component of complex force/charge,
fxy[k][j][2] = zero,
all for fourier mode (j,k)
if isign = 0, form factor array is prepared
if isign is not equal to 0, force/charge is calculated
cimag(ffc[k][j]) = finite-size particle shape factor s
for fourier mode (j,k)
creal(ffc[k][j]) = potential green's function g
for fourier mode (j,k)
ax/ay = half-width of particle in x/y direction
affp = normalization constant = nx*ny/np, where np=number of particles
electric field energy is also calculated, using
we = nx*ny*sum((affp/(kx**2+ky**2))*|q[ky][kx]*s[ky][kx]|**2)
nx/ny = system length in x/y direction
nxvh = first dimension of field arrays, must be >= nxh
nyv = second dimension of field arrays, must be >= ny
nxhd = first dimension of form factor array, must be >= nxh
nyhd = second dimension of form factor array, must be >= nyh
local data */
int nxh, nyh, j, k, k1, kk, kj;
float dnx, dny, dkx, dky, at1, at2, at3, at4;
float complex zero, zt1, zt2;
double wp, sum1;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
zero = 0.0 + 0.0*_Complex_I;
if (isign != 0)
goto L30;
/* prepare form factor array */
for (k = 0; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
at1 = dky*dky;
at2 = pow((dky*ay),2);
for (j = 0; j < nxh; j++) {
dkx = dnx*(float) j;
at3 = dkx*dkx + at1;
at4 = exp(-0.5*(pow((dkx*ax),2) + at2));
if (at3==0.0) {
ffc[j+kk] = affp + 1.0*_Complex_I;
}
else {
ffc[j+kk] = (affp*at4/at3) + at4*_Complex_I;
}
}
}
return;
/* calculate force/charge and sum field energy */
L30: sum1 = 0.0;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp) \
reduction(+:sum1)
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
kk = nxhd*k;
kj = nxvh*k;
k1 = nxvh*ny - kj;
wp = 0.0;
for (j = 1; j < nxh; j++) {
at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]);
at2 = at1*dnx*(float) j;
at3 = dky*at1;
zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I;
zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I;
fxy[3*j+3*kj] = at2*zt1;
fxy[1+3*j+3*kj] = at3*zt1;
fxy[2+3*j+3*kj] = zero;
fxy[3*j+3*k1] = at2*zt2;
fxy[1+3*j+3*k1] = -at3*zt2;
fxy[2+3*j+3*k1] = zero;
wp += at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1]));
}
/* mode numbers kx = 0, nx/2 */
at1 = crealf(ffc[kk])*cimagf(ffc[kk]);
at3 = at1*dny*(float) k;
zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I;
fxy[3*kj] = zero;
fxy[1+3*kj] = at3*zt1;
fxy[2+3*kj] = zero;
fxy[3*k1] = zero;
fxy[1+3*k1] = zero;
fxy[2+3*k1] = zero;
wp += at1*(q[kj]*conjf(q[kj]));
sum1 += wp;
}
wp = 0.0;
/* mode numbers ky = 0, ny/2 */
k1 = 3*nxvh*nyh;
for (j = 1; j < nxh; j++) {
at1 = crealf(ffc[j])*cimagf(ffc[j]);
at2 = at1*dnx*(float) j;
zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I;
fxy[3*j] = at2*zt1;
fxy[1+3*j] = zero;
fxy[2+3*j] = zero;
fxy[3*j+k1] = zero;
fxy[1+3*j+k1] = zero;
fxy[2+3*j+k1] = zero;
wp += at1*(q[j]*conjf(q[j]));
}
fxy[0] = zero;
fxy[1] = zero;
fxy[2] = zero;
fxy[k1] = zero;
fxy[1+k1] = zero;
fxy[2+k1] = zero;
sum1 += wp;
*we = sum1*(float) (nx*ny);
return;
}
/*--------------------------------------------------------------------*/
void cmcuperp2(float complex cu[], int nx, int ny, int nxvh, int nyv) {
/* this subroutine calculates the transverse current in fourier space
input: all, output: cu
approximate flop count is: 36*nxc*nyc
and nxc*nyc divides
where nxc = nx/2 - 1, nyc = ny/2 - 1
the transverse current is calculated using the equation:
cux[ky][kx] = cux[ky][kx]
-kx*(kx*cux[ky][kx]+ky*cuy[ky][kx])/(kx*kx+ky*ky)
cuy[ky][kx] = cuy[ky][kx]
-ky*(kx*cux[ky][kx]+ky*cuy[ky][kx])/(kx*kx+ky*ky)
where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
except for cux(kx=pi) = cuy(kx=pi) = 0, cux(ky=pi) = cuy(ky=pi) = 0,
and cux(kx=0,ky=0) = cuy(kx=0,ky=0) = 0.
cu[k][j][i] = complex current density for fourier mode (j,k)
nx/ny = system length in x/y direction
nxvh = first dimension of current array, must be >= nxh
nyv = second dimension of current array, must be >= ny
local data */
int nxh, nyh, j, k, k1, kj;
float dnx, dny, dkx, dky, dky2, at1;
float complex zero, zt1;
nxh = nx/2;
nyh = 1 > ny/2 ? 1 : ny/2;
dnx = 6.28318530717959/(float) nx;
dny = 6.28318530717959/(float) ny;
zero = 0.0 + 0.0*_Complex_I;
/* calculate transverse part of current */
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for private(j,k,k1,kj,dky,dky2,dkx,at1,zt1)
for (k = 1; k < nyh; k++) {
dky = dny*(float) k;
dky2 = dky*dky;
kj = nxvh*k;
k1 = nxvh*ny - kj;
for (j = 1; j < nxh; j++) {
dkx = dnx*(float) j;
at1 = 1./(dkx*dkx + dky2);
zt1 = at1*(dkx*cu[3*j+3*kj] + dky*cu[1+3*j+3*kj]);
cu[3*j+3*kj] -= dkx*zt1;
cu[1+3*j+3*kj] -= dky*zt1;
zt1 = at1*(dkx*cu[3*j+3*k1] - dky*cu[1+3*j+3*k1]);
cu[3*j+3*k1] -= dkx*zt1;
cu[1+3*j+3*k1] += dky*zt1;
}
/* mode numbers kx = 0, nx/2 */
cu[1+3*kj] = zero;
cu[3*k1] = zero;
cu[1+3*k1] = zero;
}
/* mode numbers ky = 0, ny/2 */
k1 = 3*nxvh*nyh;
for (j = 1; j < nxh; j++) {
cu[3*j] = zero;
cu[3*j+k1] = zero;
cu[1+3*j+k1] = zero;
}
cu[0] = zero;
cu[1] = zero;
cu[k1] = zero;
cu[1+k1] = zero;
return;
}
/*--------------------------------------------------------------------*/
void cmibpois23(float complex cu[], float complex bxy[],
                float complex ffc[], float ci, float *wm, int nx,
                int ny, int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine solves 2-1/2d poisson's equation in fourier space for
   magnetic field, with periodic boundary conditions.
   input: cu,ffc,ci,nx,ny,nxv,nyhd, output: bxy,wm
   approximate flop count is: 90*nxc*nyc + 40*(nxc + nyc)
   where nxc = nx/2 - 1, nyc = ny/2 - 1
   the magnetic field is calculated using the equations:
   bx[ky][kx] = ci*ci*sqrt(-1)*g[ky][kx]*ky*cuz[ky][kx],
   by[ky][kx] = -ci*ci*sqrt(-1)*g[ky][kx]*kx*cuz[ky][kx],
   bz[ky][kx] = ci*ci*sqrt(-1)*g[ky][kx]*(kx*cuy[ky][kx]-ky*cux[ky][kx]),
   where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
   g[ky][kx] = (affp/(kx**2+ky**2))*s[ky][kx],
   s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for
   bx(kx=pi) = by(kx=pi) = bz(kx=pi) = bx(ky=pi) = by(ky=pi) = bz(ky=pi)
   = 0, and bx(kx=0,ky=0) = by(kx=0,ky=0) = bz(kx=0,ky=0) = 0.
   cu[k][j][i] = complex current density for fourier mode (j,k)
   bxy[k][j][i] = i component of complex magnetic field
   all for fourier mode (j,k)
   cimag(ffc[k][j]) = finite-size particle shape factor s
   for fourier mode (j,k)
   creal(ffc[k][j]) = potential green's function g
   for fourier mode (j,k)
   ci = reciprocal of velocity of light
   magnetic field energy is also calculated, using
   wm = nx*ny*sum((affp/(kx**2+ky**2))*ci*ci*
   |cu[ky][kx]*s[ky][kx]|**2), where
   affp = normalization constant = nx*ny/np, where np=number of particles
   this expression is valid only if the current is divergence-free
   nx/ny = system length in x/y direction
   nxvh = first dimension of field arrays, must be >= nxh
   nyv = second dimension of field arrays, must be >= ny
   nxhd = first dimension of form factor array, must be >= nxh
   nyhd = second dimension of form factor array, must be >= nyh
local data */
   int nxh, nyh, j, k, k1, kk, kj;
   float dnx, dny, dky, ci2, at1, at2, at3;
   float complex zero, zt1, zt2, zt3;
   double wp, sum1;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   zero = 0.0 + 0.0*_Complex_I;
   ci2 = ci*ci;
/* calculate magnetic field and sum field energy */
/* sum1 accumulates the total energy over all rows (OpenMP reduction) */
   sum1 = 0.0;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,zt3,wp) \
reduction(+:sum1)
   for (k = 1; k < nyh; k++) {
      dky = dny*(float) k;
      kk = nxhd*k;
      kj = nxvh*k;
/* k1 = index of the conjugate row at ny - k */
      k1 = nxvh*ny - kj;
      wp = 0.0;
      for (j = 1; j < nxh; j++) {
/* at2 = ci2*g*kx, at3 = ci2*g*ky, at1 = ci2*g*s (energy weight) */
         at1 = ci2*crealf(ffc[j+kk]);
         at2 = at1*dnx*(float) j;
         at3 = dky*at1;
         at1 = at1*cimagf(ffc[j+kk]);
/* zt1 = i*cuz, zt2 = i*cuy, zt3 = i*cux (multiplication by sqrt(-1)) */
         zt1 = -cimagf(cu[2+3*j+3*kj])
               + crealf(cu[2+3*j+3*kj])*_Complex_I;
         zt2 = -cimagf(cu[1+3*j+3*kj])
               + crealf(cu[1+3*j+3*kj])*_Complex_I;
         zt3 = -cimagf(cu[3*j+3*kj]) + crealf(cu[3*j+3*kj])*_Complex_I;
         bxy[3*j+3*kj] = at3*zt1;
         bxy[1+3*j+3*kj] = -at2*zt1;
         bxy[2+3*j+3*kj] = at2*zt2 - at3*zt3;
/* conjugate row: ky terms enter with the opposite sign */
         zt1 = -cimagf(cu[2+3*j+3*k1])
               + crealf(cu[2+3*j+3*k1])*_Complex_I;
         zt2 = -cimagf(cu[1+3*j+3*k1])
               + crealf(cu[1+3*j+3*k1])*_Complex_I;
         zt3 = -cimagf(cu[3*j+3*k1]) + crealf(cu[3*j+3*k1])*_Complex_I;
         bxy[3*j+3*k1] = -at3*zt1;
         bxy[1+3*j+3*k1] = -at2*zt1;
         bxy[2+3*j+3*k1] = at2*zt2 + at3*zt3;
/* accumulate the energy contribution of this mode pair */
         wp += at1*(cu[3*j+3*kj]*conjf(cu[3*j+3*kj])
               + cu[1+3*j+3*kj]*conjf(cu[1+3*j+3*kj])
               + cu[2+3*j+3*kj]*conjf(cu[2+3*j+3*kj])
               + cu[3*j+3*k1]*conjf(cu[3*j+3*k1])
               + cu[1+3*j+3*k1]*conjf(cu[1+3*j+3*k1])
               + cu[2+3*j+3*k1]*conjf(cu[2+3*j+3*k1]));
      }
/* mode numbers kx = 0, nx/2 */
      at1 = ci2*crealf(ffc[kk]);
      at3 = at1*dny*(float) k;
      at1 = at1*cimagf(ffc[kk]);
      zt1 = -cimagf(cu[2+3*kj]) + crealf(cu[2+3*kj])*_Complex_I;
      zt3 = -cimagf(cu[3*kj]) + crealf(cu[3*kj])*_Complex_I;
      bxy[3*kj] = at3*zt1;
      bxy[1+3*kj] = zero;
      bxy[2+3*kj] = -at3*zt3;
      bxy[3*k1] = zero;
      bxy[1+3*k1] = zero;
      bxy[2+3*k1] = zero;
      wp += at1*(cu[3*kj]*conjf(cu[3*kj]) + cu[1+3*kj]*conjf(cu[1+3*kj])
            + cu[2+3*kj]*conjf(cu[2+3*kj]));
      sum1 += wp;
   }
   wp = 0.0;
/* mode numbers ky = 0, ny/2 */
   k1 = 3*nxvh*nyh;
   for (j = 1; j < nxh; j++) {
      at1 = ci2*crealf(ffc[j]);
      at2 = at1*dnx*(float) j;
      at1 = at1*cimagf(ffc[j]);
      zt1 = -cimagf(cu[2+3*j]) + crealf(cu[2+3*j])*_Complex_I;
      zt2 = -cimagf(cu[1+3*j]) + crealf(cu[1+3*j])*_Complex_I;
      bxy[3*j] = zero;
      bxy[1+3*j] = -at2*zt1;
      bxy[2+3*j] = at2*zt2;
      bxy[3*j+k1] = zero;
      bxy[1+3*j+k1] = zero;
      bxy[2+3*j+k1] = zero;
      wp += at1*(cu[3*j]*conjf(cu[3*j]) + cu[1+3*j]*conjf(cu[1+3*j])
            + cu[2+3*j]*conjf(cu[2+3*j]));
   }
/* zero mode and ny/2 row carry no magnetic field */
   bxy[0] = zero;
   bxy[1] = zero;
   bxy[2] = zero;
   bxy[k1] = zero;
   bxy[1+k1] = zero;
   bxy[2+k1] = zero;
   sum1 += wp;
   *wm = sum1*(float) (nx*ny);
   return;
}
/*--------------------------------------------------------------------*/
void cmmaxwel2(float complex exy[], float complex bxy[],
               float complex cu[], float complex ffc[], float ci,
               float dt, float *wf, float *wm, int nx, int ny, int nxvh,
               int nyv, int nxhd, int nyhd) {
/* this subroutine solves 2-1/2d maxwell's equation in fourier space for
   transverse electric and magnetic fields with periodic boundary
   conditions
   input: all, output: wf, wm, exy, bxy
   approximate flop count is: 286*nxc*nyc + 84*(nxc + nyc)
   where nxc = nx/2 - 1, nyc = ny/2 - 1
   the magnetic field is first updated half a step using the equations:
   bx[ky][kx] = bx[ky][kx] - .5*dt*sqrt(-1)*ky*ez[ky][kx]
   by[ky][kx] = by[ky][kx] + .5*dt*sqrt(-1)*kx*ez[ky][kx]
   bz[ky][kx] = bz[ky][kx] - .5*dt*sqrt(-1)*(kx*ey[ky][kx]-ky*ex[ky][kx])
   the electric field is then updated a whole step using the equations:
   ex[ky][kx] = ex[ky][kx] + c2*dt*sqrt(-1)*ky*bz[ky][kx]
                           - affp*dt*cux[ky][kx]*s[ky][kx]
   ey[ky][kx] = ey[ky][kx] - c2*dt*sqrt(-1)*kx*bz[ky][kx]
                           - affp*dt*cuy[ky][kx]*s[ky][kx]
   ez[ky][kx] = ez[ky][kx] + c2*dt*sqrt(-1)*(kx*by[ky][kx]-ky*bx[ky][kx])
                           - affp*dt*cuz[ky][kx]*s[ky][kx]
   the magnetic field is finally updated the remaining half step with
   the new electric field and the previous magnetic field equations.
   where kx = 2pi*j/nx, ky = 2pi*k/ny, c2 = 1./(ci*ci)
   and s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2))
   j,k = fourier mode numbers, except for
   ex(kx=pi) = ey(kx=pi) = ez(kx=pi) = 0,
   ex(ky=pi) = ey(ky=pi) = ez(ky=pi) = 0,
   ex(kx=0,ky=0) = ey(kx=0,ky=0) = ez(kx=0,ky=0) = 0.
   and similarly for bx, by, bz.
   cu[k][j][i] = complex current density
   exy[k][j][i] = complex transverse electric field
   bxy[k][j][i] = complex magnetic field
   for component i, all for fourier mode (j,k)
   creal(ffc[0][0]) = affp = normalization constant = nx*ny/np,
   where np=number of particles
   cimag(ffc[k][j]) = finite-size particle shape factor s.
   s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2)
   for fourier mode (j-1,k-1)
   ci = reciprocal of velocity of light
   dt = time interval between successive calculations
   transverse electric field energy is also calculated, using
   wf = nx*ny*sum((1/affp)*|exy[ky][kx]|**2)
   magnetic field energy is also calculated, using
   wm = nx*ny*sum((c2/affp)*|bxy[ky][kx]|**2)
   nx/ny = system length in x/y direction
   nxvh = first dimension of field arrays, must be >= nxh
   nyv = second dimension of field arrays, must be >= ny
   nxhd = first dimension of form factor array, must be >= nxh
   nyhd = second dimension of form factor array, must be >= nyh
   local data */
   int nxh, nyh, j, k, k1, kk, kj;
   float dnx, dny, dth, c2, cdt, affp, anorm, dkx, dky, afdt, adt;
   float complex zero, zt1, zt2, zt3, zt4, zt5, zt6, zt7, zt8, zt9;
   double wp, ws, sum1, sum2;
/* a non-positive reciprocal speed of light is invalid: do nothing */
   if (ci <= 0.0)
      return;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   dth = 0.5*dt;
   c2 = 1.0/(ci*ci);
   cdt = c2*dt;
   affp = creal(ffc[0]);
   adt = affp*dt;
   zero = 0.0 + 0.0*_Complex_I;
   anorm = 1.0/affp;
/* update electromagnetic field and sum field energies */
   sum1 = 0.0;
   sum2 = 0.0;
/* calculate the electromagnetic fields */
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,dkx,afdt,zt1,zt2,zt3,zt4,zt5,zt6,zt7,zt8, \
zt9,ws,wp) \
reduction(+:sum1,sum2)
   for (k = 1; k < nyh; k++) {
      dky = dny*(float) k;
      kk = nxhd*k;
      kj = nxvh*k;
      k1 = nxvh*ny - kj;
      ws = 0.0;
      wp = 0.0;
      for (j = 1; j < nxh; j++) {
         dkx = dnx*(float) j;
         afdt = adt*cimagf(ffc[j+kk]);
/* update magnetic field half time step, ky > 0 */
/* note: -cimagf(z) + crealf(z)*_Complex_I is sqrt(-1)*z */
         zt1 = -cimagf(exy[2+3*j+3*kj])
               + crealf(exy[2+3*j+3*kj])*_Complex_I;
         zt2 = -cimagf(exy[1+3*j+3*kj])
               + crealf(exy[1+3*j+3*kj])*_Complex_I;
         zt3 = -cimagf(exy[3*j+3*kj]) + crealf(exy[3*j+3*kj])*_Complex_I;
         zt4 = bxy[3*j+3*kj] - dth*(dky*zt1);
         zt5 = bxy[1+3*j+3*kj] + dth*(dkx*zt1);
         zt6 = bxy[2+3*j+3*kj] - dth*(dkx*zt2 - dky*zt3);
/* update electric field whole time step */
         zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
         zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
         zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
         zt7 = exy[3*j+3*kj] + cdt*(dky*zt1) - afdt*cu[3*j+3*kj];
         zt8 = exy[1+3*j+3*kj] - cdt*(dkx*zt1) - afdt*cu[1+3*j+3*kj];
         zt9 = exy[2+3*j+3*kj] + cdt*(dkx*zt2 - dky*zt3)
               - afdt*cu[2+3*j+3*kj];
/* update magnetic field half time step and store electric field */
         zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
         zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
         zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
         exy[3*j+3*kj] = zt7;
         exy[1+3*j+3*kj] = zt8;
         exy[2+3*j+3*kj] = zt9;
/* each z*conjf(z) product is real; the implicit complex-to-double */
/* conversion keeps only the real part */
         ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
         zt4 -= dth*(dky*zt1);
         zt5 += dth*(dkx*zt1);
         zt6 -= dth*(dkx*zt2 - dky*zt3);
         bxy[3*j+3*kj] = zt4;
         bxy[1+3*j+3*kj] = zt5;
         bxy[2+3*j+3*kj] = zt6;
         wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
/* update magnetic field half time step, ky < 0 */
         zt1 = -cimagf(exy[2+3*j+3*k1])
               + crealf(exy[2+3*j+3*k1])*_Complex_I;
         zt2 = -cimagf(exy[1+3*j+3*k1])
               + crealf(exy[1+3*j+3*k1])*_Complex_I;
         zt3 = -cimagf(exy[3*j+3*k1]) + crealf(exy[3*j+3*k1])*_Complex_I;
         zt4 = bxy[3*j+3*k1] + dth*(dky*zt1);
         zt5 = bxy[1+3*j+3*k1] + dth*(dkx*zt1);
         zt6 = bxy[2+3*j+3*k1] - dth*(dkx*zt2 + dky*zt3);
/* update electric field whole time step */
         zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
         zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
         zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
         zt7 = exy[3*j+3*k1] - cdt*(dky*zt1) - afdt*cu[3*j+3*k1];
         zt8 = exy[1+3*j+3*k1] - cdt*(dkx*zt1) - afdt*cu[1+3*j+3*k1];
         zt9 = exy[2+3*j+3*k1] + cdt*(dkx*zt2 + dky*zt3)
               - afdt*cu[2+3*j+3*k1];
/* update magnetic field half time step and store electric field */
         zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
         zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
         zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
         exy[3*j+3*k1] = zt7;
         exy[1+3*j+3*k1] = zt8;
         exy[2+3*j+3*k1] = zt9;
         ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
         zt4 += dth*(dky*zt1);
         zt5 += dth*(dkx*zt1);
         zt6 -= dth*(dkx*zt2 + dky*zt3);
         bxy[3*j+3*k1] = zt4;
         bxy[1+3*j+3*k1] = zt5;
         bxy[2+3*j+3*k1] = zt6;
         wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
      }
/* mode numbers kx = 0, nx/2 */
      afdt = adt*cimagf(ffc[kk]);
/* update magnetic field half time step */
      zt1 = -cimagf(exy[2+3*kj]) + crealf(exy[2+3*kj])*_Complex_I;
      zt3 = -cimagf(exy[3*kj]) + crealf(exy[3*kj])*_Complex_I;
      zt4 = bxy[3*kj] - dth*(dky*zt1);
      zt6 = bxy[2+3*kj] + dth*(dky*zt3);
/* update electric field whole time step */
      zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
      zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
      zt7 = exy[3*kj] + cdt*(dky*zt1) - afdt*cu[3*kj];
      zt9 = exy[2+3*kj] - cdt*(dky*zt3) - afdt*cu[2+3*kj];
/* update magnetic field half time step and store electric field */
      zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
      zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
      exy[3*kj] = zt7;
      exy[1+3*kj] = zero;
      exy[2+3*kj] = zt9;
      ws += anorm*(zt7*conjf(zt7) + zt9*conjf(zt9));
      zt4 -= dth*(dky*zt1);
      zt6 += dth*(dky*zt3);
      bxy[3*kj] = zt4;
      bxy[1+3*kj] = zero;
      bxy[2+3*kj] = zt6;
      wp += anorm*(zt4*conjf(zt4) + zt6*conjf(zt6));
/* zero out the conjugate (ky < 0) kx = 0 entries for this k */
      bxy[3*k1] = zero;
      bxy[1+3*k1] = zero;
      bxy[2+3*k1] = zero;
      exy[3*k1] = zero;
      exy[1+3*k1] = zero;
      exy[2+3*k1] = zero;
      sum1 += ws;
      sum2 += wp;
   }
   ws = 0.0;
   wp = 0.0;
/* mode numbers ky = 0, ny/2 */
   k1 = 3*nxvh*nyh;
   for (j = 1; j < nxh; j++) {
      dkx = dnx*(float) j;
      afdt = adt*cimagf(ffc[j]);
/* update magnetic field half time step */
      zt1 = -cimagf(exy[2+3*j]) + crealf(exy[2+3*j])*_Complex_I;
      zt2 = -cimagf(exy[1+3*j]) + crealf(exy[1+3*j])*_Complex_I;
      zt5 = bxy[1+3*j] + dth*(dkx*zt1);
      zt6 = bxy[2+3*j] - dth*(dkx*zt2);
/* update electric field whole time step */
      zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
      zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
      zt8 = exy[1+3*j] - cdt*(dkx*zt1) - afdt*cu[1+3*j];
      zt9 = exy[2+3*j] + cdt*(dkx*zt2) - afdt*cu[2+3*j];
/* update magnetic field half time step and store electric field */
      zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
      zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
      exy[3*j] = zero;
      exy[1+3*j] = zt8;
      exy[2+3*j] = zt9;
      ws += anorm*(zt8*conjf(zt8) + zt9*conjf(zt9));
      zt5 += dth*(dkx*zt1);
      zt6 -= dth*(dkx*zt2);
      bxy[3*j] = zero;
      bxy[1+3*j] = zt5;
      bxy[2+3*j] = zt6;
      wp += anorm*(zt5*conjf(zt5) + zt6*conjf(zt6));
      bxy[3*j+k1] = zero;
      bxy[1+3*j+k1] = zero;
      bxy[2+3*j+k1] = zero;
      exy[3*j+k1] = zero;
      exy[1+3*j+k1] = zero;
      exy[2+3*j+k1] = zero;
   }
/* zero out the kx = ky = 0 and ky = ny/2 modes */
   bxy[0] = zero;
   bxy[1] = zero;
   bxy[2] = zero;
   exy[0] = zero;
   exy[1] = zero;
   exy[2] = zero;
   bxy[k1] = zero;
   bxy[1+k1] = zero;
   bxy[2+k1] = zero;
   exy[k1] = zero;
   exy[1+k1] = zero;
   exy[2+k1] = zero;
   sum1 += ws;
   sum2 += wp;
   *wf = sum1*(float) (nx*ny);
   *wm = sum2*c2*(float) (nx*ny);
   return;
}
/*--------------------------------------------------------------------*/
void cmemfield2(float complex fxy[], float complex exy[],
                float complex ffc[], int isign, int nx, int ny,
                int nxvh, int nyv, int nxhd, int nyhd) {
/* adds (isign > 0) or copies (isign < 0) the 3-component complex
   field exy into fxy, scaling each mode by the smoothing factor held
   in the imaginary part of the form factor array ffc; isign = 0 is a
   no-op
   local data */
   int c, jx, ky, kconj, foff, roff;
   float smooth;
   int nxh = nx/2;
   int nyh = 1 > ny/2 ? 1 : ny/2;
/* accumulate the smoothed field */
   if (isign > 0) {
#pragma omp parallel for private(c,jx,ky,kconj,foff,roff,smooth)
      for (ky = 1; ky < nyh; ky++) {
         foff = nxhd*ky;
         roff = nxvh*ky;
         kconj = nxvh*ny - roff;
         for (jx = 0; jx < nxh; jx++) {
            smooth = cimagf(ffc[jx+foff]);
            for (c = 0; c < 3; c++) {
               fxy[c+3*jx+3*roff] += exy[c+3*jx+3*roff]*smooth;
               fxy[c+3*jx+3*kconj] += exy[c+3*jx+3*kconj]*smooth;
            }
         }
      }
/* rows ky = 0 and ky = ny/2 */
      kconj = 3*nxvh*nyh;
      for (jx = 0; jx < nxh; jx++) {
         smooth = cimagf(ffc[jx]);
         for (c = 0; c < 3; c++) {
            fxy[c+3*jx] += exy[c+3*jx]*smooth;
            fxy[c+3*jx+kconj] += exy[c+3*jx+kconj]*smooth;
         }
      }
   }
/* overwrite with the smoothed field */
   else if (isign < 0) {
#pragma omp parallel for private(c,jx,ky,kconj,foff,roff,smooth)
      for (ky = 1; ky < nyh; ky++) {
         foff = nxhd*ky;
         roff = nxvh*ky;
         kconj = nxvh*ny - roff;
         for (jx = 0; jx < nxh; jx++) {
            smooth = cimagf(ffc[jx+foff]);
            for (c = 0; c < 3; c++) {
               fxy[c+3*jx+3*roff] = exy[c+3*jx+3*roff]*smooth;
               fxy[c+3*jx+3*kconj] = exy[c+3*jx+3*kconj]*smooth;
            }
         }
      }
/* rows ky = 0 and ky = ny/2 */
      kconj = 3*nxvh*nyh;
      for (jx = 0; jx < nxh; jx++) {
         smooth = cimagf(ffc[jx]);
         for (c = 0; c < 3; c++) {
            fxy[c+3*jx] = exy[c+3*jx]*smooth;
            fxy[c+3*jx+kconj] = exy[c+3*jx+kconj]*smooth;
         }
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cwfft2rinit(int mixup[], float complex sct[], int indx, int indy,
int nxhyd, int nxyhd) {
/* this subroutine calculates tables needed by a two dimensional
real to complex fast fourier transform and its inverse.
input: indx, indy, nxhyd, nxyhd
output: mixup, sct
mixup = array of bit reversed addresses
sct = sine/cosine table
indx/indy = exponent which determines length in x/y direction,
where nx=2**indx, ny=2**indy
nxhyd = maximum of (nx/2,ny)
nxyhd = one half of maximum of (nx,ny)
written by viktor k. decyk, ucla
local data */
int indx1, indx1y, nx, ny, nxy, nxhy, nxyh;
int j, k, lb, ll, jb, it;
float dnxy, arg;
indx1 = indx - 1;
indx1y = indx1 > indy ? indx1 : indy;
nx = 1L<<indx;
ny = 1L<<indy;
nxy = nx > ny ? nx : ny;
nxhy = 1L<<indx1y;
/* bit-reverse index table: mixup[j] = 1 + reversed bits of j */
for (j = 0; j < nxhy; j++) {
lb = j;
ll = 0;
for (k = 0; k < indx1y; k++) {
jb = lb/2;
it = lb - 2*jb;
lb = jb;
ll = 2*ll + it;
}
mixup[j] = ll + 1;
}
/* sine/cosine table for the angles 2*n*pi/nxy */
nxyh = nxy/2;
dnxy = 6.28318530717959/(float) nxy;
for (j = 0; j < nxyh; j++) {
arg = dnxy*(float) j;
sct[j] = cosf(arg) - sinf(arg)*_Complex_I;
}
return;
}
/*--------------------------------------------------------------------*/
void cfft2rmxx(float complex f[], int isign, int mixup[],
               float complex sct[], int indx, int indy, int nyi,
               int nyp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the x part of a two dimensional real to
   complex fast fourier transform and its inverse, for a subset of y,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier transform in x is performed
   f[m][n] = (1/nx*ny)*sum(f[k][j]*exp(-sqrt(-1)*2pi*n*j/nx))
   if isign = 1, a forward fourier transform in x is performed
   f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*n*j/nx))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = first dimension of f >= nx/2
   nyd = second dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0]) = real part of mode nx/2,0 and
   imag(f[0][ny/2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
   local data */
   int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt;
   int nrx, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, nrxb, joff;
   float ani;
   float complex t1, t2, t3;
/* isign = 0 means no transform */
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   nxh = nx/2;
   nxhh = nx/4;
   ny = 1L<<indy;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nyt = nyi + nyp - 1;
/* isign > 0: forward transform is handled below */
   if (isign > 0)
      goto L70;
/* inverse fourier transform */
   nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,joff,ani,t1,t2,t3)
   for (i = nyi-1; i < nyt; i++) {
      joff = nxhd*i;
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t1 = f[j1+joff];
            f[j1+joff] = f[j+joff];
            f[j+joff] = t1;
         }
      }
/* then transform in x */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
               t1 = sct[kmr*j];
               t2 = t1*f[j2+joff];
               f[j2+joff] = f[j1+joff] - t2;
               f[j1+joff] += t2;
            }
         }
         ns = ns2;
      }
/* unscramble coefficients and normalize */
      kmr = nxy/nx;
      ani = 0.5/(((float) nx)*((float) ny));
      for (j = 1; j < nxhh; j++) {
/* t3 = -sqrt(-1)*sct[kmr*j] */
         t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
         t2 = conjf(f[nxh-j+joff]);
         t1 = f[j+joff] + t2;
         t2 = (f[j+joff] - t2)*t3;
         f[j+joff] = ani*(t1 + t2);
         f[nxh-j+joff] = ani*conjf(t1 - t2);
      }
      ani = 2.0*ani;
      f[nxhh+joff] = ani*conjf(f[nxhh+joff]);
      f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff]))
                + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I);
   }
   return;
/* forward fourier transform */
L70: nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,joff,t1,t2,t3)
   for (i = nyi-1; i < nyt; i++) {
      joff = nxhd*i;
/* scramble coefficients */
      kmr = nxy/nx;
      for (j = 1; j < nxhh; j++) {
/* t3 = sqrt(-1)*conjf(sct[kmr*j]) */
         t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
         t2 = conjf(f[nxh-j+joff]);
         t1 = f[j+joff] + t2;
         t2 = (f[j+joff] - t2)*t3;
         f[j+joff] = t1 + t2;
         f[nxh-j+joff] = conjf(t1 - t2);
      }
      f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]);
      f[joff] = (crealf(f[joff]) + cimagf(f[joff]))
                + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I;
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t1 = f[j1+joff];
            f[j1+joff] = f[j+joff];
            f[j+joff] = t1;
         }
      }
/* then transform in x */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[j2+joff];
               f[j2+joff] = f[j1+joff] - t2;
               f[j1+joff] += t2;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cfft2rmxy(float complex f[], int isign, int mixup[],
               float complex sct[], int indx, int indy, int nxi,
               int nxp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the y part of a two dimensional real to
   complex fast fourier transform and its inverse, for a subset of x,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier transform in y is performed
   f[m][n] = sum(f[k][j]*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, a forward fourier transform in y is performed
   f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*m*k/ny))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nxi = initial x index used
   nxp = number of x indices used
   nxhd = first dimension of f >= nx/2
   nyd = second dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0]) = real part of mode nx/2,0 and
   imag(f[0][ny/2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
   local data */
   int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt;
   int nry, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, nryb, koff;
   float complex t1, t2;
/* isign = 0 means no transform */
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   ny = 1L<<indy;
   nyh = ny/2;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nxt = nxi + nxp - 1;
/* isign > 0: forward transform is handled below */
   if (isign > 0)
      goto L70;
/* inverse fourier transform */
   nryb = nxhy/ny;
   nry = nxy/ny;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,koff,t1,t2)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = nxhd*k1;
            t1 = f[i+k1];
            f[i+k1] = f[i+koff];
            f[i+koff] = t1;
         }
      }
/* then transform in y */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhd*(j + k1);
               j2 = nxhd*(j + k2);
               t1 = sct[kmr*j];
               t2 = t1*f[i+j2];
               f[i+j2] = f[i+j1] - t2;
               f[i+j1] += t2;
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
/* only done when the x subset includes column 0 (nxi==1) */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = nxhd*k;
         k1 = nxhd*ny - koff;
         t1 = f[k1];
         f[k1] = 0.5*(cimagf(f[koff] + t1)
                 + crealf(f[koff] - t1)*_Complex_I);
         f[koff] = 0.5*(crealf(f[koff] + t1)
                   + cimagf(f[koff] - t1)*_Complex_I);
      }
   }
   return;
/* forward fourier transform */
L70: nryb = nxhy/ny;
   nry = nxy/ny;
/* scramble modes kx = 0, nx/2 */
/* only done when the x subset includes column 0 (nxi==1) */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = nxhd*k;
         k1 = nxhd*ny - koff;
         t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I;
         f[k1] = conjf(f[koff] - t1);
         f[koff] += t1;
      }
   }
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,koff,t1,t2)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = nxhd*k1;
            t1 = f[i+k1];
            f[i+k1] = f[i+koff];
            f[i+koff] = t1;
         }
      }
/* then transform in y */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhd*(j + k1);
               j2 = nxhd*(j + k2);
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[i+j2];
               f[i+j2] = f[i+j1] - t2;
               f[i+j1] += t2;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cfft2rm3x(float complex f[], int isign, int mixup[],
               float complex sct[], int indx, int indy, int nyi,
               int nyp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the x part of 3 two dimensional real to
   complex fast fourier transforms, and their inverses, for a subset of
   y, using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, three inverse fourier transforms are performed
   f[m][n][0:2] = (1/nx*ny)*sum(f[k][j][0:2]*
   exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, three forward fourier transforms are performed
   f[k][j][0:2] = sum(f[m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)*
   exp(sqrt(-1)*2pi*m*k/ny))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = second dimension of f >= nx/2
   nyd = third dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j][0:2] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1][0:2] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0][0:2]) = real part of mode nx/2,0 and
   imag(f[0][ny/2][0:2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
   local data */
   int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt;
   int nrx, i, j, k, l, jj, j1, j2, k1, k2, ns, ns2, km, kmr, joff;
   int nrxb;
   float at1, at2, ani;
   float complex t1, t2, t3, t4;
/* isign = 0 means no transform */
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   nxh = nx/2;
   nxhh = nx/4;
   ny = 1L<<indy;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nyt = nyi + nyp - 1;
/* isign > 0: forward transform is handled below */
   if (isign > 0)
      goto L100;
/* inverse fourier transform */
   nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,joff,at1,at2,ani,t1,t2,t3 \
,t4)
   for (i = nyi-1; i < nyt; i++) {
      joff = 3*nxhd*i;
/* swap complex components */
      for (j = 0; j < nxh; j++) {
         at1 = crealf(f[2+3*j+joff]);
         f[2+3*j+joff] = crealf(f[1+3*j+joff])
                         + cimagf(f[2+3*j+joff])*_Complex_I;
         at2 = cimagf(f[1+3*j+joff]);
         f[1+3*j+joff] = cimagf(f[3*j+joff]) + at1*_Complex_I;
         f[3*j+joff] = crealf(f[3*j+joff]) + at2*_Complex_I;
      }
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t1 = f[3*j1+joff];
            t2 = f[1+3*j1+joff];
            t3 = f[2+3*j1+joff];
            f[3*j1+joff] = f[3*j+joff];
            f[1+3*j1+joff] = f[1+3*j+joff];
            f[2+3*j1+joff] = f[2+3*j+joff];
            f[3*j+joff] = t1;
            f[1+3*j+joff] = t2;
            f[2+3*j+joff] = t3;
         }
      }
/* then transform in x */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
               t1 = sct[kmr*j];
               t2 = t1*f[3*j2+joff];
               t3 = t1*f[1+3*j2+joff];
               t4 = t1*f[2+3*j2+joff];
               f[3*j2+joff] = f[3*j1+joff] - t2;
               f[1+3*j2+joff] = f[1+3*j1+joff] - t3;
               f[2+3*j2+joff] = f[2+3*j1+joff] - t4;
               f[3*j1+joff] += t2;
               f[1+3*j1+joff] += t3;
               f[2+3*j1+joff] += t4;
            }
         }
         ns = ns2;
      }
/* unscramble coefficients and normalize */
      kmr = nxy/nx;
      ani = 0.5/(((float) nx)*((float) ny));
      for (j = 1; j < nxhh; j++) {
/* t3 = -sqrt(-1)*sct[kmr*j] */
         t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
         for (jj = 0; jj < 3; jj++) {
            t2 = conjf(f[jj+3*(nxh-j)+joff]);
            t1 = f[jj+3*j+joff] + t2;
            t2 = (f[jj+3*j+joff] - t2)*t3;
            f[jj+3*j+joff] = ani*(t1 + t2);
            f[jj+3*(nxh-j)+joff] = ani*conjf(t1 - t2);
         }
      }
      ani = 2.0*ani;
      for (jj = 0; jj < 3; jj++) {
         f[jj+3*nxhh+joff] = ani*conjf(f[jj+3*nxhh+joff]);
         f[jj+joff] = ani*((crealf(f[jj+joff]) + cimagf(f[jj+joff]))
                      + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I);
      }
   }
   return;
/* forward fourier transform */
L100: nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,joff,at1,at2,t1,t2,t3,t4)
   for (i = nyi-1; i < nyt; i++) {
      joff = 3*nxhd*i;
/* scramble coefficients */
      kmr = nxy/nx;
      for (j = 1; j < nxhh; j++) {
/* t3 = sqrt(-1)*conjf(sct[kmr*j]) */
         t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
         for (jj = 0; jj < 3; jj++) {
            t2 = conjf(f[jj+3*(nxh-j)+joff]);
            t1 = f[jj+3*j+joff] + t2;
            t2 = (f[jj+3*j+joff] - t2)*t3;
            f[jj+3*j+joff] = t1 + t2;
            f[jj+3*(nxh-j)+joff] = conjf(t1 - t2);
         }
      }
      for (jj = 0; jj < 3; jj++) {
         f[jj+3*nxhh+joff] = 2.0*conjf(f[jj+3*nxhh+joff]);
         f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff]))
                      + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I;
      }
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t1 = f[3*j1+joff];
            t2 = f[1+3*j1+joff];
            t3 = f[2+3*j1+joff];
            f[3*j1+joff] = f[3*j+joff];
            f[1+3*j1+joff] = f[1+3*j+joff];
            f[2+3*j1+joff] = f[2+3*j+joff];
            f[3*j+joff] = t1;
            f[1+3*j+joff] = t2;
            f[2+3*j+joff] = t3;
         }
      }
/* then transform in x */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = j + k1;
               j2 = j + k2;
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[3*j2+joff];
               t3 = t1*f[1+3*j2+joff];
               t4 = t1*f[2+3*j2+joff];
               f[3*j2+joff] = f[3*j1+joff] - t2;
               f[1+3*j2+joff] = f[1+3*j1+joff] - t3;
               f[2+3*j2+joff] = f[2+3*j1+joff] - t4;
               f[3*j1+joff] += t2;
               f[1+3*j1+joff] += t3;
               f[2+3*j1+joff] += t4;
            }
         }
         ns = ns2;
      }
/* swap complex components */
      for (j = 0; j < nxh; j++) {
         at1 = crealf(f[2+3*j+joff]);
         f[2+3*j+joff] = cimagf(f[1+3*j+joff])
                         + cimagf(f[2+3*j+joff])*_Complex_I;
         at2 = crealf(f[1+3*j+joff]);
         f[1+3*j+joff] = at1 + cimagf(f[3*j+joff])*_Complex_I;
         f[3*j+joff] = crealf(f[3*j+joff]) + at2*_Complex_I;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cfft2rm3y(float complex f[], int isign, int mixup[],
               float complex sct[], int indx, int indy, int nxi,
               int nxp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the y part of 3 two dimensional real to
   complex fast fourier transforms, and their inverses, for a subset of
   x, using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, three inverse fourier transforms are performed
   f[m][n][0:2] = (1/nx*ny)*sum(f[k][j][0:2] *
   exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, three forward fourier transforms are performed
   f[k][j][0:2] = sum(f[m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)*
   exp(sqrt(-1)*2pi*m*k/ny))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nxi = initial x index used
   nxp = number of x indices used
   nxhd = second dimension of f >= nx/2
   nyd = third dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j][0:2] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1][0:2] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0][0:2]) = real part of mode nx/2,0 and
   imag(f[0][ny/2][0:2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
   local data */
   int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt;
   int nry, i, j, k, l, jj, j1, j2, k1, k2, ns, ns2, km, kmr, koff;
   int nryb;
   float complex t1, t2, t3, t4;
/* isign = 0 means no transform */
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   ny = 1L<<indy;
   nyh = ny/2;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nxt = nxi + nxp - 1;
/* isign > 0: forward transform is handled below */
   if (isign > 0)
      goto L80;
/* inverse fourier transform */
   nryb = nxhy/ny;
   nry = nxy/ny;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,koff,t1,t2,t3,t4)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = 3*nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = 3*nxhd*k1;
            t1 = f[3*i+k1];
            t2 = f[1+3*i+k1];
            t3 = f[2+3*i+k1];
            f[3*i+k1] = f[3*i+koff];
            f[1+3*i+k1] = f[1+3*i+koff];
            f[2+3*i+k1] = f[2+3*i+koff];
            f[3*i+koff] = t1;
            f[1+3*i+koff] = t2;
            f[2+3*i+koff] = t3;
         }
      }
/* then transform in y */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = 3*nxhd*(j + k1);
               j2 = 3*nxhd*(j + k2);
               t1 = sct[kmr*j];
               t2 = t1*f[3*i+j2];
               t3 = t1*f[1+3*i+j2];
               t4 = t1*f[2+3*i+j2];
               f[3*i+j2] = f[3*i+j1] - t2;
               f[1+3*i+j2] = f[1+3*i+j1] - t3;
               f[2+3*i+j2] = f[2+3*i+j1] - t4;
               f[3*i+j1] += t2;
               f[1+3*i+j1] += t3;
               f[2+3*i+j1] += t4;
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
/* only done when the x subset includes column 0 (nxi==1) */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = 3*nxhd*k;
         k1 = 3*nxhd*ny - koff;
         for (jj = 0; jj < 3; jj++) {
            t1 = f[jj+k1];
            f[jj+k1] = 0.5*(cimagf(f[jj+koff] + t1)
                       + crealf(f[jj+koff] - t1)*_Complex_I);
            f[jj+koff] = 0.5*(crealf(f[jj+koff] + t1)
                         + cimagf(f[jj+koff] - t1)*_Complex_I);
         }
      }
   }
   return;
/* forward fourier transform */
L80: nryb = nxhy/ny;
   nry = nxy/ny;
/* scramble modes kx = 0, nx/2 */
/* only done when the x subset includes column 0 (nxi==1) */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = 3*nxhd*k;
         k1 = 3*nxhd*ny - koff;
         for (jj = 0; jj < 3; jj++) {
            t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I;
            f[jj+k1] = conjf(f[jj+koff] - t1);
            f[jj+koff] += t1;
         }
      }
   }
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,koff,t1,t2,t3,t4)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = 3*nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = 3*nxhd*k1;
            t1 = f[3*i+k1];
            t2 = f[1+3*i+k1];
            t3 = f[2+3*i+k1];
            f[3*i+k1] = f[3*i+koff];
            f[1+3*i+k1] = f[1+3*i+koff];
            f[2+3*i+k1] = f[2+3*i+koff];
            f[3*i+koff] = t1;
            f[1+3*i+koff] = t2;
            f[2+3*i+koff] = t3;
         }
      }
/* then transform in y */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = 3*nxhd*(j + k1);
               j2 = 3*nxhd*(j + k2);
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[3*i+j2];
               t3 = t1*f[1+3*i+j2];
               t4 = t1*f[2+3*i+j2];
               f[3*i+j2] = f[3*i+j1] - t2;
               f[1+3*i+j2] = f[1+3*i+j1] - t3;
               f[2+3*i+j2] = f[2+3*i+j1] - t4;
               f[3*i+j1] += t2;
               f[1+3*i+j1] += t3;
               f[2+3*i+j1] += t4;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cwfft2rmx(float complex f[], int isign, int mixup[],
               float complex sct[], int indx, int indy, int nxhd,
               int nyd, int nxhyd, int nxyhd) {
/* wrapper for the 2d real-to-complex fft with packed data; dispatches
   the OpenMP-parallel x and y passes in the order required by the
   transform direction (isign = 0 does nothing) */
/* local data */
   static int nxi = 1, nyi = 1;
   int nxh = 1L<<(indx - 1);
   int ny = 1L<<indy;
   if (isign == 0)
      return;
   if (isign < 0) {
/* inverse fourier transform: x pass over all rows, then y pass */
      cfft2rmxx(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
                nxyhd);
      cfft2rmxy(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
                nxyhd);
   }
   else {
/* forward fourier transform: y pass first, then x pass */
      cfft2rmxy(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
                nxyhd);
      cfft2rmxx(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
                nxyhd);
   }
   return;
}
/*--------------------------------------------------------------------*/
void cwfft2rm3(float complex f[], int isign, int mixup[],
               float complex sct[], int indx, int indy, int nxhd,
               int nyd, int nxhyd, int nxyhd) {
/* wrapper for 3 simultaneous 2d real-to-complex ffts; dispatches the
   x and y passes in the order required by the transform direction
   (isign = 0 does nothing) */
/* local data */
   static int nxi = 1, nyi = 1;
   int nxh = 1L<<(indx - 1);
   int ny = 1L<<indy;
   if (isign == 0)
      return;
   if (isign < 0) {
/* inverse fourier transform: x pass over all rows, then y pass */
      cfft2rm3x(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
                nxyhd);
      cfft2rm3y(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
                nxyhd);
   }
   else {
/* forward fourier transform: y pass first, then x pass */
      cfft2rm3y(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
                nxyhd);
      cfft2rm3x(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
                nxyhd);
   }
   return;
}
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
/* Fortran-callable interface: dereferences the scalar pointer
   arguments and forwards to the C routine cdistr2h */
void cdistr2h_(float *part, float *vtx, float *vty, float *vtz,
               float *vdx, float *vdy, float *vdz, int *npx, int *npy,
               int *idimp, int *nop, int *nx, int *ny, int *ipbc) {
   cdistr2h(part,*vtx,*vty,*vtz,*vdx,*vdy,*vdz,*npx,*npy,*idimp,*nop,
            *nx,*ny,*ipbc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable interface: dereferences the scalar pointer
   arguments and forwards to the C routine cdblkp2l
   (nppmx and irc are passed through as pointers) */
void cdblkp2l_(float *part, int *kpic, int *nppmx, int *idimp, int *nop,
               int *mx, int *my, int *mx1, int *mxy1, int *irc) {
   cdblkp2l(part,kpic,nppmx,*idimp,*nop,*mx,*my,*mx1,*mxy1,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable interface: dereferences the scalar pointer
   arguments and forwards to the C routine cppmovin2l
   (irc is passed through as a pointer) */
void cppmovin2l_(float *part, float *ppart, int *kpic, int *nppmx,
                 int *idimp, int *nop, int *mx, int *my, int *mx1,
                 int *mxy1, int *irc) {
   cppmovin2l(part,ppart,kpic,*nppmx,*idimp,*nop,*mx,*my,*mx1,*mxy1,
              irc);
   return;
}
/*--------------------------------------------------------------------*/
void cppcheck2l_(float *ppart, int *kpic, int *idimp, int *nppmx,
int *nx, int *ny, int *mx, int *my, int *mx1, int *my1,
int *irc) {
/* Fortran wrapper: dereference input scalars; irc returns the error flag */
cppcheck2l(ppart,kpic,*idimp,*nppmx,*nx,*ny,*mx,*my,*mx1,*my1,irc);
return;
}
/*--------------------------------------------------------------------*/
void cgbppush23l_(float *ppart, float *fxy, float *bxy, int *kpic,
float *qbm, float *dt, float *dtc, float *ek,
int *idimp, int *nppmx, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1, int *mxy1,
int *ipbc) {
/* Fortran wrapper: dereference input scalars; ek stays a pointer so
   the routine can write the energy result back to the caller */
cgbppush23l(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,ek,*idimp,*nppmx,*nx,
*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void cgbppushf23l_(float *ppart, float *fxy, float *bxy, int *kpic,
int *ncl, int *ihole, float *qbm, float *dt,
float *dtc, float *ek, int *idimp, int *nppmx,
int *nx, int *ny, int *mx, int *my, int *nxv,
int *nyv, int *mx1, int *mxy1, int *ntmax,
int *irc) {
/* Fortran wrapper: dereference input scalars; ek and irc stay as
   pointers so outputs reach the Fortran caller */
cgbppushf23l(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,ek,*idimp,
*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void cgrbppush23l_(float *ppart, float *fxy, float *bxy, int *kpic,
float *qbm, float *dt, float *dtc, float *ci,
float *ek, int *idimp, int *nppmx, int *nx, int *ny,
int *mx, int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ipbc) {
/* Fortran wrapper: dereference input scalars; ek stays a pointer so
   the routine can write the energy result back to the caller */
cgrbppush23l(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,*ci,ek,*idimp,*nppmx,
*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void cgrbppushf23l_(float *ppart, float *fxy, float *bxy, int *kpic,
int *ncl, int *ihole, float *qbm, float *dt,
float *dtc, float *ci, float *ek, int *idimp,
int *nppmx, int *nx, int *ny, int *mx, int *my,
int *nxv, int *nyv, int *mx1, int *mxy1, int *ntmax,
int *irc) {
/* Fortran wrapper: dereference input scalars; ek and irc stay as
   pointers so outputs reach the Fortran caller */
cgrbppushf23l(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,*ci,ek,
*idimp,*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,
*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void cgppost2l_(float *ppart, float *q, int *kpic, float *qm,
int *nppmx, int *idimp, int *mx, int *my, int *nxv,
int *nyv, int *mx1, int *mxy1) {
/* Fortran wrapper: dereference input scalars and forward to cgppost2l */
cgppost2l(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*nxv,*nyv,*mx1,
*mxy1);
return;
}
/*--------------------------------------------------------------------*/
void cgjppost2l_(float *ppart, float *cu, int *kpic, float *qm,
float *dt, int *nppmx, int *idimp, int *nx, int *ny,
int *mx, int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ipbc) {
/* Fortran wrapper: dereference input scalars and forward to cgjppost2l */
cgjppost2l(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*mx,*my,*nxv,
*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void cgjppostf2l_(float *ppart, float *cu, int *kpic, int *ncl,
int *ihole, float *qm, float *dt, int *nppmx,
int *idimp, int *nx, int *ny, int *mx, int *my,
int *nxv, int *nyv, int *mx1, int *mxy1, int *ntmax,
int *irc) {
/* Fortran wrapper: dereference input scalars; irc returns the error flag */
cgjppostf2l(ppart,cu,kpic,ncl,ihole,*qm,*dt,*nppmx,*idimp,*nx,*ny,
*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void cgrjppost2l_(float *ppart, float *cu, int *kpic, float *qm,
float *dt, float *ci, int *nppmx, int *idimp, int *nx,
int *ny, int *mx, int *my, int *nxv, int *nyv,
int *mx1, int *mxy1, int *ipbc) {
/* Fortran wrapper: dereference input scalars and forward to cgrjppost2l */
cgrjppost2l(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*mx,*my,
*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
void cgrjppostf2l_(float *ppart, float *cu, int *kpic, int *ncl,
int *ihole, float *qm, float *dt, float *ci,
int *nppmx, int *idimp, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1, int *mxy1,
int *ntmax, int *irc) {
/* Fortran wrapper: dereference input scalars; irc returns the error flag */
cgrjppostf2l(ppart,cu,kpic,ncl,ihole,*qm,*dt,*ci,*nppmx,*idimp,*nx,
*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void cpporder2l_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *nx, int *ny,
int *mx, int *my, int *mx1, int *my1, int *npbmx,
int *ntmax, int *irc) {
/* Fortran wrapper: dereference input scalars; irc returns the error flag */
cpporder2l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*mx,*my,
*mx1,*my1,*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void cpporderf2l_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *mx1,
int *my1, int *npbmx, int *ntmax, int *irc) {
/* Fortran wrapper: dereference input scalars; irc returns the error flag */
cpporderf2l(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
void cbguard2l_(float *bxy, int *nx, int *ny, int *nxe, int *nye) {
/* Fortran wrapper: dereference input scalars and forward to cbguard2l */
cbguard2l(bxy,*nx,*ny,*nxe,*nye);
return;
}
/*--------------------------------------------------------------------*/
void cacguard2l_(float *cu, int *nx, int *ny, int *nxe, int *nye) {
/* Fortran wrapper: dereference input scalars and forward to cacguard2l */
cacguard2l(cu,*nx,*ny,*nxe,*nye);
return;
}
/*--------------------------------------------------------------------*/
void caguard2l_(float *q, int *nx, int *ny, int *nxe, int *nye) {
/* Fortran wrapper: dereference input scalars and forward to caguard2l */
caguard2l(q,*nx,*ny,*nxe,*nye);
return;
}
/*--------------------------------------------------------------------*/
void cmpois23_(float complex *q, float complex *fxy, int *isign,
float complex *ffc, float *ax, float *ay, float *affp,
float *we, int *nx, int *ny, int *nxvh, int *nyv,
int *nxhd, int *nyhd) {
/* Fortran wrapper: dereference input scalars; we stays a pointer so
   the routine can write the energy result back to the caller */
cmpois23(q,fxy,*isign,ffc,*ax,*ay,*affp,we,*nx,*ny,*nxvh,*nyv,*nxhd,
*nyhd);
return;
}
/*--------------------------------------------------------------------*/
void cmcuperp2_(float complex *cu, int *nx, int *ny, int *nxvh,
int *nyv) {
/* Fortran wrapper: dereference input scalars and forward to cmcuperp2 */
cmcuperp2(cu,*nx,*ny,*nxvh,*nyv);
return;
}
/*--------------------------------------------------------------------*/
void cmibpois23_(float complex *cu, float complex *bxy,
float complex *ffc, float *ci, float *wm, int *nx,
int *ny, int *nxvh, int *nyv, int *nxhd, int *nyhd) {
/* Fortran wrapper: dereference input scalars; wm stays a pointer so
   the routine can write the energy result back to the caller */
cmibpois23(cu,bxy,ffc,*ci,wm,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
void cmmaxwel2_(float complex *exy, float complex *bxy,
float complex *cu, float complex *ffc, float *ci,
float *dt, float *wf, float *wm, int *nx, int *ny,
int *nxvh, int *nyv, int *nxhd, int *nyhd) {
/* Fortran wrapper: dereference input scalars; wf and wm stay as
   pointers so the energy results reach the Fortran caller */
cmmaxwel2(exy,bxy,cu,ffc,*ci,*dt,wf,wm,*nx,*ny,*nxvh,*nyv,*nxhd,
*nyhd);
return;
}
/*--------------------------------------------------------------------*/
void cmemfield2_(float complex *fxy, float complex *exy,
float complex *ffc, int *isign, int *nx, int *ny,
int *nxvh, int *nyv, int *nxhd, int *nyhd) {
/* Fortran wrapper: dereference input scalars and forward to cmemfield2 */
cmemfield2(fxy,exy,ffc,*isign,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
void cwfft2rinit_(int *mixup, float complex *sct, int *indx, int *indy,
int *nxhyd, int *nxyhd) {
/* Fortran wrapper: dereference input scalars and forward to cwfft2rinit */
cwfft2rinit(mixup,sct,*indx,*indy,*nxhyd,*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
void cfft2rmxx_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *nyi,
int *nyp, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) {
/* Fortran wrapper: dereference input scalars and forward to cfft2rmxx */
cfft2rmxx(f,*isign,mixup,sct,*indx,*indy,*nyi,*nyp,*nxhd,*nyd,*nxhyd,
*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
void cfft2rmxy_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *nxi,
int *nxp, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) {
/* Fortran wrapper: dereference input scalars and forward to cfft2rmxy */
cfft2rmxy(f,*isign,mixup,sct,*indx,*indy,*nxi,*nxp,*nxhd,*nyd,*nxhyd,
*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
void cfft2rm3x_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *nyi,
int *nyp, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) {
/* Fortran wrapper: dereference input scalars and forward to cfft2rm3x */
cfft2rm3x(f,*isign,mixup,sct,*indx,*indy,*nyi,*nyp,*nxhd,*nyd,*nxhyd,
*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
void cfft2rm3y_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *nxi,
int *nxp, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) {
/* Fortran wrapper: dereference input scalars and forward to cfft2rm3y */
cfft2rm3y(f,*isign,mixup,sct,*indx,*indy,*nxi,*nxp,*nxhd,*nyd,*nxhyd,
*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
void cwfft2rmx_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *nxhd,
int *nyd, int *nxhyd, int *nxyhd) {
/* Fortran wrapper: dereference input scalars and forward to cwfft2rmx */
cwfft2rmx(f,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
void cwfft2rm3_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *nxhd,
int *nyd, int *nxhyd, int *nxyhd) {
/* Fortran wrapper: dereference input scalars and forward to cwfft2rm3 */
cwfft2rm3(f,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,*nxyhd);
return;
}
/* ==== distort.c ==== */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% John Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distort.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/shear.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline double MagickMin(const double x,const double y)
{
  /* Lesser of x and y; yields y when the comparison is false
     (equal values, or an unordered NaN comparison). */
  if (x < y)
    return(x);
  return(y);
}
static inline double MagickMax(const double x,const double y)
{
  /* Greater of x and y; yields y when the comparison is false
     (equal values, or an unordered NaN comparison). */
  if (x > y)
    return(x);
  return(y);
}
static inline void AffineArgsToCoefficients(double *affine)
{
  /* Reorder the external affine arguments sx,ry,rx,sy,tx,ty (slots
     0..5) into the internal coefficient layout; slots 0 and 5 are
     already in place and remain unchanged. */
  const double ry = affine[1];
  const double rx = affine[2];
  const double sy = affine[3];
  const double tx = affine[4];
  affine[1] = rx;
  affine[2] = tx;
  affine[3] = ry;
  affine[4] = sy;
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /* Inverse of AffineArgsToCoefficients: reorder the internal
     coefficients c0..c5 back into the external argument order
     sx,ry,rx,sy,tx,ty; slots 0 and 5 remain unchanged. */
  const double c1 = coeff[1];
  const double c2 = coeff[2];
  const double c3 = coeff[3];
  const double c4 = coeff[4];
  coeff[1] = c3;
  coeff[2] = c1;
  coeff[3] = c4;
  coeff[4] = c2;
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 50 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
inverse[0]=determinant*coeff[4];
inverse[1]=determinant*(-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
inverse[3]=determinant*(-coeff[3]);
inverse[4]=determinant*coeff[0];
inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}
static void InvertPerspectiveCoefficients(const double *coeff,
double *inverse)
{
/* From "Digital Image Warping" by George Wolberg, page 53 */
double determinant;
determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]);
inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]);
inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]);
inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]);
inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
* the 2 number_valuesal polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
  /*
    Return the number of terms for a 2d polynomial of the given order.
    Valid orders are the integers 1 to 5, or the special value 1.5
    (bilinear); anything else returns 0 to signal an invalid order.
    FIX: the original tested (order-1.5) > MagickEpsilon, which is
    negative for any non-integer order below 1.5 (e.g. 1.2), silently
    accepting it and yielding a bogus term count; fabs() restricts
    acceptance to orders within epsilon of exactly 1.5.
  */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon ) )
    return 0; /* invalid polynomial order */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /* Value of the n-th 2d polynomial basis term x^i * y^j at (x,y).
     Term order: constant, affine (x, y), bilinear (x*y), then the
     remaining terms of each order from x^N down to y^N (see the
     table above poly_number_terms).  Out-of-range n yields 0. */
  static const int power[21][2] = {
    {0,0},                                  /* constant  */
    {1,0}, {0,1},                           /* affine    */
    {1,1},                                  /* bilinear  */
    {2,0}, {0,2},                           /* quadratic */
    {3,0}, {2,1}, {1,2}, {0,3},             /* cubic     */
    {4,0}, {3,1}, {2,2}, {1,3}, {0,4},      /* quartic   */
    {5,0}, {4,1}, {3,2}, {2,3}, {1,4}, {0,5}/* quintic   */
  };
  double value;
  int i;

  if (n < 0 || n > 20)
    return( 0 ); /* should never happen */
  value=1.0;
  for (i=0; i < power[n][0]; i++)
    value*=x;                /* multiply x's first (matches original) */
  for (i=0; i < power[n][1]; i++)
    value*=y;
  return(value);
}
static const char *poly_basis_str(ssize_t n)
{
  /* Symbolic (string) form of the n-th polynomial basis term, using
     "ii" and "jj" for the x and y variables; same term ordering as
     poly_basis_fn.  Out-of-range n yields "UNKNOWN". */
  static const char *terms[21] = {
    "",                                               /* constant  */
    "*ii", "*jj",                                     /* affine    */
    "*ii*jj",                                         /* bilinear  */
    "*ii*ii", "*jj*jj",                               /* quadratic */
    "*ii*ii*ii", "*ii*ii*jj", "*ii*jj*jj", "*jj*jj*jj",
    "*ii*ii*ii*ii", "*ii*ii*ii*jj", "*ii*ii*jj*jj",
    "*ii*jj*jj*jj", "*jj*jj*jj*jj",                   /* quartic   */
    "*ii*ii*ii*ii*ii", "*ii*ii*ii*ii*jj", "*ii*ii*ii*jj*jj",
    "*ii*ii*jj*jj*jj", "*ii*jj*jj*jj*jj", "*jj*jj*jj*jj*jj"
  };

  if (n < 0 || n > 20)
    return( "UNKNOWN" ); /* should never happen */
  return(terms[n]);
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /* x-derivative monomial of the n-th basis term.
     NOTE(review): the integer multipliers from differentiation
     (d/dx x^2 = 2x, etc.) are omitted here, as in the original —
     presumably absorbed elsewhere; confirm before relying on exact
     derivative magnitudes. */
  switch(n) {
    /* terms constant in x (pure powers of y, or the constant term) */
    case 0: case 2: case 5: case 9: case 14: case 20:
      return( 0.0 );
    case 1:  return( 1.0 );
    case 3:  return( y );        /* bilinear */
    case 4:  return( x );
    case 6:  return( x*x );
    case 7:  return( x*y );
    case 8:  return( y*y );
    case 10: return( x*x*x );
    case 11: return( x*x*y );
    case 12: return( x*y*y );
    case 13: return( y*y*y );
    case 15: return( x*x*x*x );
    case 16: return( x*x*x*y );
    case 17: return( x*x*y*y );
    case 18: return( x*y*y*y );
    case 19: return( y*y*y*y );
  }
  return( 0.0 ); /* should never happen */
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /* y-derivative monomial of the n-th basis term (same multiplier
     convention as poly_basis_dx).  For n > 5 the answer equals the
     x-derivative of term n-1 — an index-shift trick that holds for
     all orders; only the quadratic terms break the pattern, because
     they were re-ordered to make room for the bilinear term, hence
     the explicit cases below. */
  if (n <= 5)
    switch(n) {
      case 2:  return( 1.0 );   /* affine */
      case 3:  return( x );     /* bilinear */
      case 5:  return( y );     /* quadratic */
      default: return( 0.0 );   /* cases 0, 1, 4 (and n < 0) */
    }
  return( poly_basis_dx(n-1,x,y) );
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *deskew_image;

  /*
    Affine transform image: repackage the forward affine matrix as the
    six arguments of the AffineProjection distortion and delegate to
    DistortImage(), which allocates and returns the new image (or NULL
    on failure, with details in 'exception').
    FIX: assert image is non-NULL before dereferencing it, matching
    the checks already made on affine_matrix and exception.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (const AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* argument order expected by AffineProjection: sx rx ry sy tx ty */
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(deskew_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usally r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
static double *GenerateCoefficients(const Image *image,
DistortImageMethod *method,const size_t number_arguments,
const double *arguments,size_t number_values,ExceptionInfo *exception)
{
double
*coeff;
register size_t
i;
size_t
number_coeff, /* number of coefficients to return (array size) */
cp_size, /* number floating point numbers per control point */
cp_x,cp_y, /* the x,y indexes for control point */
cp_values; /* index of values for this control point */
/* number_values Number of values given per control point */
if ( number_values == 0 ) {
/* Image distortion using control points (or other distortion)
That is generate a mapping so that x,y->u,v given u,v,x,y
*/
number_values = 2; /* special case: two values of u,v */
cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */
cp_x = 2; /* location of x,y in input control values */
cp_y = 3;
/* NOTE: cp_values, also used for later 'reverse map distort' tests */
}
else {
cp_x = 0; /* location of x,y in input control values */
cp_y = 1;
cp_values = 2; /* and the other values are after x,y */
/* Typically in this case the values are R,G,B color values */
}
cp_size = number_values+2; /* each CP defintion involves this many numbers */
/* If not enough control point pairs are found for specific distortions
fall back to Affine distortion (allowing 0 to 3 point pairs)
*/
if ( number_arguments < 4*cp_size &&
( *method == BilinearForwardDistortion
|| *method == BilinearReverseDistortion
|| *method == PerspectiveDistortion
) )
*method = AffineDistortion;
number_coeff=0;
switch (*method) {
case AffineDistortion:
/* also BarycentricColorInterpolate: */
number_coeff=3*number_values;
break;
case PolynomialDistortion:
/* number of coefficents depend on the given polynomal 'order' */
if ( number_arguments <= 1 && (number_arguments-1)%cp_size != 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid number of args: order [CPs]...");
return((double *) NULL);
}
i = poly_number_terms(arguments[0]);
number_coeff = 2 + i*number_values;
if ( i == 0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Polynomial",
"Invalid order, should be interger 1 to 5, or 1.5");
return((double *) NULL);
}
if ( number_arguments < 1+i*cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Polynomial", (double) i);
return((double *) NULL);
}
break;
case BilinearReverseDistortion:
number_coeff=4*number_values;
break;
/*
The rest are constants as they are only used for image distorts
*/
case BilinearForwardDistortion:
number_coeff=10; /* 2*4 coeff plus 2 constants */
cp_x = 0; /* Reverse src/dest coords for forward mapping */
cp_y = 1;
cp_values = 2;
break;
#if 0
case QuadraterialDistortion:
number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
break;
case ShepardsDistortion:
number_coeff=1; /* The power factor to use */
break;
case ArcDistortion:
number_coeff=5;
break;
case ScaleRotateTranslateDistortion:
case AffineProjectionDistortion:
case Plane2CylinderDistortion:
case Cylinder2PlaneDistortion:
number_coeff=6;
break;
case PolarDistortion:
case DePolarDistortion:
number_coeff=8;
break;
case PerspectiveDistortion:
case PerspectiveProjectionDistortion:
number_coeff=9;
break;
case BarrelDistortion:
case BarrelInverseDistortion:
number_coeff=10;
break;
default:
perror("unknown method given"); /* just fail assertion */
}
/* allocate the array of coefficients needed */
coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
if (coeff == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "GenerateCoefficients");
return((double *) NULL);
}
/* zero out coefficients array */
for (i=0; i < number_coeff; i++)
coeff[i] = 0.0;
switch (*method)
{
case AffineDistortion:
{
/* Affine Distortion
v = c0*x + c1*y + c2
for each 'value' given
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
"Affine", 1.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* handle special cases of not enough arguments */
if ( number_arguments == cp_size ) {
/* Only 1 CP Set Given */
if ( cp_values == 0 ) {
/* image distortion - translate the image */
coeff[0] = 1.0;
coeff[2] = arguments[0] - arguments[2];
coeff[4] = 1.0;
coeff[5] = arguments[1] - arguments[3];
}
else {
/* sparse gradient - use the values directly */
for (i=0; i<number_values; i++)
coeff[i*3+2] = arguments[cp_values+i];
}
}
else {
/* 2 or more points (usally 3) given.
Solve a least squares simultaneous equation for coefficients.
*/
double
**matrix,
**vectors,
terms[3];
MagickBooleanType
status;
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(3UL,3UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*3]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),3UL,number_values);
}
if ( number_arguments == 2*cp_size ) {
/* Only two pairs were given, but we need 3 to solve the affine.
Fake extra coordinates by rotating p1 around p0 by 90 degrees.
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0)
*/
terms[0] = arguments[cp_x]
- ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
terms[1] = arguments[cp_y] +
+ ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
terms[2] = 1; /* 1 */
if ( cp_values == 0 ) {
/* Image Distortion - rotate the u,v coordients too */
double
uv2[2];
uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */
uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */
LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
}
else {
/* Sparse Gradient - use values of p0 for linear gradient */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[cp_values]),3UL,number_values);
}
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,3UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 3UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
}
return(coeff);
}
case AffineProjectionDistortion:
{
/*
Arguments: Affine Matrix (forward mapping)
Arguments sx, rx, ry, sy, tx, ty
Where u = sx*x + ry*y + tx
v = rx*x + sy*y + ty
Returns coefficients (in there inverse form) ordered as...
sx ry tx rx sy ty
AffineProjection Distortion Notes...
+ Will only work with a 2 number_values for Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
double inverse[8];
if (number_arguments != 6) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs 6 coeff values'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
for(i=0; i<6UL; i++ )
inverse[i] = arguments[i];
AffineArgsToCoefficients(inverse); /* map into coefficents */
InvertAffineCoefficients(inverse, coeff); /* invert */
*method = AffineDistortion;
return(coeff);
}
case ScaleRotateTranslateDistortion:
{
/* Scale, Rotate and Translate Distortion
An alternative Affine Distortion
Argument options, by number of arguments given:
7: x,y, sx,sy, a, nx,ny
6: x,y, s, a, nx,ny
5: x,y, sx,sy, a
4: x,y, s, a
3: x,y, a
2: s, a
1: a
Where actions are (in order of application)
x,y 'center' of transforms (default = image center)
sx,sy scale image by this amount (default = 1)
a angle of rotation (argument required)
nx,ny move 'center' here (default = x,y or no movement)
And convert to affine mapping coefficients
ScaleRotateTranslate Distortion Notes...
+ Does not use a set of CPs in any normal way
+ Will only work with a 2 number_valuesal Image Distortion
+ Cannot be used for generating a sparse gradient (interpolation)
*/
double
cosine, sine,
x,y,sx,sy,a,nx,ny;
/* set default center, and default scale */
x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
y = ny = (double)(image->rows)/2.0 + (double)image->page.y;
sx = sy = 1.0;
switch ( number_arguments ) {
case 0:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Needs at least 1 argument'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
case 1:
a = arguments[0];
break;
case 2:
sx = sy = arguments[0];
a = arguments[1];
break;
default:
x = nx = arguments[0];
y = ny = arguments[1];
switch ( number_arguments ) {
case 3:
a = arguments[2];
break;
case 4:
sx = sy = arguments[2];
a = arguments[3];
break;
case 5:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
break;
case 6:
sx = sy = arguments[2];
a = arguments[3];
nx = arguments[4];
ny = arguments[5];
break;
case 7:
sx = arguments[2];
sy = arguments[3];
a = arguments[4];
nx = arguments[5];
ny = arguments[6];
break;
default:
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
break;
}
/* Trap if sx or sy == 0 -- image is scaled out of existance! */
if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Zero Scale Given'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* Save the given arguments as an affine distortion */
a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);
*method = AffineDistortion;
coeff[0]=cosine/sx;
coeff[1]=sine/sx;
coeff[2]=x-nx*coeff[0]-ny*coeff[1];
coeff[3]=(-sine)/sy;
coeff[4]=cosine/sy;
coeff[5]=y-nx*coeff[3]-ny*coeff[4];
return(coeff);
}
case PerspectiveDistortion:
{ /*
Perspective Distortion (a ratio of affine distortions)
p(x,y) c0*x + c1*y + c2
u = ------ = ------------------
r(x,y) c6*x + c7*y + 1
q(x,y) c3*x + c4*y + c5
v = ------ = ------------------
r(x,y) c6*x + c7*y + 1
c8 = Sign of 'r', or the denominator affine, for the actual image.
This determines what part of the distorted image is 'ground'
side of the horizon, the other part is 'sky' or invalid.
Valid values are +1.0 or -1.0 only.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
Perspective Distortion Notes...
+ Can be thought of as ratio of 3 affine transformations
+ Not separatable: r() or c6 and c7 are used by both equations
+ All 8 coefficients must be determined simultaniously
+ Will only work with a 2 number_valuesal Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
+ It is not linear, but is simple to generate an inverse
+ All lines within an image remain lines.
+ but distances between points may vary.
*/
double
**matrix,
*vectors[1],
terms[8];
size_t
cp_u = cp_values,
cp_v = cp_values+1;
MagickBooleanType
status;
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* fake 1x8 vectors matrix directly using the coefficients array */
vectors[0] = &(coeff[0]);
/* 8x8 least-squares matrix (zeroed) */
matrix = AcquireMagickMatrix(8UL,8UL);
if (matrix == (double **) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* Add control points for least squares solving */
for (i=0; i < number_arguments; i+=4) {
terms[0]=arguments[i+cp_x]; /* c0*x */
terms[1]=arguments[i+cp_y]; /* c1*y */
terms[2]=1.0; /* c2*1 */
terms[3]=0.0;
terms[4]=0.0;
terms[5]=0.0;
terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */
terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
8UL,1UL);
terms[0]=0.0;
terms[1]=0.0;
terms[2]=0.0;
terms[3]=arguments[i+cp_x]; /* c3*x */
terms[4]=arguments[i+cp_y]; /* c4*y */
terms[5]=1.0; /* c5*1 */
terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
8UL,1UL);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,8UL,1UL);
matrix = RelinquishMagickMatrix(matrix, 8UL);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image coordinate (first control point) in
destination for determination of what part of view is 'ground'.
*/
coeff[8] = coeff[6]*arguments[cp_x]
+ coeff[7]*arguments[cp_y] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
return(coeff);
}
case PerspectiveProjectionDistortion:
{
/*
Arguments: Perspective Coefficents (forward mapping)
*/
if (number_arguments != 8) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'Needs 8 coefficient values'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
return((double *) NULL);
}
/* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */
InvertPerspectiveCoefficients(arguments, coeff);
/*
Calculate 9'th coefficient! The ground-sky determination.
What is sign of the 'ground' in r() denominator affine function?
Just use any valid image cocodinate in destination for determination.
For a forward mapped perspective the images 0,0 coord will map to
c2,c5 in the distorted image, so set the sign of denominator of that.
*/
coeff[8] = coeff[6]*arguments[2]
+ coeff[7]*arguments[5] + 1.0;
coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
*method = PerspectiveDistortion;
return(coeff);
}
case BilinearForwardDistortion:
case BilinearReverseDistortion:
{
/* Bilinear Distortion (Forward mapping)
v = c0*x + c1*y + c2*x*y + c3;
for each 'value' given
This is actually a simple polynomial Distortion! The difference
however is when we need to reverse the above equation to generate a
BilinearForwardDistortion (see below).
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
double
**matrix,
**vectors,
terms[4];
MagickBooleanType
status;
/* check the number of arguments */
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size*4 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'require at least %.20g CPs'",
CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* create matrix, and a fake vectors matrix */
matrix = AcquireMagickMatrix(4UL,4UL);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
if (matrix == (double **) NULL || vectors == (double **) NULL)
{
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x4 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[i*4]);
/* Add given control point pairs for least squares solving */
for (i=0; i < number_arguments; i+=cp_size) {
terms[0] = arguments[i+cp_x]; /* x */
terms[1] = arguments[i+cp_y]; /* y */
terms[2] = terms[0]*terms[1]; /* x*y */
terms[3] = 1; /* 1 */
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),4UL,number_values);
}
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,4UL,number_values);
matrix = RelinquishMagickMatrix(matrix, 4UL);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( *method == BilinearForwardDistortion ) {
/* Bilinear Forward Mapped Distortion
The above least-squares solved for coefficents but in the forward
direction, due to changes to indexing constants.
i = c0*x + c1*y + c2*x*y + c3;
j = c4*x + c5*y + c6*x*y + c7;
where i,j are in the destination image, NOT the source.
Reverse Pixel mapping however needs to use reverse of these
functions. It required a full page of algbra to work out the
reversed mapping formula, but resolves down to the following...
c8 = c0*c5-c1*c4;
c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula
i = i - c3; j = j - c7;
b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0
c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)
r = b*b - c9*(c+c);
if ( c9 != 0 )
y = ( -b + sqrt(r) ) / c9;
else
y = -c/b;
x = ( i - c1*y) / ( c1 - c2*y );
NB: if 'r' is negative there is no solution!
NB: the sign of the sqrt() should be negative if image becomes
flipped or flopped, or crosses over itself.
NB: techniqually coefficient c5 is not needed, anymore,
but kept for completness.
See Anthony Thyssen <A.Thyssen@griffith.edu.au>
or Fred Weinhaus <fmw@alink.net> for more details.
*/
coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
}
return(coeff);
}
#if 0
case QuadrilateralDistortion:
{
/* Map a Quadrilateral to a unit square using BilinearReverse
Then map that unit square back to the final Quadrilateral
using BilinearForward.
Input Arguments are sets of control points...
For Distort Images u,v, x,y ...
For Sparse Gradients x,y, r,g,b ...
*/
/* UNDER CONSTRUCTION */
return(coeff);
}
#endif
case PolynomialDistortion:
{
/* Polynomial Distortion
First two coefficents are used to hole global polynomal information
c0 = Order of the polynimial being created
c1 = number_of_terms in one polynomial equation
Rest of the coefficients map to the equations....
v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
for each 'value' (number_values of them) given.
As such total coefficients = 2 + number_terms * number_values
Input Arguments are sets of control points...
For Distort Images order [u,v, x,y] ...
For Sparse Gradients order [x,y, r,g,b] ...
Polynomial Distortion Notes...
+ UNDER DEVELOPMENT -- Do not expect this to remain as is.
+ Currently polynomial is a reversed mapped distortion.
+ Order 1.5 is fudged to map into a bilinear distortion.
though it is not the same order as that distortion.
*/
double
**matrix,
**vectors,
*terms;
size_t
nterms; /* number of polynomial terms per number_values */
register ssize_t
j;
MagickBooleanType
status;
/* first two coefficients hold polynomial order information */
coeff[0] = arguments[0];
coeff[1] = (double) poly_number_terms(arguments[0]);
nterms = (size_t) coeff[1];
/* create matrix, a fake vectors matrix, and least sqs terms */
matrix = AcquireMagickMatrix(nterms,nterms);
vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
if (matrix == (double **) NULL ||
vectors == (double **) NULL ||
terms == (double *) NULL )
{
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
terms = (double *) RelinquishMagickMemory(terms);
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((double *) NULL);
}
/* fake a number_values x3 vectors matrix from coefficients array */
for (i=0; i < number_values; i++)
vectors[i] = &(coeff[2+i*nterms]);
/* Add given control point pairs for least squares solving */
for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
for (j=0; j < (ssize_t) nterms; j++)
terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
LeastSquaresAddTerms(matrix,vectors,terms,
&(arguments[i+cp_values]),nterms,number_values);
}
terms = (double *) RelinquishMagickMemory(terms);
/* Solve for LeastSquares Coefficients */
status=GaussJordanElimination(matrix,vectors,nterms,number_values);
matrix = RelinquishMagickMatrix(matrix, nterms);
vectors = (double **) RelinquishMagickMemory(vectors);
if ( status == MagickFalse ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Unsolvable Matrix'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
return(coeff);
}
case ArcDistortion:
{
/* Arc Distortion
Args: arc_width rotate top_edge_radius bottom_edge_radius
All but first argument are optional
arc_width The angle over which to arc the image side-to-side
rotate Angle to rotate image from vertical center
top_radius Set top edge of source image at this radius
bottom_radius Set bootom edge to this radius (radial scaling)
By default, if the radii arguments are nor provided the image radius
is calculated so the horizontal center-line is fits the given arc
without scaling.
The output image size is ALWAYS adjusted to contain the whole image,
and an offset is given to position image relative to the 0,0 point of
the origin, allowing users to use relative positioning onto larger
background (via -flatten).
The arguments are converted to these coefficients
c0: angle for center of source image
c1: angle scale for mapping to source image
c2: radius for top of source image
c3: radius scale for mapping source image
c4: centerline of arc within source image
Note the coefficients use a center angle, so asymptotic join is
furthest from both sides of the source image. This also means that
for arc angles greater than 360 the sides of the image will be
trimmed equally.
Arc Distortion Notes...
+ Does not use a set of CPs
+ Will only work with Image Distortion
+ Can not be used for generating a sparse gradient (interpolation)
*/
if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Arc Angle Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : 'Outer Radius Too Small'",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
coeff[0] = -MagickPI2; /* -90, place at top! */
if ( number_arguments >= 1 )
coeff[1] = DegreesToRadians(arguments[0]);
else
coeff[1] = MagickPI2; /* zero arguments - center is at top */
if ( number_arguments >= 2 )
coeff[0] += DegreesToRadians(arguments[1]);
coeff[0] /= Magick2PI; /* normalize radians */
coeff[0] -= MagickRound(coeff[0]);
coeff[0] *= Magick2PI; /* de-normalize back to radians */
coeff[3] = (double)image->rows-1;
coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
if ( number_arguments >= 3 ) {
if ( number_arguments >= 4 )
coeff[3] = arguments[2] - arguments[3];
else
coeff[3] *= arguments[2]/coeff[2];
coeff[2] = arguments[2];
}
coeff[4] = ((double)image->columns-1.0)/2.0;
return(coeff);
}
case PolarDistortion:
case DePolarDistortion:
{
/* (De)Polar Distortion (same set of arguments)
Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato
DePolar can also have the extra arguments of Width, Height
Coefficients 0 to 5 is the sanatized version first 6 input args
Coefficient 6 is the angle to coord ratio and visa-versa
Coefficient 7 is the radius to coord ratio and visa-versa
WARNING: It is possible for Radius max<min and/or Angle from>to
*/
if ( number_arguments == 3
|| ( number_arguments > 6 && *method == PolarDistortion )
|| number_arguments > 8 ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* Rmax - if 0 calculate appropriate value */
if ( number_arguments >= 1 )
coeff[0] = arguments[0];
else
coeff[0] = 0.0;
/* Rmin - usally 0 */
coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
/* Center X,Y */
if ( number_arguments >= 4 ) {
coeff[2] = arguments[2];
coeff[3] = arguments[3];
}
else { /* center of actual image */
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
}
/* Angle from,to - about polar center 0 is downward */
coeff[4] = -MagickPI;
if ( number_arguments >= 5 )
coeff[4] = DegreesToRadians(arguments[4]);
coeff[5] = coeff[4];
if ( number_arguments >= 6 )
coeff[5] = DegreesToRadians(arguments[5]);
if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
coeff[5] += Magick2PI; /* same angle is a full circle */
/* if radius 0 or negative, its a special value... */
if ( coeff[0] < MagickEpsilon ) {
/* Use closest edge if radius == 0 */
if ( fabs(coeff[0]) < MagickEpsilon ) {
coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
fabs(coeff[3]-image->page.y));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[2]-image->page.x-image->columns));
coeff[0]=MagickMin(coeff[0],
fabs(coeff[3]-image->page.y-image->rows));
}
/* furthest diagonal if radius == -1 */
if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
double rx,ry;
rx = coeff[2]-image->page.x;
ry = coeff[3]-image->page.y;
coeff[0] = rx*rx+ry*ry;
ry = coeff[3]-image->page.y-image->rows;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
rx = coeff[2]-image->page.x-image->columns;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
ry = coeff[3]-image->page.y;
coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
coeff[0] = sqrt(coeff[0]);
}
}
/* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
|| (coeff[0]-coeff[1]) < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid Radius",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* converstion ratios */
if ( *method == PolarDistortion ) {
coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
}
else { /* *method == DePolarDistortion */
coeff[6]=(coeff[5]-coeff[4])/image->columns;
coeff[7]=(coeff[0]-coeff[1])/image->rows;
}
return(coeff);
}
case Cylinder2PlaneDistortion:
case Plane2CylinderDistortion:
{
/* 3D Cylinder to/from a Tangential Plane
Projection between a clinder and flat plain from a point on the
center line of the cylinder.
The two surfaces coincide in 3D space at the given centers of
distortion (perpendicular to projection point) on both images.
Args: FOV_arc_width
Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y
FOV (Field Of View) the angular field of view of the distortion,
across the width of the image, in degrees. The centers are the
points of least distortion in the input and resulting images.
These centers are however determined later.
Coeff 0 is the FOV angle of view of image width in radians
Coeff 1 is calculated radius of cylinder.
Coeff 2,3 center of distortion of input image
Coefficents 4,5 Center of Distortion of dest (determined later)
*/
if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : Invalid FOV Angle",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
coeff[0] = DegreesToRadians(arguments[0]);
if ( *method == Cylinder2PlaneDistortion )
/* image is curved around cylinder, so FOV angle (in radians)
* scales directly to image X coordinate, according to its radius.
*/
coeff[1] = (double) image->columns/coeff[0];
else
/* radius is distance away from an image with this angular FOV */
coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) );
coeff[2] = (double)(image->columns)/2.0+image->page.x;
coeff[3] = (double)(image->rows)/2.0+image->page.y;
coeff[4] = coeff[2];
coeff[5] = coeff[3]; /* assuming image size is the same */
return(coeff);
}
case BarrelDistortion:
case BarrelInverseDistortion:
{
/* Barrel Distortion
Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
BarrelInv Distortion
Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)
Where Rd is the normalized radius from corner to middle of image
Input Arguments are one of the following forms (number of arguments)...
3: A,B,C
4: A,B,C,D
5: A,B,C X,Y
6: A,B,C,D X,Y
8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy
10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y
Returns 10 coefficent values, which are de-normalized (pixel scale)
Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc
*/
/* Radius de-normalization scaling factor */
double
rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);
/* sanity check number of args must = 3,4,5,6,8,10 or error */
if ( (number_arguments < 3) || (number_arguments == 7) ||
(number_arguments == 9) || (number_arguments > 10) )
{
coeff=(double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument", "%s : number of arguments",
CommandOptionToMnemonic(MagickDistortOptions, *method) );
return((double *) NULL);
}
/* A,B,C,D coefficients */
coeff[0] = arguments[0];
coeff[1] = arguments[1];
coeff[2] = arguments[2];
if ((number_arguments == 3) || (number_arguments == 5) )
coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
else
coeff[3] = arguments[3];
/* de-normalize the coefficients */
coeff[0] *= pow(rscale,3.0);
coeff[1] *= rscale*rscale;
coeff[2] *= rscale;
/* Y coefficients: as given OR same as X coefficients */
if ( number_arguments >= 8 ) {
coeff[4] = arguments[4] * pow(rscale,3.0);
coeff[5] = arguments[5] * rscale*rscale;
coeff[6] = arguments[6] * rscale;
coeff[7] = arguments[7];
}
else {
coeff[4] = coeff[0];
coeff[5] = coeff[1];
coeff[6] = coeff[2];
coeff[7] = coeff[3];
}
/* X,Y Center of Distortion (image coodinates) */
if ( number_arguments == 5 ) {
coeff[8] = arguments[3];
coeff[9] = arguments[4];
}
else if ( number_arguments == 6 ) {
coeff[8] = arguments[4];
coeff[9] = arguments[5];
}
else if ( number_arguments == 10 ) {
coeff[8] = arguments[8];
coeff[9] = arguments[9];
}
else {
/* center of the image provided (image coodinates) */
coeff[8] = (double)image->columns/2.0 + image->page.x;
coeff[9] = (double)image->rows/2.0 + image->page.y;
}
return(coeff);
}
case ShepardsDistortion:
{
/* Shepards Distortion input arguments are the coefficents!
Just check the number of arguments is valid!
Args: u1,v1, x1,y1, ...
OR : u1,v1, r1,g1,c1, ...
*/
if ( number_arguments%cp_size != 0 ||
number_arguments < cp_size ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument", "%s : 'requires CP's (4 numbers each)'",
CommandOptionToMnemonic(MagickDistortOptions, *method));
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
/* User defined weighting power for Shepard's Method */
{ const char *artifact=GetImageArtifact(image,"shepards:power");
if ( artifact != (const char *) NULL ) {
coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0;
if ( coeff[0] < MagickEpsilon ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s", "-define shepards:power" );
coeff=(double *) RelinquishMagickMemory(coeff);
return((double *) NULL);
}
}
else
coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */
}
return(coeff);
}
default:
break;
}
/* you should never reach this point */
perror("no method handler"); /* just fail assertion */
return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resizes an image using the equivalent but slower image
% distortion operator.  The filter is applied using an EWA cylindrical
% resampling.  But like resize the final image size is limited to whole pixels
% and virtual-pixels have no effect on the result.
%
% Note that images containing a transparency channel will be twice as slow to
% resize as images without transparency.
%
% The format of the DistortResizeImage method is:
%
% Image *DistortResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DistortResizeImage() resizes 'image' to columns x rows using the (slower)
  AffineDistortion operator with EWA cylindrical resampling, then crops the
  result back to the requested whole-pixel geometry so virtual-pixel effects
  do not leak into the final image.

  Images with a transparency channel are distorted twice (color and alpha
  separately) so that only the original image's alpha — and not the
  transparent virtual-pixel alpha — ends up in the result.

  Returns the resized image, or NULL on failure (details in 'exception').
  The caller owns the returned image and must destroy it.
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */

  /*
    Affine scale mapping as 3 control-point pairs (x,y -> u,v):
    0,0 -> 0,0 plus the source width/height mapped to the requested
    width/height.  Only the non-zero slots need be filled in.
  */
  (void) ResetMagickMemory(distort_args,0,12*sizeof(double));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;

  vp_save=GetImageVirtualPixelMethod(image);

  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if ( tmp_image == (Image *) NULL )
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod);

  if (image->matte == MagickFalse)
    {
      /*
        Image has no transparency channel, so we are free to use one to
        mask off virtual-pixel contributions during the distort.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL )
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      InheritException(exception,&image->exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate the virtual-pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.
      */
      Image
        *resize_alpha;

      /* distort alpha channel separately */
      (void) SeparateImageChannel(tmp_image,TrueAlphaChannel);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_alpha == (Image *) NULL )
        return((Image *) NULL);

      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if ( tmp_image == (Image *) NULL )
        {
          /* was a leak: the distorted alpha image must be released too */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resize image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      (void) SetImageAlphaChannel(resize_alpha,DeactivateAlphaChannel);
      (void) CompositeImage(resize_image,CopyOpacityCompositeOp,resize_alpha,
        0,0);
      InheritException(exception,&resize_image->exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save);

  /*
    Clean up the results of the distortion: crop to exactly the requested
    whole-pixel geometry.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;

  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
% distortion when more than the minimum number of control point pairs
% are provided.
%
% Perspective, and Bilinear, fall back to a Affine distortion when less
% than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
% Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
% Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
% Other settings that can affect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image,DistortImageMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=PerceptibleReciprocal(scale);
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = (size_t) (2*coeff[3]); /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = (double) geometry.width/2.0;
coeff[5] = (double) geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
overrides some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry);
if (flags==NoValue)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidGeometry","`%s' `%s'",
"distort:viewport",artifact);
else
viewport_given = MagickTrue;
}
}
/* Verbose output */
if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
register ssize_t
i;
char image_gen[MaxTextExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MaxTextExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method) {
case AffineDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case PerspectiveDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr, "Perspective Projection:\n");
(void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '");
for (i=0; i<4; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for (; i<7; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n",
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n",
coeff[8] < 0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
(void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
0.5-coeff[3], 0.5-coeff[7]);
(void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if ( coeff[9] != 0 ) {
(void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",
-2*coeff[9], coeff[4], -coeff[0]);
(void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",
coeff[9]);
} else
(void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4], coeff[0]);
(void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",
-coeff[1], coeff[0], coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr, " (rt < 0 ) ? red : %s'\n", lookup);
else
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case BilinearReverseDistortion:
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",
coeff[0],(unsigned long) nterms);
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n yy =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n");
for ( i=0; i<5; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n",
coeff[1], coeff[4]);
(void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
-coeff[2], -coeff[3]);
(void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1], coeff[7] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] );
(void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] );
(void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] );
(void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n",
coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ double xc,yc;
/* NOTE: This does the barrel roll in pixel coords not image coords
** The internal distortion must do it in image coordinates,
** so that is what the center coeff (8,9) is given in.
*/
xc = ((double)image->columns-1.0)/2.0 + image->page.x;
yc = ((double)image->rows-1.0)/2.0 + image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n",
coeff[8]-0.5, coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[4],coeff[5],coeff[6],coeff[7]);
(void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/* The user provided a 'scale' expert option will scale the
output image size, by the factor given allowing for super-sampling
of the distorted image space. Any scaling factors must naturally
be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width=(size_t) (output_scaling*geometry.width+0.5);
geometry.height=(size_t) (output_scaling*geometry.height+0.5);
geometry.x=(ssize_t) (output_scaling*geometry.x+0.5);
geometry.y=(ssize_t) (output_scaling*geometry.y+0.5);
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"InvalidArgument","%s","-define distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
return((Image *) NULL);
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass) == MagickFalse)
{
InheritException(exception,&distort_image->exception);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
if ((IsPixelGray(&distort_image->background_color) == MagickFalse) &&
(IsGrayColorspace(distort_image->colorspace) != MagickFalse))
(void) SetImageColorspace(distort_image,sRGBColorspace);
if (distort_image->background_color.opacity != OpaqueOpacity)
distort_image->matte=MagickTrue;
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ResampleFilter
**restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetMagickPixelPacket(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireAuthenticCacheView(distort_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,distort_image,distort_image->rows,1)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
MagickPixelPacket
pixel, /* pixel color to assign to distorted image */
invalid; /* the color to assign when distort result is invalid */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register IndexPacket
*restrict indexes;
register ssize_t
i;
register PixelPacket
*restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(distort_view);
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
GetMagickPixelPacket(distort_image,&invalid);
SetMagickPixelPacket(distort_image,&distort_image->matte_color,
(IndexPacket *) NULL, &invalid);
if (distort_image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&invalid); /* what about other color spaces? */
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivitives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivitives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunatally requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-alising */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesain to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* @D Polar to Carteasain */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are usless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personnal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of a infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (double) ((coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5);
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 1
/*if ( i == 0 && j == 0 ) {*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distionion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepards Method, or Inverse Weighted Distance for
displacement around the destination image control points
The input arguments are the coefficents to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
Note: We can not determine derivatives using shepards method
so only a point sample interpolatation can be used.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
weight = pow(weight,coeff[0]); /* shepards power factor */
weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x; /* make it as relative displacement */
s.y += d.y;
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelPacket(distort_image,&invalid,q,indexes);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
MagickPixelCompositeBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelPacket(distort_image,&pixel,q,indexes);
}
q++;
indexes++;
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DistortImage)
#endif
proceed=SetImageProgress(image,DistortImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners.  Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *distort_image,
    *rotate_image;

  MagickRealType
    angle;

  PointInfo
    shear;

  size_t
    rotations;

  /*
    Adjust rotation angle.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Normalize into a residual angle in (-45,45] plus 0..3 quarter turns. */
  angle=degrees;
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Shear factors of the residual rotation; when both are negligible the
    rotation is an exact multiple of 90 degrees and can be done losslessly.
  */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception));
  distort_image=CloneImage(image,0,0,MagickTrue,exception);
  if (distort_image == (Image *) NULL)
    return((Image *) NULL);
  /* empty corners are filled with the background color */
  (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod);
  /*
    FIX: the argument "&degrees" had been corrupted to "°rees" (an HTML
    entity rendering of "&deg"), which does not compile; restore the
    address-of expression expected by DistortImage().
  */
  rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  distort_image=DestroyImage(distort_image);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,const ChannelType channel,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o channel: Specify which color values (in RGBKA sequence) are being set.
% This also determines the number of color_values in above.
%
% o method: the method to fill in the gradient between the control points.
%
% The methods used for SparseColor() are often similar to methods
% used for DistortImage(), and even share the same code for determination
% of the function coefficients, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const ChannelType channel,const SparseColorMethod method,
  const size_t number_arguments,const double *arguments,
  ExceptionInfo *exception)
{
#define SparseColorTag  "Distort/SparseColor"

  SparseColorMethod
    sparse_method;

  double
    *coeff;

  Image
    *sparse_image;

  size_t
    number_colors;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Determine number of color values needed per control point */
  number_colors=0;
  if ( channel & RedChannel     ) number_colors++;
  if ( channel & GreenChannel   ) number_colors++;
  if ( channel & BlueChannel    ) number_colors++;
  if ( channel & IndexChannel   ) number_colors++;
  if ( channel & OpacityChannel ) number_colors++;

  /*
    Convert input arguments into mapping coefficients; in this case
    we are mapping (distorting) colors, rather than coordinates.
  */
  { DistortImageMethod
      distort_method;

    /* SparseColorMethod values deliberately mirror DistortImageMethod
       values so the same coefficient generator can be reused. */
    distort_method=(DistortImageMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
                arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some Distort Methods may fall back to other simpler methods,
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse_color method. This also ensures
      correct two and one color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method; /* return non-distort methods to normal */
    if ( sparse_method == InverseColorInterpolate )
      coeff[0]=0.5; /* sqrt() the squared distance for inverse */
  }
  /* Verbose output: print the equivalent -fx expressions when possible */
  if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        if ( channel & RedChannel )
          (void) FormatLocaleFile(stderr, "  -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & GreenChannel )
          (void) FormatLocaleFile(stderr, "  -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & BlueChannel )
          (void) FormatLocaleFile(stderr, "  -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & IndexChannel )
          (void) FormatLocaleFile(stderr, "  -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & OpacityChannel )
          (void) FormatLocaleFile(stderr, "  -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        if ( channel & RedChannel )
          (void) FormatLocaleFile(stderr, "   -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & GreenChannel )
          (void) FormatLocaleFile(stderr, "   -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & BlueChannel )
          (void) FormatLocaleFile(stderr, "   -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & IndexChannel )
          (void) FormatLocaleFile(stderr, "   -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & OpacityChannel )
          (void) FormatLocaleFile(stderr, "   -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }

  /* Generate new image for generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy, is if storage class could change then clone
   * the image.
   */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sparse_image,DirectClass) == MagickFalse)
    { /* if image is ColorMapped - change it to DirectClass */
      InheritException(exception,&image->exception);
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE ----- */
    CacheView
      *sparse_view;

    MagickBooleanType
      status;

    MagickOffsetType
      progress;

    ssize_t
      j;

    status=MagickTrue;
    progress=0;
    sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
    /* one row of the output image per loop iteration */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sparse_image,sparse_image->rows,1)
#endif
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;

      MagickPixelPacket
        pixel;    /* pixel to assign to distorted image */

      register IndexPacket
        *restrict indexes;

      register ssize_t
        i;

      register PixelPacket
        *restrict q;

      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewAuthenticIndexQueue(sparse_view);
      GetMagickPixelPacket(sparse_image,&pixel);
      /* NOTE(review): the column loop bounds on image->columns while rows
         bound on sparse_image->rows; the clone has the same geometry so the
         two agree -- confirm if the clone geometry ever changes. */
      for (i=0; i < (ssize_t) image->columns; i++)
      {
        SetMagickPixelPacket(image,q,indexes,&pixel);
        /* interpolate a normalized color value for pixel (i,j) */
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            register ssize_t x=0;
            /* each selected channel is a plane: c = a*i + b*j + d */
            if ( channel & RedChannel )
              pixel.red     = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if ( channel & GreenChannel )
              pixel.green   = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if ( channel & BlueChannel )
              pixel.blue    = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if ( channel & IndexChannel )
              pixel.index   = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            if ( channel & OpacityChannel )
              pixel.opacity = coeff[x]*i +coeff[x+1]*j
                              +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            register ssize_t x=0;
            /* bilinear surface per channel: c = a*i + b*j + c*i*j + d */
            if ( channel & RedChannel )
              pixel.red     = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & GreenChannel )
              pixel.green   = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & BlueChannel )
              pixel.blue    = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & IndexChannel )
              pixel.index   = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & OpacityChannel )
              pixel.opacity = coeff[x]*i     + coeff[x+1]*j +
                              coeff[x+2]*i*j + coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance weights average (IDW) */
            size_t
              k;
            double
              denominator;

            if ( channel & RedChannel     ) pixel.red     = 0.0;
            if ( channel & GreenChannel   ) pixel.green   = 0.0;
            if ( channel & BlueChannel    ) pixel.blue    = 0.0;
            if ( channel & IndexChannel   ) pixel.index   = 0.0;
            if ( channel & OpacityChannel ) pixel.opacity = 0.0;
            denominator = 0.0;
            /* arguments are packed as x,y,color_values per control point */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              register ssize_t x=(ssize_t) k+2;
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              weight = pow(weight,coeff[0]); /* inverse of power factor */
              /* clamp so a control point exactly at (i,j) cannot blow up */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ( channel & RedChannel )
                pixel.red     += arguments[x++]*weight;
              if ( channel & GreenChannel )
                pixel.green   += arguments[x++]*weight;
              if ( channel & BlueChannel )
                pixel.blue    += arguments[x++]*weight;
              if ( channel & IndexChannel )
                pixel.index   += arguments[x++]*weight;
              if ( channel & OpacityChannel )
                pixel.opacity += arguments[x++]*weight;
              denominator += weight;
            }
            if ( channel & RedChannel     ) pixel.red     /= denominator;
            if ( channel & GreenChannel   ) pixel.green   /= denominator;
            if ( channel & BlueChannel    ) pixel.blue    /= denominator;
            if ( channel & IndexChannel   ) pixel.index   /= denominator;
            if ( channel & OpacityChannel ) pixel.opacity /= denominator;
            break;
          }
          case VoronoiColorInterpolate:
          default:
          { /* Just use the closest control point you can find! */
            size_t
              k;
            double
              minimum = MagickHuge;

            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ( channel & RedChannel     ) pixel.red     = arguments[x++];
                if ( channel & GreenChannel   ) pixel.green   = arguments[x++];
                if ( channel & BlueChannel    ) pixel.blue    = arguments[x++];
                if ( channel & IndexChannel   ) pixel.index   = arguments[x++];
                if ( channel & OpacityChannel ) pixel.opacity = arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /* set the color directly back into the source image;
           arguments were normalized, so scale up to the quantum range */
        if ( channel & RedChannel     ) pixel.red     *= QuantumRange;
        if ( channel & GreenChannel   ) pixel.green   *= QuantumRange;
        if ( channel & BlueChannel    ) pixel.blue    *= QuantumRange;
        if ( channel & IndexChannel   ) pixel.index   *= QuantumRange;
        if ( channel & OpacityChannel ) pixel.opacity *= QuantumRange;
        SetPixelPacket(sparse_image,&pixel,q,indexes);
        q++;
        indexes++;
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SparseColorImage)
#endif
          proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
|
single.c | #include <stdio.h>
#include <omp.h>
/*
 * OpenMP "single" demo: one thread reads the initialization value and all
 * threads then fill b[] with it in a worksharing loop.
 *
 * Fixes: implicit 'int' return type (invalid since C99), unchecked scanf()
 * (left 'a' uninitialized on bad input), and missing final return.
 */
int main(void){
  int n=9, i, a=0, b[9];
  for(i=0;i<n;i++) b[i]=-1;
  #pragma omp parallel
  {
    /* exactly one thread executes this; the others wait at the implicit
       barrier before the worksharing loop below */
    #pragma omp single
    {
      printf("Introduce valor de inicializacion a:");
      if (scanf("%d",&a) != 1)  /* on bad input keep the default a=0 */
        a = 0;
      printf("Single ejecutada por el thread%d\n",omp_get_thread_num());
    }
    /* the loop variable of a worksharing for is implicitly private */
    #pragma omp for
    for(i=0;i<n;i++)
      b[i]=a;
  }
  printf("Después de la región parallel:\n");
  for(i=0;i<n;i++) printf("b[%d]=%d\t",i,b[i]);
  printf("\n");
  return 0;
}
Par-12-SeqForParForNestedParFor.c |
int main(int argc, char **argv) {
  int a[4] = {1,2,3,4};
  int b[4] = {1,1,1,1};
  /* NOTE(review): i starts at 0, so "i < 2" is true on the first iteration
     and main always returns -1 here -- everything below is unreachable.
     This file looks like a test bench for a source-to-source parallelizer
     (sequential for + parallel for + nested parallel for), so the shape is
     presumably deliberate; confirm before "fixing". */
  for (int i = 0; i < 1; ++i) {
    if (i < 2) {
      return -1;
    }
  }
  /* Scale a[] in parallel; the nested parallel-for adds a[i] into every
     element of b[] (exercises nested parallelism). */
  #pragma omp parallel for
  for (int i = 0; i < 4; ++i) {
    a[i] = 3*a[i];
    #pragma omp parallel for
    for(int j = 0; j < 4; ++j) {
      b[j] = b[j] + a[i];
    }
  }
  return 0;
}
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 * Returns 1 when the difference is negative, 0 otherwise.
 * NOTE: *y is used as scratch space and is modified by the call (same
 * contract as the classic GNU libc elapsed-time example). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so the microsecond difference
     cannot go negative. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry any surplus of more than one second of microseconds
     into the seconds field. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* At this point tv_usec of the result is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative difference iff x's seconds fall short of (adjusted) y's. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver: allocates the grids, runs the 25-point stencil TESTS times and
 * reports the best wall-clock time.
 *
 * Fixes: Nx/Ny/Nz/Nt were read uninitialized when too few arguments were
 * given (UB); 'min(...)' was called although only the MIN macro is defined;
 * the initialization loops started at index 1 although the stencil reads
 * index 0 (i-4 with i==4), and only A[0] was initialized although A[1] is
 * read from timestep 1 on; top-level A/coef/tile_size were leaked.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* default to a small, well-defined problem when arguments are missing */
  int Nx = 16, Ny = 16, Nz = 16, Nt = 2;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* allocate the arrays (allocation failure unchecked: benchmark code) */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* initialize variables -- both time copies and from index 0, since the
     stencil reads A[...][i-4][j-4][k-4] down to 0 and reads A[1] at t>=1 */
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  (void) num_threads;  /* kept for parity with parallelized variants */

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    /* serial execution - Addition: 6 && Multiplication: 2 */
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
              coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
              coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
              coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
              coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
              coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
              coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
              coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
              coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]) ;
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    (void) ts_return;
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* FIX: was lowercase 'min', but only the MIN macro is defined above */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays, including the previously-leaked top levels */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
cygprofilesubs.c | #include "config.h"
#include <stdio.h>
extern void A (void);
extern void B (void);
extern void twice (void);
void callsubs (int niter)
{
  /* Root of the instrumented call chain (presumably a cyg_profile /
     -finstrument-functions test fixture -- confirm).
     NOTE(review): 'niter' is unused; exactly one call of A() is made
     regardless -- confirm whether a loop over niter was intended. */
  A();
}
void A (void)
{
  int n;

/* Two iterations, each calling B(), distributed over OpenMP threads so the
   profiling hooks are exercised from parallel context. 'private(n)' is
   redundant (the loop variable of an omp for is implicitly private) but
   harmless. */
#pragma omp parallel for private(n)
  for (n = 0; n < 2; ++n) {
    B ();
  }
}
void B (void)
{
  /* Calls twice() two times so each B() contributes a fixed, predictable
     number of profiled calls. */
  twice ();
  twice ();
}
void twice (void)
{
  /* Intentionally empty leaf of the instrumented call chain. */
}
|
edge_miner.h | #ifndef EDGE_MINER_H
#define EDGE_MINER_H
#include <mutex>
#include "miner.h"
#include "domain_support.h"
typedef std::pair<unsigned, unsigned> InitPattern;
typedef QuickPattern<EdgeEmbedding, ElementType> QPattern;
typedef CanonicalGraph<EdgeEmbedding, ElementType> CPattern;
typedef std::unordered_map<QPattern, Frequency> QpMapFreq; // quick pattern map (mapping quick pattern to its frequency)
typedef std::unordered_map<CPattern, Frequency> CgMapFreq; // canonical pattern map (mapping canonical pattern to its frequency)
typedef std::map<InitPattern, DomainSupport*> InitMap;
typedef std::unordered_map<QPattern, DomainSupport*> QpMapDomain; // quick pattern map (mapping quick pattern to its domain support)
typedef std::unordered_map<CPattern, DomainSupport*> CgMapDomain; // canonical pattern map (mapping canonical pattern to its domain support)
typedef std::unordered_map<unsigned, unsigned> FreqMap;
typedef std::unordered_map<unsigned, bool> DomainMap;
typedef PerThreadStorage<InitMap> LocalInitMap;
typedef PerThreadStorage<QpMapFreq> LocalQpMapFreq; // PerThreadStorage: thread-local quick pattern map
typedef PerThreadStorage<CgMapFreq> LocalCgMapFreq; // PerThreadStorage: thread-local canonical pattern map
typedef PerThreadStorage<QpMapDomain> LocalQpMapDomain;
typedef PerThreadStorage<CgMapDomain> LocalCgMapDomain;
class EdgeMiner : public Miner {
public:
  // Build an edge miner over graph 'g'; 'size' caps the pattern size and
  // 'nthreads' sizes every per-thread (thread-local) pattern map.
  EdgeMiner(Graph *g, unsigned size = 3, int nthreads = 1) {
    graph = g;
    max_size = size;
    numThreads = nthreads;
    construct_edgemap(); // precompute the (src,dst) -> edge-id lookup
    init_localmaps.set_size(nthreads);
    qp_localmaps.set_size(nthreads);
    cg_localmaps.set_size(nthreads);
  }
  // NOTE(review): the heap-allocated DomainSupport objects stored in the
  // maps are not released here -- confirm who owns and frees them.
  virtual ~EdgeMiner() {}
  // Grow each embedding at 'level' by one edge, in two passes:
  // pass 1 counts the non-automorphic single-edge extensions per embedding;
  // pass 2 prefix-sums the counts, allocates the next level, and writes
  // every extension at its reserved slot.
  void extend_edge(unsigned level, EmbeddingList& emb_list) {
    UintList num_new_emb(emb_list.size());
    // --- Pass 1: count extensions (data-parallel over embeddings) ---
    #pragma omp parallel for
    for (size_t pos = 0; pos < emb_list.size(); pos ++) {
      EdgeEmbedding emb(level+1);
      get_embedding(level, pos, emb_list, emb);
      num_new_emb[pos] = 0;
      unsigned n = emb.size();
      std::set<VertexId> vert_set;
      // the vertex set is only needed once embeddings are large enough to
      // possibly repeat a vertex
      if (n > 3)
        for (unsigned i = 0; i < n; i ++) vert_set.insert(emb.get_vertex(i));
      for (unsigned i = 0; i < n; ++i) {
        VertexId src = emb.get_vertex(i);
        if (emb.get_key(i) == 0) { // TODO: need to fix this
          IndexT row_begin = graph->edge_begin(src);
          IndexT row_end = graph->edge_end(src);
          for (IndexT e = row_begin; e < row_end; e++) {
            IndexT dst = graph->getEdgeDst(e);
            BYTE existed = 0;
            //if (is_frequent_edge[e])
            if (!is_edge_automorphism(n, emb, i, src, dst, existed, vert_set))
              num_new_emb[pos] ++;
          }
        }
      }
      emb.clean();
    }
    Ulong new_size = std::accumulate(num_new_emb.begin(), num_new_emb.end(), (Ulong)0);
    std::cout << "new_size = " << new_size << "\n";
    assert(new_size < 4294967296); // TODO: currently do not support vector size larger than 2^32
    UintList indices = parallel_prefix_sum(num_new_emb);
    new_size = indices[indices.size()-1];
    emb_list.add_level(new_size);
    // --- Pass 2: regenerate the same extensions and store them ---
    // NOTE(review): pass 1 iterates emb_list.size() while pass 2 iterates
    // emb_list.size(level); confirm the two counts always agree here.
    #pragma omp parallel for
    for (size_t pos = 0; pos < emb_list.size(level); pos ++) {
      EdgeEmbedding emb(level+1);
      get_embedding(level, pos, emb_list, emb);
      unsigned start = indices[pos]; // first slot reserved for this embedding
      unsigned n = emb.size();
      std::set<VertexId> vert_set;
      if (n > 3)
        for (unsigned i = 0; i < n; i ++) vert_set.insert(emb.get_vertex(i));
      for (unsigned i = 0; i < n; ++i) {
        IndexT src = emb.get_vertex(i);
        if (emb.get_key(i) == 0) {
          IndexT row_begin = graph->edge_begin(src);
          IndexT row_end = graph->edge_end(src);
          for (IndexT e = row_begin; e < row_end; e++) {
            IndexT dst = graph->getEdgeDst(e);
            BYTE existed = 0;
            //if (is_frequent_edge[e])
            if (!is_edge_automorphism(n, emb, i, src, dst, existed, vert_set)) {
              emb_list.set_idx(level+1, start, pos);   // parent embedding
              emb_list.set_his(level+1, start, i);     // extended position
              emb_list.set_vid(level+1, start++, dst); // new vertex
            }
          }
        }
      }
    }
  }
  // Compute domain supports of every single-edge pattern (label pair) and
  // return the number of patterns whose support reaches the threshold.
  // NOTE(review): the vertex loop is serial yet accumulates into
  // thread-local maps via getLocal(); presumably a runtime may parallelize
  // this loop -- confirm.
  inline unsigned init_aggregator() {
    init_map.clear();
    for (IndexT src = 0; src < graph->num_vertices(); src ++) {
      InitMap *lmap = init_localmaps.getLocal();
      auto src_label = graph->getData(src);
      IndexT row_begin = graph->edge_begin(src);
      IndexT row_end = graph->edge_end(src);
      for (IndexT e = row_begin; e < row_end; e++) {
        IndexT dst = graph->getEdgeDst(e);
        auto dst_label = graph->getData(dst);
        // canonical orientation: count each undirected edge pattern once
        if (src_label <= dst_label) {
          InitPattern key = get_init_pattern(src_label, dst_label);
          if (lmap->find(key) == lmap->end()) {
            (*lmap)[key] = new DomainSupport(2);
            (*lmap)[key]->set_threshold(threshold);
          }
          (*lmap)[key]->add_vertex(0, src);
          (*lmap)[key]->add_vertex(1, dst);
        }
      }
    }
    merge_init_map();
    std::cout << "Number of single-edge patterns: " << init_map.size() << "\n";
    unsigned count = 0;
    for (auto it = init_map.begin(); it != init_map.end(); ++it)
      if (it->second->get_support()) count ++;
    return count; // return number of frequent single-edge patterns
  }
  // Group the embeddings of 'level' by quick pattern, accumulating each
  // pattern's per-position (domain) support in thread-local maps, and
  // record every embedding's pattern id back into emb_list.
  inline void quick_aggregate(unsigned level, EmbeddingList& emb_list) {
    for (auto i = 0; i < numThreads; i++) qp_localmaps.getLocal(i)->clear();
    #pragma omp parallel for
    for (size_t pos = 0; pos < emb_list.size(); pos ++) {
      QpMapDomain *lmap = qp_localmaps.getLocal();
      EdgeEmbedding emb(level+1);
      get_embedding(level, pos, emb_list, emb);
      unsigned n = emb.size();
      QPattern qp(emb, true);
      bool qp_existed = false;
      auto it = lmap->find(qp);
      if (it == lmap->end()) {
        // first time this thread sees the pattern: create its support
        (*lmap)[qp] = new DomainSupport(n);
        (*lmap)[qp]->set_threshold(threshold);
        emb_list.set_pid(pos, qp.get_id());
      } else {
        qp_existed = true;
        emb_list.set_pid(pos, (it->first).get_id());
      }
      for (unsigned i = 0; i < n; i ++) {
        // only grow a domain that has not yet reached the threshold
        if ((*lmap)[qp]->has_domain_reached_support(i) == false)
          (*lmap)[qp]->add_vertex(i, emb.get_vertex(i));
      }
      if (qp_existed) qp.clean(); // drop the duplicate pattern's storage
    }
  }
void insert_id_map(int qp_id, int cg_id) {
std::unique_lock<std::mutex> lock(map_mutex);
id_map.insert(std::make_pair(qp_id, cg_id));
}
  // aggregate quick patterns into canonical patterns.
  // construct id_map from quick pattern ID (qp_id) to canonical pattern ID (cg_id)
  // NOTE(review): the outer loop over qp_map is serial although supports go
  // through thread-local maps (getLocal()); confirm intended parallelism.
  void canonical_aggregate() {
    id_map.clear();
    for (auto i = 0; i < numThreads; i++) cg_localmaps.getLocal(i)->clear();
    for (std::pair<QPattern, DomainSupport*> element : qp_map) {
      CgMapDomain *lmap = cg_localmaps.getLocal();
      unsigned num_domains = element.first.get_size();
      CPattern cg(element.first); // canonical form of this quick pattern
      int qp_id = element.first.get_id();
      int cg_id = cg.get_id();
      insert_id_map(qp_id, cg_id);
      auto it = lmap->find(cg);
      if (it == lmap->end()) {
        (*lmap)[cg] = new DomainSupport(num_domains);
        (*lmap)[cg]->set_threshold(threshold);
        element.first.set_cgid(cg.get_id());
      } else {
        element.first.set_cgid((it->first).get_id());
      }
      // map each canonical position to the union of the equivalent quick
      // pattern positions' domains
      VertexPositionEquivalences equivalences;
      element.first.get_equivalences(equivalences);
      for (unsigned i = 0; i < num_domains; i ++) {
        if ((*lmap)[cg]->has_domain_reached_support(i) == false) {
          unsigned qp_idx = cg.get_quick_pattern_index(i);
          assert(qp_idx >= 0 && qp_idx < num_domains);
          UintSet equ_set = equivalences.get_equivalent_set(qp_idx);
          for (unsigned idx : equ_set) {
            DomainSupport *support = element.second;
            if (support->has_domain_reached_support(idx) == false) {
              // stop merging as soon as the threshold is reached
              bool reached_threshold = (*lmap)[cg]->add_vertices(i, support->domain_sets[idx]);
              if (reached_threshold) break;
            } else {
              (*lmap)[cg]->set_domain_frequent(i);
              break;
            }
          }
        }
      }
      cg.clean();
    }
  }
  // Merge all thread-local single-edge pattern maps into the global
  // init_map (thread 0's map is taken as the starting point).
  inline void merge_init_map() {
    init_map = *(init_localmaps.getLocal(0));
    for (auto i = 1; i < numThreads; i++) {
      for (auto element : *init_localmaps.getLocal(i)) {
        DomainSupport *support = element.second;
        if (init_map.find(element.first) == init_map.end()) {
          init_map[element.first] = support;
        } else {
          // NOTE(review): the inner 'unsigned i' shadows the thread index
          // 'i' of the outer loop; legal but easy to misread.
          for (unsigned i = 0; i < 2; i ++) {
            if (!init_map[element.first]->has_domain_reached_support(i)) {
              if (support->has_domain_reached_support(i))
                init_map[element.first]->set_domain_frequent(i);
              else init_map[element.first]->add_vertices(i, support->domain_sets[i]);
            }
          }
        }
      }
    }
  }
  // Merge all thread-local quick-pattern maps into the global qp_map;
  // 'num_domains' is the number of vertex positions per pattern.
  inline void merge_qp_map(unsigned num_domains) {
    qp_map.clear();
    qp_map = *(qp_localmaps.getLocal(0));
    for (auto i = 1; i < numThreads; i++) {
      const QpMapDomain *lmap = qp_localmaps.getLocal(i);
      // first pass: adopt patterns qp_map has not seen yet
      for (auto element : *lmap) {
        if (qp_map.find(element.first) == qp_map.end())
          qp_map[element.first] = element.second;
      }
      // second pass: fold this thread's supports into the adopted entries
      // (the 'qp_map[...] != support' check skips a support merged with itself)
      for (std::pair<QPattern, DomainSupport*> element : *lmap) {
        DomainSupport *support = element.second;
        for (unsigned i = 0; i < num_domains; i ++) {
          if (!qp_map[element.first]->has_domain_reached_support(i) && qp_map[element.first] != support) {
            if (support->has_domain_reached_support(i))
              qp_map[element.first]->set_domain_frequent(i);
            else qp_map[element.first]->add_vertices(i, support->domain_sets[i]);
          }
        }
      }
    }
  }
  // Merge all thread-local canonical-pattern maps into the global cg_map;
  // mirrors merge_qp_map() but for canonical patterns.
  inline void merge_cg_map(unsigned num_domains) {
    cg_map.clear();
    cg_map = *(cg_localmaps.getLocal(0));
    for (auto i = 1; i < numThreads; i++) {
      const CgMapDomain *lmap = cg_localmaps.getLocal(i);
      // first pass: adopt patterns cg_map has not seen yet
      for (auto element : *lmap) {
        if (cg_map.find(element.first) == cg_map.end())
          cg_map[element.first] = element.second;
      }
      // second pass: fold this thread's supports into the adopted entries
      for (std::pair<CPattern, DomainSupport*> element : *lmap) {
        DomainSupport *support = element.second;
        for (unsigned i = 0; i < num_domains; i ++) {
          if (!cg_map[element.first]->has_domain_reached_support(i) && cg_map[element.first] != support) {
            if (support->has_domain_reached_support(i))
              cg_map[element.first]->set_domain_frequent(i);
            else cg_map[element.first]->add_vertices(i, support->domain_sets[i]);
          }
        }
      }
    }
  }
// Filtering for FSM
#ifdef ENABLE_LABEL
// Remove single-edge embeddings whose label pattern is infrequent, and record
// which edges of the graph remain frequent. Finally compacts the level-1
// embedding list in place using a prefix sum over the keep-flags.
inline void init_filter(EmbeddingList& emb_list) {
  UintList is_frequent_emb(emb_list.size(), 0);
  // Pass 1: flag embeddings whose (src,dst)-label pattern meets support.
  #pragma omp parallel for
  for (size_t pos = 0; pos < emb_list.size(); pos ++) {
    VertexId src = emb_list.get_idx(1, pos);
    VertexId dst = emb_list.get_vid(1, pos);
    auto src_label = graph->getData(src);
    auto dst_label = graph->getData(dst);
    InitPattern key = get_init_pattern(src_label, dst_label);
    if (init_map[key]->get_support()) is_frequent_emb[pos] = 1;
  }
  //assert(emb_list.size()*2 == graph->num_edges()); // symmetric graph
  is_frequent_edge.resize(graph->num_edges());
  std::fill(is_frequent_edge.begin(), is_frequent_edge.end(), 0);
  // Pass 2: mark both directed copies of each frequent edge.
  // NOTE(review): edge_map::operator[] would insert a new entry if the edge
  // were missing; assumes construct_edgemap() has populated every edge.
  #pragma omp parallel for
  for (size_t pos = 0; pos < emb_list.size(); pos ++) {
    if (is_frequent_emb[pos]) {
      VertexId src = emb_list.get_idx(1, pos);
      VertexId dst = emb_list.get_vid(1, pos);
      unsigned eid0 = edge_map[OrderedEdge(src,dst)];
      unsigned eid1 = edge_map[OrderedEdge(dst,src)];
      // CAS makes the concurrent flag writes race-free.
      __sync_bool_compare_and_swap(&is_frequent_edge[eid0], 0, 1);
      __sync_bool_compare_and_swap(&is_frequent_edge[eid1], 0, 1);
    }
  }
  std::cout << "Number of frequent edges: " << count(is_frequent_edge.begin(), is_frequent_edge.end(), 1) << "\n";
  // Pass 3: compact the surviving embeddings to the front of the list.
  UintList indices = parallel_prefix_sum(is_frequent_emb);
  VertexList vid_list0 = emb_list.get_idx_list(1);
  VertexList vid_list1 = emb_list.get_vid_list(1);
  #pragma omp parallel for
  for (size_t pos = 0; pos < emb_list.size(); pos ++) {
    if (is_frequent_emb[pos]) {
      VertexId src = vid_list0[pos];
      VertexId dst = vid_list1[pos];
      unsigned start = indices[pos];
      emb_list.set_vid(1, start, dst);
      emb_list.set_idx(1, start, src);
    }
  }
  emb_list.remove_tail(indices.back());
}
#endif
// Remove embeddings at the given level whose canonical pattern (looked up
// through quick-pattern id -> canonical id) is not frequent, then compact the
// level's embedding list in place. The compaction pass is sequential because
// set_* writes to positions other threads may still be reading from.
inline void filter(unsigned level, EmbeddingList &emb_list) {
  UintList is_frequent_emb(emb_list.size(), 0);
  #pragma omp parallel for
  for (size_t pos = 0; pos < emb_list.size(); pos ++) {
    unsigned qp_id = emb_list.get_pid(pos);
    unsigned cg_id = id_map.at(qp_id);
    if (domain_support_map.at(cg_id))
      is_frequent_emb[pos] = 1;
  }
  // Prefix sum gives the compacted destination index of each kept embedding.
  UintList indices = parallel_prefix_sum(is_frequent_emb);
  // Snapshot the level's columns before overwriting them in place.
  VertexList vid_list = emb_list.get_vid_list(level);
  UintList idx_list = emb_list.get_idx_list(level);
  ByteList his_list = emb_list.get_his_list(level);
  for (size_t pos = 0; pos < emb_list.size(); pos ++) {
    if (is_frequent_emb[pos]) {
      unsigned start = indices[pos];
      VertexId vid = vid_list[pos];
      IndexTy idx = idx_list[pos];
      BYTE his = his_list[pos];
      emb_list.set_idx(level, start, idx);
      emb_list.set_vid(level, start, vid);
      emb_list.set_his(level, start, his);
    }
  }
  emb_list.remove_tail(indices.back());
}
// Set the minimum support (frequency) threshold used by the FSM solver.
inline void set_threshold(const unsigned minsup) { threshold = minsup; }
inline void printout_agg(const CgMapFreq &cg_map) {
for (auto it = cg_map.begin(); it != cg_map.end(); ++it)
std::cout << "{" << it->first << " --> " << it->second << std::endl;
}
// Print pattern statistics: the map sizes first, then one line per canonical
// pattern with its domain support.
inline void printout_agg() {
  std::cout << "num_patterns: " << cg_map.size() << " num_quick_patterns: " << qp_map.size() << "\n";
  // Gather supports in map order, then print them back in the same order.
  BoolVec support(cg_map.size());
  size_t slot = 0;
  for (const auto &entry : cg_map)
    support[slot++] = entry.second->get_support();
  slot = 0;
  for (const auto &entry : cg_map) {
    std::cout << "{" << entry.first << " --> " << support[slot] << std::endl;
    slot++;
  }
}
// Rebuild domain_support_map from cg_map and return the number of canonical
// patterns that meet the support threshold.
inline unsigned support_count() {
  domain_support_map.clear();
  unsigned num_frequent = 0;
  for (const auto &entry : cg_map) {
    const bool frequent = entry.second->get_support();
    domain_support_map.insert(std::make_pair(entry.first.get_id(), frequent));
    if (frequent) num_frequent ++;
  }
  return num_frequent;
}
// construct edge-map for later use. May not be necessary if Galois has this support
// Build edge_map: (src,dst) -> edge index, by walking the CSR adjacency of
// every vertex. Needed so init_filter() can translate an embedding's vertex
// pair back to the edge id of the underlying graph.
void construct_edgemap() {
  for (auto src = 0; src < graph->num_vertices(); src ++) {
    IndexT row_begin = graph->edge_begin(src);
    IndexT row_end = graph->edge_end(src);
    for (IndexT e = row_begin; e < row_end; e++) {
      auto dst = graph->getEdgeDst(e);
      OrderedEdge edge(src, dst);
      // 'e' is the global edge index within the CSR edge array.
      edge_map.insert(std::pair<OrderedEdge, unsigned>(edge, e));
    }
  }
}
private:
unsigned threshold;
InitMap init_map;
UintMap id_map;
unsigned max_size;
int numThreads;
FreqMap freq_support_map;
DomainMap domain_support_map;
std::map<OrderedEdge, unsigned> edge_map;
std::set<std::pair<VertexId,VertexId> > freq_edge_set;
std::vector<unsigned> is_frequent_edge;
LocalInitMap init_localmaps; // initialization map, only used for once, no need to clear
LocalQpMapDomain qp_localmaps; // quick pattern local map for each thread
LocalCgMapDomain cg_localmaps; // canonical pattern local map for each thread
QpMapDomain qp_map; // quick pattern map
CgMapDomain cg_map; // canonical graph map
std::mutex map_mutex;
// Build a canonical (ordered) label pair so that (a,b) and (b,a) map to the
// same initial pattern key.
inline InitPattern get_init_pattern(BYTE src_label, BYTE dst_label) {
  return (src_label <= dst_label) ? std::make_pair(src_label, dst_label)
                                  : std::make_pair(dst_label, src_label);
}
// Reconstruct a full edge embedding by following the idx back-pointers from
// position 'pos' at 'level' down to level 0, filling 'emb' from the last
// element back to the first.
inline void get_embedding(unsigned level, unsigned pos, const EmbeddingList& emb_list, EdgeEmbedding &emb) {
  VertexId vid = emb_list.get_vid(level, pos);
  IndexTy idx = emb_list.get_idx(level, pos);
  BYTE his = emb_list.get_his(level, pos);
  BYTE lab = graph->getData(vid);
  ElementType ele(vid, 0, lab, his);
  emb.set_element(level, ele);
  // Walk the parent chain: each idx points to the row in the level below.
  for (unsigned l = 1; l < level; l ++) {
    vid = emb_list.get_vid(level-l, idx);
    his = emb_list.get_his(level-l, idx);
    lab = graph->getData(vid);
    ElementType ele(vid, 0, lab, his);
    emb.set_element(level-l, ele);
    idx = emb_list.get_idx(level-l, idx);
  }
  // At level 0 the remaining idx IS the root vertex id (level-1 idx column
  // stores vertex ids directly).
  lab = graph->getData(idx);
  ElementType ele0(idx, 0, lab, 0);
  emb.set_element(0, ele0);
}
// Fast automorphism check for small embeddings (size <= 3): returns true when
// extending 'emb' by edge (src,dst) would create a duplicate (automorphic)
// embedding that should be pruned. Sets 'existed' when dst is already in the
// embedding. Larger embeddings must use is_edge_automorphism().
bool is_quick_automorphism(unsigned size, const EdgeEmbedding& emb, BYTE history, VertexId src, VertexId dst, BYTE& existed) {
  // Canonical-order pruning against the first two vertices.
  if (dst <= emb.get_vertex(0)) return true;
  if (dst == emb.get_vertex(1)) return true;
  if (history == 0 && dst < emb.get_vertex(1)) return true;
  if (size == 2) {
  } else if (size == 3) {
    // Case analysis on where the third vertex was attached (its history).
    if (history == 0 && emb.get_history(2) == 0 && dst <= emb.get_vertex(2)) return true;
    if (history == 0 && emb.get_history(2) == 1 && dst == emb.get_vertex(2)) return true;
    if (history == 1 && emb.get_history(2) == 1 && dst <= emb.get_vertex(2)) return true;
    if (dst == emb.get_vertex(2)) existed = 1;
    //if (!existed && max_size < 4) return true;
  } else {
    std::cout << "Error: should go to detailed check\n";
  }
  return false;
}
// General automorphism check: returns true when adding edge (src,dst) to
// 'emb' would produce a duplicate embedding. Delegates to the quick check
// for size < 3. 'vertex_set' holds the vertices already in the embedding.
bool is_edge_automorphism(unsigned size, const EdgeEmbedding& emb, BYTE history, VertexId src, VertexId dst, BYTE& existed, const std::set<VertexId>& vertex_set) {
  if (size < 3) return is_quick_automorphism(size, emb, history, src, dst, existed);
  // check with the first element
  if (dst <= emb.get_vertex(0)) return true;
  if (history == 0 && dst <= emb.get_vertex(1)) return true;
  // check loop edge
  if (dst == emb.get_vertex(emb.get_history(history))) return true;
  if (vertex_set.find(dst) != vertex_set.end()) existed = 1;
  // check to see if there already exists the vertex added;
  // if so, just allow to add edge which is (smaller id -> bigger id)
  if (existed && src > dst) return true;
  // Finally, the new edge must compare strictly greater than every edge that
  // was added after position 'history', otherwise it is a duplicate ordering.
  std::pair<VertexId, VertexId> added_edge(src, dst);
  for (unsigned index = history + 1; index < emb.size(); ++index) {
    std::pair<VertexId, VertexId> edge;
    edge.first = emb.get_vertex(emb.get_history(index));
    edge.second = emb.get_vertex(index);
    //assert(edge.first != edge.second);
    int cmp = compare(added_edge, edge);
    if(cmp <= 0) return true;
  }
  return false;
}
inline void swap(std::pair<VertexId, VertexId>& pair) {
if (pair.first > pair.second) {
VertexId tmp = pair.first;
pair.first = pair.second;
pair.second = tmp;
}
}
// Lexicographic comparison of two edges after normalizing each so that
// first <= second. Returns <0, 0, or >0 (note: mutates both arguments).
inline int compare(std::pair<VertexId, VertexId>& oneEdge, std::pair<VertexId, VertexId>& otherEdge) {
  swap(oneEdge);
  swap(otherEdge);
  if (oneEdge.first != otherEdge.first) return oneEdge.first - otherEdge.first;
  return oneEdge.second - otherEdge.second;
}
};
#endif // EDGE_MINER_HPP_
|
DRB096-doall2-taskloop-collapse-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
The two loops below are parallelized with nested "omp parallel for"
directives; both loop index variables are private, so each array element
is written by exactly one iteration and there is no data race.
(The taskloop-collapse variant of this kernel uses "omp taskloop
collapse(2)", which requires an OpenMP 4.5 compiler.)
*/
#include <stdio.h>
#include <omp.h>
int a[100][100];
int main()
{
  int i;
  int j;
  {
    /* Outer loop parallelized over i; both i and j are explicitly private. */
    #pragma omp parallel for private (i,j)
    for (i = 0; i <= 99; i += 1) {
      /* Nested parallel region over j. */
      #pragma omp parallel for private (j)
      for (j = 0; j <= 99; j += 1) {
        /* Each (i,j) cell is written by exactly one iteration: no race. */
        a[i][j] += 1;
      }
    }
  }
  printf("a[50][50]=%d\n",a[50][50]);
  return 0;
}
|
q_rhashmap.tmpl.c | // TODO: Any advantages to size being a prime number?
/*
* Copyright (c) 2019 Ramesh Subramonian <subramonian@gmail.com>
* All rights reserved.
*
* Use is subject to license terms, as specified in the LICENSE file.
This code was initially forked from the following. Subsequent to that,
significant modifications have been made and new functionality added,
bugs fixes, ....
* Copyright (c) 2017 Mindaugas Rasiukevicius <rmind at noxt eu>
* All rights reserved.
*
* Use is subject to license terms, as specified in the LICENSE file.
*/
/*
* A general purpose hash table, using the Robin Hood hashing algorithm.
*
* Conceptually, it is a hash table using linear probing on lookup with
* a particular displacement strategy on inserts. The central idea of
* the Robin Hood hashing algorithm is to reduce the variance of the
* probe sequence length (PSL).
*
* Reference:
*
* Pedro Celis, 1986, Robin Hood Hashing, University of Waterloo
* https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
*/
#include "_q_rhashmap___KV__.h"
#ifdef DEBUG
/* Debug invariant check: the stored probe-sequence length (PSL) of a
 * non-empty bucket must equal the wrap-around distance from the bucket's
 * home slot (recomputed from its hash) to its actual slot i. */
static int __attribute__((__unused__))
validate_psl_p(
    q_rhashmap___KV___t *hmap,
    const q_rh_bucket___KV___t *bucket,
    uint32_t i
    )
{
  uint32_t base_i = fast_rem32(bucket->hash, hmap->size, hmap->divinfo);
  uint32_t diff = (base_i > i) ? hmap->size - base_i + i : i - base_i;
  /* An empty bucket (key == 0) is trivially valid. */
  return bucket->key == 0 || diff == bucket->psl;
}
#endif
/* Checks whether resize is needed. If so, calculates newsize */
/* Resize needed when occupancy is too high or too low */
/*
 * Decide whether the table must be resized and, if so, compute the new size.
 *
 * nitems     current number of stored items
 * minsize    smallest size the table may shrink to
 * size       current table size
 * decreasing true  => just removed an element, check for sparsity;
 *            false => just added an element, check for denseness
 * ptr_newsize OUT: proposed new size (0 when no resize needed)
 * ptr_resize  OUT: whether a resize is needed
 */
static int
calc_new_size(
    uint32_t nitems,
    uint32_t minsize,
    uint32_t size,
    bool decreasing,
    uint32_t *ptr_newsize,
    bool *ptr_resize
    )
{
  int status = 0;
  *ptr_resize = false;
  *ptr_newsize = 0;
  uint32_t threshold;
  if ( decreasing ) {
    /*
     * If the load factor is less than threshold, then shrink by
     * halving the size, but not more than the minimum size.
     */
    threshold = (uint32_t)(LOW_WATER_MARK * size);
    if ( ( nitems > minsize ) && ( nitems < threshold ) ) {
      *ptr_resize = true;
      *ptr_newsize = MAX(size >> 1, minsize);
    }
  }
  else {
    /*
     * If the load factor is more than the threshold, grow by repeated
     * doubling (bounded by MAX_GROWTH_STEP per step) until the load
     * factor drops back below the threshold.
     */
    threshold = (uint32_t)(0.85 * (float)size);
    if ( nitems > threshold ) {
      *ptr_resize = true;
      /* BUG FIX: the previous code doubled from the *original* size on
       * every iteration, so when a single doubling was not enough the
       * loop never made progress and spun forever. Track the candidate
       * size across iterations instead (size_t to avoid u32 overflow). */
      size_t newsize = size;
      for ( ; nitems > threshold; ) {
        const size_t grow_limit = newsize + MAX_GROWTH_STEP;
        newsize = MIN(newsize << 1, grow_limit);
        threshold = (uint32_t)(0.85 * (float)newsize);
      }
      *ptr_newsize = (uint32_t)newsize;
    }
  }
  return status;
}
/*
* q_rhashmap_get: lookup an value given the key.
*
* => If key is present, *ptr_val is set to its associated value
* and is_found is set to true
* => If key is absent, *ptr_val is set to 0
* and is_found is set to false
*/
int
q_rhashmap_get___KV__(
    q_rhashmap___KV___t *hmap,
    __KEYTYPE__ key,
    __VALTYPE__ *ptr_val,
    bool *ptr_is_found
    )
{
  int status = 0;
  const uint32_t hash = murmurhash3(&key, sizeof(__KEYTYPE__), hmap->hashkey);
  uint32_t n = 0;                 /* probe distance travelled so far */
  uint32_t i = fast_rem32(hash, hmap->size, hmap->divinfo);  /* home slot */
  q_rh_bucket___KV___t *bucket = NULL;
  *ptr_is_found = false;
  *ptr_val = 0;
  /*
   * Lookup is a linear probe.
   */
  register uint64_t divinfo = hmap->divinfo;
  register uint64_t size = hmap->size;
  for ( ; ; ) {
    bucket = &hmap->buckets[i];
    #ifdef DEBUG
    ASSERT(validate_psl_p(hmap, bucket, i));
    if ( ( bucket->hash == hash ) && ( bucket->key == key ) )
    #else
    if ( bucket->key == key )
    #endif
    {
      *ptr_val = bucket->val;
      *ptr_is_found = true;
      break;
    }
    /*
     * Stop probing if we hit an empty bucket; also, if we hit a
     * bucket with PSL lower than the distance from the base location,
     * then it means that we found the "rich" bucket which should
     * have been captured, if the key was inserted -- see the central
     * point of the algorithm in the insertion function.
     */
    if ( ( !bucket->key ) || ( n > bucket->psl ) ) {
      *ptr_is_found = false;
      break;
    }
    n++;
    /* Continue to the next bucket. */
    i = fast_rem32(i + 1, size, divinfo);
  }
  return status;
}
//------------------------------------------------------
/* Batched parallel lookup: for each of the nkeys keys, probe from the
 * precomputed start slot locs[j] and write the found value (or 0) to
 * vals[j]. 'hashes' is only consulted in DEBUG builds. */
int
q_rhashmap_getn___KV__(
    q_rhashmap___KV___t *hmap, // INPUT
    __KEYTYPE__ *keys, // INPUT: [nkeys]
    uint32_t *hashes, // INPUT [nkeys]
    uint32_t *locs, // INPUT [nkeys]
    __VALTYPE__ *vals, // OUTPUT [nkeys]
    uint32_t nkeys // INPUT
    // TODO P4 we won't do is_found for the first implementation
    )
{
  int status = 0;
  int chunk_size = 1024;
  /* Read-only probing, so iterations are independent and safe to run
   * in parallel. */
  #pragma omp parallel for schedule(static, chunk_size)
  for ( uint32_t j = 0; j < nkeys; j++ ) {
    uint32_t n = 0;             /* probe distance for this key */
    uint32_t i = locs[j];       /* caller-precomputed home slot */
    #ifdef DEBUG
    uint32_t hash = hashes[j];
    #endif
    q_rh_bucket___KV___t *bucket = NULL;
    vals[j] = 0;                /* default when the key is absent */
    for ( ; ; ) {
      bucket = &hmap->buckets[i];
      #ifdef DEBUG
      ASSERT(validate_psl_p(hmap, bucket, i));
      if ( ( bucket->hash == hash ) && ( bucket->key == keys[j] ) )
      #else
      if ( bucket->key == keys[j] )
      #endif
      {
        vals[j] = bucket->val;
        break; // found
      }
      /* Empty bucket, or a richer bucket than our probe distance:
       * the key cannot be in the table. */
      if (!bucket->key || n > bucket->psl) {
        break; // not found
      }
      n++;
      i = fast_rem32(i + 1, hmap->size, hmap->divinfo);
    }
  }
  return status;
}
/*
* rhashmap_insert: internal rhashmap_put(), without the resize.
*/
/* Core Robin Hood insert (no resize). If the key exists, applies the
 * prescribed update (SET or ADD) and returns the old value through
 * ptr_oldval; otherwise inserts, displacing "richer" buckets along the
 * probe path. Key 0 is silently ignored (reserved as the empty marker). */
static int
q_rhashmap_insert(
    q_rhashmap___KV___t *hmap,
    __KEYTYPE__ key,
    __VALTYPE__ val,
    int update_type,
    __VALTYPE__ *ptr_oldval,
    int *ptr_num_probes
    )
{
  int status = 0;
  const uint32_t hash = murmurhash3(&key, sizeof(__KEYTYPE__), hmap->hashkey);
  q_rh_bucket___KV___t *bucket, entry;
  uint32_t i;
  int num_probes = 0;
  register uint32_t size = hmap->size;
  register uint64_t divinfo = hmap->divinfo;
  bool key_updated = false;
  // 0 is not a valid value for a key, TODO P3 Document this better
  // Note that we do NOT throw an error
  if ( key == 0 ) { return status; }
  // Setup the bucket entry.
  entry.key = key;
  #ifdef DEBUG
  entry.hash = hash;
  #endif
  entry.val = val;
  entry.psl = 0;
  *ptr_oldval = 0;
  /*
   * From the paper: "when inserting, if a record probes a location
   * that is already occupied, the record that has traveled longer
   * in its probe sequence keeps the location, and the other one
   * continues on its probe sequence" (page 12).
   *
   * Basically: if the probe sequence length (PSL) of the element
   * being inserted is greater than PSL of the element in the bucket,
   * then swap them and continue.
   */
  i = fast_rem32(hash, hmap->size, hmap->divinfo);
  for ( ; ; ) {
    bucket = &hmap->buckets[i];
    // If there is a key in the bucket.
    if ( bucket->key ) {
      #ifdef DEBUG
      ASSERT(validate_psl_p(hmap, bucket, i));
      if ( (bucket->hash == hash) && (bucket->key == key) )
      #else
      if ( bucket->key == key )
      #endif
      {
        key_updated = true;
        // do the prescribed update
        *ptr_oldval = bucket->val;
        if ( update_type == Q_RHM_SET ) {
          bucket->val = val;
        }
        else if ( update_type == Q_RHM_ADD ) {
          bucket->val += val;
        }
        else {
          go_BYE(-1);
        }
        break;
      }
      // We found a "rich" bucket. Capture its location.
      if (entry.psl > bucket->psl) {
        q_rh_bucket___KV___t tmp;
        /*
         * Place our key-value pair by swapping the "rich"
         * bucket with our entry. Copy the structures.
         */
        tmp = entry;
        entry = *bucket;
        *bucket = tmp;
      }
      entry.psl++;
      /* Continue to the next bucket. */
      #ifdef DEBUG
      ASSERT(validate_psl_p(hmap, bucket, i));
      #endif
      num_probes++;
      i = fast_rem32(i + 1, size, divinfo);
    }
    else {
      // Empty bucket found: the probe ends here.
      break;
    }
  }
  if ( !key_updated ) {
    // Found a free bucket: insert the entry.
    *bucket = entry; // copy
    hmap->nitems++;
  }
  #ifdef DEBUG
  ASSERT(validate_psl_p(hmap, bucket, i));
  #endif
  *ptr_num_probes = num_probes;
BYE:
  return status;
}
/* Reallocate the bucket array to 'newsize' and re-insert every existing
 * entry. Also re-seeds the hash key so probe sequences change on resize. */
static int
q_rhashmap_resize(
    q_rhashmap___KV___t *hmap,
    size_t newsize
    )
{
  int status = 0;
  q_rh_bucket___KV___t *oldbuckets = hmap->buckets;
  const size_t oldsize = hmap->size;
  q_rh_bucket___KV___t *newbuckets = NULL;
  const size_t len = newsize * sizeof(q_rh_bucket___KV___t);
  int num_probes = 0;
  // some obvious logical checks
  if ( ( oldbuckets == NULL ) && ( oldsize != 0 ) ) { go_BYE(-1); }
  if ( ( oldbuckets != NULL ) && ( oldsize == 0 ) ) { go_BYE(-1); }
  if ( ( newsize <= 0 ) || ( newsize >= UINT_MAX ) ) { go_BYE(-1); }
  if ( newsize < hmap->nitems ) { go_BYE(-1); }
  // allocate buckets.
  newbuckets = malloc(len);
  return_if_malloc_failed(newbuckets);
  /* Zero fill: key == 0 marks an empty bucket. */
  memset(newbuckets, '\0', len);
  hmap->buckets = newbuckets;
  hmap->size = newsize;
  hmap->nitems = 0;  /* re-insertion below restores the count */
  // generate a new hash key/seed every time we resize the hash table.
  hmap->divinfo = fast_div32_init(newsize);
  /* NOTE(review): random() returns long; "random() << 32" is UB if long is
   * 32 bits -- presumably this targets LP64 platforms only. Confirm. */
  hmap->hashkey ^= random() | (random() << 32);
  for ( uint32_t i = 0; i < oldsize; i++) {
    const q_rh_bucket___KV___t *bucket = &oldbuckets[i];
    /* Skip the empty buckets. */
    if ( !bucket->key ) { continue; }
    __VALTYPE__ oldval; // not needed except for signature
    q_rhashmap_insert(hmap, bucket->key, bucket->val, Q_RHM_SET, &oldval,
        &num_probes);
  }
  free_if_non_null(oldbuckets);
BYE:
  return status;
}
/*
* rhashmap_put: insert a value given the key.
*
* => If the key is already present, return its associated value.
* => Otherwise, on successful insert, return the given value.
*/
int
q_rhashmap_put___KV__(
    q_rhashmap___KV___t *hmap,
    __KEYTYPE__ key,
    __VALTYPE__ val,
    int update_type,
    __VALTYPE__ *ptr_oldval,
    int *ptr_num_probes
    )
{
  int status = 0;
  /* decreasing == false: we are about to add, so check for denseness. */
  uint32_t newsize; bool resize, decreasing = false;
  status = calc_new_size(hmap->nitems, hmap->minsize, hmap->size,
      decreasing, &newsize, &resize);
  /* Grow first (if needed), then do the actual insert/update. */
  if ( resize ) {
    status = q_rhashmap_resize(hmap, newsize); cBYE(status);
  }
  status = q_rhashmap_insert(hmap, key, val, update_type,
      ptr_oldval, ptr_num_probes);
  cBYE(status);
BYE:
  return status;
}
/*
* rhashmap_del: remove the given key and return its value.
*
* => If key was present, return its associated value; otherwise NULL.
*/
int
q_rhashmap_del___KV__(
    q_rhashmap___KV___t *hmap,
    __KEYTYPE__ key,
    __VALTYPE__ *ptr_oldval,
    bool *ptr_is_found
    )
{
  int status = 0;
  const uint32_t hash = murmurhash3(&key, sizeof(__KEYTYPE__), hmap->hashkey);
  uint32_t n = 0, i = fast_rem32(hash, hmap->size, hmap->divinfo);
  q_rh_bucket___KV___t *bucket;
  *ptr_oldval = 0;
  /* decreasing == true: after a delete we may want to shrink. */
  bool decreasing = true, resize; uint32_t newsize;
probe:
  /*
   * The same probing logic as in the lookup function.
   */
  bucket = &hmap->buckets[i];
  /* Empty bucket: the key is not in the table. */
  if (bucket->key == 0 ) {
    *ptr_is_found = false; goto BYE;
  }
  /* Richer bucket than our probe distance: key cannot be present. */
  if ( n > bucket->psl ) {
    *ptr_is_found = false; goto BYE;
  }
  #ifdef DEBUG
  ASSERT(validate_psl_p(hmap, bucket, i));
  if ( ( bucket->hash != hash ) || ( bucket->key != key ) )
  #else
  if ( bucket->key != key )
  #endif
  {
    /* Continue to the next bucket. */
    i = fast_rem32(i + 1, hmap->size, hmap->divinfo);
    n++;
    goto probe;
  }
  // Free the bucket.
  bucket->key = 0;
  *ptr_oldval = bucket->val;
  *ptr_is_found = true;
  bucket->val = 0;
  #ifdef DEBUG
  bucket->hash = 0;
  #endif
  bucket->psl = 0;
  hmap->nitems--;
  /*
   * The probe sequence must be preserved in the deletion case.
   * Use the backwards-shifting method to maintain low variance.
   */
  for ( ; ; ) {
    q_rh_bucket___KV___t *nbucket = NULL;
    /* Clear the current hole... */
    bucket->key = 0;
    bucket->val = 0;
    #ifdef DEBUG
    bucket->hash = 0;
    #endif
    bucket->psl = 0;
    i = fast_rem32(i + 1, hmap->size, hmap->divinfo);
    nbucket = &hmap->buckets[i];
    #ifdef DEBUG
    ASSERT(validate_psl_p(hmap, nbucket, i));
    #endif
    /*
     * Stop if we reach an empty bucket or hit a key which
     * is in its base (original) location.
     */
    if (!nbucket->key || nbucket->psl == 0) {
      break;
    }
    /* ...and shift the next bucket back one slot, reducing its PSL. */
    nbucket->psl--;
    *bucket = *nbucket;
    bucket = nbucket;
  }
  /* Possibly shrink the table now that an item has been removed. */
  status = calc_new_size(hmap->nitems, hmap->minsize, hmap->size,
      decreasing, &newsize, &resize);
  cBYE(status);
  if ( resize ) {
    status = q_rhashmap_resize(hmap, newsize); cBYE(status);
  }
BYE:
  return status;
}
/*
* rhashmap_create: construct a new hash table.
*
* => If size is non-zero, then pre-allocate the given number of buckets;
* => If size is zero, then a default minimum is used.
*/
/*
 * Construct a new hash table. If 'size' is non-zero it pre-allocates at
 * least that many buckets; otherwise the compiled-in default minimum is
 * used. Returns NULL on failure (nothing is leaked).
 */
q_rhashmap___KV___t *
q_rhashmap_create___KV__(
    size_t size
    )
{
  q_rhashmap___KV___t *hmap = NULL;
  hmap = calloc(1, sizeof(q_rhashmap___KV___t));
  if ( hmap == NULL ) { return NULL; }
  /* Never start below the compiled-in minimum bucket count. */
  hmap->minsize = MAX(size, HASH_INIT_SIZE);
  if ( q_rhashmap_resize(hmap, hmap->minsize) != 0) {
    free(hmap);
    return NULL;
  }
  /* Defensive sanity checks.
   * BUG FIX: these paths previously returned without freeing hmap. */
  if ( ( hmap->buckets == NULL ) || ( hmap->size == 0 ) ) {
    WHEREAMI;
    free(hmap->buckets); /* free(NULL) is a no-op */
    free(hmap);
    return NULL;
  }
  return hmap;
}
/*
* rhashmap_destroy: free the memory used by the hash table.
*
* => It is the responsibility of the caller to remove elements if needed.
*/
/*
 * Free the memory used by the hash table. The caller is responsible for
 * removing/freeing element payloads first if needed.
 */
void
q_rhashmap_destroy___KV__(
    q_rhashmap___KV___t *ptr_hmap
    )
{
  /* Release bucket storage, scrub the header (so accidental reuse of a
   * dangling handle fails loudly), then release the header itself. */
  free(ptr_hmap->buckets);
  /* FIX: 'buckets' was previously assigned twice (NULL, then 0). */
  ptr_hmap->buckets = NULL;
  ptr_hmap->size    = 0;
  ptr_hmap->nitems  = 0;
  ptr_hmap->divinfo = 0;
  ptr_hmap->hashkey = 0;
  ptr_hmap->minsize = 0;
  free(ptr_hmap);
  /* FIX: the old "ptr_hmap = NULL" only nulled the local copy of the
   * parameter and had no effect on the caller; removed. */
}
// Given
// (1) a set of keys
// (2) hash for each key
// (3) value for each key
// Update as follows. If j^{th} key found, then
// (A) set is_found[j] to true
// (B) update value with new value provided (either set or add)
// Else, set is_found[j] to false
int
q_rhashmap_putn___KV__(
    q_rhashmap___KV___t *hmap, // INPUT
    int update_type, // INPUT
    __KEYTYPE__ *keys, // INPUT [nkeys]
    uint32_t *hashes, // INPUT [nkeys]
    uint32_t *locs, // INPUT [nkeys] -- starting point for probe
    uint8_t *tids, // INPUT [nkeys] -- thread who should work on it
    int nT,
    __VALTYPE__ *vals, // INPUT [nkeys]
    uint32_t nkeys, // INPUT
    uint8_t *is_founds // OUTPUT [nkeys bits] TODO: Change from byte to bit
    )
{
  int status = 0;
  int *is_new = NULL;  /* per-thread flag: saw at least one missing key */
  register uint32_t hmap_size = hmap->size;
  register uint64_t hmap_divinfo = hmap->divinfo;
  // quick sanity check
  switch ( update_type ) {
    case Q_RHM_SET : case Q_RHM_ADD : break;
    default: go_BYE(-1); break;
  }
  is_new = malloc(nT * sizeof(int));
  return_if_malloc_failed(is_new);
  for ( int i = 0; i < nT; i++ ) { is_new[i] = 0; }
  /* Phase 1 (parallel): update keys that already exist. The tids[]
   * partition guarantees no two threads touch the same key.
   * NOTE(review): if tids[j] >= the actual number of running threads,
   * key j is never processed and is_founds[j] stays uninitialized --
   * assumes the caller assigns tids in [0, #threads). Confirm. */
  #pragma omp parallel
  {
    // TODO P3 Can I avoid get_thread_num() in each iteration?
    register uint32_t mytid = omp_get_thread_num();
    for ( uint32_t j = 0; j < nkeys; j++ ) {
      // Following so that 2 threads don't deal with same key
      if ( tids[j] != mytid ) { continue; }
      #ifdef DEBUG
      register uint32_t hash = hashes[j];
      #endif
      register q_rh_bucket___KV___t *buckets = hmap->buckets;
      register __KEYTYPE__ key = keys[j];
      register __VALTYPE__ val = vals[j];
      uint32_t i = locs[j]; // fast_rem32(hash, hmap_size, hmap_divinfo);
      is_founds[j] = 0;
      uint32_t n = 0;  /* probe distance for this key */
      for ( ; ; ) { // search until found
        q_rh_bucket___KV___t *bucket = buckets + i;
        #ifdef DEBUG
        ASSERT(validate_psl_p(hmap, bucket, i));
        if ( ( bucket->hash == hash ) && ( bucket->key == key ) )
        #else
        if ( bucket->key == key )
        #endif
        {
          switch ( update_type ) {
            case Q_RHM_SET : bucket->val = val; break;
            case Q_RHM_ADD : bucket->val += val; break;
          }
          is_founds[j] = 1;
          break;
        }
        if ( ( !bucket->key ) || ( n > bucket->psl ) ) { // not found
          is_founds[j] = 0;
          break;
        }
        n++;
        i = fast_rem32(i + 1, hmap_size, hmap_divinfo);
      }
      if ( is_founds[j] == 0 ) {
        if ( is_new[mytid] == 0 ) {
          is_new[mytid] = 1;
        }
      }
    }
  }
  // Find out if new keys were provided in the above loop
  bool need_sequential_put = false;
  for ( int i = 0; i < nT; i++ ) {
    if ( is_new[i] != 0 ) { need_sequential_put = true; }
  }
  // If so, we have no choice but to put these in sequentially
  // TODO P2: Currently, we are scannning the entire list of keys,
  // looking for the ones to add. Ideally, each thread should keep
  // a list of keys to be added and we should just scan that list.
  /* Phase 2 (sequential): insert the keys that were not found. Inserts
   * can trigger a resize, so they cannot run concurrently with probes. */
  int num_new = 0;
  if ( need_sequential_put ) {
    for ( unsigned int i = 0; i < nkeys; i++ ) {
      if ( is_founds[i] == 0 ) {
        __VALTYPE__ oldval;
        int num_probes; // TODO P2 Should do this properly
        status = q_rhashmap_put___KV__(hmap, keys[i], vals[i], update_type,
            &oldval, &num_probes);
        cBYE(status);
        /* Following has been commented out because it is a wrong check
           By definition, these keys don't exist and hence oldval == 0
           if ( oldval != 0 ) { go_BYE(-1); }
           */
        num_new++;
      }
    }
    // TODO P1: Should return num_new as diagnostic information
    /* Consistency check: phase 1 claimed there were new keys. */
    if ( num_new == 0 ) { go_BYE(-1); }
  }
BYE:
  free_if_non_null(is_new);
  return status;
}
|
multmatrix.c | /*
Author: Luiz
3 Different approaches on two NxN matrices multiplication.
a) The naive way
b) With cache optimization by transposing the second matrix
c) With multiple threads and transposing the second matrix
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
/* Print an n-by-n row-major matrix, one row per line, values without
 * decimal places. */
void printMatrix(double *m, int n){
    for (int row = 0; row < n; row++){
        for (int col = 0; col < n; col++)
            printf(" %.0f ", m[row*n + col]);
        printf("\n");
    }
}
/* Fill an n-by-n row-major matrix so every row reads 0, 1, ..., n-1. */
void initMatrix(double *m, int n){
    for (int row = 0; row < n; row++)
        for (int col = 0; col < n; col++)
            m[row*n + col] = col;
}
/*
 * Return a newly allocated n-by-n transpose of m (row-major).
 * The caller owns (and must free) the returned buffer.
 * Returns NULL on allocation failure (previously the NULL result of
 * malloc was dereferenced without a check).
 */
double* transpose(double *m, int n){
    double *a = malloc((size_t)n * n * sizeof *a);
    if (a == NULL) return NULL;
    #pragma omp parallel for
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; j++){
            a[i*n + j] = m[j*n + i];
        }
    }
    return a;
}
/*
 * c = a * b for n-by-n row-major matrices, multi-threaded; b is transposed
 * into a scratch buffer so the inner product reads both operands with
 * stride 1 (cache friendly).
 * BUG FIX: the transposed copy returned by transpose() used to be leaked;
 * the scratch buffer is now allocated and freed locally.
 * On allocation failure c is left untouched.
 */
void multiplyMatrixTransposeMulti(double *c, double *a, double *b, int n){
    double *bt = malloc((size_t)n * n * sizeof *bt);
    if (bt == NULL) return;
    #pragma omp parallel for
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            bt[i*n + j] = b[j*n + i];
    #pragma omp parallel for
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; j++){
            double sum = 0;
            for (int k = 0; k < n; k++)
                sum += a[i*n + k] * bt[j*n + k];
            c[i*n + j] = sum;
        }
    }
    free(bt);
}
/*
 * c = a * b for n-by-n row-major matrices, single-threaded, with b
 * transposed into a scratch buffer for sequential inner-loop access.
 * BUG FIX: the transposed copy returned by transpose() used to be leaked;
 * the scratch buffer is now allocated and freed locally.
 * On allocation failure c is left untouched.
 */
void multiplyMatrixTransposeSingle(double *c, double *a, double *b, int n){
    double *bt = malloc((size_t)n * n * sizeof *bt);
    if (bt == NULL) return;
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            bt[i*n + j] = b[j*n + i];
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; j++){
            double sum = 0;
            for (int k = 0; k < n; k++)
                sum += a[i*n + k] * bt[j*n + k];
            c[i*n + j] = sum;
        }
    }
    free(bt);
}
/* Naive c = a * b for n-by-n row-major matrices (no transposition;
 * the inner loop strides through b column-wise). */
void multiplyMatrix(double *c, double *a, double *b, int n){
    for (int row = 0; row < n; row++){
        for (int col = 0; col < n; col++){
            double acc = 0;
            for (int k = 0; k < n; k++)
                acc += a[row*n + k] * b[n*k + col];
            c[row*n + col] = acc;
        }
    }
}
/*
 * Driver: times the three multiplication strategies on n-by-n matrices,
 * where n is taken from argv[1].
 * Fixes over the previous version: standard-conforming "int main" (was
 * "void main"), argv validation, allocation checks, removal of the unused
 * variable 'i', and the matrices are freed before exit.
 */
int main(int argc, char **argv){
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix-dimension>\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]);
    if (n <= 0) {
        fprintf(stderr, "matrix dimension must be a positive integer\n");
        return 1;
    }
    double *a = malloc((size_t)n * n * sizeof *a);
    double *b = malloc((size_t)n * n * sizeof *b);
    double *c = malloc((size_t)n * n * sizeof *c);
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "out of memory\n");
        free(a);
        free(b);
        free(c);
        return 1;
    }
    initMatrix(a, n);
    initMatrix(b, n);

    clock_t start = clock();
    multiplyMatrix(c, a, b, n);
    clock_t end = clock();
    float secs = (end - start) / (double)CLOCKS_PER_SEC;
    printf("It took %.2f seconds to multiply the two matrices without transposing!\n", secs);
    //printMatrix(c,n);

    start = clock();
    multiplyMatrixTransposeSingle(c, a, b, n);
    end = clock();
    secs = (end - start) / (double)CLOCKS_PER_SEC;
    printf("It took %.2f seconds to multiply the two matrices transposing with 1 thread!\n", secs);
    //printMatrix(c,n);

    start = clock();
    multiplyMatrixTransposeMulti(c, a, b, n);
    end = clock();
    /* clock() sums CPU time across threads; dividing by 4 approximates
     * wall time for 4 threads (kept from the original; ideally divide by
     * the actual thread count). */
    secs = (end - start) / (double)CLOCKS_PER_SEC / 4;
    printf("It took %.2f seconds to multiply the two matrices transposing with multiple threads!\n", secs);
    //printMatrix(c,n);

    free(a);
    free(b);
    free(c);
    return 0;
}
|
GB_assign_zombie2.c | //------------------------------------------------------------------------------
// GB_assign_zombie2: delete all entries in C(i,:) for GB_assign
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// C(i,:)<!> = anything: GrB_Row_assign or GrB_Col_assign with an empty
// complemented mask requires all entries in C(i,:) to be deleted.
#include "GB_assign.h"
void GB_assign_zombie2
(
    GrB_Matrix C,
    const int64_t i,        // row index whose entries are all to be deleted
    GB_Context Context
)
{
    //--------------------------------------------------------------------------
    // get C
    //--------------------------------------------------------------------------
    const int64_t *restrict Cp = C->p ;
    int64_t *restrict Ci = C->i ;
    const int64_t Cnvec = C->nvec ;
    int64_t nzombies = C->nzombies ;
    // zorig: zombie count at entry, used so the binary search can recognize
    // indices that were already flipped before this call.
    const int64_t zorig = nzombies ;
    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
    // Over-decompose into 64 tasks per thread for dynamic load balance.
    int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
    //--------------------------------------------------------------------------
    // C(i,:) = empty
    //--------------------------------------------------------------------------
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {
        // Each task handles a contiguous range [kfirst, klast) of vectors.
        int64_t kfirst, klast ;
        GB_PARTITION (kfirst, klast, Cnvec, taskid, ntasks) ;
        for (int64_t k = kfirst ; k < klast ; k++)
        {
            //------------------------------------------------------------------
            // find C(i,j)
            //------------------------------------------------------------------
            int64_t pC = Cp [k] ;
            int64_t pC_end = Cp [k+1] ;
            int64_t pright = pC_end - 1 ;
            bool found, is_zombie ;
            // Binary search for row i within vector k (zombie-aware).
            GB_BINARY_ZOMBIE (i, Ci, pC, pright, found, zorig, is_zombie) ;
            //------------------------------------------------------------------
            // if found and not a zombie, mark it as a zombie
            //------------------------------------------------------------------
            if (found && !is_zombie)
            {
                ASSERT (i == Ci [pC]) ;
                nzombies++ ;
                // Flipping the index marks the entry for later deletion.
                Ci [pC] = GB_FLIP (i) ;
            }
        }
    }
    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------
    C->nzombies = nzombies ;
}
|
task-taskwait-nested.c | /*
* task-taskwait-nested.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#include "ompt/ompt-signal.h"
int main(int argc, char *argv[]) {
  int var = 0, a = 0;
  /* Race-free pattern under test: a grandchild task increments var, the
     parent task waits for it with taskwait, and the master region waits for
     the whole tree before its own increment.  Archer must NOT report a race. */
#pragma omp parallel num_threads(2) shared(var, a)
#pragma omp master
  {
#pragma omp task
    {
#pragma omp task shared(var, a)
      {
        OMPT_SIGNAL(a); /* tell the master the child task has started */
        delay(100);
        var++;
      }
      /* Wait for the nested child; orders its var++ before the master's. */
#pragma omp taskwait
    }
    // Give other thread time to steal the task and execute its child.
    OMPT_WAIT(a, 1);
#pragma omp taskwait
    var++;
  }
  fprintf(stderr, "DONE\n");
  /* Both increments must have happened exactly once. */
  int error = (var != 2);
  return error;
}
// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK: DONE
|
semiring.h | #ifndef __SEMIRING_H__
#define __SEMIRING_H__
#include "functions.h"
#include "../sparse_formats/csr.h"
#include "../sparse_formats/ccsr.h"
#include "../redistribution/nosym_transp.h"
#include <iostream>
using namespace std;
namespace CTF_int {
/** \brief default elementwise multiplication: the element type's operator* */
template <typename dtype>
dtype default_mul(dtype a, dtype b){
  dtype prod = a*b;
  return prod;
}
/** \brief default elementwise vector product c[i] = a[i]*b[i] for i in [0,n);
 *         c may alias a or b since each element is independent */
template <typename dtype>
void default_vec_mul(dtype const * a, dtype const * b, dtype * c, int64_t n){
  int64_t idx = 0;
  while (idx < n){
    c[idx] = a[idx]*b[idx];
    ++idx;
  }
}
/** \brief default strided axpy: Y[incY*i] += alpha * X[incX*i] for i in [0,n) */
template <typename dtype>
void default_axpy(int           n,
                  dtype         alpha,
                  dtype const * X,
                  int           incX,
                  dtype *       Y,
                  int           incY){
  dtype const * px = X;
  dtype *       py = Y;
  for (int cnt=0; cnt<n; cnt++){
    *py += alpha*(*px);
    px  += incX;
    py  += incY;
  }
}
template <>
void default_axpy<float>
(int,float,float const *,int,float *,int);
template <>
void default_axpy<double>
(int,double,double const *,int,double *,int);
template <>
void default_axpy< std::complex<float> >
(int,std::complex<float>,std::complex<float> const *,int,std::complex<float> *,int);
template <>
void default_axpy< std::complex<double> >
(int,std::complex<double>,std::complex<double> const *,int,std::complex<double> *,int);
/** \brief default strided scale: X[incX*i] *= alpha for i in [0,n) */
template <typename dtype>
void default_scal(int n, dtype alpha, dtype * X, int incX){
  dtype * px = X;
  for (int cnt=0; cnt<n; cnt++){
    *px *= alpha;
    px  += incX;
  }
}
template <>
void default_scal<float>(int n, float alpha, float * X, int incX);
template <>
void default_scal<double>(int n, double alpha, double * X, int incX);
template <>
void default_scal< std::complex<float> >
(int n, std::complex<float> alpha, std::complex<float> * X, int incX);
template <>
void default_scal< std::complex<double> >
(int n, std::complex<double> alpha, std::complex<double> * X, int incX);
/** \brief reference dense GEMM for a generic element type:
 *         C = alpha*op(A)*op(B) + beta*C, all matrices column major,
 *         C is m by n; tA/tB equal to 'N' or 'n' mean no transpose,
 *         any other value is treated as transpose */
template<typename dtype>
void default_gemm(char tA,
char tB,
int m,
int n,
int k,
dtype alpha,
dtype const * A,
dtype const * B,
dtype beta,
dtype * C){
int i,j,l;
int istride_A, lstride_A, jstride_B, lstride_B;
//TAU_FSTART(default_gemm);
// element (i,l) of op(A) is A[istride_A*i + lstride_A*l]
if (tA == 'N' || tA == 'n'){
istride_A=1;
lstride_A=m;
} else {
istride_A=k;
lstride_A=1;
}
// element (l,j) of op(B) is B[lstride_B*l + jstride_B*j]
if (tB == 'N' || tB == 'n'){
jstride_B=k;
lstride_B=1;
} else {
jstride_B=1;
lstride_B=n;
}
for (j=0; j<n; j++){
for (i=0; i<m; i++){
// scale by beta first, then accumulate the inner product (uses dtype's
// built-in *= and +=, so this generic version requires operator overloads)
C[j*m+i] *= beta;
for (l=0; l<k; l++){
C[j*m+i] += alpha*A[istride_A*i+lstride_A*l]*B[lstride_B*l+jstride_B*j];
}
}
}
//TAU_FSTOP(default_gemm);
}
/** \brief builds an array of pointers to the start of each fixed-size group
 *         within a contiguous buffer
 * \param[in] grp_sz number of elements per group
 * \param[in] ngrp number of groups
 * \param[in] data contiguous buffer of at least grp_sz*ngrp elements; its
 *            constness is cast away in the result — assumes callers treat the
 *            returned pointers per the original contract (TODO confirm no
 *            writes through them when data is logically const)
 * \return malloc'ed array of ngrp pointers, owned and freed by the caller
 */
template<typename dtype>
dtype ** get_grp_ptrs(int64_t       grp_sz,
                      int64_t       ngrp,
                      dtype const * data){
  dtype ** data_ptrs = (dtype**)malloc(sizeof(dtype*)*ngrp);
  #ifdef _OPENMP
  #pragma omp parallel for
  #endif
  // BUG FIX: induction variable was int while ngrp is int64_t, truncating
  // the loop bound (and overflowing i*grp_sz) for ngrp > INT_MAX
  for (int64_t i=0; i<ngrp; i++){
    data_ptrs[i] = ((dtype*)data)+i*grp_sz;
  }
  return data_ptrs;
}
template <typename dtype>
void gemm_batch(
char taA,
char taB,
int l,
int m,
int n,
int k,
dtype alpha,
dtype const* A,
dtype const* B,
dtype beta,
dtype * C);
template <typename dtype>
void gemm(char tA,
char tB,
int m,
int n,
int k,
dtype alpha,
dtype const * A,
dtype const * B,
dtype beta,
dtype * C);
/* Specializations of default_gemm for BLAS-supported scalar types: these
   forward to CTF_int::gemm (declared above, defined elsewhere), which
   dispatches to an optimized implementation instead of the generic loop. */
template<>
inline void default_gemm<float>
(char tA,
char tB,
int m,
int n,
int k,
float alpha,
float const * A,
float const * B,
float beta,
float * C){
CTF_int::gemm<float>(tA,tB,m,n,k,alpha,A,B,beta,C);
}
template<>
inline void default_gemm<double>
(char tA,
char tB,
int m,
int n,
int k,
double alpha,
double const * A,
double const * B,
double beta,
double * C){
CTF_int::gemm<double>(tA,tB,m,n,k,alpha,A,B,beta,C);
}
template<>
inline void default_gemm< std::complex<float> >
(char tA,
char tB,
int m,
int n,
int k,
std::complex<float> alpha,
std::complex<float> const * A,
std::complex<float> const * B,
std::complex<float> beta,
std::complex<float> * C){
CTF_int::gemm< std::complex<float> >(tA,tB,m,n,k,alpha,A,B,beta,C);
}
template<>
inline void default_gemm< std::complex<double> >
(char tA,
char tB,
int m,
int n,
int k,
std::complex<double> alpha,
std::complex<double> const * A,
std::complex<double> const * B,
std::complex<double> beta,
std::complex<double> * C){
CTF_int::gemm< std::complex<double> >(tA,tB,m,n,k,alpha,A,B,beta,C);
}
/** \brief generic batched GEMM over l independent problems:
 *         C_b = alpha*op(A_b)*op(B_b) + beta*C_b for b in [0,l);
 *         the all-scalar case (m=n=k=1) is handled with a direct
 *         fused multiply-accumulate instead of calling default_gemm */
template<typename dtype>
void default_gemm_batch
     (char         taA,
      char         taB,
      int          l,
      int          m,
      int          n,
      int          k,
      dtype        alpha,
      dtype const* A,
      dtype const* B,
      dtype        beta,
      dtype *      C){
  bool scalar_batch = (m == 1 && n == 1 && k == 1);
  for (int b=0; b<l; b++){
    if (scalar_batch){
      C[b] = C[b]*beta + alpha*A[b]*B[b];
    } else {
      default_gemm<dtype>(taA, taB, m, n, k, alpha, A+b*m*k, B+b*k*n, beta, C+b*m*n);
    }
  }
}
/* Specializations of default_gemm_batch for BLAS-supported scalar types:
   forward to CTF_int::gemm_batch (declared above, defined elsewhere), which
   dispatches to an optimized batched implementation. */
template<>
inline void default_gemm_batch<float>
(char taA,
char taB,
int l,
int m,
int n,
int k,
float alpha,
float const* A,
float const* B,
float beta,
float * C){
CTF_int::gemm_batch<float>(taA, taB, l, m, n, k, alpha, A, B, beta, C);
}
template<>
inline void default_gemm_batch<double>
(char taA,
char taB,
int l,
int m,
int n,
int k,
double alpha,
double const* A,
double const* B,
double beta,
double * C){
CTF_int::gemm_batch<double>(taA, taB, l, m, n, k, alpha, A, B, beta, C);
}
template<>
inline void default_gemm_batch<std::complex<float>>
(char taA,
char taB,
int l,
int m,
int n,
int k,
std::complex<float> alpha,
std::complex<float> const* A,
std::complex<float> const* B,
std::complex<float> beta,
std::complex<float> * C){
CTF_int::gemm_batch< std::complex<float> >(taA, taB, l, m, n, k, alpha, A, B, beta, C);
}
template<>
inline void default_gemm_batch<std::complex<double>>
(char taA,
char taB,
int l,
int m,
int n,
int k,
std::complex<double> alpha,
std::complex<double> const* A,
std::complex<double> const* B,
std::complex<double> beta,
std::complex<double> * C){
CTF_int::gemm_batch< std::complex<double> >(taA, taB, l, m, n, k, alpha, A, B, beta, C);
}
/** \brief generic sparse-times-dense multiply with A in 1-based COO format:
 *         C = alpha*A*B + beta*C, where A is m by k with nnz_A nonzeros,
 *         B is k by n and C is m by n, both dense column major */
template <typename dtype>
void default_coomm
     (int           m,
      int           n,
      int           k,
      dtype         alpha,
      dtype const * A,
      int const *   rows_A,
      int const *   cols_A,
      int           nnz_A,
      dtype const * B,
      dtype         beta,
      dtype *       C){
  //TAU_FSTART(default_coomm);
  // pre-scale the dense output by beta (C is contiguous column major)
  for (int j=0; j<n; j++){
    dtype * C_col = C + j*m;
    for (int i=0; i<m; i++){
      C_col[i] *= beta;
    }
  }
  // scatter each nonzero of A into every column of C
  for (int t=0; t<nnz_A; t++){
    int r = rows_A[t]-1;  // convert 1-based COO indices
    int c = cols_A[t]-1;
    for (int col_C=0; col_C<n; col_C++){
      C[col_C*m+r] += alpha*A[t]*B[col_C*k+c];
    }
  }
  //TAU_FSTOP(default_coomm);
}
template <>
void default_coomm< float >
(int,int,int,float,float const *,int const *,int const *,int,float const *,float,float *);
template <>
void default_coomm< double >
(int,int,int,double,double const *,int const *,int const *,int,double const *,double,double *);
template <>
void default_coomm< std::complex<float> >
(int,int,int,std::complex<float>,std::complex<float> const *,int const *,int const *,int,std::complex<float> const *,std::complex<float>,std::complex<float> *);
template <>
void default_coomm< std::complex<double> >
(int,int,int,std::complex<double>,std::complex<double> const *,int const *,int const *,int,std::complex<double> const *,std::complex<double>,std::complex<double> *);
}
namespace CTF {
/**
* \addtogroup algstrct
* @{
*/
/**
* \brief Semiring is a Monoid with an addition multiplicaton function
* addition must have an identity and be associative, does not need to be commutative
* multiplications must have an identity as well as be distributive and associative
* special case (parent) of a Ring (which also has an additive inverse)
*/
template <typename dtype=double, bool is_ord=CTF_int::get_default_is_ord<dtype>()>
class Semiring : public Monoid<dtype, is_ord> {
public:
bool is_def;
dtype tmulid;
void (*fscal)(int,dtype,dtype*,int);
void (*faxpy)(int,dtype,dtype const*,int,dtype*,int);
dtype (*fmul)(dtype a, dtype b);
void (*fvmul)(dtype const * a, dtype const * b, dtype * c, int64_t n);
void (*fgemm)(char,char,int,int,int,dtype,dtype const*,dtype const*,dtype,dtype*);
void (*fcoomm)(int,int,int,dtype,dtype const*,int const*,int const*,int,dtype const*,dtype,dtype*);
void (*fgemm_batch)(char,char,int,int,int,int,dtype,dtype const*,dtype const*,dtype,dtype*);
//void (*fcsrmm)(int,int,int,dtype,dtype const*,int const*,int const*,dtype const*,dtype,dtype*);
//csrmultd_ kernel for multiplying two sparse matrices into a dense output
//void (*fcsrmultd)(int,int,int,dtype const*,int const*,int const*,dtype const*,int const*, int const*,dtype*,int);
/** \brief copy constructor: the additive structure is copied by the Monoid
 *         base; this copies the multiplicative identity, all multiplication
 *         kernel function pointers, and the is_def flag */
Semiring(Semiring const & other) : Monoid<dtype, is_ord>(other) {
this->tmulid = other.tmulid;
this->fscal = other.fscal;
this->faxpy = other.faxpy;
this->fmul = other.fmul;
this->fvmul = other.fvmul;
this->fgemm = other.fgemm;
this->fcoomm = other.fcoomm;
this->is_def = other.is_def;
this->fgemm_batch = other.fgemm_batch;
}
/** \brief polymorphic copy used by the CTF core; caller owns the result */
virtual CTF_int::algstrct * clone() const {
return new Semiring<dtype, is_ord>(*this);
}
/**
* \brief constructor for algstrct equipped with * and +
* \param[in] addid_ additive identity
* \param[in] fadd_ binary addition function
* \param[in] addmop_ MPI_Op operation for addition
* \param[in] mulid_ multiplicative identity
* \param[in] fmul_ binary multiplication function
* \param[in] fvmul_ binary vector multiplication function
* \param[in] gemm_ block matrix multiplication function
* \param[in] axpy_ vector sum function
* \param[in] scal_ vector scale function
* \param[in] coomm_ kernel for multiplying sparse matrix in coordinate format with dense matrix
* \param[in] fgemm_batch_ kernel for multiplying a batch of matrix pairs
*
* Any kernel left NULL makes the corresponding member method fall back to a
* generic loop built from fadd_/fmul_ (see scal/axpy/gemm below).
*/
Semiring(dtype addid_,
dtype (*fadd_)(dtype a, dtype b),
MPI_Op addmop_,
dtype mulid_,
dtype (*fmul_)(dtype a, dtype b),
void (*gemm_)(char,char,int,int,int,dtype,dtype const*,dtype const*,dtype,dtype*)=NULL,
void (*axpy_)(int,dtype,dtype const*,int,dtype*,int)=NULL,
void (*scal_)(int,dtype,dtype*,int)=NULL,
void (*coomm_)(int,int,int,dtype,dtype const*,int const*,int const*,int,dtype const*,dtype,dtype*)=NULL,
void (*fgemm_batch_)(char,char,int,int,int,int,dtype,dtype const*,dtype const*,dtype,dtype*)=NULL,
void (*fvmul_)(dtype const * a, dtype const * b, dtype * c, int64_t n)=NULL)
: Monoid<dtype, is_ord>(addid_, fadd_, addmop_) {
fmul = fmul_;
fvmul = fvmul_;
fgemm = gemm_;
faxpy = axpy_;
fscal = scal_;
fcoomm = coomm_;
fgemm_batch = fgemm_batch_;
tmulid = mulid_;
// if provided a coordinate MM kernel, don't use CSR
this->has_coo_ker = (coomm_ != NULL);
is_def = false;
}
/**
* \brief default constructor: wires the built-in arithmetic of dtype
* (operator+, operator*) via the default_* kernels defined above;
* sets is_def so sparse routines can pick optimized default paths
*/
Semiring() : Monoid<dtype,is_ord>() {
tmulid = dtype(1);
fmul = &CTF_int::default_mul<dtype>;
fvmul = &CTF_int::default_vec_mul<dtype>;
fgemm = &CTF_int::default_gemm<dtype>;
faxpy = &CTF_int::default_axpy<dtype>;
fscal = &CTF_int::default_scal<dtype>;
fcoomm = &CTF_int::default_coomm<dtype>;
fgemm_batch = &CTF_int::default_gemm_batch<dtype>;
is_def = true;
}
void mul(char const * a,
char const * b,
char * c) const {
((dtype*)c)[0] = fmul(((dtype*)a)[0],((dtype*)b)[0]);
}
/** \brief NULL-tolerant element multiply: a NULL operand acts as the
 *         multiplicative identity (result is a copy of the other operand);
 *         both NULL yields NULL
 * \param[in] a first operand (may be NULL)
 * \param[in] b second operand (may be NULL)
 * \param[in,out] c output buffer, allocated here if NULL, freed and nulled
 *                if both operands are NULL
 */
void safemul(char const * a,
             char const * b,
             char *&      c) const {
  if (a == NULL && b == NULL){
    if (c!=NULL) CTF_int::cdealloc(c);
    c = NULL;
  } else if (a == NULL) {
    if (c==NULL) c = (char*)CTF_int::alloc(this->el_size);
    memcpy(c,b,this->el_size);
  } else if (b == NULL) {
    if (c==NULL) c = (char*)CTF_int::alloc(this->el_size);
    // BUG FIX: this branch previously copied from b, which is NULL here
    // (undefined behavior); by symmetry with the a==NULL branch it must
    // copy the non-NULL operand a
    memcpy(c,a,this->el_size);
  } else {
    if (c==NULL) c = (char*)CTF_int::alloc(this->el_size);
    ((dtype*)c)[0] = fmul(((dtype*)a)[0],((dtype*)b)[0]);
  }
}
/** \brief pointer to the stored multiplicative identity */
char const * mulid() const {
return (char const *)&tmulid;
}
/** \brief a Semiring always has a multiplication */
bool has_mul() const { return true; }
/** \brief X["i"]=alpha*X["i"]; scales n elements of X with stride incX,
 *         using the fscal kernel when available, otherwise an fmul loop */
void scal(int          n,
          char const * alpha,
          char *       X,
          int          incX) const {
  if (fscal != NULL) fscal(n, ((dtype const *)alpha)[0], (dtype *)X, incX);
  else {
    dtype const a = ((dtype*)alpha)[0];
    dtype * dX = (dtype*) X;
    // BUG FIX: the fallback previously indexed dX[i], silently ignoring
    // incX; honor the stride like the fscal kernel does
    for (int64_t i=0; i<n; i++){
      dX[i*incX] = fmul(a,dX[i*incX]);
    }
  }
}
/** \brief Y["i"]+=alpha*X["i"]; uses the faxpy kernel when available,
 *         otherwise a generic fadd/fmul loop (unit strides only) */
void axpy(int n,
char const * alpha,
char const * X,
int incX,
char * Y,
int incY) const {
if (faxpy != NULL) faxpy(n, ((dtype const *)alpha)[0], (dtype const *)X, incX, (dtype *)Y, incY);
else {
// the generic fallback does not implement strides
assert(incX==1);
assert(incY==1);
dtype a = ((dtype*)alpha)[0];
dtype const * dX = (dtype*) X;
dtype * dY = (dtype*) Y;
for (int64_t i=0; i<n; i++){
dY[i] = this->fadd(fmul(a,dX[i]), dY[i]);
}
}
}
/** \brief C["ij"] = alpha*A^tA["ik"]*B^tB["kj"] + beta*C["ij"];
 *         dispatches to the fgemm kernel when set, otherwise runs a
 *         generic fadd/fmul triple loop (column major, C is m by n) */
void gemm(char tA,
char tB,
int m,
int n,
int k,
char const * alpha,
char const * A,
char const * B,
char const * beta,
char * C) const {
if (fgemm != NULL) {
fgemm(tA, tB, m, n, k, ((dtype const *)alpha)[0], (dtype const *)A, (dtype const *)B, ((dtype const *)beta)[0], (dtype *)C);
} else {
//TAU_FSTART(sring_gemm);
dtype const * dA = (dtype const *) A;
dtype const * dB = (dtype const *) B;
dtype * dC = (dtype*) C;
// pre-scale C by beta unless beta is the multiplicative identity
if (!this->isequal(beta, this->mulid())){
scal(m*n, beta, C, 1);
}
// strides so that op(A)(i,l) = dA[l*lda_Al+i*lda_Ai], similarly for B
int lda_Cj, lda_Ci, lda_Al, lda_Ai, lda_Bj, lda_Bl;
lda_Cj = m;
lda_Ci = 1;
if (tA == 'N'){
lda_Al = m;
lda_Ai = 1;
} else {
assert(tA == 'T');
lda_Al = 1;
lda_Ai = k;
}
if (tB == 'N'){
lda_Bj = k;
lda_Bl = 1;
} else {
assert(tB == 'T');
lda_Bj = 1;
lda_Bl = n;
}
// two loop nests to avoid a per-element alpha multiply when alpha == 1
if (!this->isequal(alpha, this->mulid())){
dtype a = ((dtype*)alpha)[0];
for (int64_t j=0; j<n; j++){
for (int64_t i=0; i<m; i++){
for (int64_t l=0; l<k; l++){
//dC[j*m+i] = this->fadd(fmul(a,fmul(dA[l*m+i],dB[j*k+l])), dC[j*m+i]);
dC[j*lda_Cj+i*lda_Ci] = this->fadd(fmul(a,fmul(dA[l*lda_Al+i*lda_Ai],dB[j*lda_Bj+l*lda_Bl])), dC[j*lda_Cj+i*lda_Ci]);
}
}
}
} else {
for (int64_t j=0; j<n; j++){
for (int64_t i=0; i<m; i++){
for (int64_t l=0; l<k; l++){
//dC[j*m+i] = this->fadd(fmul(a,fmul(dA[l*m+i],dB[j*k+l])), dC[j*m+i]);
dC[j*lda_Cj+i*lda_Ci] = this->fadd(fmul(dA[l*lda_Al+i*lda_Ai],dB[j*lda_Bj+l*lda_Bl]), dC[j*lda_Cj+i*lda_Ci]);
}
}
}
}
//TAU_FSTOP(sring_gemm);
}
}
/** \brief batched GEMM over l problems: dispatches to the fgemm_batch
 *         kernel when set, otherwise loops over per-batch gemm calls
 *         (pointer offsets are in bytes since A/B/C are char*) */
void gemm_batch(char tA,
char tB,
int l,
int m,
int n,
int k,
char const * alpha,
char const * A,
char const * B,
char const * beta,
char * C) const {
if (fgemm_batch != NULL) {
fgemm_batch(tA, tB, l, m, n, k, ((dtype const *)alpha)[0], ((dtype const *)A), ((dtype const *)B), ((dtype const *)beta)[0], ((dtype *)C));
} else {
for (int i=0; i<l; i++){
gemm(tA, tB, m, n, k, alpha, A+m*k*i*sizeof(dtype), B+k*n*i*sizeof(dtype), beta, C+m*n*i*sizeof(dtype));
}
}
}
/** \brief GPU-offloaded GEMM is not available for a generic semiring;
 *         always aborts (is_offloadable() below returns false) */
void offload_gemm(char tA,
char tB,
int m,
int n,
int k,
char const * alpha,
char const * A,
char const * B,
char const * beta,
char * C) const {
printf("CTF ERROR: offload gemm not present for this semiring\n");
assert(0);
}
/** \brief generic semirings cannot be offloaded to an accelerator */
bool is_offloadable() const {
return false;
}
/** \brief sparse (1-based COO) times dense multiply C = alpha*A*B + beta*C;
 *         uses the fcoomm kernel when set, otherwise a generic fadd/fmul
 *         loop that only supports beta equal to the multiplicative identity */
void coomm(int m, int n, int k, char const * alpha, char const * A, int const * rows_A, int const * cols_A, int64_t nnz_A, char const * B, char const * beta, char * C, CTF_int::bivar_function const * func) const {
if (func == NULL && alpha != NULL && fcoomm != NULL){
fcoomm(m, n, k, ((dtype const *)alpha)[0], (dtype const *)A, rows_A, cols_A, nnz_A, (dtype const *)B, ((dtype const *)beta)[0], (dtype *)C);
return;
}
if (func == NULL && alpha != NULL && this->isequal(beta,mulid())){
//TAU_FSTART(func_coomm);
dtype const * dA = (dtype const*)A;
dtype const * dB = (dtype const*)B;
dtype * dC = (dtype*)C;
dtype a = ((dtype*)alpha)[0];
// NOTE(review): this scal is dead code — the enclosing branch already
// requires beta == mulid(), so the condition can never be true
if (!this->isequal(beta, this->mulid())){
scal(m*n, beta, C, 1);
}
for (int64_t i=0; i<nnz_A; i++){
int row_A = rows_A[i]-1;
int col_A = cols_A[i]-1;
for (int col_C=0; col_C<n; col_C++){
dC[col_C*m+row_A] = this->fadd(fmul(a,fmul(dA[i],dB[col_C*k+col_A])), dC[col_C*m+row_A]);
}
}
//TAU_FSTOP(func_coomm);
} else { assert(0); }
}
/** \brief generic CSR (1-based) times dense multiply C = alpha*A*B + beta*C;
 *         A is m by k with row pointers IA and column indices JA,
 *         B (k by n) and C (m by n) are dense column major */
void gen_csrmm
(int m,
int n,
int k,
dtype alpha,
dtype const * A,
int const * JA,
int const * IA,
int nnz_A,
dtype const * B,
dtype beta,
dtype * C) const {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int row_A=0; row_A<m; row_A++){
// NOTE(review): this inner "parallel for" nests a second parallel region
// inside the outer one rather than sharing its threads — confirm nested
// parallelism is intended (collapse(2) on the outer loop may be the goal)
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int col_B=0; col_B<n; col_B++){
C[col_B*m+row_A] = this->fmul(beta,C[col_B*m+row_A]);
// peel the first nonzero of the row to seed tmp, then fold in the rest
if (IA[row_A] < IA[row_A+1]){
int i_A1 = IA[row_A]-1;
int col_A1 = JA[i_A1]-1;
dtype tmp = this->fmul(A[i_A1],B[col_B*k+col_A1]);
for (int i_A=IA[row_A]; i_A<IA[row_A+1]-1; i_A++){
int col_A = JA[i_A]-1;
tmp = this->fadd(tmp, this->fmul(A[i_A],B[col_B*k+col_A]));
}
C[col_B*m+row_A] = this->fadd(C[col_B*m+row_A], this->fmul(alpha,tmp));
}
}
}
}
/** \brief default CSR-times-dense kernel; for generic dtype this is just
 *         gen_csrmm (specialized to MKL/BLAS paths for float/double/complex
 *         at the bottom of this header) */
void default_csrmm
(int m,
int n,
int k,
dtype alpha,
dtype const * A,
int const * JA,
int const * IA,
int nnz_A,
dtype const * B,
dtype beta,
dtype * C) const {
gen_csrmm(m,n,k,alpha,A,JA,IA,nnz_A,B,beta,C);
}
// void (*fcsrmultd)(int,int,int,dtype const*,int const*,int const*,dtype const*,int const*, int const*,dtype*,int);
/** \brief sparse version of gemm using CSR format for A */
/** \brief byte-pointer entry for CSR-times-dense multiply; picks the
 *         optimized default kernel for built-in semirings (is_def) and the
 *         generic fadd/fmul kernel otherwise; custom element functions and
 *         COO kernels are not supported here */
void csrmm(int m,
int n,
int k,
char const * alpha,
char const * A,
int const * JA,
int const * IA,
int64_t nnz_A,
char const * B,
char const * beta,
char * C,
CTF_int::bivar_function const * func) const {
assert(!this->has_coo_ker);
assert(func == NULL);
if (is_def)
this->default_csrmm(m,n,k,((dtype*)alpha)[0],(dtype*)A,JA,IA,nnz_A,(dtype*)B,((dtype*)beta)[0],(dtype*)C);
else
this->gen_csrmm(m,n,k,((dtype*)alpha)[0],(dtype*)A,JA,IA,nnz_A,(dtype*)B,((dtype*)beta)[0],(dtype*)C);
}
bool is_last_col_zero(int64_t m, int64_t n, dtype const * M) const {
for (int64_t i=0; i<m; i++){
if (!this->isequal((char*)(M+(m*(n-1)+i)), (char*)&this->taddid)) return false;
}
return true;
}
/** \brief CSR-times-dense multiply producing a compressed-CSR (CCSR) output:
 *         computes M = alpha*A*B (dense intermediate, then transposed into
 *         row-major CCSR storage) and folds it into C_CCSR with beta scaling;
 *         only the nnz_row rows of A that have nonzeros participate,
 *         identified by row_enc */
void gen_ccsrmm
(int64_t m,
int64_t n0,
int64_t k,
int64_t nnz_row,
dtype alpha,
dtype const * A,
int const * JA,
int const * IA,
int64_t const * row_enc,
int64_t nnz_A,
dtype const * B,
dtype beta,
char *& C_CCSR) const {
CTF_int::CCSR_Matrix M;
int64_t n = n0;
// drop a trailing all-zero column of B (padding) to avoid generating
// explicit zeros in the sparse output
if (this->is_last_col_zero(k, n, B)){
n = n0-1;
}
// degenerate case: nothing to multiply, result is an empty CCSR matrix
if (n == 0){
M = CTF_int::CCSR_Matrix(0, 0, m, 1, this);
if (C_CCSR != NULL && !this->isequal((char const *)&beta, this->addid())){
CTF_int::CCSR_Matrix C(C_CCSR);
if (!this->isequal((char const *)&beta, this->mulid()))
this->scal(C.nnz(), (char*)&beta, C.all_data, 1);
C_CCSR = CTF_int::CCSR_Matrix::ccsr_add(C.all_data, M.all_data, this);
CTF_int::cdealloc(M.all_data);
} else {
//CTF_int::cdealloc(C_CCSR);
C_CCSR = M.all_data;
}
return;
}
if (nnz_row == 0){
M = CTF_int::CCSR_Matrix(nnz_row*n, nnz_row, m, n, this);
} else {
// the dense product is computed column major, then transposed to the
// row-major layout CCSR values use (via HPTT when applicable)
int new_order[2] = {1, 0};
int64_t lens[2] = {(int64_t)nnz_row, (int64_t)n};
bool use_hptt = CTF_int::hptt_is_applicable(2, new_order, this->el_size);
//Note: if there is padding last column of dense matrix would be full of zeros and we don't want to generate nonzeros for this colum, as this will cause tricky bugs!
if (use_hptt){
char * data = this->alloc(((int64_t)nnz_row)*n);
this->init_shell(((int64_t)nnz_row)*n, data);
csrmm(nnz_row,n,k,(char const *)&alpha, (char const *)A, JA, IA, nnz_A, (char const*)B, this->mulid(), data, NULL);
M = CTF_int::CCSR_Matrix(((int64_t)nnz_row)*n, nnz_row, m, n0, this);
CTF_int::nosym_transpose_hptt(2, new_order, lens, 1, data, M.vals(), this);
this->dealloc(data);
} else {
M = CTF_int::CCSR_Matrix(((int64_t)nnz_row)*n, nnz_row, m, n0, this);
csrmm(nnz_row,n,k,(char const *)&alpha, (char const *)A, JA, IA, nnz_A, (char const*)B, this->mulid(), M.vals(), NULL);
CTF_int::nosym_transpose(2,new_order,lens,M.vals(),1,this);
}
}
memcpy(M.nnz_row_encoding(), row_enc, nnz_row*sizeof(int64_t));
// every stored row of M is fully dense (n entries): build trivial 1-based
// row pointers and column indices
int * C_IA = M.IA();
C_IA[0] = 1;
for (int64_t row_A=1; row_A<nnz_row+1; row_A++){
C_IA[row_A] = C_IA[row_A-1] + n;
}
int * C_JA = M.JA();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int64_t row_C=0; row_C<nnz_row; row_C++){
// NOTE(review): nested "parallel for" spawns a region inside a region;
// confirm nested parallelism is intended here
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int64_t col_C=0; col_C<n; col_C++){
C_JA[row_C*n+col_C] = col_C+1;
}
}
// fold M into the existing output: C_CCSR = beta*C_CCSR + M, or replace
// the output outright when it is empty or beta is the additive identity
if (C_CCSR != NULL && !this->isequal((char const *)&beta, this->addid())){
CTF_int::CCSR_Matrix C(C_CCSR);
if (!this->isequal((char const *)&beta, this->mulid()))
this->scal(C.nnz(), (char*)&beta, C.all_data, 1);
C_CCSR = CTF_int::CCSR_Matrix::ccsr_add(C.all_data, M.all_data, this);
CTF_int::cdealloc(M.all_data);
} else {
//CTF_int::cdealloc(C_CCSR);
C_CCSR = M.all_data;
}
}
/** \brief default CCSR-times-dense kernel; for generic dtype this simply
 *         forwards to gen_ccsrmm */
void default_ccsrmm
(int64_t m,
int64_t n,
int64_t k,
int64_t nnz_row,
dtype alpha,
dtype const * A,
int const * JA,
int const * IA,
int64_t const * row_enc,
int64_t nnz_A,
dtype const * B,
dtype beta,
char *& C) const {
gen_ccsrmm(m,n,k,nnz_row,alpha,A,JA,IA,row_enc,nnz_A,B,beta,C);
}
// void (*fccsrmultd)(int,int,int,dtype const*,int const*,int const*,dtype const*,int const*, int const*,dtype*,int);
/** \brief sparse version of gemm using CSR format for A */
/** \brief byte-pointer entry for CCSR-times-dense multiply; dispatches to
 *         the optimized default kernel for built-in semirings (is_def) and
 *         to the generic kernel otherwise */
void ccsrmm(int64_t m,
int64_t n,
int64_t k,
int64_t nnz_row,
char const * alpha,
char const * A,
int const * JA,
int const * IA,
int64_t const * row_enc,
int64_t nnz_A,
char const * B,
char const * beta,
char *& C,
CTF_int::bivar_function const * func) const {
assert(!this->has_coo_ker);
assert(func == NULL);
if (is_def)
this->default_ccsrmm(m,n,k,nnz_row,((dtype*)alpha)[0],(dtype*)A,JA,IA,row_enc,nnz_A,(dtype*)B,((dtype*)beta)[0],C);
else
this->gen_ccsrmm(m,n,k,nnz_row,((dtype*)alpha)[0],(dtype*)A,JA,IA,row_enc,nnz_A,(dtype*)B,((dtype*)beta)[0],C);
}
/** \brief sparse-times-sparse into dense: C = alpha*A*B + beta*C, where A
 *         (m by k) and B (k by n) are 1-based CSR and C is dense column
 *         major; rows of C are partitioned across threads so concurrent
 *         updates never touch the same element */
void gen_csrmultd
     (int           m,
      int           n,
      int           k,
      dtype         alpha,
      dtype const * A,
      int const *   JA,
      int const *   IA,
      int           nnz_A,
      dtype const * B,
      int const *   JB,
      int const *   IB,
      int           nnz_B,
      dtype         beta,
      dtype *       C) const {
  // pre-scale C by beta unless beta is the multiplicative identity
  if (!this->isequal((char const*)&beta, this->mulid())){
    this->scal(m*n, (char const *)&beta, (char*)C, 1);
  }
  #ifdef _OPENMP
  #pragma omp parallel for
  #endif
  for (int row_A=0; row_A<m; row_A++){
    for (int i_A=IA[row_A]-1; i_A<IA[row_A+1]-1; i_A++){
      int row_B = JA[i_A]-1; //=col_A
      for (int i_B=IB[row_B]-1; i_B<IB[row_B+1]-1; i_B++){
        int col_B = JB[i_B]-1;
        // BUG FIX: fadd returns the sum by value; the original discarded
        // the result, so C was never accumulated into — assign it back
        if (!this->isequal((char const*)&alpha, this->mulid()))
          C[col_B*m+row_A] = this->fadd(C[col_B*m+row_A], this->fmul(alpha,this->fmul(A[i_A],B[i_B])));
        else
          C[col_B*m+row_A] = this->fadd(C[col_B*m+row_A], this->fmul(A[i_A],B[i_B]));
      }
    }
  }
}
/** \brief default CSR-times-CSR-into-dense kernel; for generic dtype this
 *         forwards to gen_csrmultd (specialized to optimized paths for
 *         built-in scalar types at the bottom of this header) */
void default_csrmultd
(int m,
int n,
int k,
dtype alpha,
dtype const * A,
int const * JA,
int const * IA,
int nnz_A,
dtype const * B,
int const * JB,
int const * IB,
int nnz_B,
dtype beta,
dtype * C) const {
gen_csrmultd(m,n,k,alpha,A,JA,IA,nnz_A,B,JB,IB,nnz_B,beta,C);
}
/** \brief sparse-times-sparse with sparse output: C_CSR = alpha*A*B +
 *         beta*C_CSR for 1-based CSR matrices A (m by k) and B (k by n);
 *         two passes: a symbolic pass counting nonzeros per output row,
 *         then a numeric pass accumulating into dense per-row scratch */
void gen_csrmultcsr
(int m,
int n,
int k,
dtype alpha,
dtype const * A, // A m by k
int const * JA,
int const * IA,
int nnz_A,
dtype const * B, // B k by n
int const * JB,
int const * IB,
int nnz_B,
dtype beta,
char *& C_CSR) const {
// symbolic pass: IC[i+1] = number of distinct output columns in row i
int * IC = (int*)CTF_int::alloc(sizeof(int)*(m+1));
memset(IC, 0, sizeof(int)*(m+1));
#ifdef _OPENMP
#pragma omp parallel
{
#endif
int * has_col = (int*)CTF_int::alloc(sizeof(int)*(n+1)); //n is the num of col of B
int nnz = 0;
#ifdef _OPENMP
#pragma omp for schedule(dynamic) // TO DO test other strategies
#endif
for (int i=0; i<m; i++){
memset(has_col, 0, sizeof(int)*(n+1));
nnz = 0;
for (int j=0; j<IA[i+1]-IA[i]; j++){
int row_B = JA[IA[i]+j-1]-1;
for (int kk=0; kk<IB[row_B+1]-IB[row_B]; kk++){
int idx_B = IB[row_B]+kk-1;
if (has_col[JB[idx_B]] == 0){
nnz++;
has_col[JB[idx_B]] = 1;
}
}
IC[i+1]=nnz;
}
}
CTF_int::cdealloc(has_col);
#ifdef _OPENMP
} // END PARALLEL
#endif
// prefix-sum the per-row counts into 1-based CSR row pointers
int ic_prev = 1;
for(int i=0;i < m+1; i++){
ic_prev += IC[i];
IC[i] = ic_prev;
}
CTF_int::CSR_Matrix C(IC[m]-1, m, n, this);
dtype * vC = (dtype*)C.vals();
// NOTE(review): C was allocated with IC[m]-1 nonzeros but IC[m]+1 entries
// are zeroed here — two elements past the values array unless vals() is
// padded; confirm (the commented-out older version used IC[m]-1)
this->set((char *)vC, this->addid(), IC[m]+1);
int * JC = C.JA();
memcpy(C.IA(), IC, sizeof(int)*(m+1));
CTF_int::cdealloc(IC);
IC = C.IA();
// numeric pass: accumulate each output row in dense scratch (acc_data),
// tracking which columns were touched via dcol, then compact into C
#ifdef _OPENMP
#pragma omp parallel
{
#endif
int ins = 0;
int *dcol = (int *) CTF_int::alloc(n*sizeof(int));
dtype *acc_data = (dtype*)CTF_int::alloc(n*sizeof (dtype));
#ifdef _OPENMP
#pragma omp for
#endif
for (int i=0; i<m; i++){
std::fill(acc_data, acc_data+n, this->taddid);
memset(dcol, 0, sizeof(int)*(n));
ins = 0;
for (int j=0; j<IA[i+1]-IA[i]; j++){
int row_b = JA[IA[i]+j-1]-1; // 1-based
int idx_a = IA[i]+j-1;
for (int ii = 0; ii < IB[row_b+1]-IB[row_b]; ii++){
int col_b = IB[row_b]+ii-1;
int col_c = JB[col_b]-1; // 1-based
dtype val = fmul(A[idx_a], B[col_b]);
if (dcol[col_c] == 0){
dcol[col_c] = JB[col_b];
}
//acc_data[col_c] += val;
acc_data[col_c]= this->fadd(acc_data[col_c], val);
}
}
for(int jj = 0; jj < n; jj++){
if (dcol[jj] != 0){
JC[IC[i]+ins-1] = dcol[jj];
vC[IC[i]+ins-1] = acc_data[jj];
++ins;
}
}
}
CTF_int::cdealloc(dcol);
CTF_int::cdealloc(acc_data);
#ifdef _OPENMP
} //PRAGMA END
#endif
// scale the fresh product by alpha, then merge with the prior C_CSR
// (scaled by beta) unless the prior output is absent/empty or beta is zero
// NOTE(review): C_in is constructed from C_CSR before the NULL check below —
// confirm CSR_Matrix tolerates a NULL buffer
CTF_int::CSR_Matrix C_in(C_CSR);
if (!this->isequal((char const *)&alpha, this->mulid())){
this->scal(C.nnz(), (char const *)&alpha, C.vals(), 1);
}
if (C_CSR == NULL || C_in.nnz() == 0 || this->isequal((char const *)&beta, this->addid())){
C_CSR = C.all_data;
} else {
if (!this->isequal((char const *)&beta, this->mulid())){
this->scal(C_in.nnz(), (char const *)&beta, C_in.vals(), 1);
}
char * ans = this->csr_add(C_CSR, C.all_data, false);
CTF_int::cdealloc(C.all_data);
C_CSR = ans;
}
}
/* void gen_csrmultcsr_old
(int m,
int n,
int k,
dtype alpha,
dtype const * A,
int const * JA,
int const * IA,
int nnz_A,
dtype const * B,
int const * JB,
int const * IB,
int nnz_B,
dtype beta,
char *& C_CSR) const {
int * IC = (int*)CTF_int::alloc(sizeof(int)*(m+1));
int * has_col = (int*)CTF_int::alloc(sizeof(int)*n);
IC[0] = 1;
for (int i=0; i<m; i++){
memset(has_col, 0, sizeof(int)*n);
IC[i+1] = IC[i];
CTF_int::CSR_Matrix::compute_has_col(JA, IA, JB, IB, i, has_col);
for (int j=0; j<n; j++){
IC[i+1] += has_col[j];
}
}
CTF_int::CSR_Matrix C(IC[m]-1, m, n, sizeof(dtype));
dtype * vC = (dtype*)C.vals();
this->set((char *)vC, this->addid(), IC[m]-1);
int * JC = C.JA();
memcpy(C.IA(), IC, sizeof(int)*(m+1));
CTF_int::cdealloc(IC);
IC = C.IA();
int64_t * rev_col = (int64_t*)CTF_int::alloc(sizeof(int64_t)*n);
for (int i=0; i<m; i++){
memset(has_col, 0, sizeof(int)*n);
CTF_int::CSR_Matrix::compute_has_col(JA, IA, JB, IB, i, has_col);
int vs = 0;
for (int j=0; j<n; j++){
if (has_col[j]){
JC[IC[i]+vs-1] = j+1;
rev_col[j] = IC[i]+vs-1;
vs++;
}
}
for (int j=0; j<IA[i+1]-IA[i]; j++){
int row_B = JA[IA[i]+j-1]-1;
int idx_A = IA[i]+j-1;
for (int l=0; l<IB[row_B+1]-IB[row_B]; l++){
int idx_B = IB[row_B]+l-1;
dtype tmp = fmul(A[idx_A],B[idx_B]);
vC[(rev_col[JB[idx_B]-1])] = this->fadd(vC[(rev_col[JB[idx_B]-1])], tmp);
}
}
}
CTF_int::CSR_Matrix C_in(C_CSR);
if (!this->isequal((char const *)&alpha, this->mulid())){
this->scal(C.nnz(), (char const *)&alpha, C.vals(), 1);
}
if (C_CSR == NULL || C_in.nnz() == 0 || this->isequal((char const *)&beta, this->addid())){
C_CSR = C.all_data;
} else {
if (!this->isequal((char const *)&beta, this->mulid())){
this->scal(C_in.nnz(), (char const *)&beta, C_in.vals(), 1);
}
char * ans = this->csr_add(C_CSR, C.all_data);
CTF_int::cdealloc(C.all_data);
C_CSR = ans;
}
CTF_int::cdealloc(has_col);
CTF_int::cdealloc(rev_col);
}*/
/** \brief default CSR-times-CSR kernel; for generic dtype this forwards to
 *         gen_csrmultcsr */
void default_csrmultcsr
(int m,
int n,
int k,
dtype alpha,
dtype const * A,
int const * JA,
int const * IA,
int nnz_A,
dtype const * B,
int const * JB,
int const * IB,
int nnz_B,
dtype beta,
char *& C_CSR) const {
this->gen_csrmultcsr(m,n,k,alpha,A,JA,IA,nnz_A,B,JB,IB,nnz_B,beta,C_CSR);
}
/** \brief byte-pointer entry for CSR-times-CSR-into-dense multiply;
 *         dispatches to the optimized default kernel for built-in
 *         semirings (is_def), otherwise the generic kernel */
void csrmultd
(int m,
int n,
int k,
char const * alpha,
char const * A,
int const * JA,
int const * IA,
int64_t nnz_A,
char const * B,
int const * JB,
int const * IB,
int64_t nnz_B,
char const * beta,
char * C) const {
if (is_def)
this->default_csrmultd(m,n,k,((dtype const*)alpha)[0],(dtype const*)A,JA,IA,nnz_A,(dtype const*)B,JB,IB,nnz_B,((dtype const*)beta)[0],(dtype*)C);
else
this->gen_csrmultd(m,n,k,((dtype const*)alpha)[0],(dtype const*)A,JA,IA,nnz_A,(dtype const*)B,JB,IB,nnz_B,((dtype const*)beta)[0],(dtype*)C);
}
/** \brief byte-pointer entry for CSR-times-CSR multiply with CSR output;
 *         dispatches to the optimized default kernel for built-in
 *         semirings (is_def), otherwise the generic kernel */
void csrmultcsr
(int m,
int n,
int k,
char const * alpha,
char const * A,
int const * JA,
int const * IA,
int64_t nnz_A,
char const * B,
int const * JB,
int const * IB,
int64_t nnz_B,
char const * beta,
char *& C_CSR) const {
if (is_def){
this->default_csrmultcsr(m,n,k,((dtype const*)alpha)[0],(dtype const*)A,JA,IA,nnz_A,(dtype const*)B,JB,IB,nnz_B,((dtype const*)beta)[0],C_CSR);
} else {
this->gen_csrmultcsr(m,n,k,((dtype const*)alpha)[0],(dtype const*)A,JA,IA,nnz_A,(dtype const*)B,JB,IB,nnz_B,((dtype const*)beta)[0],C_CSR);
}
}
/** \brief accumulates a dense tensor slice into the local tensor data:
 *         tensor[offsets:ends] = beta*tensor[offsets:ends] + alpha*slice;
 *         recurses over the last mode, peeling one dimension per call
 *         until the 1-D base case (lens give the tensor's mode lengths,
 *         lens_slice the slice's; sym is passed through unused here) */
void accumulate_local_slice(int order,
int64_t * lens,
int64_t * lens_slice,
int const * sym,
int64_t const * offsets,
int64_t const * ends,
char const * slice_data,
char const * alpha,
char * tensor_data,
char const * beta) const {
dtype const * sdata = (dtype const*)slice_data;
dtype * tdata = (dtype*)tensor_data;
if (order == 1){
// base case: elementwise tdata[i] = b*tdata[i] + a*sdata[i-offset]
dtype a = ((dtype*)alpha)[0];
dtype b = ((dtype*)beta)[0];
for (int64_t i=offsets[0]; i<ends[0]; i++){
tdata[i] = this->fadd(this->fmul(b,tdata[i]),this->fmul(a,sdata[i-offsets[0]]));
}
} else {
// stride of the last mode = product of all leading mode lengths
int64_t lda_tensor = 1;
int64_t lda_slice = 1;
for (int64_t i=0; i<order-1; i++){
lda_tensor *= lens[i];
lda_slice *= lens_slice[i];
}
for (int64_t i=offsets[order-1]; i<ends[order-1]; i++){
this->accumulate_local_slice(order-1, lens, lens_slice, sym, offsets, ends, (char const*)(sdata + (i-offsets[order-1])*lda_slice), alpha, (char *)(tdata + i*lda_tensor), beta);
}
}
}
/** \brief matricized-tensor-times-Khatri-Rao-product on locally owned
 *         sparse data: accumulates into out_mat (rank-k factor for mode
 *         out_mode) using factor matrices op_mats; nonzeros are assumed
 *         sorted so that entries sharing a mode-0 fiber are contiguous;
 *         only the aux_mode_first layout (factor rows of length k
 *         contiguous) is implemented */
void MTTKRP(int order,
int64_t * lens,
int * phys_phase,
int64_t k,
int64_t nnz,
int out_mode,
bool aux_mode_first,
CTF::Pair<dtype> const * tsr_data,
dtype const * const * op_mats,
dtype * out_mat){
if (aux_mode_first){
// buffer holds the elementwise (Hadamard) product of the factor rows
// selected by the current fiber's indices
dtype * buffer = (dtype*)this->alloc(k);
dtype * out_buffer;
if (out_mode != 0)
out_buffer = (dtype*)this->alloc(k);
int64_t * inds = (int64_t*)malloc(sizeof(int64_t)*(order-1));
int64_t idx = 0;
while (idx < nnz){
// decode the multi-index of modes 1..order-1 from the global key
// (divided by phys_phase to get the local row index of each factor)
int64_t fiber_idx = tsr_data[idx].k/lens[0];
int64_t fi = fiber_idx;
for (int i=0; i<order-1; i++){
inds[i] = (fi % lens[i+1])/phys_phase[i+1];
fi = fi / lens[i+1];
}
// count how many consecutive nonzeros share this mode-0 fiber
int64_t fiber_nnz = 1;
while (idx+fiber_nnz < nnz && tsr_data[idx+fiber_nnz].k/lens[0] == fiber_idx)
fiber_nnz++;
if (out_mode == 0){
// Hadamard-product all non-output factor rows, then axpy each
// nonzero value times that row into the output row
memcpy(buffer, op_mats[1] + inds[0]*k, k*sizeof(dtype));
for (int i=1; i<order-1; i++){
fvmul(buffer, op_mats[i+1]+inds[i]*k, buffer, k);
}
for (int64_t i=idx; i<idx+fiber_nnz; i++){
int64_t kk = (tsr_data[i].k%lens[0])/phys_phase[0];
this->faxpy(k, tsr_data[i].d, buffer, 1, out_mat+kk*k, 1);
}
} else {
// output row index within the out_mode factor
int64_t ok = inds[out_mode-1];
// seed buffer with the first factor row that is not the output mode
if (out_mode > 1)
memcpy(buffer, op_mats[1] + inds[0]*k, k*sizeof(dtype));
else if (order > 2)
memcpy(buffer, op_mats[2] + inds[1]*k, k*sizeof(dtype));
else
std::fill(buffer, buffer+k, this->tmulid);
for (int i=1+(out_mode==1); i<order-1; i++){
if (out_mode != i+1)
fvmul(buffer, op_mats[i+1] + inds[i]*k, buffer, k);
}
// fold the fiber's nonzeros against the mode-0 factor, then combine
// with the Hadamard product and accumulate into the output row
std::fill(out_buffer, out_buffer+k, this->taddid);
for (int64_t i=idx; i<idx+fiber_nnz; i++){
int64_t kk = (tsr_data[i].k%lens[0])/phys_phase[0];
this->faxpy(k, tsr_data[i].d, op_mats[0] + kk*k, 1, out_buffer, 1);
}
fvmul(out_buffer, buffer, out_buffer, k);
this->faxpy(k, this->tmulid, out_buffer, 1, out_mat + ok*k, 1);
//for (int j=0; j<k; j++){
// out_mat[j+ok*k] += out_buffer[j]*buffer[j];
//}
}
idx += fiber_nnz;
}
if (out_mode != 0)
this->dealloc((char*)out_buffer);
this->dealloc((char*)buffer);
free(inds);
} else {
IASSERT(0);
}
}
/** \brief rank-1 (vector) analogue of MTTKRP: accumulates into out_vec for
 *         mode out_mode using factor vectors op_vecs; same fiber-grouped
 *         traversal as the matrix version, with scalar products replacing
 *         the length-k Hadamard products */
void MTTKRP(int order,
int64_t * lens,
int * phys_phase,
int64_t nnz,
int out_mode,
CTF::Pair<dtype> const * tsr_data,
dtype const * const * op_vecs,
dtype * out_vec){
int64_t * inds = (int64_t*)malloc(sizeof(int64_t)*(order-1));
int64_t idx = 0;
while (idx < nnz){
// decode the multi-index of modes 1..order-1 from the global key
int64_t fiber_idx = tsr_data[idx].k/lens[0];
int64_t fi = fiber_idx;
for (int i=0; i<order-1; i++){
inds[i] = (fi % lens[i+1])/phys_phase[i+1];
fi = fi / lens[i+1];
}
int64_t fiber_nnz = 1;
while (idx+fiber_nnz < nnz && tsr_data[idx+fiber_nnz].k/lens[0] == fiber_idx)
fiber_nnz++;
if (out_mode == 0){
// product of all non-output factor entries for this fiber
dtype buf_val = op_vecs[1][inds[0]];
for (int i=1; i<order-1; i++){
buf_val *= op_vecs[i+1][inds[i]];
}
for (int64_t i=idx; i<idx+fiber_nnz; i++){
int64_t kk = (tsr_data[i].k%lens[0])/phys_phase[0];
out_vec[kk] += tsr_data[i].d*buf_val;
}
} else {
int64_t ok = inds[out_mode-1];
// NOTE(review): this initializer is redundant — every branch below
// overwrites buf_val before it is read
dtype buf_val = op_vecs[1][inds[0]];
if (out_mode > 1)
buf_val = op_vecs[1][inds[0]];
else if (order > 2)
buf_val = op_vecs[2][inds[1]];
else
buf_val = this->tmulid;
for (int i=1+(out_mode==1); i<order-1; i++){
if (out_mode != i+1)
buf_val *= op_vecs[i+1][inds[i]];
}
// inner product of the fiber's values with the mode-0 factor entries
dtype buf_val2 = this->taddid;
for (int64_t i=idx; i<idx+fiber_nnz; i++){
int64_t kk = (tsr_data[i].k%lens[0])/phys_phase[0];
buf_val2 += tsr_data[i].d*op_vecs[0][kk];
}
out_vec[ok] += buf_val*buf_val2;
}
idx += fiber_nnz;
}
free(inds);
}
};
/**
* @}
*/
}
namespace CTF {
// Declarations of explicit specializations of Semiring kernels; the
// definitions (typically BLAS/MKL-backed) live in the corresponding .cxx.
// TODO: add these with manual loop
// template <>
// bool CTF::Semiring<float,1>::is_last_col_zero(int64_t m, int64_t n, float const * M) const;
template <>
bool CTF::Semiring<double,1>::is_last_col_zero(int64_t m, int64_t n, double const * M) const;
// template <>
// bool CTF::Semiring<std::complex<float>,0>::is_last_col_zero(int64_t m, int64_t n, std::complex<float> const * M) const;
// template <>
// void CTF::Semiring<std::complex<double>,0>::is_last_col_zero(int64_t m, int64_t n, std::complex<double> const * M) const;
template <>
void CTF::Semiring<float,1>::default_csrmm(int,int,int,float,float const *,int const *,int const *,int,float const *,float,float *) const;
template <>
void CTF::Semiring<double,1>::default_csrmm(int,int,int,double,double const *,int const *,int const *,int,double const *,double,double *) const;
template <>
void CTF::Semiring<std::complex<float>,0>::default_csrmm(int,int,int,std::complex<float>,std::complex<float> const *,int const *,int const *,int,std::complex<float> const *,std::complex<float>,std::complex<float> *) const;
template <>
void CTF::Semiring<std::complex<double>,0>::default_csrmm(int,int,int,std::complex<double>,std::complex<double> const *,int const *,int const *,int,std::complex<double> const *,std::complex<double>,std::complex<double> *) const;
template <>
void CTF::Semiring<float,1>::default_csrmultd(int,int,int,float,float const *,int const *,int const *,int,float const *,int const *,int const *,int,float,float *) const;
template <>
void CTF::Semiring<double,1>::default_csrmultd(int,int,int,double,double const *,int const *,int const *,int,double const *,int const *,int const *,int,double,double *) const;
template <>
void CTF::Semiring<std::complex<float>,0>::default_csrmultd(int,int,int,std::complex<float>,std::complex<float> const *,int const *,int const *,int,std::complex<float> const *,int const *,int const *,int,std::complex<float>,std::complex<float> *) const;
template <>
void CTF::Semiring<std::complex<double>,0>::default_csrmultd(int,int,int,std::complex<double>,std::complex<double> const *,int const *,int const *,int,std::complex<double> const *,int const *,int const *,int,std::complex<double>,std::complex<double> *) const;
template <>
void CTF::Semiring<float,1>::default_csrmultcsr(int,int,int,float,float const *,int const *,int const *,int,float const *,int const *,int const *,int,float,char *&) const;
template <>
void CTF::Semiring<double,1>::default_csrmultcsr(int,int,int,double,double const *,int const *,int const *,int,double const *,int const *,int const *,int,double,char *&) const;
template <>
void CTF::Semiring<std::complex<float>,0>::default_csrmultcsr(int,int,int,std::complex<float>,std::complex<float> const *,int const *,int const *,int,std::complex<float> const *,int const *,int const *,int,std::complex<float>,char *&) const;
template <>
void CTF::Semiring<std::complex<double>,0>::default_csrmultcsr(int,int,int,std::complex<double>,std::complex<double> const *,int const *,int const *,int,std::complex<double> const *,int const *,int const *,int,std::complex<double>,char *&) const;
template<>
bool CTF::Semiring<double,1>::is_offloadable() const;
template<>
bool CTF::Semiring<float,1>::is_offloadable() const;
template<>
bool CTF::Semiring<std::complex<float>,0>::is_offloadable() const;
template<>
bool CTF::Semiring<std::complex<double>,0>::is_offloadable() const;
template<>
void CTF::Semiring<double,1>::offload_gemm(char,char,int,int,int,char const *,char const *,char const *,char const *,char *) const;
// BUG FIX: this declaration duplicated the <double,1> specialization above;
// following the pattern of is_offloadable it must declare the float variant.
template<>
void CTF::Semiring<float,1>::offload_gemm(char,char,int,int,int,char const *,char const *,char const *,char const *,char *) const;
template<>
void CTF::Semiring<std::complex<float>,0>::offload_gemm(char,char,int,int,int,char const *,char const *,char const *,char const *,char *) const;
template<>
void CTF::Semiring<std::complex<double>,0>::offload_gemm(char,char,int,int,int,char const *,char const *,char const *,char const *,char *) const;
}
#include "ring.h"
#endif
|
detector.c | #include "darknet.h"
#include <stdio.h>
#ifdef WIN32
#include "unistd\dirent.h"
#else
#include <dirent.h>
#endif
#ifdef WIN32
#include "unistd\unistd.h"
#else
#include <unistd.h>
#endif
#include <sys/stat.h>
#define class temp
struct stat st;
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
/*
 * Train a detection network on the "train" image list from the data cfg,
 * optionally replicated across ngpus GPUs, periodically saving snapshots to
 * the "backup" directory.
 *
 * datacfg    - data cfg file (keys: "train" image list, "backup" directory)
 * cfgfile    - network description file
 * weightfile - initial weights to load (may be NULL)
 * gpus       - array of requested GPU indices (one replica per entry)
 * ngpus      - number of entries in gpus
 * clear      - forwarded to load_network (presumably resets training
 *              progress counters -- verify against load_network)
 *
 * NOTE(review): the gpus array itself is never read in this function;
 * opencl_set_device(i) uses the loop index, so specific -gpus selections
 * beyond their count are ignored -- verify intent.
 */
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
/* One network replica per GPU, all seeded identically so they start with
 * the same random state. */
network **nets = (network**)calloc(ngpus, sizeof(network*));
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
if(gpu_index >= 0){
opencl_set_device(i);
}
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
/* Images consumed per optimizer step across all replicas. */
int imgs = net->batch * net->subdivisions * ngpus;
#ifndef BENCHMARK
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
#endif
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
/* Asynchronous data-loading pipeline: `buffer` is filled by loader threads
 * while the previous batch trains. */
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
pthread_t load_thread = load_data(args);
#ifdef LOSS_ONLY
double time=what_time_is_it_now();
#else
double time;
#endif
int count = 0;
/* count is always 0 here, so the starting weights are always saved once
 * before training begins. */
if(count == 0) {
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus != 1) sync_nets(nets, ngpus, 0);
}
#endif
char buff[256];
sprintf(buff, "%s/%s.start.conv.weights", backup_directory, base);
save_weights(net, buff);
}
int max_size = ((net->w + net->h)/2);
//while(i*imgs < N*120){
while(get_current_batch(net) < net->max_batches){
/* Multi-scale training: every 10th batch pick a new input resolution. */
if(l.random && count++%10 == 0){
#if !defined(BENCHMARK) && !defined(LOSS_ONLY)
printf("Resizing\n");
#endif
int dim = max_size - ((rand() % 8) * 32);
#ifdef BENCHMARK
dim = 608;
#endif
if (get_current_batch(net)+200 > net->max_batches) dim = max_size;
if (net->w < dim || net->h < dim) dim = max_size;
#if !defined(BENCHMARK) && !defined(LOSS_ONLY)
printf("%d\n", dim);
#endif
args.w = dim;
args.h = dim;
/* The batch loaded at the old resolution is discarded and reloading is
 * restarted at the new one. */
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for(i = 0; i < ngpus; ++i){
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
#ifndef LOSS_ONLY
time=what_time_is_it_now();
#endif
/* Take the prefetched batch and immediately start loading the next one. */
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
#ifndef LOSS_ONLY
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
#endif
#ifndef LOSS_ONLY
time=what_time_is_it_now();
#endif
float loss = 0;
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus == 1) {
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
}
else {
loss = train_network(net, train);
}
#else
loss = train_network(net, train);
#endif
/* Exponential moving average of the loss for the progress line. */
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
#ifdef LOSS_ONLY
printf("%lf\t%f\n", what_time_is_it_now()-time, loss);
#else
printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
#endif
/* loss != loss detects NaN. */
#ifdef GPU
if (loss != loss && gpu_index >= 0) {
/* NOTE(review): gpusg/ngpusg are not defined in this function --
 * presumably globals defined elsewhere in the project; verify. */
opencl_deinit(gpusg, ngpusg);
}
#endif
if(loss != loss) { printf("NaN LOSS detected! No possible to continue!\n"); exit(-7); }
/* Rolling checkpoint every 100 batches. */
if(i%100==0){
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus != 1) sync_nets(nets, ngpus, 0);
}
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
/* Numbered snapshots: every 10000 batches, and every 100 early on. */
if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus != 1) sync_nets(nets, ngpus, 0);
}
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
#ifdef GPU_STATS
opencl_dump_mem_stat();
#endif
#ifdef BENCHMARK
break;
#endif
}
#ifdef GPU
if (gpu_index >= 0) {
if (ngpus != 1) sync_nets(nets, ngpus, 0);
}
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
free(paths);
free(plist);
free(base);
free(nets);
free(options);
}
/*
 * Extract the numeric COCO image id from a file path: parse the digits after
 * the last '_' if present, otherwise after the last '/', otherwise from the
 * start of the string.
 *
 * BUG FIX: previously, a path containing neither '/' nor '_' left p == NULL
 * and atoi(p+1) dereferenced an invalid pointer (undefined behavior).
 */
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if (c) p = c;
    if (!p) return atoi(filename);
    return atoi(p + 1);
}
/*
 * Append one COCO-format JSON record per (box, class) pair with a nonzero
 * score.  Boxes are converted from center/size to [x, y, width, height]
 * pixel form and clipped to the image; every record is followed by ",\n"
 * (the caller rewinds over the final comma before closing the array).
 */
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int image_id = get_coco_image_id(image_path);
    int d, c;
    for (d = 0; d < num_boxes; ++d) {
        box bb = dets[d].bbox;
        float xmin = bb.x - bb.w / 2.;
        float xmax = bb.x + bb.w / 2.;
        float ymin = bb.y - bb.h / 2.;
        float ymax = bb.y + bb.h / 2.;
        /* Clip to the image rectangle. */
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;
        for (c = 0; c < classes; ++c) {
            float score = dets[d].prob[c];
            if (score) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n",
                    image_id, coco_ids[c], xmin, ymin, xmax - xmin, ymax - ymin, score);
        }
    }
}
/*
 * Write detections in VOC per-class result format: one line per (box, class)
 * pair with a nonzero score, "<id> <score> <xmin> <ymin> <xmax> <ymax>",
 * into the class's own file fps[class].  Coordinates are shifted to 1-based
 * pixels and clipped to the image.
 */
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    int d, c;
    for (d = 0; d < total; ++d) {
        box bb = dets[d].bbox;
        float left   = bb.x - bb.w / 2. + 1;
        float right  = bb.x + bb.w / 2. + 1;
        float top    = bb.y - bb.h / 2. + 1;
        float bottom = bb.y + bb.h / 2. + 1;
        /* Clip to the 1-based image rectangle. */
        left   = left   < 1 ? 1 : left;
        top    = top    < 1 ? 1 : top;
        right  = right  > w ? w : right;
        bottom = bottom > h ? h : bottom;
        for (c = 0; c < classes; ++c) {
            float score = dets[d].prob[c];
            if (score) fprintf(fps[c], "%s %f %f %f %f %f\n", id, score,
                    left, top, right, bottom);
        }
    }
}
/*
 * Write detections in ImageNet submission format: one line per (box, class)
 * pair with a nonzero score, "<image id> <1-based class> <score> <xmin>
 * <ymin> <xmax> <ymax>".  Boxes are clipped to the image bounds.
 * (Avoids the original's `class` identifier, which only compiled via the
 * file's `#define class temp` hack.)
 */
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    int d, c;
    for (d = 0; d < total; ++d) {
        box bb = dets[d].bbox;
        float xmin = bb.x - bb.w / 2.;
        float xmax = bb.x + bb.w / 2.;
        float ymin = bb.y - bb.h / 2.;
        float ymax = bb.y + bb.h / 2.;
        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;
        for (c = 0; c < classes; ++c) {
            float score = dets[d].prob[c];
            if (score) fprintf(fp, "%d %d %f %f %f %f %f\n", id, c + 1, score,
                    xmin, ymin, xmax, ymax);
        }
    }
}
/*
 * Validate a detector over the "valid" image list with horizontal-flip
 * test-time augmentation: each image and its mirror are stacked into one
 * batch-of-2 input before prediction.  Results are written in COCO JSON,
 * ImageNet, or per-class VOC text format depending on the data cfg "eval"
 * key.
 *
 * datacfg    - data cfg (keys: "valid", "names", "results", "map", "eval")
 * cfgfile    - network description
 * weightfile - trained weights
 * outfile    - optional output file/prefix override (may be NULL)
 */
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 2);   /* batch of 2: original + flipped copy */
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));
    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);
    layer l = net->layers[net->n-1];
    int classes = l.classes;
    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        /* VOC style: one result file per class. */
        if(!outfile) outfile = "comp4_det_test_";
        fps = (FILE**)calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }
    int m = plist->size;
    int i=0;
    int t;
    float thresh = .005;
    float nms = .45;
    /* Pipeline: nthreads loader threads prefetch the next images while the
     * current ones are predicted. */
    int nthreads = 4;
    image *val = (image*)calloc(nthreads, sizeof(image));
    image *val_resized = (image*)calloc(nthreads, sizeof(image));
    image *buf = (image*)calloc(nthreads, sizeof(image));
    image *buf_resized = (image*)calloc(nthreads, sizeof(image));
    pthread_t *thr = (pthread_t*)calloc(nthreads, sizeof(pthread_t));
    image input = make_image(net->w, net->h, net->c*2);
    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;
    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            /* First half of the input is the image, second half its mirror. */
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int num = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
            /* BUG FIX: NMS was previously invoked with a separate `nboxes`
             * counter that was always 0, so suppression never ran; use the
             * real detection count. */
            if (nms) {
                if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, num, l.classes, nms);
                else diounms_sort_y4(dets, num, l.classes, nms, l.nms_kind, l.beta_nms);
            }
            if (coco){
                print_cocos(fp, path, dets, num, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        /* Rewind over the trailing ",\n" so the JSON array closes cleanly. */
        fseek(fp, -2, SEEK_CUR);
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
    /* Release pipeline buffers (previously leaked). */
    free_image(input);
    free(val);
    free(val_resized);
    free(buf);
    free(buf_resized);
    free(thr);
}
/*
 * Validate a detector over the "valid" image list and write results in COCO
 * JSON, ImageNet, or per-class VOC text format depending on the data cfg
 * "eval" key.  Uses a small thread pool to prefetch images while the
 * current batch is predicted.
 *
 * datacfg    - data cfg (keys: "valid", "names", "results", "map", "eval")
 * cfgfile    - network description
 * weightfile - trained weights
 * outfile    - optional output file/prefix override (may be NULL)
 *
 * NOTE(review): the val/val_resized/buf/buf_resized/thr arrays are never
 * freed before return -- minor one-shot leak.
 */
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
/* Choose the output format from the "eval" key. */
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
/* VOC style: one result file per class. */
if(!outfile) outfile = "comp4_det_test_";
fps = (FILE**)calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
/* Pipeline: nthreads loader threads prefetch the next images while the
 * current ones are predicted. */
int nthreads = 4;
image *val = (image*)calloc(nthreads, sizeof(image));
image *val_resized = (image*)calloc(nthreads, sizeof(image));
image *buf = (image*)calloc(nthreads, sizeof(image));
image *buf_resized = (image*)calloc(nthreads, sizeof(image));
pthread_t *thr = (pthread_t*)calloc(nthreads, sizeof(pthread_t));
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
/* Kick off the first round of asynchronous loads. */
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
/* Collect the images loaded in the previous round. */
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
/* Start loading the next round. */
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
/* Predict and record detections for the collected images. */
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
float *X = val_resized[t].data;
network_predict(net, X);
int w = val[t].w;
int h = val[t].h;
int nboxes = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
if (coco){
print_cocos(fp, path, dets, nboxes, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
} else {
print_detector_detections(fps, id, dets, nboxes, classes, w, h);
}
free_detections(dets, nboxes);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
/* Rewind over the trailing ",\n" so the JSON array closes cleanly. */
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector_recall(char *datacfg, char *cfgfile, char *weightfile)
{
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
//list *plist = get_paths("data/coco_val_5k.list");
list *options = read_data_cfg(datacfg);
char *test_images = option_find_str(options, "test", "data/test.list");
list *plist = get_paths(test_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int j, k;
int m = plist->size;
int i=0;
float thresh = .001;
float iou_thresh = .5;
float nms = .4;
int total = 0;
int correct = 0;
int proposals = 0;
float avg_iou = 0;
for(i = 0; i < m; ++i){
char *path = paths[i];
image orig = load_image_color(path, 0, 0);
image sized = resize_image(orig, net->w, net->h);
char *id = basecfg(path);
network_predict(net, sized.data);
int nboxes = 0;
detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int num_labels = 0;
box_label *truth = read_boxes(labelpath, &num_labels);
for(k = 0; k < nboxes; ++k){
if(dets[k].objectness > thresh){
++proposals;
}
}
for (j = 0; j < num_labels; ++j) {
++total;
box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
float best_iou = 0;
for(k = 0; k < l.w*l.h*l.n; ++k){
float iou = box_iou(dets[k].bbox, t);
if(dets[k].objectness > thresh && iou > best_iou){
best_iou = iou;
}
}
avg_iou += best_iou;
if(best_iou > iou_thresh){
++correct;
}
}
fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
free(id);
free_image(orig);
free_image(sized);
}
}
/*
 * Run detection on a single image file, or repeatedly prompt for image
 * paths on stdin when filename is NULL.  Draws the detections and saves
 * (and, with OpenCV, displays) the annotated image.
 *
 * datacfg     - data cfg providing the class-name list
 * cfgfile     - network description
 * weightfile  - trained weights
 * filename    - image to process, or NULL for interactive mode
 * thresh      - detection probability threshold
 * hier_thresh - hierarchical threshold forwarded to get_network_boxes
 * outfile     - output image name, or NULL for "predictions"
 * fullscreen  - unused here (kept for interface compatibility)
 */
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
    list *options = read_data_cfg(datacfg);
    char *name_list = option_find_str(options, "names", "data/names.list");
    char **names = get_labels(name_list);
    image **alphabet = load_alphabet();
    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    srand(2222222);
    double time;
    char buff[256];
    char *input = buff;
    float nms=.45;
    while(1){
        if(filename){
            /* snprintf guarantees NUL-termination, unlike the previous
             * strncpy for paths of 256+ characters. */
            snprintf(input, 256, "%s", filename);
        } else {
            printf("Enter Image Path: ");
            fflush(stdout);
            input = fgets(input, 256, stdin);
            if(!input) return;
            strtok(input, "\n");
        }
        image im = load_image_color(input,0,0);
        /* Only letterbox when sizes differ; otherwise `sized` aliases `im`
         * and must not be freed twice (see the `resize` guard below). */
        int resize = im.w != net->w || im.h != net->h;
        image sized = resize ? letterbox_image(im, net->w, net->h) : im;
        //image sized = resize_image(im, net->w, net->h);
        //image sized2 = resize_max(im, net->w);
        //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
        //resize_network(net, sized.w, sized.h);
        layer l = net->layers[net->n-1];
        float *X = sized.data;
        time=what_time_is_it_now();
        if (l.type == DETECTION || l.type == REGION || l.type == YOLO) {
            network_predict(net, X);
        }
        if (l.type == YOLO4) {
            network_predict_y4(net, X);
        }
        /* BUG FIX: `time` holds wall-clock seconds from
         * what_time_is_it_now(); the old code subtracted it from clock()
         * ticks and divided by CLOCKS_PER_SEC, printing a meaningless
         * duration.  Report the wall-clock delta instead. */
        printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now() - time);
        int nboxes = 0;
        detection *dets = 0;
        if (l.type == DETECTION || l.type == REGION || l.type == YOLO) {
            dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        }
        if (l.type == YOLO4) {
            dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
        }
        //printf("%d\n", nboxes);
        if (nms) {
            if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
            else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
        }
        if (l.type == DETECTION || l.type == REGION || l.type == YOLO) {
            draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0);
        }
        if (l.type == YOLO4) {
            draw_detections_v3(im, dets, nboxes, thresh, names, alphabet, l.classes, 0);
        }
        free_detections(dets, nboxes);
        if(outfile){
            save_image(im, outfile);
        }
        else{
            save_image(im, "predictions");
#ifdef OPENCV
            show_image(im, "predictions", 0);
#endif
        }
        free_image(im);
        if (resize) free_image(sized);
        if (filename) break;
    }
}
/*
 * Return 1 if fname contains the substring ext and the file can be opened
 * for reading; 0 otherwise.
 */
int exists(const char *fname, const char* ext)
{
    if (strstr(fname, ext) == NULL)
        return 0;
    FILE *f = fopen(fname, "r");
    if (f == NULL)
        return 0;
    fclose(f);
    return 1;
}
/*
 * Return 1 if dirname cannot be opened or contains no entries besides
 * "." and ".." (i.e. at most two entries); 0 otherwise.
 */
int empty(char *dirname) {
    DIR *dir = opendir(dirname);
    if (dir == NULL) /* not a directory or doesn't exist */
        return 1;
    int count = 0;
    struct dirent *entry;
    /* Stop as soon as a third entry proves the directory is non-empty. */
    while ((entry = readdir(dir)) != NULL && ++count <= 2)
        ;
    closedir(dir);
    return count <= 2;
}
/*
 * Watch in_dir for incoming .jpg files, run detection on each, and delete
 * the input file afterwards; loops forever.  Output names are derived from
 * out_dir but note that in this version nothing is written there (only
 * draw_detections is called; the annotated image is not saved).
 *
 * datacfg     - data cfg providing the class-name list
 * cfgfile     - network description
 * weightfile  - trained weights
 * in_dir      - directory polled for new .jpg files
 * thresh      - detection probability threshold
 * hier_thresh - hierarchical threshold forwarded to get_network_boxes
 * out_dir     - output directory used to build ffoname (currently unused
 *               beyond name construction)
 *
 * NOTE(review): uses the file-scope `struct stat st` global; the fixed-size
 * strcpy/strcat into fname/ffname/ffoname assume directory entries and
 * paths fit the buffers -- verify inputs are trusted.
 */
void test_ddetector(char *datacfg, char *cfgfile, char *weightfile, char *in_dir, float thresh, float hier_thresh, char *out_dir)
{
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
image **alphabet = load_alphabet();
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
double time;
char buff[256];
char *input = buff;
float nms=.45;
char fname[256];
char ffname[1024];
char ffoname[1024];
struct dirent *de = NULL;
while(1) {
/* Busy-wait (with short sleeps) until the watched directory has entries. */
while (empty(in_dir)) {
usleep(100);
}
DIR *dr = opendir(in_dir);
while ((de = readdir(dr)) != NULL) {
printf("%s\n", de->d_name);
/* Build the full input path in_dir/<entry>. */
strcpy(fname, de->d_name);
strcpy(ffname, in_dir);
strcat(ffname, "/");
strcat(ffname, fname);
if (!exists(ffname, ".jpg")) continue;
if (1) {
/* Build the output name out_dir/<entry> without the ".jpg" suffix. */
strcpy(ffoname, out_dir);
strcat(ffoname, "/");
strcat(ffoname, fname);
int len = strlen(ffoname) - 4;
ffoname[len] = '\0';
strncpy(input, ffname, 256);
} else {
/* Dead interactive branch kept from test_detector. */
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if (!input) continue;
strtok(input, "\n");
}
/* Poll until the file size stops changing, so a file still being
 * written by the producer is not read half-complete. */
off_t size = 0;
off_t offs = 0;
do {
offs = size;
stat(input, &st);
size = st.st_size;
if (offs != size) usleep(10); else break;
} while (1);
image im = load_image_color(input, 0, 0);
/* Only letterbox when sizes differ; otherwise sized aliases im. */
int resize = im.w != net->w || im.h != net->h;
image sized = resize ? letterbox_image(im, net->w, net->h) : im;
//image sized = resize_image(im, net->w, net->h);
//image sized2 = resize_max(im, net->w);
//image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
//resize_network(net, sized.w, sized.h);
layer l = net->layers[net->n - 1];
float *X = sized.data;
time = what_time_is_it_now();
network_predict(net, X);
printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now() - time);
int nboxes = 0;
detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
//printf("%d\n", nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0);
free_detections(dets, nboxes);
free_image(im);
if (resize) free_image(sized);
// if (filename) break;
/* Consume the input: processed files are removed from the directory. */
remove(input);
}
closedir(dr);
}
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream_cv(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream_cv(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream_cv(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream_cv(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) {
if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms);
else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms);
}
}
*/
/*
 * Command-line dispatcher for the detector subcommands
 * (test / train / valid / valid2 / recall / demo).
 * Expected argv layout: argv[2]=subcommand, argv[3]=data cfg,
 * argv[4]=network cfg, argv[5]=weights (optional), argv[6]=input (optional).
 */
void run_detector(int argc, char **argv)
{
    char *prefix = find_char_arg(argc, argv, "-prefix", 0);
    float thresh = find_float_arg(argc, argv, "-thresh", .5);
    float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
    int cam_index = find_int_arg(argc, argv, "-c", 0);
    int frame_skip = find_int_arg(argc, argv, "-s", 0);
    int avg = find_int_arg(argc, argv, "-avg", 3);
    /* BUG FIX: both the data cfg (argv[3]) and network cfg (argv[4]) are
     * mandatory, so fewer than 5 arguments must be rejected; the previous
     * `argc < 4` check allowed cfg == NULL through to load_network(). */
    if(argc < 5){
        fprintf(stderr, "usage: %s %s [train/test/valid] [datacfg] [cfg] [weights (optional)]\n", argv[0], argv[1]);
        return;
    }
    char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
    char *outfile = find_char_arg(argc, argv, "-out", 0);
    int *gpus = 0;
    int gpu = 0;
    int ngpus = 0;
    int gpus_allocated = 0;   /* tracks whether gpus points at heap memory */
    if(gpu_list){
        printf("%s\n", gpu_list);
        int len = strlen(gpu_list);
        ngpus = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (gpu_list[i] == ',') ++ngpus;
        }
        gpus = (int*)calloc(ngpus, sizeof(int));
        gpus_allocated = 1;
        for(i = 0; i < ngpus; ++i){
            gpus[i] = atoi(gpu_list);
            /* Advance past the next comma; avoids forming strchr(...)+1 from
             * a NULL result on the final element as the old code did. */
            char *comma = strchr(gpu_list, ',');
            if (!comma) break;
            gpu_list = comma + 1;
        }
    } else {
        gpu = gpu_index;
        gpus = &gpu;
        ngpus = 1;
    }
    int clear = find_arg(argc, argv, "-clear");
    int fullscreen = find_arg(argc, argv, "-fullscreen");
    int width = find_int_arg(argc, argv, "-w", 0);
    int height = find_int_arg(argc, argv, "-h", 0);
    int fps = find_int_arg(argc, argv, "-fps", 0);
    //int class = find_int_arg(argc, argv, "-class", 0);
    char *datacfg = argv[3];
    char *cfg = argv[4];
    char *weights = (argc > 5) ? argv[5] : 0;
    char *filename = (argc > 6) ? argv[6]: 0;
    if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
    else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
    else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
    else if(0==strcmp(argv[2], "recall")) validate_detector_recall(datacfg, cfg, weights);
    else if(0==strcmp(argv[2], "demo")) {
        list *options = read_data_cfg(datacfg);
        int classes = option_find_int(options, "classes", 20);
        char *name_list = option_find_str(options, "names", "data/names.list");
        char **names = get_labels(name_list);
        demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
    }
    //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
    //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
    if (gpus_allocated) free(gpus);   /* plug the -gpus list leak */
}
#undef class |
laplace2d-03.c | #include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Grid dimensions for the Jacobi relaxation (rows x columns). */
#define NN 4096
#define NM 4096
/* Current solution grid.  File-scope so OpenMP target data clauses can
 * map the whole statically-sized array. */
//__attribute__ ((target(mic)))
double A[NN][NM];
/* Scratch grid holding the next Jacobi sweep before it is copied back. */
//__attribute__ ((target(mic)))
double Anew[NN][NM];
/* Jacobi relaxation on an NN x NM grid, offloaded via OpenMP target
 * directives.  Iterates until the largest per-cell change drops below
 * tol or iter_max sweeps complete, then reports elapsed wall time. */
int main(int argc, char** argv) {
    const int rows = NN;
    const int cols = NM;
    const int iter_max = 200;
    const double tol = 1.0e-6;
    double error = 1.0;

    /* Zero both grids, then pin the left column to 1.0 as the fixed
     * boundary condition. */
    memset(A, 0, rows * cols * sizeof(double));
    memset(Anew, 0, rows * cols * sizeof(double));
    for (int r = 0; r < rows; r++) {
        A[r][0] = 1.0;
        Anew[r][0] = 1.0;
    }

    printf("Jacobi relaxation Calculation: %d x %d mesh\n", rows, cols);

    double t_begin = omp_get_wtime();
    int iter = 0;

    /* Keep both grids resident on the device for the whole solve: Anew
     * is device-only scratch (alloc), A is copied in and back out. */
#pragma omp target data map(alloc:Anew) map(A)
    while (error > tol && iter < iter_max) {
        error = 0.0;

        /* Stencil sweep: each interior cell becomes the average of its
         * four neighbours; track the largest change for convergence. */
#pragma omp target teams distribute parallel for reduction(max:error)
        for (int r = 1; r < rows - 1; r++) {
            for (int c = 1; c < cols - 1; c++) {
                Anew[r][c] = 0.25 * (A[r][c+1] + A[r][c-1] + A[r-1][c] + A[r+1][c]);
                error = fmax(error, fabs(Anew[r][c] - A[r][c]));
            }
        }

        /* Commit the sweep back into the main grid. */
#pragma omp target teams distribute parallel for
        for (int r = 1; r < rows - 1; r++) {
            for (int c = 1; c < cols - 1; c++) {
                A[r][c] = Anew[r][c];
            }
        }

        if (iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);
        iter++;
    }

    double t_end = omp_get_wtime();
    printf(" total: %f s\n", (t_end - t_begin));
    return 0;
}
|
GB_unaryop__ainv_int8_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int8_bool
// op(A') function: GB_tran__ainv_int8_bool
// C type: int8_t
// A type: bool
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = op (cast (Ax)): apply the unary operator z = -x entrywise,
 * casting each bool aij to int8_t first (see GB_CAST_OP above).
 * Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE,
 * GrB_SUCCESS otherwise. */
GrB_Info GB_unop__ainv_int8_bool
(
    int8_t *Cx,             // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,            // number of entries in Ax
    int nthreads            // number of OpenMP threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = -(int8_t) Ax [p]
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, typecast bool->int8_t, and apply the
 * unary operator z = -x.  The loop body is supplied by the included
 * template GB_unaryop_transpose.c (phase 2 of 2); the macros defined
 * above (GB_GETA, GB_CASTING, GB_OP, ...) parameterize it for this
 * type/operator combination.  Returns GrB_NO_VALUE when disabled. */
GrB_Info GB_tran__ainv_int8_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
munit.c | /* Copyright (c) 2013-2018 Evan Nemerson <evan@nemerson.com>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*** Configuration ***/
/* This is just where the output from the test goes. It's really just
 * meant to let you choose stdout or stderr, but if anyone really wants
* to direct it to a file let me know, it would be fairly easy to
* support. */
#if !defined(MUNIT_OUTPUT_FILE)
# define MUNIT_OUTPUT_FILE stdout
#endif
/* This is a bit more useful; it tells µnit how to format the seconds in
* timed tests. If your tests run for longer you might want to reduce
* it, and if your computer is really fast and your tests are tiny you
* can increase it. */
#if !defined(MUNIT_TEST_TIME_FORMAT)
# define MUNIT_TEST_TIME_FORMAT "0.8f"
#endif
/* If you have long test names you might want to consider bumping
* this. The result information takes 43 characters. */
#if !defined(MUNIT_TEST_NAME_LEN)
# define MUNIT_TEST_NAME_LEN 37
#endif
/* If you don't like the timing information, you can disable it by
* defining MUNIT_DISABLE_TIMING. */
#if !defined(MUNIT_DISABLE_TIMING)
# define MUNIT_ENABLE_TIMING
#endif
/*** End configuration ***/
#if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L)
# undef _POSIX_C_SOURCE
#endif
#if !defined(_POSIX_C_SOURCE)
# define _POSIX_C_SOURCE 200809L
#endif
/* Solaris freaks out if you try to use a POSIX or SUS standard without
* the "right" C standard. */
#if defined(_XOPEN_SOURCE)
# undef _XOPEN_SOURCE
#endif
#if defined(__STDC_VERSION__)
# if __STDC_VERSION__ >= 201112L
# define _XOPEN_SOURCE 700
# elif __STDC_VERSION__ >= 199901L
# define _XOPEN_SOURCE 600
# endif
#endif
/* Because, according to Microsoft, POSIX is deprecated. You've got
* to appreciate the chutzpah. */
#if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE)
# define _CRT_NONSTDC_NO_DEPRECATE
#endif
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# include <stdbool.h>
#elif defined(_WIN32)
/* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */
#endif
#include <limits.h>
#include <time.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <setjmp.h>
#if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32)
#define MUNIT_NL_LANGINFO
#include <locale.h>
#include <langinfo.h>
#include <strings.h>
#endif
#if !defined(_WIN32)
# include <unistd.h>
# include <sys/types.h>
# include <sys/wait.h>
#else
# include <windows.h>
# include <io.h>
# include <fcntl.h>
# if !defined(STDERR_FILENO)
# define STDERR_FILENO _fileno(stderr)
# endif
#endif
#include "munit.h"
#define MUNIT_STRINGIFY(x) #x
#define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x)
#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
# define MUNIT_THREAD_LOCAL __thread
#elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || defined(_Thread_local)
# define MUNIT_THREAD_LOCAL _Thread_local
#elif defined(_WIN32)
# define MUNIT_THREAD_LOCAL __declspec(thread)
#endif
/* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... }
* while (0)', or 'do { ... } while (1)'. I'm pretty sure nobody
* at Microsoft compiles with /W4. */
#if defined(_MSC_VER) && (_MSC_VER <= 1800)
#pragma warning(disable: 4127)
#endif
#if defined(_WIN32) || defined(__EMSCRIPTEN__)
# define MUNIT_NO_FORK
#endif
#if defined(__EMSCRIPTEN__)
# define MUNIT_NO_BUFFER
#endif
/*** Logging ***/
static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO;
static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR;
#if defined(MUNIT_THREAD_LOCAL)
static MUNIT_THREAD_LOCAL munit_bool munit_error_jmp_buf_valid = 0;
static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf;
#endif
/* At certain warning levels, mingw will trigger warnings about
* suggesting the format attribute, which we've explicity *not* set
* because it will then choke on our attempts to use the MS-specific
* I64 modifier for size_t (which we have to use since MSVC doesn't
* support the C99 z modifier). */
#if defined(__MINGW32__) || defined(__MINGW64__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif
/* Core formatted-logging routine: prints "<Level>: [file:line: ]msg\n"
 * to `fp` for messages at or above the visible log level.  An invalid
 * level is itself reported as an error (with attribution) and nothing
 * else is printed. */
MUNIT_PRINTF(5,0)
static void
munit_logf_exv(MunitLogLevel level, FILE* fp, const char* filename, int line, const char* format, va_list ap) {
  const char* label;

  if (level < munit_log_level_visible)
    return;

  /* Map the level to its printable label. */
  switch (level) {
    case MUNIT_LOG_DEBUG:   label = "Debug";   break;
    case MUNIT_LOG_INFO:    label = "Info";    break;
    case MUNIT_LOG_WARNING: label = "Warning"; break;
    case MUNIT_LOG_ERROR:   label = "Error";   break;
    default:
      munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level);
      return;
  }

  fputs(label, fp);
  fputs(": ", fp);
  if (filename != NULL)
    fprintf(fp, "%s:%d: ", filename, line);
  vfprintf(fp, format, ap);
  fputc('\n', fp);
}
/* printf-style logging helper used internally by the runner; writes to
 * `fp` with no file/line attribution. */
MUNIT_PRINTF(3,4)
static void
munit_logf_internal(MunitLogLevel level, FILE* fp, const char* format, ...) {
  va_list args;

  va_start(args, format);
  munit_logf_exv(level, fp, NULL, 0, format, args);
  va_end(args);
}
/* Log a pre-formatted message at `level` to `fp`; routes through the
 * printf-style path with "%s" so `message` is never treated as a
 * format string. */
static void
munit_log_internal(MunitLogLevel level, FILE* fp, const char* message) {
  munit_logf_internal(level, fp, "%s", message);
}
/* Public logging entry point with explicit file/line attribution.
 * Messages at or above munit_log_level_fatal abort the current test:
 * via longjmp back into the harness when a jump buffer is armed (so
 * the runner records a failure), otherwise via abort(). */
void
munit_logf_ex(MunitLogLevel level, const char* filename, int line, const char* format, ...) {
  va_list ap;
  va_start(ap, format);
  munit_logf_exv(level, stderr, filename, line, format, ap);
  va_end(ap);
  if (level >= munit_log_level_fatal) {
#if defined(MUNIT_THREAD_LOCAL)
    if (munit_error_jmp_buf_valid)
      longjmp(munit_error_jmp_buf, 1);
#endif
    abort();
  }
}
/* Report a test failure at filename:line and unwind: longjmp back into
 * the harness when the jump buffer is armed, abort() otherwise.  Unlike
 * munit_logf_ex this is unconditionally fatal. */
void
munit_errorf_ex(const char* filename, int line, const char* format, ...) {
  va_list ap;
  va_start(ap, format);
  munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap);
  va_end(ap);
#if defined(MUNIT_THREAD_LOCAL)
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}
#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic pop
#endif
#if !defined(MUNIT_STRERROR_LEN)
# define MUNIT_STRERROR_LEN 80
#endif
/* Log `msg` together with a textual description of the current errno,
 * e.g. "msg: No such file or directory (2)".  Prefers the reentrant
 * strerror_r (POSIX) / strerror_s (Windows); falls back to plain
 * strerror() where neither is available. */
static void
munit_log_errno(MunitLogLevel level, FILE* fp, const char* msg) {
#if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API))
  munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno);
#else
  char munit_error_str[MUNIT_STRERROR_LEN];
  /* Pre-terminate in case strerror_r/_s fails and leaves the buffer
   * untouched. */
  munit_error_str[0] = '\0';
#if !defined(_WIN32)
  strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN);
#else
  strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno);
#endif
  munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno);
#endif
}
/*** Memory allocation ***/
/* Zero-initialized allocation with file/line-attributed failure
 * reporting.  Returns NULL for size 0; a failed allocation is logged
 * as an error (which is fatal under the default log configuration). */
void*
munit_malloc_ex(const char* filename, int line, size_t size) {
  if (size == 0)
    return NULL;

  void* ptr = calloc(1, size);
  if (MUNIT_UNLIKELY(ptr == NULL))
    munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size);

  return ptr;
}
/*** Timer code ***/
#if defined(MUNIT_ENABLE_TIMING)
#define psnip_uint64_t munit_uint64_t
#define psnip_uint32_t munit_uint32_t
/* Code copied from portable-snippets
* <https://github.com/nemequ/portable-snippets/>. If you need to
* change something, please do it there so we can keep the code in
* sync. */
/* Clocks (v1)
 * Portable Snippets - https://github.com/nemequ/portable-snippets
* Created by Evan Nemerson <evan@nemerson.com>
*
* To the extent possible under law, the authors have waived all
* copyright and related or neighboring rights to this code. For
* details, see the Creative Commons Zero 1.0 Universal license at
* https://creativecommons.org/publicdomain/zero/1.0/
*/
#if !defined(PSNIP_CLOCK_H)
#define PSNIP_CLOCK_H
#if !defined(psnip_uint64_t)
# include "../exact-int/exact-int.h"
#endif
#if !defined(PSNIP_CLOCK_STATIC_INLINE)
# if defined(__GNUC__)
# define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__))
# else
# define PSNIP_CLOCK__COMPILER_ATTRIBUTES
# endif
# define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static
#endif
enum PsnipClockType {
/* This clock provides the current time, in units since 1970-01-01
* 00:00:00 UTC not including leap seconds. In other words, UNIX
* time. Keep in mind that this clock doesn't account for leap
* seconds, and can go backwards (think NTP adjustments). */
PSNIP_CLOCK_TYPE_WALL = 1,
/* The CPU time is a clock which increases only when the current
* process is active (i.e., it doesn't increment while blocking on
* I/O). */
PSNIP_CLOCK_TYPE_CPU = 2,
/* Monotonic time is always running (unlike CPU time), but it only
ever moves forward unless you reboot the system. Things like NTP
adjustments have no effect on this clock. */
PSNIP_CLOCK_TYPE_MONOTONIC = 3
};
struct PsnipClockTimespec {
psnip_uint64_t seconds;
psnip_uint64_t nanoseconds;
};
/* Methods we support: */
#define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1
#define PSNIP_CLOCK_METHOD_TIME 2
#define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3
#define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4
#define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5
#define PSNIP_CLOCK_METHOD_CLOCK 6
#define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7
#define PSNIP_CLOCK_METHOD_GETRUSAGE 8
#define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9
#define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10
#include <assert.h>
#if defined(HEDLEY_UNREACHABLE)
# define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE()
#else
# define PSNIP_CLOCK_UNREACHABLE() assert(0)
#endif
/* Choose an implementation */
/* #undef PSNIP_CLOCK_WALL_METHOD */
/* #undef PSNIP_CLOCK_CPU_METHOD */
/* #undef PSNIP_CLOCK_MONOTONIC_METHOD */
/* We want to be able to detect the libc implementation, so we include
<limits.h> (<features.h> isn't available everywhere). */
#if defined(__unix__) || defined(__unix) || defined(__linux__)
# include <limits.h>
# include <unistd.h>
#endif
#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0)
/* These are known to work without librt. If you know of others
* please let us know so we can add them. */
# if \
(defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \
(defined(__FreeBSD__))
# define PSNIP_CLOCK_HAVE_CLOCK_GETTIME
# elif !defined(PSNIP_CLOCK_NO_LIBRT)
# define PSNIP_CLOCK_HAVE_CLOCK_GETTIME
# endif
#endif
#if defined(_WIN32)
# if !defined(PSNIP_CLOCK_CPU_METHOD)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES
# endif
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
# endif
#endif
#if defined(__MACH__) && !defined(__gnu_hurd__)
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
# endif
#endif
#if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME)
# include <time.h>
# if !defined(PSNIP_CLOCK_WALL_METHOD)
# if defined(CLOCK_REALTIME_PRECISE)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE
# elif !defined(__sun)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME
# endif
# endif
# if !defined(PSNIP_CLOCK_CPU_METHOD)
# if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID
# elif defined(CLOCK_VIRTUAL)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL
# endif
# endif
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
# if defined(CLOCK_MONOTONIC_RAW)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC
# elif defined(CLOCK_MONOTONIC_PRECISE)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE
# elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC
# endif
# endif
#endif
#if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L)
# if !defined(PSNIP_CLOCK_WALL_METHOD)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY
# endif
#endif
#if !defined(PSNIP_CLOCK_WALL_METHOD)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME
#endif
#if !defined(PSNIP_CLOCK_CPU_METHOD)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK
#endif
/* Primarily here for testing. */
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC)
# error No monotonic clock found.
#endif
/* Implementations */
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME))
# include <time.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY))
# include <sys/time.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64))
# include <windows.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE))
# include <sys/time.h>
# include <sys/resource.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME))
# include <CoreServices/CoreServices.h>
# include <mach/mach.h>
# include <mach/mach_time.h>
#endif
/*** Implementations ***/
#define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL))
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME))
/* Ticks-per-second precision of clock `clk_id`, derived from
 * clock_getres().  Returns 0 when the resolution cannot be determined.
 *
 * Fix vs. original: a clock whose resolution is a whole number of
 * seconds reports tv_nsec == 0, which previously caused a division by
 * zero; such clocks now report their (at most) 1 tick per second. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock__clock_getres (clockid_t clk_id) {
  struct timespec res;
  int r;

  r = clock_getres(clk_id, &res);
  if (r != 0)
    return 0;

  if (res.tv_nsec == 0)
    return (res.tv_sec > 0) ? 1 : 0;

  return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res.tv_nsec);
}
/* Read clock `clk_id` into `res`.  Returns 0 on success or -10 when
 * clock_gettime() fails (e.g. the clock id is not supported here). */
PSNIP_CLOCK__FUNCTION int
psnip_clock__clock_gettime (clockid_t clk_id, struct PsnipClockTimespec* res) {
  struct timespec ts;

  if (clock_gettime(clk_id, &ts) != 0)
    return -10;

  res->seconds = (psnip_uint64_t) (ts.tv_sec);
  res->nanoseconds = (psnip_uint64_t) (ts.tv_nsec);

  return 0;
}
#endif
/* Ticks per second of the wall clock selected at preprocessing time:
 * derived from clock_getres() for the clock_gettime method, 10^6 for
 * gettimeofday (microsecond fields), 1 for time(), and 0 when no wall
 * clock is available. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_wall_get_precision (void) {
#if !defined(PSNIP_CLOCK_WALL_METHOD)
  return 0;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL);
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY
  return 1000000;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME
  return 1;
#else
  return 0;
#endif
}
/* Read the wall clock into `res` using whichever method was selected
 * at preprocessing time.  Returns 0 on success, or a negative
 * method-specific error code (-2: no method available). */
PSNIP_CLOCK__FUNCTION int
psnip_clock_wall_get_time (struct PsnipClockTimespec* res) {
  (void) res;  /* unused in the no-method branch */
#if !defined(PSNIP_CLOCK_WALL_METHOD)
  return -2;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res);
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME
  /* time() only has 1-second resolution. */
  res->seconds = time(NULL);
  res->nanoseconds = 0;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY
  struct timeval tv;
  if (gettimeofday(&tv, NULL) != 0)
    return -6;
  res->seconds = tv.tv_sec;
  res->nanoseconds = tv.tv_usec * 1000;  /* microseconds -> nanoseconds */
#else
  return -2;
#endif
  return 0;
}
/* Ticks per second of the per-process CPU clock: clock_getres()-derived
 * for clock_gettime, CLOCKS_PER_SEC for clock(), and 10^7 (100 ns
 * FILETIME units) for GetProcessTimes.  0 when unavailable. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_cpu_get_precision (void) {
#if !defined(PSNIP_CLOCK_CPU_METHOD)
  return 0;
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK
  return CLOCKS_PER_SEC;
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES
  return PSNIP_CLOCK_NSEC_PER_SEC / 100;
#else
  return 0;
#endif
}
/* Read the per-process CPU clock into `res`.  Returns 0 on success or a
 * negative method-specific error code (-2: no method available).
 *
 * Fix vs. original: the getrusage branch read `tv.tv_usec` from a
 * variable that does not exist in that branch; it now reads the
 * microsecond field of usage.ru_utime.  The branch guard also gains the
 * defined() check used by every other branch for consistency. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_cpu_get_time (struct PsnipClockTimespec* res) {
#if !defined(PSNIP_CLOCK_CPU_METHOD)
  (void) res;
  return -2;
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK
  clock_t t = clock();
  if (t == ((clock_t) -1))
    return -5;
  res->seconds = t / CLOCKS_PER_SEC;
  res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES
  FILETIME CreationTime, ExitTime, KernelTime, UserTime;
  LARGE_INTEGER date, adjust;
  if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime))
    return -7;
  /* Convert the 100-ns FILETIME (epoch 1601) to UNIX-epoch seconds:
   * http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */
  date.HighPart = UserTime.dwHighDateTime;
  date.LowPart = UserTime.dwLowDateTime;
  adjust.QuadPart = 11644473600000 * 10000;
  date.QuadPart -= adjust.QuadPart;
  res->seconds = date.QuadPart / 10000000;
  res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) != 0)
    return -8;
  res->seconds = usage.ru_utime.tv_sec;
  res->nanoseconds = usage.ru_utime.tv_usec * 1000;  /* was: tv.tv_usec (undeclared) */
#else
  (void) res;
  return -2;
#endif
  return 0;
}
/* Ticks per second of the monotonic clock.  For Mach the value is
 * derived from the timebase numer/denom ratio (integer division, so
 * this is a nominal approximation); for QueryPerformanceCounter the
 * counter frequency is capped at nanosecond resolution.  0 when no
 * monotonic method is available. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_monotonic_get_precision (void) {
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
  return 0;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
  /* Timebase is queried once and cached; denom is nonzero afterwards. */
  static mach_timebase_info_data_t tbi = { 0, };
  if (tbi.denom == 0)
    mach_timebase_info(&tbi);
  return (psnip_uint32_t) (tbi.numer / tbi.denom);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64
  return 1000;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
  LARGE_INTEGER Frequency;
  QueryPerformanceFrequency(&Frequency);
  return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart);
#else
  return 0;
#endif
}
/* Read the monotonic clock into `res`.  Returns 0 on success or a
 * negative method-specific error code (-2: no method available).
 *
 * Fix vs. original: the GetTickCount64 branch used an undeclared
 * variable `sec` and stored milliseconds into the nanoseconds field;
 * it now stores (msec % 1000) * 10^6.  The final #else also silences
 * the unused-parameter warning like the other fallback branches. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_monotonic_get_time (struct PsnipClockTimespec* res) {
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
  (void) res;
  return -2;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
  psnip_uint64_t nsec = mach_absolute_time();
  static mach_timebase_info_data_t tbi = { 0, };
  if (tbi.denom == 0)
    mach_timebase_info(&tbi);
  nsec *= ((psnip_uint64_t) tbi.numer) / ((psnip_uint64_t) tbi.denom);
  res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC;
  res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
  LARGE_INTEGER t, f;
  if (QueryPerformanceCounter(&t) == 0)
    return -12;
  /* Scale the sub-second remainder of the counter into nanoseconds. */
  QueryPerformanceFrequency(&f);
  res->seconds = t.QuadPart / f.QuadPart;
  res->nanoseconds = t.QuadPart % f.QuadPart;
  if (f.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC)
    res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC;
  else
    res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64
  const ULONGLONG msec = GetTickCount64();
  res->seconds = msec / 1000;
  res->nanoseconds = (msec % 1000) * 1000000;  /* was: sec % 1000 (undeclared, wrong units) */
#else
  (void) res;
  return -2;
#endif
  return 0;
}
/* Returns the number of ticks per second for the specified clock.
* For example, a clock with millisecond precision would return 1000,
* and a clock with 1 second (such as the time() function) would
* return 1.
*
* If the requested clock isn't available, it will return 0.
* Hopefully this will be rare, but if it happens to you please let us
* know so we can work on finding a way to support your system.
*
* Note that different clocks on the same system often have a
* different precisions.
*/
/* Dispatch to the per-clock precision query for `clock_type`.  Falling
 * out of the switch means an invalid enum value was passed;
 * PSNIP_CLOCK_UNREACHABLE() asserts in that case and the trailing
 * return keeps compilers happy in release builds. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_get_precision (enum PsnipClockType clock_type) {
  switch (clock_type) {
    case PSNIP_CLOCK_TYPE_MONOTONIC:
      return psnip_clock_monotonic_get_precision ();
    case PSNIP_CLOCK_TYPE_CPU:
      return psnip_clock_cpu_get_precision ();
    case PSNIP_CLOCK_TYPE_WALL:
      return psnip_clock_wall_get_precision ();
  }

  PSNIP_CLOCK_UNREACHABLE();
  return 0;
}
/* Set the provided timespec to the requested time. Returns 0 on
* success, or a negative value on failure. */
/* Set `res` to the current value of the requested clock.  Returns 0 on
 * success, a negative method-specific code on failure, or -1 for an
 * invalid clock_type. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_get_time (enum PsnipClockType clock_type, struct PsnipClockTimespec* res) {
  assert(res != NULL);

  switch (clock_type) {
    case PSNIP_CLOCK_TYPE_MONOTONIC:
      return psnip_clock_monotonic_get_time (res);
    case PSNIP_CLOCK_TYPE_CPU:
      return psnip_clock_cpu_get_time (res);
    case PSNIP_CLOCK_TYPE_WALL:
      return psnip_clock_wall_get_time (res);
  }

  return -1;
}
#endif /* !defined(PSNIP_CLOCK_H) */
/* Elapsed nanoseconds between two timespecs, handling the borrow when
 * the end nanosecond field is smaller than the start's.  Assumes
 * end >= start. */
static psnip_uint64_t
munit_clock_get_elapsed(struct PsnipClockTimespec* start, struct PsnipClockTimespec* end) {
  psnip_uint64_t elapsed = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC;

  if (end->nanoseconds >= start->nanoseconds)
    elapsed += end->nanoseconds - start->nanoseconds;
  else
    elapsed -= start->nanoseconds - end->nanoseconds;

  return elapsed;
}
#else
# include <time.h>
#endif /* defined(MUNIT_ENABLE_TIMING) */
/*** PRNG stuff ***/
/* This is (unless I screwed up, which is entirely possible) the
* version of PCG with 32-bit state. It was chosen because it has a
* small enough state that we should reliably be able to use CAS
* instead of requiring a lock for thread-safety.
*
* If I did screw up, I probably will not bother changing it unless
* there is a significant bias. It's really not important this be
* particularly strong, as long as it is fairly random it's much more
* important that it be reproducible, so bug reports have a better
* chance of being reproducible. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__) && (!defined(__GNUC_MINOR__) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8))
# define HAVE_STDATOMIC
#elif defined(__clang__)
# if __has_extension(c_atomic)
# define HAVE_CLANG_ATOMICS
# endif
#endif
/* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */
#if defined(__clang__) && defined(_WIN32)
# undef HAVE_STDATOMIC
# if defined(__c2__)
# undef HAVE_CLANG_ATOMICS
# endif
#endif
#if defined(_OPENMP)
# define ATOMIC_UINT32_T uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#elif defined(HAVE_STDATOMIC)
# include <stdatomic.h>
# define ATOMIC_UINT32_T _Atomic uint32_t
# define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x)
#elif defined(HAVE_CLANG_ATOMICS)
# define ATOMIC_UINT32_T _Atomic uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#elif defined(_WIN32)
# define ATOMIC_UINT32_T volatile LONG
# define ATOMIC_UINT32_INIT(x) (x)
#else
# define ATOMIC_UINT32_T volatile uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#endif
static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42);
#if defined(_OPENMP)
/* OpenMP fallback atomics: every access is serialized through the named
 * critical section "munit_atomics".
 *
 * Fix vs. original: munit_atomic_load read the value into a plain
 * (signed) int before returning it as uint32_t; it now uses uint32_t
 * throughout. */
static inline void
munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) {
#pragma omp critical (munit_atomics)
  *dest = value;
}

static inline uint32_t
munit_atomic_load(ATOMIC_UINT32_T* src) {
  uint32_t ret;
#pragma omp critical (munit_atomics)
  ret = *src;
  return ret;
}

/* Compare-and-swap: store `desired` into *dest iff *dest == *expected.
 * Returns nonzero on success. */
static inline uint32_t
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  munit_bool ret;
#pragma omp critical (munit_atomics)
  {
    if (*dest == *expected) {
      *dest = desired;
      ret = 1;
    } else {
      ret = 0;
    }
  }
  return ret;
}
#elif defined(HAVE_STDATOMIC)
/* C11 atomics: map directly onto <stdatomic.h>. */
#  define munit_atomic_store(dest, value) atomic_store(dest, value)
#  define munit_atomic_load(src) atomic_load(src)
#  define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value)
#elif defined(HAVE_CLANG_ATOMICS)
/* Clang's built-in C11-atomic primitives. */
#  define munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST)
#  define munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST)
#  define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
/* NOTE(review): '&&' binds tighter than '||' in the next line, so it
 * parses as "(GCC and > 4) or (4.7+)".  It still behaves as intended
 * because an undefined __GNUC__ expands to 0 in #elif, but explicit
 * parentheses would make the intent clearer. */
#elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
/* GCC >= 4.7: __atomic builtins. */
#  define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST)
#  define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST)
#  define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#elif defined(__GNUC__) && (__GNUC__ >= 4)
/* Older GCC: only CAS uses a真 atomic builtin; aligned 32-bit loads and
 * stores are assumed atomic enough on the supported targets. */
#  define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
#  define munit_atomic_load(src) (*(src))
#  define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value)
#elif defined(_WIN32) /* Untested */
#  define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
#  define munit_atomic_load(src) (*(src))
#  define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected))
#else
#  warning No atomic implementation, PRNG will not be thread-safe
#  define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0)
#  define munit_atomic_load(src) (*(src))
/* Last-resort non-atomic CAS; only safe when single-threaded. */
static inline munit_bool
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  if (*dest == *expected) {
    *dest = desired;
    return 1;
  } else {
    return 0;
  }
}
#endif
#define MUNIT_PRNG_MULTIPLIER (747796405U)
#define MUNIT_PRNG_INCREMENT (1729U)

/* Advance the PCG-style linear congruential generator one step. */
static munit_uint32_t
munit_rand_next_state(munit_uint32_t state) {
  const munit_uint32_t advanced =
    (state * MUNIT_PRNG_MULTIPLIER) + MUNIT_PRNG_INCREMENT;
  return advanced;
}
/* Derive an output value from an LCG state (PCG-style xorshift/multiply
 * output permutation followed by a final right-xor). */
static munit_uint32_t
munit_rand_from_state(munit_uint32_t state) {
  const munit_uint32_t shift = (state >> 28) + 4;
  munit_uint32_t out = ((state >> shift) ^ state) * (277803737U);
  out ^= out >> 22;
  return out;
}
/* Reset the global PRNG from a user-supplied seed.  The seed is mixed
 * through one LCG step so that small seeds still produce well-spread
 * initial states. */
void
munit_rand_seed(munit_uint32_t seed) {
  munit_atomic_store(&munit_rand_state,
                     munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT));
}
/* Produce a fresh seed from the clock: wall-clock nanoseconds when
 * timing support is compiled in, otherwise time(NULL) seconds. */
static munit_uint32_t
munit_rand_generate_seed(void) {
  munit_uint32_t entropy;
#if defined(MUNIT_ENABLE_TIMING)
  struct PsnipClockTimespec wc = { 0, };
  psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc);
  entropy = (munit_uint32_t) wc.nanoseconds;
#else
  entropy = (munit_uint32_t) time(NULL);
#endif
  /* Run the raw clock value through one generator step so consecutive
   * invocations do not yield near-identical seeds. */
  return munit_rand_from_state(munit_rand_next_state(entropy + MUNIT_PRNG_INCREMENT));
}
/* Draw one 32-bit value from an explicit (non-shared) PRNG state,
 * advancing the state in place. */
static munit_uint32_t
munit_rand_state_uint32(munit_uint32_t* state) {
  const munit_uint32_t current = *state;
  *state = munit_rand_next_state(current);
  return munit_rand_from_state(current);
}
/* Draw one 32-bit value from the shared PRNG state, advancing it with a
 * lock-free compare-and-swap retry loop. */
munit_uint32_t
munit_rand_uint32(void) {
  munit_uint32_t observed;
  munit_uint32_t advanced;
  do {
    observed = munit_atomic_load(&munit_rand_state);
    advanced = munit_rand_next_state(observed);
  } while (!munit_atomic_cas(&munit_rand_state, &observed, advanced));
  return munit_rand_from_state(observed);
}
/* Fill 'size' bytes of 'data' with pseudo-random bytes drawn from an
 * explicit PRNG state: whole 32-bit words first, then one extra draw
 * for any trailing bytes. */
static void
munit_rand_state_memory(munit_uint32_t* state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  const size_t whole_words = size / sizeof(munit_uint32_t);
  const size_t tail_bytes = size % sizeof(munit_uint32_t);
  munit_uint8_t* cursor = data;
  munit_uint32_t word;
  size_t i;

  for (i = 0 ; i < whole_words ; i++) {
    word = munit_rand_state_uint32(state);
    memcpy(cursor, &word, sizeof(munit_uint32_t));
    cursor += sizeof(munit_uint32_t);
  }

  if (tail_bytes != 0) {
    word = munit_rand_state_uint32(state);
    memcpy(cursor, &word, tail_bytes);
  }
}
/* Fill 'data' with pseudo-random bytes from the shared PRNG, committing
 * the advanced state with a CAS retry loop so concurrent callers each
 * get a distinct, reproducible slice of the sequence. */
void
munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  munit_uint32_t snapshot;
  munit_uint32_t scratch;
  do {
    snapshot = munit_atomic_load(&munit_rand_state);
    scratch = snapshot;
    munit_rand_state_memory(&scratch, size, data);
  } while (!munit_atomic_cas(&munit_rand_state, &snapshot, scratch));
}
/* Produce a pseudo-random value in [0, max] (inclusive), mixing 'salt'
 * into every draw, using rejection sampling to limit modulo bias.
 * NOTE(review): the rejection threshold 'min' is computed from the
 * pre-increment 'max' (modulo max, not max + 1).  This matches the
 * historical output; do not "fix" it casually — seeded runs are
 * expected to reproduce the same sequences across versions. */
static munit_uint32_t
munit_rand_state_at_most(munit_uint32_t* state, munit_uint32_t salt, munit_uint32_t max) {
  /* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the same
   * as (UINT32_MAX + 1 - max) % max = -max % max. We compute -max using not
   * to avoid compiler warnings.
   */
  const munit_uint32_t min = (~max + 1U) % max;
  munit_uint32_t x;

  /* Full 32-bit range requested: every draw is acceptable as-is. */
  if (max == (~((munit_uint32_t) 0U)))
    return munit_rand_state_uint32(state) ^ salt;

  max++;

  /* Reject draws below the threshold, then reduce into [0, max). */
  do {
    x = munit_rand_state_uint32(state) ^ salt;
  } while (x < min);

  return x % max;
}
/* Bounded draw from the shared PRNG state: delegate to the explicit
 * state variant, committing the advanced state via CAS retry. */
static munit_uint32_t
munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) {
  munit_uint32_t snapshot;
  munit_uint32_t scratch;
  munit_uint32_t result;
  do {
    snapshot = munit_atomic_load(&munit_rand_state);
    scratch = snapshot;
    result = munit_rand_state_at_most(&scratch, salt, max);
  } while (!munit_atomic_cas(&munit_rand_state, &snapshot, scratch));
  return result;
}
int
munit_rand_int_range(int min, int max) {
munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min;
if ((munit_uint64_t) 0U == range)
return min;
if (min > max)
return munit_rand_int_range(max, min);
if (range > (~((munit_uint32_t) 0U)))
range = (~((munit_uint32_t) 0U));
return min + munit_rand_at_most(0, (munit_uint32_t) range);
}
/* Pseudo-random double in [0, 1), committed via CAS retry.
 * See http://mumble.net/~campbell/tmp/random_real.c for how to do this
 * right; patches welcome if this is considered too biased. */
double
munit_rand_double(void) {
  munit_uint32_t snapshot;
  munit_uint32_t scratch;
  double result = 0.0;
  do {
    snapshot = munit_atomic_load(&munit_rand_state);
    scratch = snapshot;
    /* Divide a 32-bit draw by 2^32 to land in [0, 1). */
    result = munit_rand_state_uint32(&scratch) / ((~((munit_uint32_t) 0U)) + 1.0);
  } while (!munit_atomic_cas(&munit_rand_state, &snapshot, scratch));
  return result;
}
/*** Test suite handling ***/

/* Tally of test outcomes; accumulated per test execution and also
 * aggregated across the whole run in MunitTestRunner.report. */
typedef struct {
  unsigned int successful; /* results that were MUNIT_OK */
  unsigned int skipped;    /* results that were MUNIT_SKIP */
  unsigned int failed;     /* results that were MUNIT_FAIL */
  unsigned int errored;    /* results that were MUNIT_ERROR */
#if defined(MUNIT_ENABLE_TIMING)
  munit_uint64_t cpu_clock;  /* accumulated CPU time, nanoseconds */
  munit_uint64_t wall_clock; /* accumulated wall time, nanoseconds */
#endif
} MunitReport;
/* All state for one invocation of the test runner. */
typedef struct {
  const char* prefix;             /* accumulated suite-name prefix */
  const MunitSuite* suite;        /* root suite to run */
  const char** tests;             /* NULL-terminated CLI test names, or NULL for all */
  munit_uint32_t seed;            /* PRNG seed applied before every test */
  unsigned int iterations;        /* per-test iteration override; 0 = suite default */
  MunitParameter* parameters;     /* parameter values fixed on the CLI */
  munit_bool single_parameter_mode; /* --single: one random combination per test */
  void* user_data;                /* opaque pointer handed to setup callbacks */
  MunitReport report;             /* aggregated results for the whole run */
  munit_bool colorize;            /* emit ANSI color escapes */
  munit_bool fork;                /* run each test in a child process */
  munit_bool show_stderr;         /* replay buffered stderr even on success */
  munit_bool fatal_failures;      /* stop at the first failure/error */
} MunitTestRunner;
/* Look up 'key' in a NULL-terminated parameter array; returns the
 * matching value, or NULL when the key is absent (or params is NULL). */
const char*
munit_parameters_get(const MunitParameter params[], const char* key) {
  const MunitParameter* entry;
  for (entry = params ; entry != NULL && entry->name != NULL ; entry++) {
    if (strcmp(entry->name, key) == 0)
      return entry->value;
  }
  return NULL;
}
#if defined(MUNIT_ENABLE_TIMING)
/* Print a nanosecond count to fp as fractional seconds, using the
 * project-wide MUNIT_TEST_TIME_FORMAT precision. */
static void
munit_print_time(FILE* fp, munit_uint64_t nanoseconds) {
  fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, ((double) nanoseconds) / ((double) PSNIP_CLOCK_NSEC_PER_SEC));
}
#endif
/* Add a parameter to a NULL-terminated array of parameters, growing the
 * array by one entry (plus the NULL terminator).  On success the entry
 * is appended and *params_size incremented.  On allocation failure the
 * original array is left intact — and still owned by the caller — and
 * MUNIT_ERROR is returned.
 *
 * Fix: the previous code assigned realloc's result directly to *params,
 * which on failure replaced the caller's pointer with NULL and leaked
 * the old array. */
static MunitResult
munit_parameters_add(size_t* params_size, MunitParameter* params[MUNIT_ARRAY_PARAM(*params_size)], char* name, char* value) {
  /* +2: one slot for the new entry, one for the NULL terminator. */
  MunitParameter* grown = realloc(*params, sizeof(MunitParameter) * (*params_size + 2));
  if (grown == NULL)
    return MUNIT_ERROR;
  *params = grown;

  (*params)[*params_size].name = name;
  (*params)[*params_size].value = value;
  (*params_size)++;
  (*params)[*params_size].name = NULL;
  (*params)[*params_size].value = NULL;

  return MUNIT_OK;
}
/* Concatenate two strings, but just return one of the components
 * unaltered if the other is NULL or "".  A freshly-allocated buffer is
 * only produced when both components are non-empty; release results
 * with munit_maybe_free_concat().  If 'len' is non-NULL it receives the
 * length of the returned string.
 *
 * Fix: the previous code used the malloc result unchecked; on OOM the
 * memcpy calls would dereference NULL (undefined behavior).  We now
 * degrade to the (NULL, 0) "empty" result instead. */
static char*
munit_maybe_concat(size_t* len, char* prefix, char* suffix) {
  char* res;
  size_t res_l;
  const size_t prefix_l = prefix != NULL ? strlen(prefix) : 0;
  const size_t suffix_l = suffix != NULL ? strlen(suffix) : 0;
  if (prefix_l == 0 && suffix_l == 0) {
    res = NULL;
    res_l = 0;
  } else if (prefix_l == 0 && suffix_l != 0) {
    /* Only the suffix is non-empty: hand it back unaltered. */
    res = suffix;
    res_l = suffix_l;
  } else if (prefix_l != 0 && suffix_l == 0) {
    /* Only the prefix is non-empty: hand it back unaltered. */
    res = prefix;
    res_l = prefix_l;
  } else {
    res_l = prefix_l + suffix_l;
    res = malloc(res_l + 1);
    if (res == NULL) {
      /* Out of memory: report the empty result rather than invoking
       * undefined behavior via memcpy(NULL, ...). */
      res_l = 0;
    } else {
      memcpy(res, prefix, prefix_l);
      memcpy(res + prefix_l, suffix, suffix_l);
      res[res_l] = 0;
    }
  }

  if (len != NULL)
    *len = res_l;

  return res;
}
/* Possibly free a string returned by munit_maybe_concat: the result is
 * heap-allocated only when it is neither of the two original
 * components, so freeing is skipped when it aliases either one. */
static void
munit_maybe_free_concat(char* s, const char* prefix, const char* suffix) {
  if (s == prefix || s == suffix)
    return;
  free(s);
}
/* Cheap string hash (djb2 variant), used only to salt the PRNG so that
 * different tests draw different parameter choices. */
static munit_uint32_t
munit_str_hash(const char* name) {
  munit_uint32_t h = 5381U;
  const char* c;
  for (c = name ; *c != '\0' ; c++)
    h = (h << 5) + h + *c; /* h * 33 + character */
  return h;
}
/* Copy everything readable from fd 'from' to fd 'to' (used to replay a
 * test's buffered stderr).  Stops on EOF or on any read/write error. */
static void
munit_splice(int from, int to) {
  munit_uint8_t buf[1024];
#if !defined(_WIN32)
  ssize_t len;
  ssize_t bytes_written;
  ssize_t write_res;
#else
  /* Windows read()/write() take and return int. */
  int len;
  int bytes_written;
  int write_res;
#endif
  do {
    len = read(from, buf, sizeof(buf));
    if (len > 0) {
      /* Short writes are possible; loop until the chunk is flushed. */
      bytes_written = 0;
      do {
        write_res = write(to, buf + bytes_written, len - bytes_written);
        if (write_res < 0)
          break;
        bytes_written += write_res;
      } while (bytes_written < len);
    }
    else
      break;
  } while (1);
}
/* This is the part that should be handled in the child process: run one
 * test (possibly for several iterations) and accumulate outcome counts
 * and timing into *report.  Returns the result of the final iteration. */
static MunitResult
munit_test_runner_exec(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[], MunitReport* report) {
  unsigned int iterations = runner->iterations;
  MunitResult result = MUNIT_FAIL;
#if defined(MUNIT_ENABLE_TIMING)
  struct PsnipClockTimespec wall_clock_begin = { 0, }, wall_clock_end = { 0, };
  struct PsnipClockTimespec cpu_clock_begin = { 0, }, cpu_clock_end = { 0, };
#endif
  unsigned int i = 0;

  /* Single-iteration tests always run once; otherwise fall back to the
   * suite-wide iteration count when the runner doesn't specify one. */
  if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION)
    iterations = 1;
  else if (iterations == 0)
    iterations = runner->suite->iterations;

  /* Re-seed so every test observes the same deterministic sequence. */
  munit_rand_seed(runner->seed);
  do {
    /* setup() output (or the runner's user_data) becomes the test's
     * fixture; tear_down() releases it after each iteration. */
    void* data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data);

#if defined(MUNIT_ENABLE_TIMING)
    psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin);
    psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin);
#endif

    result = test->test(params, data);

#if defined(MUNIT_ENABLE_TIMING)
    psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end);
    psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end);
#endif

    if (test->tear_down != NULL)
      test->tear_down(data);

    if (MUNIT_LIKELY(result == MUNIT_OK)) {
      report->successful++;
#if defined(MUNIT_ENABLE_TIMING)
      /* Timing is accumulated only for successful iterations. */
      report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end);
      report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end);
#endif
    } else {
      /* Any non-OK result is recorded and ends the iteration loop. */
      switch ((int) result) {
        case MUNIT_SKIP:
          report->skipped++;
          break;
        case MUNIT_FAIL:
          report->failed++;
          break;
        case MUNIT_ERROR:
          report->errored++;
          break;
        default:
          break;
      }
      break;
    }
  } while (++i < iterations);

  return result;
}
/* Fixed-width result labels printed after each test; compile with
 * MUNIT_EMOTICON defined for the emoticon variants. */
#if defined(MUNIT_EMOTICON)
#  define MUNIT_RESULT_STRING_OK ":)"
#  define MUNIT_RESULT_STRING_SKIP ":|"
#  define MUNIT_RESULT_STRING_FAIL ":("
#  define MUNIT_RESULT_STRING_ERROR ":o"
#  define MUNIT_RESULT_STRING_TODO ":/"
#else
#  define MUNIT_RESULT_STRING_OK "OK "
#  define MUNIT_RESULT_STRING_SKIP "SKIP "
#  define MUNIT_RESULT_STRING_FAIL "FAIL "
#  define MUNIT_RESULT_STRING_ERROR "ERROR"
#  define MUNIT_RESULT_STRING_TODO "TODO "
#endif
/* Write 'string' to the output stream, wrapped in an ANSI foreground
 * color escape ('color' is the ASCII digit of the SGR color) when
 * colorized output is enabled, plainly otherwise. */
static void
munit_test_runner_print_color(const MunitTestRunner* runner, const char* string, char color) {
  if (!runner->colorize) {
    fputs(string, MUNIT_OUTPUT_FILE);
    return;
  }
  fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string);
}
#if !defined(MUNIT_NO_BUFFER)
/* Redirect the process's stderr into stderr_buf (a temp file) so output
 * produced during a test can be replayed later.  Returns the dup'd
 * original stderr fd (for munit_restore_stderr), or -1 when no buffer
 * was supplied. */
static int
munit_replace_stderr(FILE* stderr_buf) {
  if (stderr_buf != NULL) {
    const int orig_stderr = dup(STDERR_FILENO);

    int errfd = fileno(stderr_buf);
    if (MUNIT_UNLIKELY(errfd == -1)) {
      /* No usable fd behind the buffer; nothing sensible to do. */
      exit(EXIT_FAILURE);
    }

    dup2(errfd, STDERR_FILENO);

    return orig_stderr;
  }

  return -1;
}

/* Undo munit_replace_stderr: restore the saved fd and close the dup. */
static void
munit_restore_stderr(int orig_stderr) {
  if (orig_stderr != -1) {
    dup2(orig_stderr, STDERR_FILENO);
    close(orig_stderr);
  }
}
#endif /* !defined(MUNIT_NO_BUFFER) */
/* Run a test with the specified parameters: print the parameter list,
 * execute the test (in a forked child unless --no-fork), fold the
 * outcome into runner->report, and print the per-test result line
 * (replaying buffered stderr on failure or with --show-stderr). */
static void
munit_test_runner_run_test_with_params(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[]) {
  MunitResult result = MUNIT_OK;
  MunitReport report = {
    0, 0, 0, 0,
#if defined(MUNIT_ENABLE_TIMING)
    0, 0
#endif
  };
  unsigned int output_l;
  munit_bool first;
  const MunitParameter* param;
  FILE* stderr_buf;
#if !defined(MUNIT_NO_FORK)
  int pipefd[2];
  pid_t fork_pid;
  int orig_stderr;
  ssize_t bytes_written = 0;
  ssize_t write_res;
  ssize_t bytes_read = 0;
  ssize_t read_res;
  int status = 0;
  pid_t changed_pid;
#endif

  /* Print "name=value, ..." padded out to the fixed name column. */
  if (params != NULL) {
    output_l = 2;
    fputs(" ", MUNIT_OUTPUT_FILE);
    first = 1;
    for (param = params ; param != NULL && param->name != NULL ; param++) {
      if (!first) {
        fputs(", ", MUNIT_OUTPUT_FILE);
        output_l += 2;
      } else {
        first = 0;
      }
      output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value);
    }
    while (output_l++ < MUNIT_TEST_NAME_LEN) {
      fputc(' ', MUNIT_OUTPUT_FILE);
    }
  }

  fflush(MUNIT_OUTPUT_FILE);

  /* Buffer the test's stderr in a temp file so it can be replayed
   * after the result line. */
  stderr_buf = NULL;
#if !defined(_WIN32) || defined(__MINGW32__)
  stderr_buf = tmpfile();
#else
  tmpfile_s(&stderr_buf);
#endif
  if (stderr_buf == NULL) {
    munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr");
    result = MUNIT_ERROR;
    goto print_result;
  }

#if !defined(MUNIT_NO_FORK)
  if (runner->fork) {
    pipefd[0] = -1;
    pipefd[1] = -1;
    /* The child streams its MunitReport back through this pipe. */
    if (pipe(pipefd) != 0) {
      munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe");
      result = MUNIT_ERROR;
      goto print_result;
    }

    fork_pid = fork();
    if (fork_pid == 0) {
      /* Child: run the test, write the report to the pipe, exit. */
      close(pipefd[0]);

      orig_stderr = munit_replace_stderr(stderr_buf);
      munit_test_runner_exec(runner, test, params, &report);

      /* Note that we don't restore stderr. This is so we can buffer
       * things written to stderr later on (such as by
       * asan/tsan/ubsan, valgrind, etc.) */
      close(orig_stderr);

      do {
        write_res = write(pipefd[1], ((munit_uint8_t*) (&report)) + bytes_written, sizeof(report) - bytes_written);
        if (write_res < 0) {
          if (stderr_buf != NULL) {
            munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe");
          }
          exit(EXIT_FAILURE);
        }
        bytes_written += write_res;
      } while ((size_t) bytes_written < sizeof(report));

      if (stderr_buf != NULL)
        fclose(stderr_buf);
      close(pipefd[1]);

      exit(EXIT_SUCCESS);
    } else if (fork_pid == -1) {
      close(pipefd[0]);
      close(pipefd[1]);
      if (stderr_buf != NULL) {
        munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork");
      }
      report.errored++;
      result = MUNIT_ERROR;
    } else {
      /* Parent: read the child's report, reap it, and translate an
       * abnormal exit (signal, bad status, short read) into an error. */
      close(pipefd[1]);
      do {
        read_res = read(pipefd[0], ((munit_uint8_t*) (&report)) + bytes_read, sizeof(report) - bytes_read);
        if (read_res < 1)
          break;
        bytes_read += read_res;
      } while (bytes_read < (ssize_t) sizeof(report));

      changed_pid = waitpid(fork_pid, &status, 0);

      if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) {
        if (bytes_read != sizeof(report)) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status));
          report.errored++;
        } else if (WEXITSTATUS(status) != EXIT_SUCCESS) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status));
          report.errored++;
        }
      } else {
        if (WIFSIGNALED(status)) {
#if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700)
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status)));
#else
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status));
#endif
        } else if (WIFSTOPPED(status)) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status));
        }
        report.errored++;
      }

      close(pipefd[0]);
      waitpid(fork_pid, NULL, 0);
    }
  } else
#endif
  {
    /* No-fork path: run in-process, trapping assertion failures via
     * setjmp/longjmp when thread-local storage is available. */
#if !defined(MUNIT_NO_BUFFER)
    const volatile int orig_stderr = munit_replace_stderr(stderr_buf);
#endif

#if defined(MUNIT_THREAD_LOCAL)
    if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) {
      result = MUNIT_FAIL;
      report.failed++;
    } else {
      munit_error_jmp_buf_valid = 1;
      result = munit_test_runner_exec(runner, test, params, &report);
    }
#else
    result = munit_test_runner_exec(runner, test, params, &report);
#endif

#if !defined(MUNIT_NO_BUFFER)
    munit_restore_stderr(orig_stderr);
#endif

    /* Here just so that the label is used on Windows and we don't get
     * a warning */
    goto print_result;
  }

 print_result:

  fputs("[ ", MUNIT_OUTPUT_FILE);
  if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) {
    /* TODO tests are expected to fail; succeeding is the error here. */
    if (report.failed != 0 || report.errored != 0 || report.skipped != 0) {
      munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3');
      result = MUNIT_OK;
    } else {
      munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
      if (MUNIT_LIKELY(stderr_buf != NULL))
        munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful.");
      runner->report.failed++;
      result = MUNIT_ERROR;
    }
  } else if (report.failed > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1');
    runner->report.failed++;
    result = MUNIT_FAIL;
  } else if (report.errored > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
    runner->report.errored++;
    result = MUNIT_ERROR;
  } else if (report.skipped > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3');
    runner->report.skipped++;
    result = MUNIT_SKIP;
  } else if (report.successful > 1) {
    /* Several successful iterations: show mean and total times. */
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
    fputs(" ] [ ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful);
    fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", "");
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
    fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
    runner->report.successful++;
    result = MUNIT_OK;
  } else if (report.successful > 0) {
    munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
    fputs(" ] [ ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
    fputs(" / ", MUNIT_OUTPUT_FILE);
    munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
    fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
    runner->report.successful++;
    result = MUNIT_OK;
  }
  fputs(" ]\n", MUNIT_OUTPUT_FILE);

  /* Replay the buffered stderr when the test failed or errored, or
   * unconditionally with --show-stderr. */
  if (stderr_buf != NULL) {
    if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) {
      fflush(MUNIT_OUTPUT_FILE);
      rewind(stderr_buf);
      munit_splice(fileno(stderr_buf), STDERR_FILENO);
      fflush(stderr);
    }
    fclose(stderr_buf);
  }
}
/* Run a test once for every combination of values of the wildcard
 * parameters, recursing over the parameter array.  'p' points at the
 * wildcard entry to fill in at this level; when the following entry is
 * the NULL terminator the parameter set is complete and the test runs.
 *
 * Fix: the old not-found guard only tested 'pe == NULL', which can
 * never catch a missing parameter — when the search loop falls through
 * without a match, pe points at the NULL-name terminator (non-NULL),
 * and the values loop below would then dereference pe->values. */
static void
munit_test_runner_run_test_wild(MunitTestRunner* runner,
                                const MunitTest* test,
                                const char* test_name,
                                MunitParameter* params,
                                MunitParameter* p) {
  const MunitParameterEnum* pe;
  char** values;
  MunitParameter* next;

  /* Locate the enum describing this parameter.  Pointer comparison is
   * intentional: p->name was copied from pe->name when the wildcard
   * placeholder was added. */
  for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
    if (p->name == pe->name)
      break;
  }

  if (pe == NULL || pe->name == NULL)
    return;

  for (values = pe->values ; *values != NULL ; values++) {
    next = p + 1;
    p->value = *values;
    if (next->name == NULL) {
      /* All wildcards filled in: run with this combination. */
      munit_test_runner_run_test_with_params(runner, test, params);
    } else {
      /* More wildcards to fill: recurse to the next one. */
      munit_test_runner_run_test_wild(runner, test, test_name, params, next);
    }
    /* Honor --fatal-failures between combinations. */
    if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
      break;
  }
}
/* Run a single test, with every combination of parameters requested
 * (or, with --single, one randomly chosen combination).
 *
 * Fix: the address-of expressions in the munit_parameters_add() calls
 * had been corrupted by an HTML-entity mangling ("&para;" rendered as
 * '¶', turning "&params" into "¶ms"); restored to "&params_l" /
 * "&params". */
static void
munit_test_runner_run_test(MunitTestRunner* runner,
                           const MunitTest* test,
                           const char* prefix) {
  char* test_name = munit_maybe_concat(NULL, (char*) prefix, (char*) test->name);
  /* The array of parameters to pass to
   * munit_test_runner_run_test_with_params */
  MunitParameter* params = NULL;
  size_t params_l = 0;
  /* Wildcard parameters are parameters which have possible values
   * specified in the test, but no specific value was passed to the
   * CLI.  That means we want to run the test once for every
   * possible combination of parameter values or, if --single was
   * passed to the CLI, a single time with a random set of
   * parameters. */
  MunitParameter* wild_params = NULL;
  size_t wild_params_l = 0;
  const MunitParameterEnum* pe;
  const MunitParameter* cli_p;
  munit_bool filled;
  unsigned int possible;
  char** vals;
  size_t first_wild;
  const MunitParameter* wp;
  int pidx;

  munit_rand_seed(runner->seed);

  fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name);

  if (test->parameters == NULL) {
    /* No parameters.  Simple, nice. */
    munit_test_runner_run_test_with_params(runner, test, NULL);
  } else {
    fputc('\n', MUNIT_OUTPUT_FILE);

    for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) {
      /* Did we receive a value for this parameter from the CLI? */
      filled = 0;
      for (cli_p = runner->parameters ; cli_p != NULL && cli_p->name != NULL ; cli_p++) {
        if (strcmp(cli_p->name, pe->name) == 0) {
          if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK))
            goto cleanup;
          filled = 1;
          break;
        }
      }
      if (filled)
        continue;

      /* Nothing from CLI, is the enum NULL/empty?  We're not a
       * fuzzer... */
      if (pe->values == NULL || pe->values[0] == NULL)
        continue;

      /* If --single was passed to the CLI, choose a value from the
       * list of possibilities randomly. */
      if (runner->single_parameter_mode) {
        possible = 0;
        for (vals = pe->values ; *vals != NULL ; vals++)
          possible++;
        /* We want the tests to be reproducible, even if you're only
         * running a single test, but we don't want every test with
         * the same number of parameters to choose the same parameter
         * number, so use the test name as a primitive salt. */
        pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1);
        if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK))
          goto cleanup;
      } else {
        /* We want to try every permutation.  Put in a placeholder
         * entry, we'll iterate through them later. */
        if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK))
          goto cleanup;
      }
    }

    if (wild_params_l != 0) {
      /* Seed each wildcard slot with its first value, then iterate
       * over all combinations starting at the first wildcard. */
      first_wild = params_l;
      for (wp = wild_params ; wp != NULL && wp->name != NULL ; wp++) {
        for (pe = test->parameters ; pe != NULL && pe->name != NULL && pe->values != NULL ; pe++) {
          if (strcmp(wp->name, pe->name) == 0) {
            if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK))
              goto cleanup;
          }
        }
      }

      munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild);
    } else {
      munit_test_runner_run_test_with_params(runner, test, params);
    }

  cleanup:
    free(params);
    free(wild_params);
  }

  munit_maybe_free_concat(test_name, prefix, test->name);
}
/* Recurse through the suite and run all the tests.  If a list of
 * tests to run was provided on the command line, run only those
 * tests. */
static void
munit_test_runner_run_suite(MunitTestRunner* runner,
                            const MunitSuite* suite,
                            const char* prefix) {
  size_t pre_l;
  /* Full prefix for this suite: the caller's prefix + our own. */
  char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix);
  const MunitTest* test;
  const char** test_name;
  const MunitSuite* child_suite;

  /* Run the tests. */
  for (test = suite->tests ; test != NULL && test->test != NULL ; test++) {
    if (runner->tests != NULL) { /* Specific tests were requested on the CLI */
      for (test_name = runner->tests ; test_name != NULL && *test_name != NULL ; test_name++) {
        /* Match when the requested name starts with this suite's
         * prefix and its remainder is a prefix of the test name. */
        if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) &&
            strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) {
          munit_test_runner_run_test(runner, test, pre);
          if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
            goto cleanup;
        }
      }
    } else { /* Run all tests */
      munit_test_runner_run_test(runner, test, pre);
    }
  }

  if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
    goto cleanup;

  /* Run any child suites. */
  for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) {
    munit_test_runner_run_suite(runner, child_suite, pre);
  }

 cleanup:
  munit_maybe_free_concat(pre, prefix, suite->prefix);
}
/* Entry point for a full run: walk the root suite with no name prefix. */
static void
munit_test_runner_run(MunitTestRunner* runner) {
  munit_test_runner_run_suite(runner, runner->suite, NULL);
}
/* Print CLI usage, descriptions of the built-in options, the munit
 * version banner, and any custom arguments' help callbacks.
 *
 * Fixes to the user-facing help text: "any test with takes" ->
 * "any test which takes"; "hexidecimal" -> "hexadecimal". */
static void
munit_print_help(int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], void* user_data, const MunitArgument arguments[]) {
  const MunitArgument* arg;
  (void) argc;

  printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]);
  puts(" --seed SEED\n"
       " Value used to seed the PRNG. Must be a 32-bit integer in decimal\n"
       " notation with no separators (commas, decimals, spaces, etc.), or\n"
       " hexadecimal prefixed by \"0x\".\n"
       " --iterations N\n"
       " Run each test N times. 0 means the default number.\n"
       " --param name value\n"
       " A parameter key/value pair which will be passed to any test which\n"
       " takes a parameter of that name. If not provided, the test will be\n"
       " run once for each possible parameter value.\n"
       " --list Write a list of all available tests.\n"
       " --list-params\n"
       " Write a list of all available tests and their possible parameters.\n"
       " --single Run each parameterized test in a single configuration instead of\n"
       " every possible combination\n"
       " --log-visible debug|info|warning|error\n"
       " --log-fatal debug|info|warning|error\n"
       " Set the level at which messages of different severities are visible,\n"
       " or cause the test to terminate.\n"
#if !defined(MUNIT_NO_FORK)
       " --no-fork Do not execute tests in a child process. If this option is supplied\n"
       " and a test crashes (including by failing an assertion), no further\n"
       " tests will be performed.\n"
#endif
       " --fatal-failures\n"
       " Stop executing tests as soon as a failure is found.\n"
       " --show-stderr\n"
       " Show data written to stderr by the tests, even if the test succeeds.\n"
       " --color auto|always|never\n"
       " Colorize (or don't) the output.\n"
       /* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */
       " --help Print this help message and exit.\n");
#if defined(MUNIT_NL_LANGINFO)
  setlocale(LC_ALL, "");
  /* Use the pretty name only when the locale can actually render 'µ'. */
  fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ? "µnit" : "munit", stdout);
#else
  puts("munit");
#endif
  printf(" %d.%d.%d\n"
         "Full documentation at: https://nemequ.github.io/munit/\n",
         (MUNIT_CURRENT_VERSION >> 16) & 0xff,
         (MUNIT_CURRENT_VERSION >> 8) & 0xff,
         (MUNIT_CURRENT_VERSION >> 0) & 0xff);
  for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++)
    arg->write_help(arg, user_data);
}
/* Look up a custom CLI argument by name in a NULL-terminated array;
 * returns the matching entry or NULL when absent. */
static const MunitArgument*
munit_arguments_find(const MunitArgument arguments[], const char* name) {
  const MunitArgument* candidate;
  for (candidate = arguments ; candidate != NULL && candidate->name != NULL ; candidate++) {
    if (strcmp(candidate->name, name) == 0)
      return candidate;
  }
  return NULL;
}
/* Print every test name in the suite (recursively, prefixed), and with
 * show_params also each parameter with its possible values (or "Any"
 * when the enum leaves them open).  Implements --list/--list-params. */
static void
munit_suite_list_tests(const MunitSuite* suite, munit_bool show_params, const char* prefix) {
  size_t pre_l;
  char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix);
  const MunitTest* test;
  const MunitParameterEnum* params;
  char** val;
  const MunitSuite* child_suite;
  munit_bool first;

  for (test = suite->tests ; test != NULL && test->name != NULL ; test++) {
    if (pre != NULL)
      fputs(pre, stdout);
    puts(test->name);

    if (!show_params)
      continue;

    for (params = test->parameters ; params != NULL && params->name != NULL ; params++) {
      fprintf(stdout, " - %s: ", params->name);
      if (params->values == NULL) {
        puts("Any");
        continue;
      }
      /* Comma-separated list of the declared values. */
      first = 1;
      for (val = params->values ; *val != NULL ; val++) {
        if (first)
          first = 0;
        else
          fputs(", ", stdout);
        fputs(*val, stdout);
      }
      putc('\n', stdout);
    }
  }

  for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++)
    munit_suite_list_tests(child_suite, show_params, pre);

  munit_maybe_free_concat(pre, prefix, suite->prefix);
}
/* Decide whether ANSI color escapes may be written to 'stream'.  On
 * POSIX this is simply "is it a TTY"; on Windows a TTY must also have
 * the ANSICON environment variable set — presumably indicating the
 * ANSICON shim is active, since plain consoles ignore the escapes. */
static munit_bool
munit_stream_supports_ansi(FILE *stream) {
#if !defined(_WIN32)
  return isatty(fileno(stream));
#else

#if !defined(__MINGW32__)
  size_t ansicon_size = 0;
#endif

  if (isatty(fileno(stream))) {
#if !defined(__MINGW32__)
    /* getenv_s reports the required buffer size; non-zero means the
     * variable exists. */
    getenv_s(&ansicon_size, NULL, 0, "ANSICON");
    return ansicon_size != 0;
#else
    return getenv("ANSICON") != NULL;
#endif
  }
  return 0;
#endif
}
int
munit_suite_main_custom(const MunitSuite* suite, void* user_data,
int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)],
const MunitArgument arguments[]) {
int result = EXIT_FAILURE;
MunitTestRunner runner;
size_t parameters_size = 0;
size_t tests_size = 0;
int arg;
char* envptr;
unsigned long ts;
char* endptr;
unsigned long long iterations;
MunitLogLevel level;
const MunitArgument* argument;
const char** runner_tests;
unsigned int tests_run;
unsigned int tests_total;
runner.prefix = NULL;
runner.suite = NULL;
runner.tests = NULL;
runner.seed = 0;
runner.iterations = 0;
runner.parameters = NULL;
runner.single_parameter_mode = 0;
runner.user_data = NULL;
runner.report.successful = 0;
runner.report.skipped = 0;
runner.report.failed = 0;
runner.report.errored = 0;
#if defined(MUNIT_ENABLE_TIMING)
runner.report.cpu_clock = 0;
runner.report.wall_clock = 0;
#endif
runner.colorize = 0;
#if !defined(_WIN32)
runner.fork = 1;
#else
runner.fork = 0;
#endif
runner.show_stderr = 0;
runner.fatal_failures = 0;
runner.suite = suite;
runner.user_data = user_data;
runner.seed = munit_rand_generate_seed();
runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);
for (arg = 1 ; arg < argc ; arg++) {
if (strncmp("--", argv[arg], 2) == 0) {
if (strcmp("seed", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
envptr = argv[arg + 1];
ts = strtoul(argv[arg + 1], &envptr, 0);
if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
runner.seed = (munit_uint32_t) ts;
arg++;
} else if (strcmp("iterations", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
endptr = argv[arg + 1];
iterations = strtoul(argv[arg + 1], &endptr, 0);
if (*endptr != '\0' || iterations > UINT_MAX) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
runner.iterations = (unsigned int) iterations;
arg++;
} else if (strcmp("param", argv[arg] + 2) == 0) {
if (arg + 2 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]);
goto cleanup;
}
runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2));
if (runner.parameters == NULL) {
munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
goto cleanup;
}
runner.parameters[parameters_size].name = (char*) argv[arg + 1];
runner.parameters[parameters_size].value = (char*) argv[arg + 2];
parameters_size++;
runner.parameters[parameters_size].name = NULL;
runner.parameters[parameters_size].value = NULL;
arg += 2;
} else if (strcmp("color", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
if (strcmp(argv[arg + 1], "always") == 0)
runner.colorize = 1;
else if (strcmp(argv[arg + 1], "never") == 0)
runner.colorize = 0;
else if (strcmp(argv[arg + 1], "auto") == 0)
runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);
else {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
arg++;
} else if (strcmp("help", argv[arg] + 2) == 0) {
munit_print_help(argc, argv, user_data, arguments);
result = EXIT_SUCCESS;
goto cleanup;
} else if (strcmp("single", argv[arg] + 2) == 0) {
runner.single_parameter_mode = 1;
} else if (strcmp("show-stderr", argv[arg] + 2) == 0) {
runner.show_stderr = 1;
#if !defined(_WIN32)
} else if (strcmp("no-fork", argv[arg] + 2) == 0) {
runner.fork = 0;
#endif
} else if (strcmp("fatal-failures", argv[arg] + 2) == 0) {
runner.fatal_failures = 1;
} else if (strcmp("log-visible", argv[arg] + 2) == 0 ||
strcmp("log-fatal", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
if (strcmp(argv[arg + 1], "debug") == 0)
level = MUNIT_LOG_DEBUG;
else if (strcmp(argv[arg + 1], "info") == 0)
level = MUNIT_LOG_INFO;
else if (strcmp(argv[arg + 1], "warning") == 0)
level = MUNIT_LOG_WARNING;
else if (strcmp(argv[arg + 1], "error") == 0)
level = MUNIT_LOG_ERROR;
else {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
if (strcmp("log-visible", argv[arg] + 2) == 0)
munit_log_level_visible = level;
else
munit_log_level_fatal = level;
arg++;
} else if (strcmp("list", argv[arg] + 2) == 0) {
munit_suite_list_tests(suite, 0, NULL);
result = EXIT_SUCCESS;
goto cleanup;
} else if (strcmp("list-params", argv[arg] + 2) == 0) {
munit_suite_list_tests(suite, 1, NULL);
result = EXIT_SUCCESS;
goto cleanup;
} else {
argument = munit_arguments_find(arguments, argv[arg] + 2);
if (argument == NULL) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]);
goto cleanup;
}
if (!argument->parse_argument(suite, user_data, &arg, argc, argv))
goto cleanup;
}
} else {
runner_tests = realloc((void*) runner.tests, sizeof(char*) * (tests_size + 2));
if (runner_tests == NULL) {
munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
goto cleanup;
}
runner.tests = runner_tests;
runner.tests[tests_size++] = argv[arg];
runner.tests[tests_size] = NULL;
}
}
fflush(stderr);
fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n", runner.seed);
munit_test_runner_run(&runner);
tests_run = runner.report.successful + runner.report.failed + runner.report.errored;
tests_total = tests_run + runner.report.skipped;
if (tests_run == 0) {
fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped);
} else {
fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) test skipped.\n",
runner.report.successful, tests_run,
(((double) runner.report.successful) / ((double) tests_run)) * 100.0,
runner.report.skipped,
(((double) runner.report.skipped) / ((double) tests_total)) * 100.0);
}
if (runner.report.failed == 0 && runner.report.errored == 0) {
result = EXIT_SUCCESS;
}
cleanup:
free(runner.parameters);
free((void*) runner.tests);
return result;
}
/* Convenience entry point: run the suite through munit_suite_main_custom()
 * with no user-supplied extra-argument table. */
int
munit_suite_main(const MunitSuite* suite, void* user_data,
                 int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)]) {
  const int result = munit_suite_main_custom(suite, user_data, argc, argv, NULL);
  return result;
}
|
idaFoodWeb_kry_omp.c | /*
* -----------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example program for IDA: Food web problem, OpenMP, GMRES,
* user-supplied preconditioner
*
* This example program uses SUNLinSol_SPGMR as the linear
* solver, and IDACalcIC for initial condition calculation.
*
* The mathematical problem solved in this example is a DAE system
* that arises from a system of partial differential equations after
* spatial discretization. The PDE system is a food web population
* model, with predator-prey interaction and diffusion on the unit
* square in two dimensions. The dependent variable vector is:
*
* 1 2 ns
* c = (c , c , ..., c ) , ns = 2 * np
*
* and the PDE's are as follows:
*
* i i i
* dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np)
* xx yy i
*
* i i
* 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns)
* xx yy i
*
* where the reaction terms R are:
*
* i ns j
* R (x,y,c) = c * (b(i) + sum a(i,j)*c )
* i j=1
*
* The number of species is ns = 2 * np, with the first np being
* prey and the last np being predators. The coefficients a(i,j),
* b(i), d(i) are:
*
* a(i,i) = -AA (all i)
* a(i,j) = -GG (i <= np , j > np)
* a(i,j) = EE (i > np, j <= np)
* all other a(i,j) = 0
* b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np)
* b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np)
* d(i) = DPREY (i <= np)
* d(i) = DPRED (i > np)
*
* The various scalar parameters required are set using '#define'
* statements or directly in routine InitUserData. In this program,
* np = 1, ns = 2. The boundary conditions are homogeneous Neumann:
* normal derivative = 0.
*
* A polynomial in x and y is used to set the initial values of the
* first np variables (the prey variables) at each x,y location,
* while initial values for the remaining (predator) variables are
* set to a flat value, which is corrected by IDACalcIC.
*
* The PDEs are discretized by central differencing on a MX by MY
* mesh.
*
* The DAE system is solved by IDA using the SUNLinSol_SPGMR linear solver.
* Output is printed at t = 0, .001, .01, .1, .4, .7, 1.
*
* Optionally, we can set the number of threads from environment
* variable or command line. To check the current value for number
* of threads from environment:
* % echo $OMP_NUM_THREADS
*
* Execution:
*
* To use the default value for the number of threads from
* the OMP_NUM_THREADS environment value:
* % ./idaFoodWeb_kry_omp
* To specify the number of threads at the command line, use
* % ./idaFoodWeb_kry_omp num_threads
* where num_threads is the desired number of threads.
*
* -----------------------------------------------------------------
* References:
* [1] Peter N. Brown and Alan C. Hindmarsh,
* Reduced Storage Matrix Methods in Stiff ODE systems, Journal
* of Applied Mathematics and Computation, Vol. 31 (May 1989),
* pp. 40-91.
*
* [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Using Krylov Methods in the Solution of Large-Scale
* Differential-Algebraic Systems, SIAM J. Sci. Comput., 15
* (1994), pp. 1467-1488.
*
* [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Consistent Initial Condition Calculation for Differential-
* Algebraic Systems, SIAM J. Sci. Comput., 19 (1998),
* pp. 1495-1512.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ida/ida.h>
#include <sunlinsol/sunlinsol_spgmr.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_dense.h>
#include <sundials/sundials_types.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* helpful macros */
#ifndef MAX
#define MAX(A, B) ((A) > (B) ? (A) : (B))
#endif
/* Problem Constants. */
#define NPREY 1 /* No. of prey (= no. of predators). */
#define NUM_SPECIES 2*NPREY
#define PI RCONST(3.1415926535898)
#define FOURPI (RCONST(4.0)*PI)
#define MX 20 /* MX = number of x mesh points */
#define MY 20 /* MY = number of y mesh points */
#define NSMX (NUM_SPECIES * MX)
#define NEQ (NUM_SPECIES*MX*MY)
#define AA RCONST(1.0) /* Coefficient in above eqns. for a */
#define EE RCONST(10000.) /* Coefficient in above eqns. for a */
#define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */
#define BB RCONST(1.0) /* Coefficient in above eqns. for b */
#define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */
#define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */
#define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */
#define BETA RCONST(1000.) /* Coefficient beta in above eqns. */
#define AX RCONST(1.0) /* Total range of x variable */
#define AY RCONST(1.0) /* Total range of y variable */
#define RTOL RCONST(1.e-5) /* Relative tolerance */
#define ATOL RCONST(1.e-5) /* Absolute tolerance */
#define NOUT 6 /* Number of output times */
#define TMULT RCONST(10.0) /* Multiplier for tout values */
#define TADD RCONST(0.3) /* Increment for tout values */
#define ZERO RCONST(0.)
#define ONE RCONST(1.0)
/*
* User-defined vector and accessor macro: IJ_Vptr.
* IJ_Vptr is defined in order to express the underlying 3-D structure of
* the dependent variable vector from its underlying 1-D storage (an N_Vector).
* IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to
* species index is = 0, x-index ix = i, and y-index jy = j.
*/
#define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX))
/* Type: UserData. Problem constants and shared workspace, passed to the
 * residual and preconditioner routines via IDASetUserData. */
typedef struct {
  sunindextype Neq, ns, np, mx, my;  /* system size, no. species, no. prey, mesh dims */
  realtype dx, dy, **acoef;          /* mesh spacings; ns x ns interaction matrix */
  realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES];  /* diffusion and growth coefficients */
  realtype **PP[MX][MY];             /* ns x ns preconditioner block per grid point */
  sunindextype *pivot[MX][MY];       /* pivot array for the LU of each block */
  N_Vector rates;                    /* scratch vector of reaction rates (filled by Fweb) */
  N_Vector ewt;                      /* error-weight vector fetched from IDA in Precond */
  void *ida_mem;                     /* IDA memory, needed by Precond for IDAGet* calls */
  int nthreads;                      /* number of OpenMP threads */
} *UserData;
/* Prototypes for functions called by the IDA Solver. */
static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval,
void *user_data);
static int Precond(realtype tt, N_Vector cc, N_Vector cp,
N_Vector rr, realtype cj, void *user_data);
static int PSolve(realtype tt, N_Vector cc, N_Vector cp,
N_Vector rr, N_Vector rvec, N_Vector zvec,
realtype cj, realtype delta, void *user_data);
/* Prototypes for private Helper Functions. */
static void InitUserData(UserData webdata);
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
UserData webdata);
static void PrintHeader(int maxl, realtype rtol, realtype atol);
static void PrintOutput(void *ida_mem, N_Vector c, realtype t);
static void PrintFinalStats(void *ida_mem);
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata);
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
UserData webdata);
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2);
static int check_retval(void *returnvalue, char *funcname, int opt);
/*
*--------------------------------------------------------------------
* MAIN PROGRAM
*--------------------------------------------------------------------
*/
int main(int argc, char *argv[])
{
void *ida_mem;
SUNLinearSolver LS;
UserData webdata;
N_Vector cc, cp, id;
int iout, jx, jy, retval;
int maxl;
realtype rtol, atol, t0, tout, tret;
int num_threads;
SUNContext ctx;
ida_mem = NULL;
LS = NULL;
webdata = NULL;
cc = cp = id = NULL;
/* Set the number of threads to use */
num_threads = 1; /* default value */
#ifdef _OPENMP
num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS */
#endif
if (argc > 1) /* overwrithe with command line value, if supplied */
num_threads = (int) strtol(argv[1], NULL, 0);
/* Create the SUNDIALS context object for this simulation */
retval = SUNContext_Create(NULL, &ctx);
if (check_retval(&retval, "SUNContext_Create", 1)) return 1;
/* Allocate and initialize user data block webdata. */
webdata = (UserData) malloc(sizeof *webdata);
webdata->rates = N_VNew_OpenMP(NEQ, num_threads, ctx);
webdata->acoef = SUNDlsMat_newDenseMat(NUM_SPECIES, NUM_SPECIES);
webdata->ewt = N_VNew_OpenMP(NEQ, num_threads, ctx);
for (jx = 0; jx < MX; jx++) {
for (jy = 0; jy < MY; jy++) {
(webdata->pivot)[jx][jy] = SUNDlsMat_newIndexArray(NUM_SPECIES);
(webdata->PP)[jx][jy] = SUNDlsMat_newDenseMat(NUM_SPECIES, NUM_SPECIES);
}
}
webdata->nthreads = num_threads;
InitUserData(webdata);
/* Allocate N-vectors and initialize cc, cp, and id. */
cc = N_VNew_OpenMP(NEQ, num_threads, ctx);
if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1);
cp = N_VNew_OpenMP(NEQ, num_threads, ctx);
if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1);
id = N_VNew_OpenMP(NEQ, num_threads, ctx);
if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1);
SetInitialProfiles(cc, cp, id, webdata);
/* Set remaining inputs to IDAMalloc. */
t0 = ZERO;
rtol = RTOL;
atol = ATOL;
/* Call IDACreate and IDAMalloc to initialize IDA. */
ida_mem = IDACreate(ctx);
if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1);
retval = IDASetUserData(ida_mem, webdata);
if(check_retval(&retval, "IDASetUserData", 1)) return(1);
retval = IDASetId(ida_mem, id);
if(check_retval(&retval, "IDASetId", 1)) return(1);
retval = IDAInit(ida_mem, resweb, t0, cc, cp);
if(check_retval(&retval, "IDAInit", 1)) return(1);
retval = IDASStolerances(ida_mem, rtol, atol);
if(check_retval(&retval, "IDASStolerances", 1)) return(1);
webdata->ida_mem = ida_mem;
/* Create SUNLinSol_SPGMR linear solver, attach to IDA, and set
preconditioning routines. */
maxl = 16; /* max dimension of the Krylov subspace */
LS = SUNLinSol_SPGMR(cc, SUN_PREC_LEFT, maxl, ctx); /* IDA only allows left preconditioning */
if(check_retval((void *)LS, "SUNLinSol_SPGMR", 0)) return(1);
retval = IDASetLinearSolver(ida_mem, LS, NULL);
if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1);
retval = IDASetPreconditioner(ida_mem, Precond, PSolve);
if(check_retval(&retval, "IDASetPreconditioner", 1)) return(1);
/* Call IDACalcIC (with default options) to correct the initial values. */
tout = RCONST(0.001);
retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout);
if(check_retval(&retval, "IDACalcIC", 1)) return(1);
/* Print heading, basic parameters, and initial values. */
PrintHeader(maxl, rtol, atol);
PrintOutput(ida_mem, cc, ZERO);
/* Loop over iout, call IDASolve (normal mode), print selected output. */
for (iout = 1; iout <= NOUT; iout++) {
retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL);
if(check_retval(&retval, "IDASolve", 1)) return(retval);
PrintOutput(ida_mem, cc, tret);
if (iout < 3) tout *= TMULT; else tout += TADD;
}
/* Print final statistics and free memory. */
PrintFinalStats(ida_mem);
printf("num_threads = %i\n\n", num_threads);
/* Free memory */
IDAFree(&ida_mem);
SUNLinSolFree(LS);
N_VDestroy(cc);
N_VDestroy(cp);
N_VDestroy(id);
SUNDlsMat_destroyMat(webdata->acoef);
N_VDestroy(webdata->rates);
N_VDestroy(webdata->ewt);
for (jx = 0; jx < MX; jx++) {
for (jy = 0; jy < MY; jy ++) {
SUNDlsMat_destroyArray((webdata->pivot)[jx][jy]);
SUNDlsMat_destroyMat((webdata->PP)[jx][jy]);
}
}
free(webdata);
SUNContext_Free(&ctx);
return(0);
}
/* Define lines for readability in later routines */
#define acoef (webdata->acoef)
#define bcoef (webdata->bcoef)
#define cox (webdata->cox)
#define coy (webdata->coy)
/*
*--------------------------------------------------------------------
* FUNCTIONS CALLED BY IDA
*--------------------------------------------------------------------
*/
/*
 * resweb: System residual function for predator-prey system.
 * This routine calls Fweb to get all the right-hand sides of the
 * equations, then loads the residual vector accordingly,
 * using cp in the case of prey species.
 */
static int resweb(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector res, void *user_data)
{
  sunindextype jx, jy, is, yloc, loc, np;
  realtype *resv, *cpv;
  UserData webdata;

  jx = jy = is = 0;  /* defensive init; the loops below assign these before use */

  webdata = (UserData)user_data;
  cpv = NV_DATA_OMP(cp);
  resv = NV_DATA_OMP(res);
  np = webdata->np;  /* species 0..np-1 are prey (differential); the rest algebraic */

  /* Call Fweb to set res to vector of right-hand sides. */
  Fweb(tt, cc, res, webdata);

  /* Loop over all grid points, setting residual values appropriately
     for differential or algebraic components.  Grid points are independent,
     so the outer jy loop is parallelized over webdata->nthreads threads. */
#pragma omp parallel for default(shared) private(jy, jx, is, yloc, loc) schedule(static) num_threads(webdata->nthreads)
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;
      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np)
          resv[loc+is] = cpv[loc+is] - resv[loc+is];  /* differential: F = c' - rhs */
        else
          resv[loc+is] = -resv[loc+is];               /* algebraic: F = -rhs */
      }
    }
  }

  return(0);
}
/*
 * Precond: preconditioner setup routine.
 * For each grid point, approximate the ns x ns reaction Jacobian block by
 * difference quotients (perturbing one species at a time), add cj to the
 * diagonal entries of the differential (prey) components, and LU-factor
 * the block for later use by PSolve.  Returns 1 (recoverable failure)
 * if any block factorization fails.
 */
static int Precond(realtype tt, N_Vector cc, N_Vector cp,
                   N_Vector rr, realtype cj, void *user_data)
{
  int retval;
  sunindextype ret;
  realtype uround, xx, yy, del_x, del_y;
  realtype **Pxy, *ratesxy, *Pxycol, *cxy, *cpxy, *ewtxy, cctmp;
  realtype inc, fac, sqru, perturb_rates[NUM_SPECIES];
  int is, js, jx, jy;
  sunindextype np;
  void *ida_mem;
  N_Vector ewt;
  realtype hh;
  UserData webdata;

  webdata = (UserData) user_data;
  del_x = webdata->dx;
  del_y = webdata->dy;
  np = webdata->np;            /* number of prey (differential) species */

  uround = UNIT_ROUNDOFF;
  sqru = sqrt(uround);         /* perturbation scale for the difference quotients */
  ida_mem = webdata->ida_mem;
  ewt = webdata->ewt;
  retval = IDAGetErrWeights(ida_mem, ewt);
  if(check_retval(&retval, "IDAGetErrWeights", 1)) return(1);
  retval = IDAGetCurrentStep(ida_mem, &hh);
  if(check_retval(&retval, "IDAGetCurrentStep", 1)) return(1);

  for (jy = 0; jy < MY; jy++) {
    yy = jy * del_y;
    for (jx = 0; jx < MX; jx++) {
      xx = jx * del_x;
      Pxy = (webdata->PP)[jx][jy];
      cxy = IJ_Vptr(cc, jx, jy);
      cpxy = IJ_Vptr(cp, jx, jy);
      ewtxy = IJ_Vptr(ewt, jx, jy);
      ratesxy = IJ_Vptr((webdata->rates), jx, jy);

      for (js = 0; js < NUM_SPECIES; js++) {
        /* Perturbation size based on the solution, its derivative, and
           the error weight at this component. */
        inc = sqru*(MAX(fabs(cxy[js]), MAX(hh*fabs(cpxy[js]), ONE/ewtxy[js])));
        cctmp = cxy[js];
        cxy[js] += inc;
        fac = -ONE/inc;
        WebRates(xx, yy, cxy, perturb_rates, webdata);
        Pxycol = Pxy[js];
        for (is = 0; is < NUM_SPECIES; is++)
          Pxycol[is] = (perturb_rates[is] - ratesxy[is])*fac;
        /* Add cj on the diagonal for the differential (prey) components.
           Was the hard-coded literal 1; np == NPREY so behavior is unchanged,
           and this now tracks the prey count like resweb does. */
        if (js < np) Pxycol[js] += cj;
        cxy[js] = cctmp;   /* restore the perturbed entry */
      }

      ret = SUNDlsMat_denseGETRF(Pxy, NUM_SPECIES, NUM_SPECIES, (webdata->pivot)[jx][jy]);
      if (ret != 0) return(1);
    }
  }

  return(0);
}
/*
 * PSolve: preconditioner solve routine.
 * Applies the block-diagonal preconditioner: copies rvec into zvec, then
 * back-solves each grid point's LU-factored ns x ns block (computed in
 * Precond).  Grid points are independent, so the collapsed loop nest is
 * parallelized.  (Fixed the misspelled parameter name 'dalta' -> 'delta'.)
 */
static int PSolve(realtype tt, N_Vector cc, N_Vector cp,
                  N_Vector rr, N_Vector rvec, N_Vector zvec,
                  realtype cj, realtype delta, void *user_data)
{
  realtype **Pxy, *zxy;
  sunindextype *pivot;
  sunindextype jx, jy;
  UserData webdata;

  jx = jy = 0;  /* defensive init; the parallel loops assign these */

  webdata = (UserData) user_data;

  N_VScale(ONE, rvec, zvec);

#pragma omp parallel for collapse(2) default(shared) private(jx, jy, zxy, Pxy, pivot) schedule(static) num_threads(webdata->nthreads)
  for (jx = 0; jx < MX; jx++) {
    for (jy = 0; jy <MY; jy++) {
      zxy = IJ_Vptr(zvec, jx, jy);
      Pxy = (webdata->PP)[jx][jy];
      pivot = (webdata->pivot)[jx][jy];
      SUNDlsMat_denseGETRS(Pxy, NUM_SPECIES, pivot, zxy);
    }
  }

  return(0);
}
/*
*--------------------------------------------------------------------
* PRIVATE FUNCTIONS
*--------------------------------------------------------------------
*/
/*
 * InitUserData: Load problem constants in webdata (of type UserData).
 */
static void InitUserData(UserData webdata)
{
  sunindextype i, j, np;
  realtype *a1,*a2, *a3, *a4, dx2, dy2;

  webdata->mx = MX;
  webdata->my = MY;
  webdata->ns = NUM_SPECIES;
  webdata->np = NPREY;
  webdata->dx = AX/(MX-1);  /* mesh spacings on the unit square */
  webdata->dy = AY/(MY-1);
  webdata->Neq= NEQ;

  /* Set up the coefficients a and b, and others found in the equations. */
  np = webdata->np;
  dx2 = (webdata->dx)*(webdata->dx); dy2 = (webdata->dy)*(webdata->dy);

  for (i = 0; i < np; i++) {
    /* a1..a4 walk the four np x np quadrants of acoef (see the file header):
       a1: prey row / predator col (-GG), a2: predator row / prey col (EE),
       a3: prey/prey (0), a4: predator/predator (0). */
    a1 = &(acoef[i][np]);
    a2 = &(acoef[i+np][0]);
    a3 = &(acoef[i][0]);
    a4 = &(acoef[i+np][np]);

    /* Fill in the portion of acoef in the four quadrants, row by row. */
    for (j = 0; j < np; j++) {
      *a1++ = -GG;
      *a2++ = EE;
      *a3++ = ZERO;
      *a4++ = ZERO;
    }

    /* Reset the diagonal elements of acoef to -AA. */
    acoef[i][i] = -AA; acoef[i+np][i+np] = -AA;

    /* Set coefficients for b and diffusion terms. */
    bcoef[i] = BB; bcoef[i+np] = -BB;
    cox[i] = DPREY/dx2; cox[i+np] = DPRED/dx2;
    coy[i] = DPREY/dy2; coy[i+np] = DPRED/dy2;
  }
}
/*
 * SetInitialProfiles: Set initial conditions in cc, cp, and id.
 * A polynomial profile is used for the prey cc values, and a constant
 * (1.0e5) is loaded as the initial guess for the predator cc values.
 * The id values are set to 1 for the prey and 0 for the predators.
 * The prey cp values are set according to the given system, and
 * the predator cp values are set to zero.
 */
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
                               UserData webdata)
{
  sunindextype loc, yloc, is, jx, jy, np;
  realtype xx, yy, xyfactor;
  realtype *ccv, *cpv, *idv;

  ccv = NV_DATA_OMP(cc);
  cpv = NV_DATA_OMP(cp);
  idv = NV_DATA_OMP(id);
  np = webdata->np;

  /* Loop over grid, load cc values and id values. */
  for (jy = 0; jy < MY; jy++) {
    yy = jy * webdata->dy;
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      xx = jx * webdata->dx;
      /* Polynomial bump (squared) that vanishes on the domain boundary. */
      xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy);
      xyfactor *= xyfactor;
      loc = yloc + NUM_SPECIES*jx;
      for (is = 0; is < NUM_SPECIES; is++) {
        if (is < np) {
          /* Prey: polynomial profile; differential component (id = 1). */
          ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor;
          idv[loc+is] = ONE;
        }
        else {
          /* Predator: flat guess, corrected by IDACalcIC; algebraic (id = 0). */
          ccv[loc+is] = RCONST(1.0e5);
          idv[loc+is] = ZERO;
        }
      }
    }
  }

  /* Set c' for the prey by calling the function Fweb. */
  Fweb(ZERO, cc, cp, webdata);

  /* Set c' for predators to 0. */
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;
      for (is = np; is < NUM_SPECIES; is++) {
        cpv[loc+is] = ZERO;
      }
    }
  }
}
/*
 * Print first lines of output (problem description): problem size,
 * tolerances, linear solver choice, and the column header for the
 * per-output-time table printed by PrintOutput.
 */
static void PrintHeader(int maxl, realtype rtol, realtype atol)
{
  printf("\nidaFoodWeb_kry_omp: Predator-prey DAE OpenMP example problem using Krylov solver for IDA \n\n");
  printf("Number of species ns: %d", NUM_SPECIES);
  printf(" Mesh dimensions: %d x %d", MX, MY);
  printf(" System size: %d\n", NEQ);
  /* Format width depends on the configured realtype precision. */
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#else
  printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#endif
  printf("Linear solver: SUNLinSol_SPGMR, maxl = %d\n",maxl);
  printf("CalcIC called to correct initial predator concentrations.\n\n");
  printf("-----------------------------------------------------------\n");
  printf(" t bottom-left top-right");
  printf(" | nst k h\n");
  printf("-----------------------------------------------------------\n\n");
}
/*
 * PrintOutput: Print output values at output time t = tt.
 * Selected run statistics are printed. Then values of the concentrations
 * are printed for the bottom left and top right grid points only.
 */
static void PrintOutput(void *ida_mem, N_Vector c, realtype t)
{
  int i, kused, retval;
  long int nst;
  realtype *c_bl, *c_tr, hused;

  /* Fetch last order, step count and last step size from IDA. */
  retval = IDAGetLastOrder(ida_mem, &kused);
  check_retval(&retval, "IDAGetLastOrder", 1);
  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetLastStep(ida_mem, &hused);
  check_retval(&retval, "IDAGetLastStep", 1);

  /* Corner grid points: bottom-left (0,0) and top-right (MX-1,MY-1). */
  c_bl = IJ_Vptr(c,0,0);
  c_tr = IJ_Vptr(c,MX-1,MY-1);

#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#else
  printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
         t, c_bl[0], c_tr[0], nst, kused, hused);
  for (i=1;i<NUM_SPECIES;i++)
    printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#endif

  printf("\n");
}
/*
 * PrintFinalStats: Print final integrator and linear-solver counters.
 * Now also reports the linear-solver residual evaluations (nrevalsLS),
 * which were fetched via IDAGetNumLinResEvals but never printed.
 */
static void PrintFinalStats(void *ida_mem)
{
  long int nst, nre, sli, netf, nps, npevals, nrevalsLS;
  int retval;

  retval = IDAGetNumSteps(ida_mem, &nst);
  check_retval(&retval, "IDAGetNumSteps", 1);
  retval = IDAGetNumLinIters(ida_mem, &sli);
  check_retval(&retval, "IDAGetNumLinIters", 1);
  retval = IDAGetNumResEvals(ida_mem, &nre);
  check_retval(&retval, "IDAGetNumResEvals", 1);
  retval = IDAGetNumErrTestFails(ida_mem, &netf);
  check_retval(&retval, "IDAGetNumErrTestFails", 1);
  retval = IDAGetNumPrecSolves(ida_mem, &nps);
  check_retval(&retval, "IDAGetNumPrecSolves", 1);
  retval = IDAGetNumPrecEvals(ida_mem, &npevals);
  check_retval(&retval, "IDAGetNumPrecEvals", 1);
  retval = IDAGetNumLinResEvals(ida_mem, &nrevalsLS);
  check_retval(&retval, "IDAGetNumLinResEvals", 1);

  printf("-----------------------------------------------------------\n");
  printf("Final run statistics: \n\n");
  printf("Number of steps = %ld\n", nst);
  printf("Number of residual evaluations = %ld\n", nre);
  printf("Number of linear residual evaluations = %ld\n", nrevalsLS);
  printf("Number of Preconditioner evaluations = %ld\n", npevals);
  printf("Number of linear iterations = %ld\n", sli);
  printf("Number of error test failures = %ld\n", netf);
  printf("Number of precond solve fun called = %ld\n", nps);
}
/*
 * Fweb: Rate function for the food-web problem.
 * This routine computes the right-hand sides of the system equations,
 * consisting of the diffusion term and interaction term.
 * The interaction term is computed by the function WebRates.
 */
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
                 UserData webdata)
{
  sunindextype jx, jy, is, idyu, idyl, idxu, idxl;
  realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui;

  /* Loop over grid points, evaluate interaction vector (length ns),
     form diffusion difference terms, and load crate. */
  jx = jy = is = 0;  /* defensive init; reassigned by the loops below */
  for (jy = 0; jy < MY; jy++) {
    yy = (webdata->dy) * jy ;
    /* Neighbor offsets in y; the sign flip at a boundary mirrors the
       interior neighbor (homogeneous Neumann condition, see file header). */
    idyu = (jy!=MY-1) ? NSMX : -NSMX;
    idyl = (jy!= 0 ) ? NSMX : -NSMX;
    for (jx = 0; jx < MX; jx++) {
      xx = (webdata->dx) * jx;
      /* Neighbor offsets in x, with the same boundary mirroring. */
      idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES;
      idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES;
      cxy = IJ_Vptr(cc,jx,jy);
      ratesxy = IJ_Vptr(webdata->rates,jx,jy);
      cratexy = IJ_Vptr(crate,jx,jy);

      /* Get interaction vector at this grid point. */
      WebRates(xx, yy, cxy, ratesxy, webdata);

      /* Loop over species, do differencing, load crate segment.
         Species at one grid point are independent, so this loop is
         parallelized over webdata->nthreads threads. */
#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads)
      for (is = 0; is < NUM_SPECIES; is++) {
        /* Differencing in y. */
        dcyli = *(cxy+is) - *(cxy - idyl + is) ;
        dcyui = *(cxy + idyu + is) - *(cxy+is);

        /* Differencing in x. */
        dcxli = *(cxy+is) - *(cxy - idxl + is);
        dcxui = *(cxy + idxu +is) - *(cxy+is);

        /* Compute the crate values at (xx,yy). */
        cratexy[is] = coy[is] * (dcyui - dcyli) +
                      cox[is] * (dcxui - dcxli) + ratesxy[is];
      } /* End is loop */
    } /* End of jx loop */
  } /* End of jy loop */
}
/*
 * WebRates: Evaluate reaction rates at a given spatial point (xx, yy).
 * For each species js the rate is c_js * (b_js * fac + sum_j a(js,j)*c_j),
 * where fac carries the spatial modulation of the growth coefficients.
 */
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
                     UserData webdata)
{
  int js;
  realtype fac, interaction;

  /* Spatial modulation factor for the b coefficients. */
  fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy);
  for (js = 0; js < NUM_SPECIES; js++) {
    interaction = dotprod(NUM_SPECIES, cxy, acoef[js]);
    ratesxy[js] = cxy[js]*( bcoef[js]*fac + interaction );
  }
}
/*
 * dotprod: dot product of two realtype arrays of length size,
 * accumulated in index order (used by WebRates).
 */
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
  sunindextype k;
  realtype sum = ZERO;

  for (k = 0; k < size; k++) {
    sum += x1[k] * x2[k];
  }
  return(sum);
}
/*
 * Check function return value...
 *   opt == 0 means SUNDIALS function allocates memory so check if
 *            returned NULL pointer
 *   opt == 1 means SUNDIALS function returns an integer value so check if
 *            retval < 0
 *   opt == 2 means function allocates memory so check if returned
 *            NULL pointer
 * Returns 1 on failure (after printing a message to stderr), 0 otherwise.
 */
static int check_retval(void *returnvalue, char *funcname, int opt)
{
  int *errcode;

  switch (opt) {
  case 0:
    /* SUNDIALS allocator: NULL means no memory allocated. */
    if (returnvalue == NULL) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;
  case 1:
    /* SUNDIALS integer return: negative means failure. */
    errcode = (int *) returnvalue;
    if (*errcode < 0) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, *errcode);
      return(1);
    }
    break;
  case 2:
    /* Generic allocator: NULL means no memory allocated. */
    if (returnvalue == NULL) {
      fprintf(stderr,
              "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;
  }
  return(0);
}
|
advection.h | //*****************************************************************************
// Title : src/equation/advection.h
// Author : Tanabe Yuta
// Date : 2021/08/02
// Copyright : (C)2021 TanabeYuta
//*****************************************************************************
#pragma once
#include "navierstokes.h"
#ifdef _USE_AVX_DEFINES
#include "../equation_avx/advection_avx.h"
#endif
namespace PANSLBM2 {
namespace AD {
// Update the macroscopic temperature (_tem) and heat flux (_qx, _qy) of the
// 2D advection-diffusion model from the distributions _g0/_g at node _idx.
template<class T, template<class>class Q>
void Macro(T &_tem, T &_qx, T &_qy, T _ux, T _uy, const T *_g0, const T *_g, T _omegag, int _idx) {
    T tem = _g0[_idx], qx = T(), qy = T();
    for (int k = 1; k < Q<T>::nc; ++k) {
        const T gk = _g[Q<T>::IndexF(_idx, k)];
        tem += gk;
        qx += Q<T>::cx[k]*gk;
        qy += Q<T>::cy[k]*gk;
    }
    const T corr = 1.0 - 0.5*_omegag;   // half-relaxation correction applied to the raw flux moments
    _tem = tem;
    _qx = corr*(qx - tem*_ux);
    _qy = corr*(qy - tem*_uy);
}
// Update the macroscopic temperature (_tem) and heat flux (_qx, _qy, _qz) of
// the 3D advection-diffusion model from the distributions _g0/_g at node _idx.
template<class T, template<class>class Q>
void Macro(T &_tem, T &_qx, T &_qy, T &_qz, T _ux, T _uy, T _uz, const T *_g0, const T *_g, T _omegag, int _idx) {
    T tem = _g0[_idx], qx = T(), qy = T(), qz = T();
    for (int k = 1; k < Q<T>::nc; ++k) {
        const T gk = _g[Q<T>::IndexF(_idx, k)];
        tem += gk;
        qx += Q<T>::cx[k]*gk;
        qy += Q<T>::cy[k]*gk;
        qz += Q<T>::cz[k]*gk;
    }
    const T corr = 1.0 - 0.5*_omegag;   // half-relaxation correction applied to the raw flux moments
    _tem = tem;
    _qx = corr*(qx - tem*_ux);
    _qy = corr*(qy - tem*_uy);
    _qz = corr*(qz - tem*_uz);
}
// Fill _geq with the equilibrium distribution of the 2D advection-diffusion
// model for temperature _tem and advecting velocity (_ux, _uy).
template<class T, template<class>class Q>
void Equilibrium(T *_geq, T _tem, T _ux, T _uy) {
    for (int k = 0; k < Q<T>::nc; ++k) {
        const T ciu = Q<T>::cx[k]*_ux + Q<T>::cy[k]*_uy;   // lattice velocity . fluid velocity
        _geq[k] = Q<T>::ei[k]*_tem*(1.0 + 3.0*ciu);
    }
}
// Fill _geq with the equilibrium distribution of the 3D advection-diffusion
// model for temperature _tem and advecting velocity (_ux, _uy, _uz).
template<class T, template<class>class Q>
void Equilibrium(T *_geq, T _tem, T _ux, T _uy, T _uz) {
    for (int k = 0; k < Q<T>::nc; ++k) {
        const T ciu = Q<T>::cx[k]*_ux + Q<T>::cy[k]*_uy + Q<T>::cz[k]*_uz;   // lattice velocity . fluid velocity
        _geq[k] = Q<T>::ei[k]*_tem*(1.0 + 3.0*ciu);
    }
}
// Apply the natural-convection body force for 2D: each distribution of _f at
// node _idx gets a contribution proportional to (_tem - _tem0) along (_gx, _gy).
template<class T, template<class>class P>
void ExternalForceNaturalConvection(T _tem, T _gx, T _gy, T _tem0, T *_f, int _idx) {
    const T dtem = _tem - _tem0;   // temperature offset driving the force
    for (int k = 1; k < P<T>::nc; ++k) {
        _f[P<T>::IndexF(_idx, k)] += 3.0*P<T>::ei[k]*(P<T>::cx[k]*_gx + P<T>::cy[k]*_gy)*dtem;
    }
}
// Apply the natural-convection body force for 3D: each distribution of _f at
// node _idx gets a contribution proportional to (_tem - _tem0) along (_gx, _gy, _gz).
template<class T, template<class>class P>
void ExternalForceNaturalConvection(T _tem, T _gx, T _gy, T _gz, T _tem0, T *_f, int _idx) {
    const T dtem = _tem - _tem0;   // temperature offset driving the force
    for (int k = 1; k < P<T>::nc; ++k) {
        _f[P<T>::IndexF(_idx, k)] += 3.0*P<T>::ei[k]*(P<T>::cx[k]*_gx + P<T>::cy[k]*_gy + P<T>::cz[k]*_gz)*dtem;
    }
}
// Apply the heat-exchange source term for 2D/3D: the source
// beta*(1 - T)/(1 + beta) is distributed over all directions,
// weighted by the lattice weights ei.
template<class T, template<class>class Q>
void ExternalForceHeatExchange(T _tem, T _beta, T *_g0, T *_g, int _idx) {
    const T source = _beta*(1.0 - _tem)/(1.0 + _beta);
    _g0[_idx] += Q<T>::ei[0]*source;
    for (int k = 1; k < Q<T>::nc; ++k) {
        _g[Q<T>::IndexF(_idx, k)] += Q<T>::ei[k]*source;
    }
}
// Function of Update macro and Collide of force convection for 2D.
// For every lattice node: compute macroscopic values from the NS (P) and
// AD (Q) distributions, optionally store them into the output arrays, then
// relax both distribution sets toward their equilibria with rates derived
// from _viscosity and _diffusivity.  Nodes are independent, so the loop is
// OpenMP-parallel with per-thread equilibrium buffers.
template<class T, template<class>class P, template<class>class Q>
void MacroCollideForceConvection(
    P<T>& _p, T *_rho, T *_ux, T *_uy, T _viscosity,
    Q<T>& _q, T *_tem, T *_qx, T *_qy, T _diffusivity,
    bool _issave = false
) {
    T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc];
    T omegag = 1.0/(3.0*_diffusivity + 0.5), iomegag = 1.0 - omegag, geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
    for (int idx = 0; idx < _p.nxyz; ++idx) {
        // Update macro
        T rho, ux, uy;
        NS::Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
        T tem, qx, qy;
        Macro<T, Q>(tem, qx, qy, ux, uy, _q.f0, _q.f, omegag, idx);

        // Save macro if need
        if (_issave) {
            _rho[idx] = rho;
            _ux[idx] = ux;
            _uy[idx] = uy;
            _tem[idx] = tem;
            _qx[idx] = qx;
            _qy[idx] = qy;
        }

        // Collide: blend each old distribution with its equilibrium
        NS::Equilibrium<T, P>(feq, rho, ux, uy);
        _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
        for (int c = 1; c < P<T>::nc; ++c) {
            int idxf = P<T>::IndexF(idx, c);
            _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
        }
        Equilibrium<T, Q>(geq, tem, ux, uy);
        _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
        for (int c = 1; c < Q<T>::nc; ++c) {
            int idxf = Q<T>::IndexF(idx, c);
            _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
        }
    }
}
// Function of Update macro and Collide of force convection for 3D.
// Same structure as the 2D overload, with the additional z components
// (uz, qz) carried through the macro update, optional save, and collision.
template<class T, template<class>class P, template<class>class Q>
void MacroCollideForceConvection(
    P<T>& _p, T *_rho, T *_ux, T *_uy, T *_uz, T _viscosity,
    Q<T>& _q, T *_tem, T *_qx, T *_qy, T *_qz, T _diffusivity,
    bool _issave = false
) {
    T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc];
    T omegag = 1.0/(3.0*_diffusivity + 0.5), iomegag = 1.0 - omegag, geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
    for (int idx = 0; idx < _p.nxyz; ++idx) {
        // Update macro
        T rho, ux, uy, uz;
        NS::Macro<T, P>(rho, ux, uy, uz, _p.f0, _p.f, idx);
        T tem, qx, qy, qz;
        Macro<T, Q>(tem, qx, qy, qz, ux, uy, uz, _q.f0, _q.f, omegag, idx);

        // Save macro if need
        if (_issave) {
            _rho[idx] = rho;
            _ux[idx] = ux;
            _uy[idx] = uy;
            _uz[idx] = uz;
            _tem[idx] = tem;
            _qx[idx] = qx;
            _qy[idx] = qy;
            _qz[idx] = qz;
        }

        // Collide: blend each old distribution with its equilibrium
        NS::Equilibrium<T, P>(feq, rho, ux, uy, uz);
        _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
        for (int c = 1; c < P<T>::nc; ++c) {
            int idxf = P<T>::IndexF(idx, c);
            _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
        }
        Equilibrium<T, Q>(geq, tem, ux, uy, uz);
        _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
        for (int c = 1; c < Q<T>::nc; ++c) {
            int idxf = Q<T>::IndexF(idx, c);
            _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
        }
    }
}
// Update macroscopic fields and perform the BGK collision for natural
// convection in 2D: a buoyancy force proportional to (tem - _tem0) along
// (_gx, _gy) is applied to the Navier-Stokes lattice _p, which advects the
// advection-diffusion lattice _q. Macroscopic values are written to the
// output arrays only when _issave is true.
template<class T, template<class>class P, template<class>class Q>
void MacroCollideNaturalConvection(
    P<T>& _p, T *_rho, T *_ux, T *_uy, T _viscosity,
    Q<T>& _q, T *_tem, T *_qx, T *_qy, T _diffusivity,
    T _gx, T _gy, T _tem0, bool _issave = false
) {
    // BGK relaxation frequencies: omega = 1/(3*coefficient + 1/2).
    T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc];
    T omegag = 1.0/(3.0*_diffusivity + 0.5), iomegag = 1.0 - omegag, geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
    for (int idx = 0; idx < _p.nxyz; ++idx) {
        // Update macro
        T rho, ux, uy;
        NS::Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
        T tem, qx, qy;
        Macro<T, Q>(tem, qx, qy, ux, uy, _q.f0, _q.f, omegag, idx);
        // External force with natural convection (buoyancy), then re-evaluate
        // the moments so the collision below uses post-force values.
        ExternalForceNaturalConvection<T, P>(tem, _gx, _gy, _tem0, _p.f, idx);
        NS::Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
        Macro<T, Q>(tem, qx, qy, ux, uy, _q.f0, _q.f, omegag, idx);
        // Save macro if needed
        if (_issave) {
            _rho[idx] = rho;
            _ux[idx] = ux;
            _uy[idx] = uy;
            _tem[idx] = tem;
            _qx[idx] = qx;
            _qy[idx] = qy;
        }
        // Collide: relax each population toward its equilibrium.
        NS::Equilibrium<T, P>(feq, rho, ux, uy);
        _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
        for (int c = 1; c < P<T>::nc; ++c) {
            int idxf = P<T>::IndexF(idx, c);
            _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
        }
        Equilibrium<T, Q>(geq, tem, ux, uy);
        _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
        for (int c = 1; c < Q<T>::nc; ++c) {
            int idxf = Q<T>::IndexF(idx, c);
            _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
        }
    }
}
// Update macroscopic fields and perform the BGK collision for natural
// convection in 3D: a buoyancy force proportional to (tem - _tem0) along
// (_gx, _gy, _gz) is applied to the Navier-Stokes lattice _p, which advects
// the advection-diffusion lattice _q. Macroscopic values are written to the
// output arrays only when _issave is true.
template<class T, template<class>class P, template<class>class Q>
void MacroCollideNaturalConvection(
    P<T>& _p, T *_rho, T *_ux, T *_uy, T *_uz, T _viscosity,
    Q<T>& _q, T *_tem, T *_qx, T *_qy, T *_qz, T _diffusivity,
    T _gx, T _gy, T _gz, T _tem0, bool _issave = false
) {
    // BGK relaxation frequencies: omega = 1/(3*coefficient + 1/2).
    T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc];
    T omegag = 1.0/(3.0*_diffusivity + 0.5), iomegag = 1.0 - omegag, geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
    for (int idx = 0; idx < _p.nxyz; ++idx) {
        // Update macro
        T rho, ux, uy, uz;
        NS::Macro<T, P>(rho, ux, uy, uz, _p.f0, _p.f, idx);
        T tem, qx, qy, qz;
        Macro<T, Q>(tem, qx, qy, qz, ux, uy, uz, _q.f0, _q.f, omegag, idx);
        // External force with natural convection (buoyancy), then re-evaluate
        // the moments so the collision below uses post-force values.
        ExternalForceNaturalConvection<T, P>(tem, _gx, _gy, _gz, _tem0, _p.f, idx);
        NS::Macro<T, P>(rho, ux, uy, uz, _p.f0, _p.f, idx);
        Macro<T, Q>(tem, qx, qy, qz, ux, uy, uz, _q.f0, _q.f, omegag, idx);
        // Save macro if needed
        if (_issave) {
            _rho[idx] = rho;
            _ux[idx] = ux;
            _uy[idx] = uy;
            _uz[idx] = uz;
            _tem[idx] = tem;
            _qx[idx] = qx;
            _qy[idx] = qy;
            _qz[idx] = qz;
        }
        // Collide: relax each population toward its equilibrium.
        NS::Equilibrium<T, P>(feq, rho, ux, uy, uz);
        _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
        for (int c = 1; c < P<T>::nc; ++c) {
            int idxf = P<T>::IndexF(idx, c);
            _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
        }
        Equilibrium<T, Q>(geq, tem, ux, uy, uz);
        _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
        for (int c = 1; c < Q<T>::nc; ++c) {
            int idxf = Q<T>::IndexF(idx, c);
            _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
        }
    }
}
// Update macroscopic fields and perform the BGK collision with a Brinkman
// friction force (per-node coefficient _alpha) on the flow lattice _p and a
// heat-exchange source (per-node coefficient _beta) on the AD lattice _q, 2D.
// Macroscopic values are written to the output arrays only when _issave is true.
template<class T, template<class>class P, template<class>class Q>
void MacroBrinkmanCollideHeatExchange(
    P<T>& _p, T *_rho, T *_ux, T *_uy, const T *_alpha, T _viscosity,
    Q<T>& _q, T *_tem, T *_qx, T *_qy, const T *_beta, T _diffusivity, bool _issave = false
) {
    // BGK relaxation frequencies: omega = 1/(3*coefficient + 1/2).
    T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc];
    T omegag = 1.0/(3.0*_diffusivity + 0.5), iomegag = 1.0 - omegag, geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
    for (int idx = 0; idx < _p.nxyz; ++idx) {
        // Update macro
        T rho, ux, uy;
        NS::Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
        T tem, qx, qy;
        Macro<T, Q>(tem, qx, qy, ux, uy, _q.f0, _q.f, omegag, idx);
        // External force with Brinkman and heat exchange; moments are
        // re-evaluated after each force so the collision uses post-force values.
        NS::ExternalForceBrinkman<T, P>(rho, ux, uy, _alpha[idx], _p.f, idx);
        NS::Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
        ExternalForceHeatExchange<T, Q>(tem, _beta[idx], _q.f0, _q.f, idx);
        Macro<T, Q>(tem, qx, qy, ux, uy, _q.f0, _q.f, omegag, idx);
        // Save macro if needed
        if (_issave) {
            _rho[idx] = rho;
            _ux[idx] = ux;
            _uy[idx] = uy;
            _tem[idx] = tem;
            _qx[idx] = qx;
            _qy[idx] = qy;
        }
        // Collide: relax each population toward its equilibrium.
        NS::Equilibrium<T, P>(feq, rho, ux, uy);
        _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
        for (int c = 1; c < P<T>::nc; ++c) {
            int idxf = P<T>::IndexF(idx, c);
            _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
        }
        Equilibrium<T, Q>(geq, tem, ux, uy);
        _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
        for (int c = 1; c < Q<T>::nc; ++c) {
            int idxf = Q<T>::IndexF(idx, c);
            _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
        }
    }
}
// Update macroscopic fields and perform the BGK collision with a Brinkman
// friction force (per-node coefficient _alpha) on the flow lattice _p and a
// heat-exchange source (per-node coefficient _beta) on the AD lattice _q, 3D.
// Macroscopic values are written to the output arrays only when _issave is true.
template<class T, template<class>class P, template<class>class Q>
void MacroBrinkmanCollideHeatExchange(
    P<T>& _p, T *_rho, T *_ux, T *_uy, T *_uz, const T *_alpha, T _viscosity,
    Q<T>& _q, T *_tem, T *_qx, T *_qy, T *_qz, const T *_beta, T _diffusivity, bool _issave = false
) {
    // BGK relaxation frequencies: omega = 1/(3*coefficient + 1/2).
    T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc];
    T omegag = 1.0/(3.0*_diffusivity + 0.5), iomegag = 1.0 - omegag, geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
    for (int idx = 0; idx < _p.nxyz; ++idx) {
        // Update macro
        T rho, ux, uy, uz;
        NS::Macro<T, P>(rho, ux, uy, uz, _p.f0, _p.f, idx);
        T tem, qx, qy, qz;
        Macro<T, Q>(tem, qx, qy, qz, ux, uy, uz, _q.f0, _q.f, omegag, idx);
        // External force with Brinkman and heat exchange; moments are
        // re-evaluated after each force so the collision uses post-force values.
        NS::ExternalForceBrinkman<T, P>(rho, ux, uy, uz, _alpha[idx], _p.f, idx);
        NS::Macro<T, P>(rho, ux, uy, uz, _p.f0, _p.f, idx);
        ExternalForceHeatExchange<T, Q>(tem, _beta[idx], _q.f0, _q.f, idx);
        Macro<T, Q>(tem, qx, qy, qz, ux, uy, uz, _q.f0, _q.f, omegag, idx);
        // Save macro if needed
        if (_issave) {
            _rho[idx] = rho;
            _ux[idx] = ux;
            _uy[idx] = uy;
            _uz[idx] = uz;
            _tem[idx] = tem;
            _qx[idx] = qx;
            _qy[idx] = qy;
            _qz[idx] = qz;
        }
        // Collide: relax each population toward its equilibrium.
        NS::Equilibrium<T, P>(feq, rho, ux, uy, uz);
        _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
        for (int c = 1; c < P<T>::nc; ++c) {
            int idxf = P<T>::IndexF(idx, c);
            _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
        }
        Equilibrium<T, Q>(geq, tem, ux, uy, uz);
        _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
        for (int c = 1; c < Q<T>::nc; ++c) {
            int idxf = Q<T>::IndexF(idx, c);
            _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
        }
    }
}
// Update macroscopic fields and perform the BGK collision for forced
// convection with a Brinkman friction force (per-node _alpha) in 2D.
// The thermal diffusivity is heterogeneous (per-node array), so omegag is
// recomputed inside the loop. When _issave and _g are set, the pre-collision
// AD populations are also copied into _g (presumably for a later adjoint or
// sensitivity pass — confirm with callers).
template<class T, template<class>class P, template<class>class Q>
void MacroBrinkmanCollideForceConvection(
    P<T>& _p, T *_rho, T *_ux, T *_uy, const T *_alpha, T _viscosity,
    Q<T>& _q, T *_tem, T *_qx, T *_qy, const T *_diffusivity,
    bool _issave = false, T *_g = nullptr
) {
    // Flow relaxation frequency is uniform: omega = 1/(3*nu + 1/2).
    T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc], geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
    for (int idx = 0; idx < _p.nxyz; ++idx) {
        // Per-node AD relaxation frequency from the local diffusivity.
        T omegag = 1.0/(3.0*_diffusivity[idx] + 0.5), iomegag = 1.0 - omegag;
        // Update macro
        T rho, ux, uy;
        NS::Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
        T tem, qx, qy;
        Macro<T, Q>(tem, qx, qy, ux, uy, _q.f0, _q.f, omegag, idx);
        // External force with Brinkman model, then re-evaluate the moments
        // so the collision below uses post-force values.
        NS::ExternalForceBrinkman<T, P>(rho, ux, uy, _alpha[idx], _p.f, idx);
        NS::Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
        Macro<T, Q>(tem, qx, qy, ux, uy, _q.f0, _q.f, omegag, idx);
        // Save macro if needed
        if (_issave) {
            _rho[idx] = rho;
            _ux[idx] = ux;
            _uy[idx] = uy;
            _tem[idx] = tem;
            _qx[idx] = qx;
            _qy[idx] = qy;
            if (_g) {
                // Snapshot of the pre-collision AD populations.
                int offsetf = Q<T>::nc*idx;
                _g[offsetf] = _q.f0[idx];
                for (int c = 1; c < Q<T>::nc; ++c) {
                    _g[offsetf + c] = _q.f[Q<T>::IndexF(idx, c)];
                }
            }
        }
        // Collide: relax each population toward its equilibrium.
        NS::Equilibrium<T, P>(feq, rho, ux, uy);
        _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
        for (int c = 1; c < P<T>::nc; ++c) {
            int idxf = P<T>::IndexF(idx, c);
            _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
        }
        Equilibrium<T, Q>(geq, tem, ux, uy);
        _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
        for (int c = 1; c < Q<T>::nc; ++c) {
            int idxf = Q<T>::IndexF(idx, c);
            _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
        }
    }
}
// Update macroscopic fields and perform the BGK collision for forced
// convection with a Brinkman friction force (per-node _alpha) in 3D.
// The thermal diffusivity is heterogeneous (per-node array), so omegag is
// recomputed inside the loop. When _issave and _g are set, the pre-collision
// AD populations are also copied into _g (presumably for a later adjoint or
// sensitivity pass — confirm with callers).
template<class T, template<class>class P, template<class>class Q>
void MacroBrinkmanCollideForceConvection(
    P<T>& _p, T *_rho, T *_ux, T *_uy, T *_uz, const T *_alpha, T _viscosity,
    Q<T>& _q, T *_tem, T *_qx, T *_qy, T *_qz, const T *_diffusivity,
    bool _issave = false, T *_g = nullptr
) {
    // Flow relaxation frequency is uniform: omega = 1/(3*nu + 1/2).
    T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc], geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
    for (int idx = 0; idx < _p.nxyz; ++idx) {
        // Per-node AD relaxation frequency from the local diffusivity.
        T omegag = 1.0/(3.0*_diffusivity[idx] + 0.5), iomegag = 1.0 - omegag;
        // Update macro
        T rho, ux, uy, uz;
        NS::Macro<T, P>(rho, ux, uy, uz, _p.f0, _p.f, idx);
        T tem, qx, qy, qz;
        Macro<T, Q>(tem, qx, qy, qz, ux, uy, uz, _q.f0, _q.f, omegag, idx);
        // External force with Brinkman model, then re-evaluate the moments
        // so the collision below uses post-force values.
        NS::ExternalForceBrinkman<T, P>(rho, ux, uy, uz, _alpha[idx], _p.f, idx);
        NS::Macro<T, P>(rho, ux, uy, uz, _p.f0, _p.f, idx);
        Macro<T, Q>(tem, qx, qy, qz, ux, uy, uz, _q.f0, _q.f, omegag, idx);
        // Save macro if needed
        if (_issave) {
            _rho[idx] = rho;
            _ux[idx] = ux;
            _uy[idx] = uy;
            _uz[idx] = uz;
            _tem[idx] = tem;
            _qx[idx] = qx;
            _qy[idx] = qy;
            _qz[idx] = qz;
            if (_g) {
                // Snapshot of the pre-collision AD populations.
                int offsetf = Q<T>::nc*idx;
                _g[offsetf] = _q.f0[idx];
                for (int c = 1; c < Q<T>::nc; ++c) {
                    _g[offsetf + c] = _q.f[Q<T>::IndexF(idx, c)];
                }
            }
        }
        // Collide: relax each population toward its equilibrium.
        NS::Equilibrium<T, P>(feq, rho, ux, uy, uz);
        _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
        for (int c = 1; c < P<T>::nc; ++c) {
            int idxf = P<T>::IndexF(idx, c);
            _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
        }
        Equilibrium<T, Q>(geq, tem, ux, uy, uz);
        _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
        for (int c = 1; c < Q<T>::nc; ++c) {
            int idxf = Q<T>::IndexF(idx, c);
            _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
        }
    }
}
// Update macroscopic fields and perform the BGK collision for natural
// convection with a Brinkman friction force (per-node _alpha) in 2D.
// Buoyancy proportional to (tem - _tem0) acts along (_gx, _gy); the thermal
// diffusivity is heterogeneous (per-node array). When _issave and _g are set,
// the pre-collision AD populations are also copied into _g (presumably for a
// later adjoint or sensitivity pass — confirm with callers).
template<class T, template<class>class P, template<class>class Q>
void MacroBrinkmanCollideNaturalConvection(
    P<T>& _p, T *_rho, T *_ux, T *_uy, const T *_alpha, T _viscosity,
    Q<T>& _q, T *_tem, T *_qx, T *_qy, const T *_diffusivity,
    T _gx, T _gy, T _tem0, bool _issave = false, T *_g = nullptr
) {
    // Flow relaxation frequency is uniform: omega = 1/(3*nu + 1/2).
    T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc], geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
    for (int idx = 0; idx < _p.nxyz; ++idx) {
        // Per-node AD relaxation frequency from the local diffusivity.
        T omegag = 1.0/(3.0*_diffusivity[idx] + 0.5), iomegag = 1.0 - omegag;
        // Update macro
        T rho, ux, uy;
        NS::Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
        T tem, qx, qy;
        Macro<T, Q>(tem, qx, qy, ux, uy, _q.f0, _q.f, omegag, idx);
        // External force with Brinkman model and natural convection, then
        // re-evaluate the moments so the collision uses post-force values.
        ExternalForceNaturalConvection<T, P>(tem, _gx, _gy, _tem0, _p.f, idx);
        NS::ExternalForceBrinkman<T, P>(rho, ux, uy, _alpha[idx], _p.f, idx);
        NS::Macro<T, P>(rho, ux, uy, _p.f0, _p.f, idx);
        Macro<T, Q>(tem, qx, qy, ux, uy, _q.f0, _q.f, omegag, idx);
        // Save macro if needed
        if (_issave) {
            _rho[idx] = rho;
            _ux[idx] = ux;
            _uy[idx] = uy;
            _tem[idx] = tem;
            _qx[idx] = qx;
            _qy[idx] = qy;
            if (_g) {
                // Snapshot of the pre-collision AD populations.
                int offsetf = Q<T>::nc*idx;
                _g[offsetf] = _q.f0[idx];
                for (int c = 1; c < Q<T>::nc; ++c) {
                    _g[offsetf + c] = _q.f[Q<T>::IndexF(idx, c)];
                }
            }
        }
        // Collide: relax each population toward its equilibrium.
        NS::Equilibrium<T, P>(feq, rho, ux, uy);
        _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
        for (int c = 1; c < P<T>::nc; ++c) {
            int idxf = P<T>::IndexF(idx, c);
            _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
        }
        Equilibrium<T, Q>(geq, tem, ux, uy);
        _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
        for (int c = 1; c < Q<T>::nc; ++c) {
            int idxf = Q<T>::IndexF(idx, c);
            _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
        }
    }
}
// Update macroscopic fields and perform the BGK collision for natural
// convection with a Brinkman friction force (per-node _alpha) in 3D.
// Buoyancy proportional to (tem - _tem0) acts along (_gx, _gy, _gz); the
// thermal diffusivity is heterogeneous (per-node array). When _issave and _g
// are set, the pre-collision AD populations are also copied into _g
// (presumably for a later adjoint or sensitivity pass — confirm with callers).
template<class T, template<class>class P, template<class>class Q>
void MacroBrinkmanCollideNaturalConvection(
    P<T>& _p, T *_rho, T *_ux, T *_uy, T *_uz, const T *_alpha, T _viscosity,
    Q<T>& _q, T *_tem, T *_qx, T *_qy, T *_qz, const T *_diffusivity,
    T _gx, T _gy, T _gz, T _tem0, bool _issave = false, T *_g = nullptr
) {
    // Flow relaxation frequency is uniform: omega = 1/(3*nu + 1/2).
    T omegaf = 1.0/(3.0*_viscosity + 0.5), iomegaf = 1.0 - omegaf, feq[P<T>::nc], geq[Q<T>::nc];
#pragma omp parallel for private(feq, geq)
    for (int idx = 0; idx < _p.nxyz; ++idx) {
        // Per-node AD relaxation frequency from the local diffusivity.
        T omegag = 1.0/(3.0*_diffusivity[idx] + 0.5), iomegag = 1.0 - omegag;
        // Update macro
        T rho, ux, uy, uz;
        NS::Macro<T, P>(rho, ux, uy, uz, _p.f0, _p.f, idx);
        T tem, qx, qy, qz;
        Macro<T, Q>(tem, qx, qy, qz, ux, uy, uz, _q.f0, _q.f, omegag, idx);
        // External force with Brinkman model and natural convection, then
        // re-evaluate the moments so the collision uses post-force values.
        ExternalForceNaturalConvection<T, P>(tem, _gx, _gy, _gz, _tem0, _p.f, idx);
        NS::ExternalForceBrinkman<T, P>(rho, ux, uy, uz, _alpha[idx], _p.f, idx);
        NS::Macro<T, P>(rho, ux, uy, uz, _p.f0, _p.f, idx);
        Macro<T, Q>(tem, qx, qy, qz, ux, uy, uz, _q.f0, _q.f, omegag, idx);
        // Save macro if needed
        if (_issave) {
            _rho[idx] = rho;
            _ux[idx] = ux;
            _uy[idx] = uy;
            _uz[idx] = uz;
            _tem[idx] = tem;
            _qx[idx] = qx;
            _qy[idx] = qy;
            _qz[idx] = qz;
            if (_g) {
                // Snapshot of the pre-collision AD populations.
                int offsetf = Q<T>::nc*idx;
                _g[offsetf] = _q.f0[idx];
                for (int c = 1; c < Q<T>::nc; ++c) {
                    _g[offsetf + c] = _q.f[Q<T>::IndexF(idx, c)];
                }
            }
        }
        // Collide: relax each population toward its equilibrium.
        NS::Equilibrium<T, P>(feq, rho, ux, uy, uz);
        _p.f0[idx] = iomegaf*_p.f0[idx] + omegaf*feq[0];
        for (int c = 1; c < P<T>::nc; ++c) {
            int idxf = P<T>::IndexF(idx, c);
            _p.f[idxf] = iomegaf*_p.f[idxf] + omegaf*feq[c];
        }
        Equilibrium<T, Q>(geq, tem, ux, uy, uz);
        _q.f0[idx] = iomegag*_q.f0[idx] + omegag*geq[0];
        for (int c = 1; c < Q<T>::nc; ++c) {
            int idxf = Q<T>::IndexF(idx, c);
            _q.f[idxf] = iomegag*_q.f[idxf] + omegag*geq[c];
        }
    }
}
// Set the initial condition of the AD lattice for 2D: every node's
// distribution is set to the equilibrium for the given temperature and
// velocity fields. Parallelized with OpenMP for consistency with the other
// per-node loops in this file (geq is per-thread scratch).
template<class T, template<class>class Q>
void InitialCondition(Q<T>& _q, const T *_tem, const T *_ux, const T *_uy) {
    T geq[Q<T>::nc];
#pragma omp parallel for private(geq)
    for (int idx = 0; idx < _q.nxyz; ++idx) {
        Equilibrium<T, Q>(geq, _tem[idx], _ux[idx], _uy[idx]);
        _q.f0[idx] = geq[0];
        for (int c = 1; c < Q<T>::nc; ++c) {
            _q.f[Q<T>::IndexF(idx, c)] = geq[c];
        }
    }
}
// Set the initial condition of the AD lattice for 3D: every node's
// distribution is set to the equilibrium for the given temperature and
// velocity fields. Parallelized with OpenMP for consistency with the other
// per-node loops in this file (geq is per-thread scratch).
template<class T, template<class>class Q>
void InitialCondition(Q<T>& _q, const T *_tem, const T *_ux, const T *_uy, const T *_uz) {
    T geq[Q<T>::nc];
#pragma omp parallel for private(geq)
    for (int idx = 0; idx < _q.nxyz; ++idx) {
        Equilibrium<T, Q>(geq, _tem[idx], _ux[idx], _uy[idx], _uz[idx]);
        _q.f0[idx] = geq[0];
        for (int c = 1; c < Q<T>::nc; ++c) {
            _q.f[Q<T>::IndexF(idx, c)] = geq[c];
        }
    }
}
// Impose a prescribed temperature _tembc on domain-edge nodes of the D2Q9 AD
// lattice wherever _bctype returns true. The unknown populations entering the
// domain are reconstructed so the node's moments match the target temperature
// (coefficients are the D2Q9 weights with first-order velocity corrections).
// PEx/PEy and mx/my select which MPI subdomains own each physical boundary;
// offsetx/offsety convert local to global indices for the callbacks.
template<class T, template<class>class Q, class Fv, class Ff>
void BoundaryConditionSetT(Q<T>& _q, Fv _tembc, const T *_ux, const T *_uy, Ff _bctype) {
    // On xmin: unknowns are the +x populations (1, 5, 8).
    if (_q.PEx == 0) {
        for (int j = 0; j < _q.ny; ++j) {
            if (_bctype(0 + _q.offsetx, j + _q.offsety)) {
                int idx = _q.Index(0, j);
                T tem0 = 6.0*(_tembc(0 + _q.offsetx, j + _q.offsety) - _q.f0[idx] - _q.f[Q<T>::IndexF(idx, 2)] - _q.f[Q<T>::IndexF(idx, 3)] - _q.f[Q<T>::IndexF(idx, 4)] - _q.f[Q<T>::IndexF(idx, 6)] - _q.f[Q<T>::IndexF(idx, 7)])/(1.0 + 3.0*_ux[idx]);
                _q.f[Q<T>::IndexF(idx, 1)] = tem0*(1.0 + 3.0*_ux[idx])/9.0;
                _q.f[Q<T>::IndexF(idx, 5)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx])/36.0;
                _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx])/36.0;
            }
        }
    }
    // On xmax: unknowns are the -x populations (3, 6, 7).
    if (_q.PEx == _q.mx - 1) {
        for (int j = 0; j < _q.ny; ++j) {
            if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety)) {
                int idx = _q.Index(_q.nx - 1, j);
                T tem0 = 6.0*(_tembc((_q.nx - 1) + _q.offsetx, j + _q.offsety) - _q.f0[idx] - _q.f[Q<T>::IndexF(idx, 1)] - _q.f[Q<T>::IndexF(idx, 2)] - _q.f[Q<T>::IndexF(idx, 4)] - _q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 8)])/(1.0 - 3.0*_ux[idx]);
                _q.f[Q<T>::IndexF(idx, 3)] = tem0*(1.0 - 3.0*_ux[idx])/9.0;
                _q.f[Q<T>::IndexF(idx, 6)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx])/36.0;
                _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx])/36.0;
            }
        }
    }
    // On ymin: unknowns are the +y populations (2, 5, 6).
    if (_q.PEy == 0) {
        for (int i = 0; i < _q.nx; ++i) {
            if (_bctype(i + _q.offsetx, 0 + _q.offsety)) {
                int idx = _q.Index(i, 0);
                T tem0 = 6.0*(_tembc(i + _q.offsetx, 0 + _q.offsety) - _q.f0[idx] - _q.f[Q<T>::IndexF(idx, 1)] - _q.f[Q<T>::IndexF(idx, 3)] - _q.f[Q<T>::IndexF(idx, 4)] - _q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 8)])/(1.0 + 3.0*_uy[idx]);
                _q.f[Q<T>::IndexF(idx, 2)] = tem0*(1.0 + 3.0*_uy[idx])/9.0;
                _q.f[Q<T>::IndexF(idx, 5)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx])/36.0;
                _q.f[Q<T>::IndexF(idx, 6)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx])/36.0;
            }
        }
    }
    // On ymax: unknowns are the -y populations (4, 7, 8).
    if (_q.PEy == _q.my - 1) {
        for (int i = 0; i < _q.nx; ++i) {
            if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety)) {
                int idx = _q.Index(i, _q.ny - 1);
                T tem0 = 6.0*(_tembc(i + _q.offsetx, (_q.ny - 1) + _q.offsety) - _q.f0[idx] - _q.f[Q<T>::IndexF(idx, 1)] - _q.f[Q<T>::IndexF(idx, 2)] - _q.f[Q<T>::IndexF(idx, 3)] - _q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 6)])/(1.0 - 3.0*_uy[idx]);
                _q.f[Q<T>::IndexF(idx, 4)] = tem0*(1.0 - 3.0*_uy[idx])/9.0;
                _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx])/36.0;
                _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx])/36.0;
            }
        }
    }
}
// Impose a prescribed temperature _tembc on domain-face nodes of the D3Q15 AD
// lattice wherever _bctype returns true. The unknown populations entering the
// domain are reconstructed so the node's moments match the target temperature
// (coefficients are the D3Q15 weights with first-order velocity corrections).
// PEx/PEy/PEz and mx/my/mz select which MPI subdomains own each physical
// face; offsetx/offsety/offsetz convert local to global indices.
template<class T, template<class>class Q, class Fv, class Ff>
void BoundaryConditionSetT(Q<T>& _q, Fv _tembc, const T *_ux, const T *_uy, const T *_uz, Ff _bctype) {
    // On xmin: unknowns are the +x populations (1, 7, 9, 10, 12).
    if (_q.PEx == 0) {
        for (int j = 0; j < _q.ny; ++j) {
            for (int k = 0; k < _q.nz; ++k) {
                if (_bctype(0 + _q.offsetx, j + _q.offsety, k + _q.offsetz)) {
                    int idx = _q.Index(0, j, k);
                    T tem0 = 6.0*(_tembc(0 + _q.offsetx, j + _q.offsety, k + _q.offsetz) - _q.f0[idx] - _q.f[Q<T>::IndexF(idx, 2)] - _q.f[Q<T>::IndexF(idx, 3)] - _q.f[Q<T>::IndexF(idx, 4)] - _q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 6)] - _q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 13)] - _q.f[Q<T>::IndexF(idx, 14)])/(1.0 + 3.0*_ux[idx]);
                    _q.f[Q<T>::IndexF(idx, 1)] = tem0*(1.0 + 3.0*_ux[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 9)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 10)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 12)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On xmax: unknowns are the -x populations (4, 8, 11, 13, 14).
    if (_q.PEx == _q.mx - 1) {
        for (int j = 0; j < _q.ny; ++j) {
            for (int k = 0; k < _q.nz; ++k) {
                if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety, k + _q.offsetz)) {
                    int idx = _q.Index(_q.nx - 1, j, k);
                    T tem0 = 6.0*(_tembc((_q.nx - 1) + _q.offsetx, j + _q.offsety, k + _q.offsetz) - _q.f0[idx] - _q.f[Q<T>::IndexF(idx, 1)] - _q.f[Q<T>::IndexF(idx, 2)] - _q.f[Q<T>::IndexF(idx, 3)] - _q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 6)] - _q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 12)])/(1.0 - 3.0*_ux[idx]);
                    _q.f[Q<T>::IndexF(idx, 4)] = tem0*(1.0 - 3.0*_ux[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 11)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 13)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 14)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On ymin: unknowns are the +y populations (2, 7, 8, 10, 13).
    if (_q.PEy == 0) {
        for (int k = 0; k < _q.nz; ++k) {
            for (int i = 0; i < _q.nx; ++i) {
                if (_bctype(i + _q.offsetx, 0 + _q.offsety, k + _q.offsetz)) {
                    int idx = _q.Index(i, 0, k);
                    T tem0 = 6.0*(_tembc(i + _q.offsetx, 0 + _q.offsety, k + _q.offsetz) - _q.f0[idx] - _q.f[Q<T>::IndexF(idx, 1)] - _q.f[Q<T>::IndexF(idx, 3)] - _q.f[Q<T>::IndexF(idx, 4)] - _q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 6)] - _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 12)] - _q.f[Q<T>::IndexF(idx, 14)])/(1.0 + 3.0*_uy[idx]);
                    _q.f[Q<T>::IndexF(idx, 2)] = tem0*(1.0 + 3.0*_uy[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 10)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 13)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On ymax: unknowns are the -y populations (5, 9, 11, 12, 14).
    if (_q.PEy == _q.my - 1) {
        for (int k = 0; k < _q.nz; ++k) {
            for (int i = 0; i < _q.nx; ++i) {
                if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety, k + _q.offsetz)) {
                    int idx = _q.Index(i, _q.ny - 1, k);
                    T tem0 = 6.0*(_tembc(i + _q.offsetx, (_q.ny - 1) + _q.offsety, k + _q.offsetz) - _q.f0[idx] - _q.f[Q<T>::IndexF(idx, 1)] - _q.f[Q<T>::IndexF(idx, 2)] - _q.f[Q<T>::IndexF(idx, 3)] - _q.f[Q<T>::IndexF(idx, 4)] - _q.f[Q<T>::IndexF(idx, 6)] - _q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 13)])/(1.0 - 3.0*_uy[idx]);
                    _q.f[Q<T>::IndexF(idx, 5)] = tem0*(1.0 - 3.0*_uy[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 9)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 11)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 12)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 14)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On zmin: unknowns are the +z populations (3, 7, 8, 9, 14).
    if (_q.PEz == 0) {
        for (int i = 0; i < _q.nx; ++i) {
            for (int j = 0; j < _q.ny; ++j) {
                if (_bctype(i + _q.offsetx, j + _q.offsety, 0 + _q.offsetz)) {
                    int idx = _q.Index(i, j, 0);
                    T tem0 = 6.0*(_tembc(i + _q.offsetx, j + _q.offsety, 0 + _q.offsetz) - _q.f0[idx] - _q.f[Q<T>::IndexF(idx, 1)] - _q.f[Q<T>::IndexF(idx, 2)] - _q.f[Q<T>::IndexF(idx, 4)] - _q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 6)] - _q.f[Q<T>::IndexF(idx, 10)] - _q.f[Q<T>::IndexF(idx, 11)] - _q.f[Q<T>::IndexF(idx, 12)] - _q.f[Q<T>::IndexF(idx, 13)])/(1.0 + 3.0*_uz[idx]);
                    _q.f[Q<T>::IndexF(idx, 3)] = tem0*(1.0 + 3.0*_uz[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 9)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 14)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On zmax: unknowns are the -z populations (6, 10, 11, 12, 13).
    if (_q.PEz == _q.mz - 1) {
        for (int i = 0; i < _q.nx; ++i) {
            for (int j = 0; j < _q.ny; ++j) {
                if (_bctype(i + _q.offsetx, j + _q.offsety, (_q.nz - 1) + _q.offsetz)) {
                    int idx = _q.Index(i, j, _q.nz - 1);
                    T tem0 = 6.0*(_tembc(i + _q.offsetx, j + _q.offsety, (_q.nz - 1) + _q.offsetz) - _q.f0[idx] - _q.f[Q<T>::IndexF(idx, 1)] - _q.f[Q<T>::IndexF(idx, 2)] - _q.f[Q<T>::IndexF(idx, 3)] - _q.f[Q<T>::IndexF(idx, 4)] - _q.f[Q<T>::IndexF(idx, 5)] - _q.f[Q<T>::IndexF(idx, 7)] - _q.f[Q<T>::IndexF(idx, 8)] - _q.f[Q<T>::IndexF(idx, 9)] - _q.f[Q<T>::IndexF(idx, 14)])/(1.0 - 3.0*_uz[idx]);
                    _q.f[Q<T>::IndexF(idx, 6)] = tem0*(1.0 - 3.0*_uz[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 10)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 11)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 12)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 13)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
}
// Impose a prescribed boundary value _qnbc (presumably a normal heat flux /
// Neumann condition — confirm against the scheme's derivation) on domain-edge
// nodes of the D2Q9 AD lattice wherever _bctype returns true, for a uniform
// scalar _diffusivity. The unknown incoming populations are reconstructed
// from the known outgoing ones plus the flux term scaled by
// (1 + 1/(6*diffusivity)). Note the velocity sign in the denominator is
// opposite to BoundaryConditionSetT, consistent with a flux condition.
template<class T, template<class>class Q, class Fv, class Ff>
void BoundaryConditionSetQ(Q<T>& _q, Fv _qnbc, const T *_ux, const T *_uy, T _diffusivity, Ff _bctype) {
    // On xmin: unknowns are the +x populations (1, 5, 8).
    if (_q.PEx == 0) {
        for (int j = 0; j < _q.ny; ++j) {
            if (_bctype(0 + _q.offsetx, j + _q.offsety)) {
                int idx = _q.Index(0, j);
                T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity))*_qnbc(0 + _q.offsetx, j + _q.offsety) + _q.f[Q<T>::IndexF(idx, 3)] + _q.f[Q<T>::IndexF(idx, 6)] + _q.f[Q<T>::IndexF(idx, 7)])/(1.0 - 3.0*_ux[idx]);
                _q.f[Q<T>::IndexF(idx, 1)] = tem0*(1.0 + 3.0*_ux[idx])/9.0;
                _q.f[Q<T>::IndexF(idx, 5)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx])/36.0;
                _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx])/36.0;
            }
        }
    }
    // On xmax: unknowns are the -x populations (3, 6, 7).
    if (_q.PEx == _q.mx - 1) {
        for (int j = 0; j < _q.ny; ++j) {
            if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety)) {
                int idx = _q.Index(_q.nx - 1, j);
                T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity))*_qnbc((_q.nx - 1) + _q.offsetx, j + _q.offsety) + _q.f[Q<T>::IndexF(idx, 1)] + _q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 8)])/(1.0 + 3.0*_ux[idx]);
                _q.f[Q<T>::IndexF(idx, 3)] = tem0*(1.0 - 3.0*_ux[idx])/9.0;
                _q.f[Q<T>::IndexF(idx, 6)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx])/36.0;
                _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx])/36.0;
            }
        }
    }
    // On ymin: unknowns are the +y populations (2, 5, 6).
    if (_q.PEy == 0) {
        for (int i = 0; i < _q.nx; ++i) {
            if (_bctype(i + _q.offsetx, 0 + _q.offsety)) {
                int idx = _q.Index(i, 0);
                T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity))*_qnbc(i + _q.offsetx, 0 + _q.offsety) + _q.f[Q<T>::IndexF(idx, 4)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)])/(1.0 - 3.0*_uy[idx]);
                _q.f[Q<T>::IndexF(idx, 2)] = tem0*(1.0 + 3.0*_uy[idx])/9.0;
                _q.f[Q<T>::IndexF(idx, 5)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx])/36.0;
                _q.f[Q<T>::IndexF(idx, 6)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx])/36.0;
            }
        }
    }
    // On ymax: unknowns are the -y populations (4, 7, 8).
    if (_q.PEy == _q.my - 1) {
        for (int i = 0; i < _q.nx; ++i) {
            if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety)) {
                int idx = _q.Index(i, _q.ny - 1);
                T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity))*_qnbc(i + _q.offsetx, (_q.ny - 1) + _q.offsety) + _q.f[Q<T>::IndexF(idx, 2)] + _q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 6)])/(1.0 + 3.0*_uy[idx]);
                _q.f[Q<T>::IndexF(idx, 4)] = tem0*(1.0 - 3.0*_uy[idx])/9.0;
                _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx])/36.0;
                _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx])/36.0;
            }
        }
    }
}
// Impose a prescribed boundary value _qnbc (presumably a normal heat flux /
// Neumann condition — confirm against the scheme's derivation) on domain-face
// nodes of the D3Q15 AD lattice wherever _bctype returns true, for a uniform
// scalar _diffusivity. The unknown incoming populations are reconstructed
// from the known outgoing ones plus the flux term scaled by
// (1 + 1/(6*diffusivity)). Note the velocity sign in the denominator is
// opposite to BoundaryConditionSetT, consistent with a flux condition.
template<class T, template<class>class Q, class Fv, class Ff>
void BoundaryConditionSetQ(Q<T>& _q, Fv _qnbc, const T *_ux, const T *_uy, const T *_uz, T _diffusivity, Ff _bctype) {
    // On xmin: unknowns are the +x populations (1, 7, 9, 10, 12).
    if (_q.PEx == 0) {
        for (int j = 0; j < _q.ny; ++j) {
            for (int k = 0; k < _q.nz; ++k) {
                if (_bctype(0 + _q.offsetx, j + _q.offsety, k + _q.offsetz)) {
                    int idx = _q.Index(0, j, k);
                    T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity))*_qnbc(0 + _q.offsetx, j + _q.offsety, k + _q.offsetz) + _q.f[Q<T>::IndexF(idx, 4)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 13)] + _q.f[Q<T>::IndexF(idx, 14)])/(1.0 - 3.0*_ux[idx]);
                    _q.f[Q<T>::IndexF(idx, 1)] = tem0*(1.0 + 3.0*_ux[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 9)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 10)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 12)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On xmax: unknowns are the -x populations (4, 8, 11, 13, 14).
    if (_q.PEx == _q.mx - 1) {
        for (int j = 0; j < _q.ny; ++j) {
            for (int k = 0; k < _q.nz; ++k) {
                if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety, k + _q.offsetz)) {
                    int idx = _q.Index(_q.nx - 1, j, k);
                    T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity))*_qnbc((_q.nx - 1) + _q.offsetx, j + _q.offsety, k + _q.offsetz) + _q.f[Q<T>::IndexF(idx, 1)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 12)])/(1.0 + 3.0*_ux[idx]);
                    _q.f[Q<T>::IndexF(idx, 4)] = tem0*(1.0 - 3.0*_ux[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 11)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 13)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 14)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On ymin: unknowns are the +y populations (2, 7, 8, 10, 13).
    if (_q.PEy == 0) {
        for (int k = 0; k < _q.nz; ++k) {
            for (int i = 0; i < _q.nx; ++i) {
                if (_bctype(i + _q.offsetx, 0 + _q.offsety, k + _q.offsetz)) {
                    int idx = _q.Index(i, 0, k);
                    T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity))*_qnbc(i + _q.offsetx, 0 + _q.offsety, k + _q.offsetz) + _q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 14)])/(1.0 - 3.0*_uy[idx]);
                    _q.f[Q<T>::IndexF(idx, 2)] = tem0*(1.0 + 3.0*_uy[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 10)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 13)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On ymax: unknowns are the -y populations (5, 9, 11, 12, 14).
    if (_q.PEy == _q.my - 1) {
        for (int k = 0; k < _q.nz; ++k) {
            for (int i = 0; i < _q.nx; ++i) {
                if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety, k + _q.offsetz)) {
                    int idx = _q.Index(i, _q.ny - 1, k);
                    T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity))*_qnbc(i + _q.offsetx, (_q.ny - 1) + _q.offsety, k + _q.offsetz) + _q.f[Q<T>::IndexF(idx, 2)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 13)])/(1.0 + 3.0*_uy[idx]);
                    _q.f[Q<T>::IndexF(idx, 5)] = tem0*(1.0 - 3.0*_uy[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 9)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 11)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 12)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 14)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On zmin: unknowns are the +z populations (3, 7, 8, 9, 14).
    if (_q.PEz == 0) {
        for (int i = 0; i < _q.nx; ++i) {
            for (int j = 0; j < _q.ny; ++j) {
                if (_bctype(i + _q.offsetx, j + _q.offsety, 0 + _q.offsetz)) {
                    int idx = _q.Index(i, j, 0);
                    T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity))*_qnbc(i + _q.offsetx, j + _q.offsety, 0 + _q.offsetz) + _q.f[Q<T>::IndexF(idx, 6)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 13)])/(1.0 - 3.0*_uz[idx]);
                    _q.f[Q<T>::IndexF(idx, 3)] = tem0*(1.0 + 3.0*_uz[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 9)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 14)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On zmax: unknowns are the -z populations (6, 10, 11, 12, 13).
    if (_q.PEz == _q.mz - 1) {
        for (int i = 0; i < _q.nx; ++i) {
            for (int j = 0; j < _q.ny; ++j) {
                if (_bctype(i + _q.offsetx, j + _q.offsety, (_q.nz - 1) + _q.offsetz)) {
                    int idx = _q.Index(i, j, _q.nz - 1);
                    T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity))*_qnbc(i + _q.offsetx, j + _q.offsety, (_q.nz - 1) + _q.offsetz) + _q.f[Q<T>::IndexF(idx, 3)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 14)])/(1.0 + 3.0*_uz[idx]);
                    _q.f[Q<T>::IndexF(idx, 6)] = tem0*(1.0 - 3.0*_uz[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 10)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 11)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 12)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 13)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
}
// Function of setting boundary condition set q of AD for D2Q9 (diffusivity heterogeneous)
//
// Imposes a prescribed boundary flux q (from _qnbc) on domain-edge nodes of an
// advection-diffusion D2Q9 lattice. On each edge node selected by _bctype, the
// unknown incoming distribution functions are reconstructed from the known
// outgoing ones, the flux value, and the local velocity (_ux, _uy).
// _diffusivity is a per-node array (heterogeneous), indexed by the lattice index.
// NOTE(review): assumes |3*u_normal| != 1 on boundary nodes so the (1 -/+ 3*u)
// denominators are nonzero — confirm with caller.
template<class T, template<class>class Q, class Fv, class Ff>
void BoundaryConditionSetQ(Q<T>& _q, Fv _qnbc, const T *_ux, const T *_uy, const T *_diffusivity, Ff _bctype) {
    // On xmin (only processes on the x-minimum face of the decomposition)
    if (_q.PEx == 0) {
        for (int j = 0; j < _q.ny; ++j) {
            if (_bctype(0 + _q.offsetx, j + _q.offsety)) {
                int idx = _q.Index(0, j);
                // tem0: common factor built from the flux, the known (outgoing)
                // populations, and the local diffusivity and normal velocity.
                T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity[idx]))*_qnbc(0 + _q.offsetx, j + _q.offsety) + _q.f[Q<T>::IndexF(idx, 3)] + _q.f[Q<T>::IndexF(idx, 6)] + _q.f[Q<T>::IndexF(idx, 7)])/(1.0 - 3.0*_ux[idx]);
                _q.f[Q<T>::IndexF(idx, 1)] = tem0*(1.0 + 3.0*_ux[idx])/9.0;
                _q.f[Q<T>::IndexF(idx, 5)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx])/36.0;
                _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx])/36.0;
            }
        }
    }
    // On xmax
    if (_q.PEx == _q.mx - 1) {
        for (int j = 0; j < _q.ny; ++j) {
            if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety)) {
                int idx = _q.Index(_q.nx - 1, j);
                T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity[idx]))*_qnbc((_q.nx - 1) + _q.offsetx, j + _q.offsety) + _q.f[Q<T>::IndexF(idx, 1)] + _q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 8)])/(1.0 + 3.0*_ux[idx]);
                _q.f[Q<T>::IndexF(idx, 3)] = tem0*(1.0 - 3.0*_ux[idx])/9.0;
                _q.f[Q<T>::IndexF(idx, 6)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx])/36.0;
                _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx])/36.0;
            }
        }
    }
    // On ymin
    if (_q.PEy == 0) {
        for (int i = 0; i < _q.nx; ++i) {
            if (_bctype(i + _q.offsetx, 0 + _q.offsety)) {
                int idx = _q.Index(i, 0);
                T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity[idx]))*_qnbc(i + _q.offsetx, 0 + _q.offsety) + _q.f[Q<T>::IndexF(idx, 4)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)])/(1.0 - 3.0*_uy[idx]);
                _q.f[Q<T>::IndexF(idx, 2)] = tem0*(1.0 + 3.0*_uy[idx])/9.0;
                _q.f[Q<T>::IndexF(idx, 5)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx])/36.0;
                _q.f[Q<T>::IndexF(idx, 6)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx])/36.0;
            }
        }
    }
    // On ymax
    if (_q.PEy == _q.my - 1) {
        for (int i = 0; i < _q.nx; ++i) {
            if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety)) {
                int idx = _q.Index(i, _q.ny - 1);
                T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity[idx]))*_qnbc(i + _q.offsetx, (_q.ny - 1) + _q.offsety) + _q.f[Q<T>::IndexF(idx, 2)] + _q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 6)])/(1.0 + 3.0*_uy[idx]);
                _q.f[Q<T>::IndexF(idx, 4)] = tem0*(1.0 - 3.0*_uy[idx])/9.0;
                _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx])/36.0;
                _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx])/36.0;
            }
        }
    }
}
// Function of setting boundary condition set q of AD for D3Q15 (diffusivity heterogeneous)
//
// 3D counterpart of the D2Q9 overload above: imposes a prescribed boundary
// flux q (from _qnbc) on domain-face nodes of an advection-diffusion D3Q15
// lattice selected by _bctype, reconstructing the unknown incoming
// distribution functions from the known outgoing ones, the flux value, and
// the local velocity (_ux, _uy, _uz). _diffusivity is a per-node array.
// NOTE(review): assumes |3*u_normal| != 1 on boundary nodes so the
// (1 -/+ 3*u) denominators are nonzero — confirm with caller.
template<class T, template<class>class Q, class Fv, class Ff>
void BoundaryConditionSetQ(Q<T>& _q, Fv _qnbc, const T *_ux, const T *_uy, const T *_uz, const T *_diffusivity, Ff _bctype) {
    // On xmin (only processes on the x-minimum face of the decomposition)
    if (_q.PEx == 0) {
        for (int j = 0; j < _q.ny; ++j) {
            for (int k = 0; k < _q.nz; ++k) {
                if (_bctype(0 + _q.offsetx, j + _q.offsety, k + _q.offsetz)) {
                    int idx = _q.Index(0, j, k);
                    // tem0: common factor built from the flux, the known
                    // (outgoing) populations, and local diffusivity/velocity.
                    T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity[idx]))*_qnbc(0 + _q.offsetx, j + _q.offsety, k + _q.offsetz) + _q.f[Q<T>::IndexF(idx, 4)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 13)] + _q.f[Q<T>::IndexF(idx, 14)])/(1.0 - 3.0*_ux[idx]);
                    _q.f[Q<T>::IndexF(idx, 1)] = tem0*(1.0 + 3.0*_ux[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 9)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 10)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 12)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On xmax
    if (_q.PEx == _q.mx - 1) {
        for (int j = 0; j < _q.ny; ++j) {
            for (int k = 0; k < _q.nz; ++k) {
                if (_bctype((_q.nx - 1) + _q.offsetx, j + _q.offsety, k + _q.offsetz)) {
                    int idx = _q.Index(_q.nx - 1, j, k);
                    T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity[idx]))*_qnbc((_q.nx - 1) + _q.offsetx, j + _q.offsety, k + _q.offsetz) + _q.f[Q<T>::IndexF(idx, 1)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 12)])/(1.0 + 3.0*_ux[idx]);
                    _q.f[Q<T>::IndexF(idx, 4)] = tem0*(1.0 - 3.0*_ux[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 11)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 13)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 14)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On ymin
    if (_q.PEy == 0) {
        for (int k = 0; k < _q.nz; ++k) {
            for (int i = 0; i < _q.nx; ++i) {
                if (_bctype(i + _q.offsetx, 0 + _q.offsety, k + _q.offsetz)) {
                    int idx = _q.Index(i, 0, k);
                    T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity[idx]))*_qnbc(i + _q.offsetx, 0 + _q.offsety, k + _q.offsetz) + _q.f[Q<T>::IndexF(idx, 5)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 14)])/(1.0 - 3.0*_uy[idx]);
                    _q.f[Q<T>::IndexF(idx, 2)] = tem0*(1.0 + 3.0*_uy[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 10)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 13)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On ymax
    if (_q.PEy == _q.my - 1) {
        for (int k = 0; k < _q.nz; ++k) {
            for (int i = 0; i < _q.nx; ++i) {
                if (_bctype(i + _q.offsetx, (_q.ny - 1) + _q.offsety, k + _q.offsetz)) {
                    int idx = _q.Index(i, _q.ny - 1, k);
                    T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity[idx]))*_qnbc(i + _q.offsetx, (_q.ny - 1) + _q.offsety, k + _q.offsetz) + _q.f[Q<T>::IndexF(idx, 2)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 13)])/(1.0 + 3.0*_uy[idx]);
                    _q.f[Q<T>::IndexF(idx, 5)] = tem0*(1.0 - 3.0*_uy[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 9)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 11)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 12)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 14)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On zmin
    if (_q.PEz == 0) {
        for (int i = 0; i < _q.nx; ++i) {
            for (int j = 0; j < _q.ny; ++j) {
                if (_bctype(i + _q.offsetx, j + _q.offsety, 0 + _q.offsetz)) {
                    int idx = _q.Index(i, j, 0);
                    T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity[idx]))*_qnbc(i + _q.offsetx, j + _q.offsety, 0 + _q.offsetz) + _q.f[Q<T>::IndexF(idx, 6)] + _q.f[Q<T>::IndexF(idx, 10)] + _q.f[Q<T>::IndexF(idx, 11)] + _q.f[Q<T>::IndexF(idx, 12)] + _q.f[Q<T>::IndexF(idx, 13)])/(1.0 - 3.0*_uz[idx]);
                    _q.f[Q<T>::IndexF(idx, 3)] = tem0*(1.0 + 3.0*_uz[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 7)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 8)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 9)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 14)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] + 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
    // On zmax
    if (_q.PEz == _q.mz - 1) {
        for (int i = 0; i < _q.nx; ++i) {
            for (int j = 0; j < _q.ny; ++j) {
                if (_bctype(i + _q.offsetx, j + _q.offsety, (_q.nz - 1) + _q.offsetz)) {
                    int idx = _q.Index(i, j, _q.nz - 1);
                    T tem0 = 6.0*((1.0 + 1.0/(6.0*_diffusivity[idx]))*_qnbc(i + _q.offsetx, j + _q.offsety, (_q.nz - 1) + _q.offsetz) + _q.f[Q<T>::IndexF(idx, 3)] + _q.f[Q<T>::IndexF(idx, 7)] + _q.f[Q<T>::IndexF(idx, 8)] + _q.f[Q<T>::IndexF(idx, 9)] + _q.f[Q<T>::IndexF(idx, 14)])/(1.0 + 3.0*_uz[idx]);
                    _q.f[Q<T>::IndexF(idx, 6)] = tem0*(1.0 - 3.0*_uz[idx])/9.0;
                    _q.f[Q<T>::IndexF(idx, 10)] = tem0*(1.0 + 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 11)] = tem0*(1.0 - 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 12)] = tem0*(1.0 + 3.0*_ux[idx] - 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                    _q.f[Q<T>::IndexF(idx, 13)] = tem0*(1.0 - 3.0*_ux[idx] + 3.0*_uy[idx] - 3.0*_uz[idx])/72.0;
                }
            }
        }
    }
}
}
} |
omp_mm_compute.c | #include <omp.h>
#include "omp_mm.h"
/*
 * compute: dense SIZE x SIZE matrix multiply, c[i][j] += sum_k a[i][k]*b[k][j].
 *
 * Reads globals a and b and accumulates into c (all declared in omp_mm.h;
 * c is not zeroed here — callers must initialize it).
 *
 * The outer loop is parallelized with OpenMP. Loop indices are declared
 * locally in each for statement (smallest scope), so every thread gets its
 * own copies automatically and no private() clause on file-scope variables
 * is needed — this removes the risk of a data race if the clause were ever
 * dropped or mistyped.
 */
void compute() {
#pragma omp parallel for shared(a, b, c)
  for (int i = 0; i < SIZE; i++) {
    for (int j = 0; j < SIZE; j++) {
      for (int k = 0; k < SIZE; k++) {
        c[i][j] += (a[i][k] * b[k][j]);
      }
    }
  }
}
|
GB_unaryop__identity_fp32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp32_uint64
// op(A') function: GB_tran__identity_fp32_uint64
// C type: float
// A type: uint64_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = op (cast (Ax)): apply the IDENTITY unary operator elementwise,
   casting each uint64_t entry of Ax to float. Auto-generated; the actual
   per-entry work is the GB_CAST_OP macro defined above. Returns GrB_NO_VALUE
   when this kernel is compiled out via GB_DISABLE. */
GrB_Info GB_unop__identity_fp32_uint64
(
    float *Cx,          /* output array; Cx and Ax may be aliased */
    uint64_t *Ax,       /* input array, anz entries */
    int64_t anz,        /* number of entries to process */
    int nthreads        /* OpenMP thread count for the parallel loop */
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    /* embarrassingly parallel: each iteration touches only entry p */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, typecast uint64_t -> float, and apply the
   IDENTITY unary operator. The loop body lives in the shared template
   GB_unaryop_transpose.c, specialized here via the GB_* macros defined above.
   Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE. */
GrB_Info GB_tran__identity_fp32_uint64
(
    GrB_Matrix C,                        /* output: transposed, typecast copy */
    const GrB_Matrix A,                  /* input matrix */
    int64_t *GB_RESTRICT *Rowcounts,     /* workspace from phase 1 */
    GBI_single_iterator Iter,            /* iterator over A's vectors */
    const int64_t *GB_RESTRICT A_slice,  /* slice boundaries per task */
    int naslice                          /* number of slices (parallel tasks) */
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
#if defined(MAGICKCORE_XML_DELEGATE)
# if defined(MAGICKCORE_WINDOWS_SUPPORT)
# if !defined(__MINGW32__)
# include <win32config.h>
# endif
# endif
# include <libxml/parser.h>
# include <libxml/tree.h>
#endif
/*
Definitions
*/
#define LCMSHDRI
#if !defined(MAGICKCORE_HDRI_SUPPORT)
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
#undef LCMSHDRI
#define LCMSScaleSource(pixel) ScaleQuantumToShort(pixel)
#define LCMSScaleTarget(pixel) ScaleShortToQuantum(pixel)
typedef unsigned short
LCMSType;
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
#undef LCMSHDRI
#define LCMSScaleSource(pixel) (pixel)
#define LCMSScaleTarget(pixel) (pixel)
typedef unsigned short
LCMSType;
#endif
#endif
#if defined(LCMSHDRI)
#define LCMSScaleSource(pixel) (source_scale*QuantumScale*(pixel))
#define LCMSScaleTarget(pixel) ClampToQuantum(target_scale*QuantumRange*(pixel))
typedef double
LCMSType;
#endif
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType,ExceptionInfo *);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
Typedef declarations
*/
/* One named image profile: a raw byte payload keyed by profile name. */
struct _ProfileInfo
{
  char
    *name;        /* profile key (e.g. "icc", "iptc") -- presumably; verify against callers */

  size_t
    length;       /* number of bytes in info */

  unsigned char
    *info;        /* raw profile payload */

  size_t
    signature;    /* structure validity signature */
};
/* Context handed to the lcms error callback (CMSExceptionHandler) so it can
   report errors against the right image and exception sink. */
typedef struct _CMSExceptionInfo
{
  Image
    *image;       /* image being color-transformed (may be NULL) */

  ExceptionInfo
    *exception;   /* where lcms errors are recorded */
} CMSExceptionInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  /*
    Copy every profile from clone_image onto image, replacing any profiles
    image already holds.  A clone_image with no profiles leaves image as-is.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->profiles == (void *) NULL)
    return(MagickTrue);
  if (image->profiles != (void *) NULL)
    DestroyImageProfiles(image);
  image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
    (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  /* Remove the named profile from the image's profile map.  Returns
     MagickFalse when the image has no profiles (or the key is absent). */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /* NULL profile keeps the 8BIM meta-profile in sync with the deletion. */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  /* Release the image's profile map, if any, and clear the pointer. */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
const char *name)
{
const StringInfo
*profile;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((StringInfo *) NULL);
profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
image->profiles,name);
return(profile);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  /* Advance the profile map's internal iterator and return the next profile
     name, or NULL when the image has no profiles / iteration is exhausted. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((char *) NULL);
  return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image otherwise added or applied. Use a name of '*' and a profile
% of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profile are applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if LCMS_VERSION < 2060
/* Compatibility shims for lcms releases older than 2.6, which lack the
   context API.  The cmsContext handle is simply the user-data pointer. */
static void* cmsGetContextUserData(cmsContext ContextID)
{
  return(ContextID);
}

static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData)
{
  magick_unreferenced(Plugin);
  return((cmsContext) UserData);
}

/* Pre-2.6 lcms has only a global error handler; ignore the context. */
static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID),
  cmsLogErrorHandlerFunction Fn)
{
  magick_unreferenced(ContextID);
  cmsSetLogErrorHandler(Fn);
}

/* Nothing to free: the "context" was never allocated. */
static void cmsDeleteContext(cmsContext magick_unused(ContextID))
{
  magick_unreferenced(ContextID);
}
#endif
/* Free a per-thread pixel buffer set created by AcquirePixelThreadSet.
   Safe on NULL and on partially-populated sets (NULL slots are skipped),
   so it doubles as the cleanup path for a failed acquisition.  Always
   returns NULL for pointer-clearing assignment at the call site. */
static LCMSType **DestroyPixelThreadSet(LCMSType **pixels)
{
  register ssize_t
    i;

  if (pixels == (LCMSType **) NULL)
    return((LCMSType **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (LCMSType *) NULL)
      pixels[i]=(LCMSType *) RelinquishMagickMemory(pixels[i]);
  pixels=(LCMSType **) RelinquishMagickMemory(pixels);
  return(pixels);
}
/* Allocate one scanline-sized pixel buffer (columns*channels LCMSType
   values) per worker thread, so concurrent lcms transforms never share a
   buffer.  Returns NULL on allocation failure, with any partial allocation
   released via DestroyPixelThreadSet. */
static LCMSType **AcquirePixelThreadSet(const size_t columns,
  const size_t channels)
{
  LCMSType
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(LCMSType **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
  if (pixels == (LCMSType **) NULL)
    return((LCMSType **) NULL);
  /* zero first so a failed partial allocation can be cleaned up safely */
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(LCMSType *) AcquireQuantumMemory(columns,channels*
      sizeof(**pixels));
    if (pixels[i] == (LCMSType *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}
/* Free a per-thread lcms transform set created by AcquireTransformThreadSet.
   NULL slots are skipped, so this also cleans up a partially-built set.
   Always returns NULL for pointer-clearing assignment at the call site. */
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    i;

  assert(transform != (cmsHTRANSFORM *) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}
/* Build one lcms color transform per worker thread (cmsCreateTransformTHR),
   all with identical source/target profiles, rendering intent, and flags,
   so each thread can transform pixels without locking.  Returns NULL on
   failure, releasing any transforms already created. */
static cmsHTRANSFORM *AcquireTransformThreadSet(
  const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
  const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
  const int intent,const cmsUInt32Number flags,cmsContext cms_context)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  /* zero first so a failed partial build can be cleaned up safely */
  (void) memset(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    transform[i]=cmsCreateTransformTHR(cms_context,source_profile,source_type,
      target_profile,target_type,intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
/* lcms error callback: translate an lcms error into a Magick ImageWarning
   on the exception stored in the context's CMSExceptionInfo.  Degrades
   gracefully when the context, exception, or image is missing. */
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  ExceptionInfo
    *exception;

  Image
    *image;

  cms_exception=(CMSExceptionInfo *) cmsGetContextUserData(context);
  if (cms_exception == (CMSExceptionInfo *) NULL)
    return;
  exception=cms_exception->exception;
  if (exception == (ExceptionInfo *) NULL)
    return;
  image=cms_exception->image;
  if (image == (Image *) NULL)
    {
      /* no image to attribute the error to; report it generically */
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "UnableToTransformColorspace","`%s'","unknown context");
      return;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
      severity,message != (char *) NULL ? message : "no message");
  (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
    "UnableToTransformColorspace","`%s', %s (#%u)",image->filename,
    message != (char *) NULL ? message : "no message",severity);
}
#endif
/*
  SetsRGBImageProfile() attaches a built-in sRGB IEC61966-2.1 ICC profile
  (public-domain profile by Graeme W. Gill, embedded below as raw bytes)
  to the image under the "icc" key.  Returns MagickFalse without touching
  the image if an "icc" profile is already present; otherwise returns the
  status of SetImageProfile().
*/
static MagickBooleanType SetsRGBImageProfile(Image *image,
  ExceptionInfo *exception)
{
  /*
    Raw ICC profile payload; must remain byte-exact.  sizeof(sRGBProfile)
    below is the profile length handed to AcquireStringInfo().
  */
  static unsigned char
    sRGBProfile[] =
    {
      0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
      0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
      0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
      0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
      0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
      0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
      0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
      0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
      0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
      0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
      0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
      0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
      0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
      0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
      0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
      0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
      0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
      0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
      0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
      0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
      0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
      0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
      0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
      0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
      0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
      0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
      0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
      0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
      0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
      0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
      0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
      0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
      0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
      0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
      0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
      0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
      0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
      0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
      0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
      0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
      0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
      0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
      0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
      0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
      0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
      0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
      0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
      0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
      0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
      0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
      0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
      0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
      0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
      0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
      0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
      0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
      0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
      0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
      0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
      0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
      0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
      0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
      0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
      0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
      0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
      0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
      0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
      0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
      0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
      0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
      0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
      0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
      0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
      0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
      0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
      0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
      0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
      0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
      0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
      0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
      0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
      0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
      0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
      0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
      0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
      0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
      0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
      0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
      0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
      0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
      0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
      0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
      0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
      0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
      0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
      0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
      0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
      0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
      0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
      0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
      0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
      0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
      0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
      0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
      0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
      0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
      0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
      0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
      0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
      0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
      0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
      0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
      0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
      0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
      0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
      0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
      0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
      0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
      0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
      0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
      0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
      0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
      0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
      0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
      0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
      0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
      0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
      0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
      0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
      0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
      0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
      0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
      0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
      0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
      0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
      0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
      0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
      0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
      0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
      0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
      0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
      0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
      0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
      0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
      0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
      0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
      0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
      0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
      0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
      0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
      0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
      0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
      0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
      0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
      0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
      0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
      0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
      0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
      0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
      0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
      0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
      0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
      0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
      0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
      0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
      0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
      0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
      0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
      0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
      0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
      0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
      0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
      0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
      0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
      0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
      0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
      0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
      0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
      0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
      0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
      0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
      0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
      0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
      0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
      0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
      0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
      0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
      0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
      0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
      0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
      0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
      0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
      0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
      0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
      0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
      0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
      0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
      0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
      0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
      0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
      0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
      0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
      0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
      0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
      0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
      0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
      0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
      0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
      0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
      0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
      0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
      0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
      0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
      0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
      0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
      0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
      0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
      0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
      0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
      0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
      0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
      0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
      0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
      0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
      0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
      0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
      0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
      0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
      0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
      0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
      0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
      0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
    };

  StringInfo
    *profile;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* Never overwrite an existing ICC profile. */
  if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
    return(MagickFalse);
  /* SetStringInfoDatum copies the bytes; the StringInfo owns its copy. */
  profile=AcquireStringInfo(sizeof(sRGBProfile));
  SetStringInfoDatum(profile,sRGBProfile);
  status=SetImageProfile(image,"icc",profile,exception);
  profile=DestroyStringInfo(profile);
  return(status);
}
/*
  ProfileImage() adds, deletes, or applies an image profile.

  Behavior by argument:
    - datum == NULL or length == 0: delete every profile whose name matches
      the `name` pattern (IsOptionMember) and return MagickTrue.
    - name is not "icc"/"icm": attach the bytes as a generic profile.
    - name is "icc"/"icm": if the image already carries an identical ICC
      profile, this is a no-op; otherwise color-manage the pixels with LCMS
      from the image's current ICC profile (or treat the new profile as the
      source when none exists) and attach the new profile.

  Returns MagickTrue on success; may throw via `exception`.
*/
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag "Profile/Image"
  /* Cleanup-and-throw helper: releases LCMS context and any open profiles. */
#define ThrowProfileException(severity,tag,context) \
{ \
  if (cms_context != (cmsContext) NULL) \
    cmsDeleteContext(cms_context); \
  if (source_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_profile); \
  if (target_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        Delete image profile(s).  The iterator is reset after each removal
        because DeleteImageProfile invalidates the iteration state.
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            (void) DeleteImageProfile(image,next);
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile,exception);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          /*
            Incoming profile is identical to the current one: use EXIF
            hints to decide whether to substitute the built-in sRGB
            profile instead.
          */
          value=GetImageProperty(image,"exif:ColorSpace",exception);
          (void) value;
          /* NOTE(review): value may be NULL here; this relies on
             LocaleCompare tolerating NULL arguments -- confirm. */
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image,exception);
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image,exception);
          /* Future.
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R03.") != 0)
            (void) SetAdobeRGB1998ImageProfile(image,exception);
          */
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          /* Still identical after the sRGB substitution: nothing to do. */
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (LCMS)",image->filename);
#else
      {
        cmsHPROFILE
          source_profile;

        cmsContext
          cms_context;

        CMSExceptionInfo
          cms_exception;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cms_exception.image=image;
        cms_exception.exception=exception;
        cms_context=cmsCreateContext(NULL,&cms_exception);
        if (cms_context == (cmsContext) NULL)
          ThrowBinaryException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler);
        source_profile=cmsOpenProfileFromMemTHR(cms_context,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_profile == (cmsHPROFILE) NULL)
          {
            cmsDeleteContext(cms_context);
            ThrowBinaryException(ResourceLimitError,
              "ColorspaceColorProfileMismatch",name);
          }
        /*
          With no existing ICC profile (and not a device-link profile),
          just attach the new profile without transforming pixels.
        */
        if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile,exception);
        else
          {
            CacheView
              *image_view;

            ColorspaceType
              source_colorspace,
              target_colorspace;

            cmsColorSpaceSignature
              signature;

            cmsHPROFILE
              target_profile;

            cmsHTRANSFORM
              *magick_restrict transform;

            cmsUInt32Number
              flags,
              source_type,
              target_type;

            int
              intent;

            LCMSType
              **magick_restrict source_pixels,
              **magick_restrict target_pixels;

#if defined(LCMSHDRI)
            LCMSType
              source_scale,
              target_scale;
#endif

            MagickOffsetType
              progress;

            size_t
              source_channels,
              target_channels;

            ssize_t
              y;

            target_profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                /*
                  The image already has an ICC profile: it becomes the
                  source of the transform and the new profile the target.
                */
                target_profile=source_profile;
                source_profile=cmsOpenProfileFromMemTHR(cms_context,
                  GetStringInfoDatum(icc_profile),
                  (cmsUInt32Number) GetStringInfoLength(icc_profile));
                if (source_profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
#if defined(LCMSHDRI)
            source_scale=1.0;
#endif
            /*
              Map the LCMS source color space to a MagickCore colorspace,
              channel count, pixel layout, and (HDRI) scale factor.
            */
            source_colorspace=sRGBColorspace;
            source_channels=3;
            switch (cmsGetColorSpace(source_profile))
            {
              case cmsSigCmykData:
              {
                source_colorspace=CMYKColorspace;
                source_channels=4;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_CMYK_DBL;
                source_scale=100.0;
#else
                source_type=(cmsUInt32Number) TYPE_CMYK_16;
#endif
                break;
              }
              case cmsSigGrayData:
              {
                source_colorspace=GRAYColorspace;
                source_channels=1;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_GRAY_DBL;
#else
                source_type=(cmsUInt32Number) TYPE_GRAY_16;
#endif
                break;
              }
              case cmsSigLabData:
              {
                source_colorspace=LabColorspace;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_Lab_DBL;
                source_scale=100.0;
#else
                source_type=(cmsUInt32Number) TYPE_Lab_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigLuvData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YUV_16;
                break;
              }
#endif
              case cmsSigRgbData:
              {
                source_colorspace=sRGBColorspace;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_RGB_DBL;
#else
                source_type=(cmsUInt32Number) TYPE_RGB_16;
#endif
                break;
              }
              case cmsSigXYZData:
              {
                source_colorspace=XYZColorspace;
#if defined(LCMSHDRI)
                source_type=(cmsUInt32Number) TYPE_XYZ_DBL;
#else
                source_type=(cmsUInt32Number) TYPE_XYZ_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigYCbCrData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YCbCr_16;
                break;
              }
#endif
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            (void) source_colorspace;
            /*
              The target color space is the PCS of the source profile,
              unless a distinct target profile supplies its own space.
            */
            signature=cmsGetPCS(source_profile);
            if (target_profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_profile);
#if defined(LCMSHDRI)
            target_scale=1.0;
#endif
            target_channels=3;
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_colorspace=CMYKColorspace;
                target_channels=4;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_CMYK_DBL;
                target_scale=0.01;
#else
                target_type=(cmsUInt32Number) TYPE_CMYK_16;
#endif
                break;
              }
              case cmsSigGrayData:
              {
                target_colorspace=GRAYColorspace;
                target_channels=1;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_GRAY_DBL;
#else
                target_type=(cmsUInt32Number) TYPE_GRAY_16;
#endif
                break;
              }
              case cmsSigLabData:
              {
                target_colorspace=LabColorspace;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_Lab_DBL;
                target_scale=0.01;
#else
                target_type=(cmsUInt32Number) TYPE_Lab_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigLuvData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YUV_16;
                break;
              }
#endif
              case cmsSigRgbData:
              {
                target_colorspace=sRGBColorspace;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_RGB_DBL;
#else
                target_type=(cmsUInt32Number) TYPE_RGB_16;
#endif
                break;
              }
              case cmsSigXYZData:
              {
                target_colorspace=XYZColorspace;
#if defined(LCMSHDRI)
                target_type=(cmsUInt32Number) TYPE_XYZ_DBL;
#else
                target_type=(cmsUInt32Number) TYPE_XYZ_16;
#endif
                break;
              }
#if !defined(LCMSHDRI)
              case cmsSigYCbCrData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YCbCr_16;
                break;
              }
#endif
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            /* Translate the image's rendering intent to an LCMS intent. */
            switch (image->rendering_intent)
            {
              case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break;
              case PerceptualIntent: intent=INTENT_PERCEPTUAL; break;
              case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break;
              case SaturationIntent: intent=INTENT_SATURATION; break;
              default: intent=INTENT_PERCEPTUAL; break;
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            transform=AcquireTransformThreadSet(source_profile,source_type,
              target_profile,target_type,intent,flags,cms_context);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image profiles.
            */
            source_pixels=AcquirePixelThreadSet(image->columns,source_channels);
            target_pixels=AcquirePixelThreadSet(image->columns,target_channels);
            if ((source_pixels == (LCMSType **) NULL) ||
                (target_pixels == (LCMSType **) NULL))
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_profile);
                if (target_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_profile);
                return(MagickFalse);
              }
            if (target_colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_colorspace,exception);
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static) shared(status) \
              magick_number_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register LCMSType
                *p;

              register Quantum
                *magick_restrict q;

              register ssize_t
                x;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              /* Pack this row into the per-thread LCMS source buffer. */
              p=source_pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=LCMSScaleSource(GetPixelRed(image,q));
                if (source_channels > 1)
                  {
                    *p++=LCMSScaleSource(GetPixelGreen(image,q));
                    *p++=LCMSScaleSource(GetPixelBlue(image,q));
                  }
                if (source_channels > 3)
                  *p++=LCMSScaleSource(GetPixelBlack(image,q));
                q+=GetPixelChannels(image);
              }
              cmsDoTransform(transform[id],source_pixels[id],target_pixels[id],
                (unsigned int) image->columns);
              /* Rewind q to the row start, then unpack the transformed row. */
              p=target_pixels[id];
              q-=GetPixelChannels(image)*image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (target_channels == 1)
                  SetPixelGray(image,LCMSScaleTarget(*p),q);
                else
                  SetPixelRed(image,LCMSScaleTarget(*p),q);
                p++;
                if (target_channels > 1)
                  {
                    SetPixelGreen(image,LCMSScaleTarget(*p),q);
                    p++;
                    SetPixelBlue(image,LCMSScaleTarget(*p),q);
                    p++;
                  }
                if (target_channels > 3)
                  {
                    SetPixelBlack(image,LCMSScaleTarget(*p),q);
                    p++;
                  }
                q+=GetPixelChannels(image);
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp atomic
#endif
                  progress++;
                  proceed=SetImageProgress(image,ProfileImageTag,progress,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_colorspace,exception);
            /* Update the image type to match the new channel layout. */
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  TrueColorType : TrueColorAlphaType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  ColorSeparationType : ColorSeparationAlphaType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  GrayscaleType : GrayscaleAlphaType;
                break;
              }
              default:
                break;
            }
            target_pixels=DestroyPixelThreadSet(target_pixels);
            source_pixels=DestroyPixelThreadSet(source_pixels);
            transform=DestroyTransformThreadSet(transform);
            /* Device-link profiles are applied but never attached. */
            if ((status != MagickFalse) &&
                (cmsGetDeviceClass(source_profile) != cmsSigLinkClass))
              status=SetImageProfile(image,name,profile,exception);
            if (target_profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_profile);
          }
        (void) cmsCloseProfile(source_profile);
        cmsDeleteContext(cms_context);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% void *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  /*
    Remove the named profile from the image and hand it to the caller
    (the caller owns the returned StringInfo).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* purge any copy of this profile embedded in the 8BIM wrapper */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return((StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  /*
    Rewind the profile iterator used by GetNextImageProfile().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles != (SplayTreeInfo *) NULL)
    ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
/* Read one byte from a resource block; return the advanced position. */
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  quantum[0]=p[0];
  return(p+1);
}
/* Read a big-endian (MSB first) 32-bit value; return the advanced position. */
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  unsigned int
    value;

  value=((unsigned int) p[0] << 24) | ((unsigned int) p[1] << 16) |
    ((unsigned int) p[2] << 8) | (unsigned int) p[3];
  *quantum=value;
  return(p+4);
}
/* Read a big-endian (MSB first) 16-bit value; return the advanced position. */
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (((unsigned int) p[0] << 8) | (unsigned int) p[1]);
  return(p+2);
}
/* Write a 32-bit value big-endian (MSB first) at p. */
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  p[0]=(unsigned char) (quantum >> 24);
  p[1]=(unsigned char) (quantum >> 16);
  p[2]=(unsigned char) (quantum >> 8);
  p[3]=(unsigned char) quantum;
}
/*
  WriteTo8BimProfile() mirrors a named profile (icc, iptc, or xmp) into the
  image's "8bim" wrapper profile: the matching 8BIM resource is replaced
  with the new payload, or spliced out entirely when profile is NULL.
  Other profile names are ignored.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /* map the profile name to its Photoshop resource id */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;  /* no 8BIM representation for this name: nothing to do */
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  /* each resource: "8BIM" + short id + padded Pascal name + long length */
  for (p=datum; p < (datum+length-16); )
  {
    q=p;  /* q marks the start of the current resource block */
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;  /* Pascal name (count byte + text) is padded to even length */
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((count & 0x01) != 0)
      count++;  /* resource data is padded to an even byte count */
    if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        /* rebuild the 8bim profile with this resource removed or replaced */
        extract_extent=0;
        extent=(datum+length)-(p+count);  /* bytes after this resource */
        if (profile == (StringInfo *) NULL)
          {
            /* removal: keep everything before the block header ... */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /* replacement: keep the header, rewrite the length word and
               copy the new payload (padded to an even length) */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        /* ... then append the trailing resources unchanged */
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}
/*
  GetProfilesFromResourceBlock() walks a Photoshop 8BIM resource block and
  registers each embedded profile (iptc, icc, exif, xmp) with the image;
  the resolution resource (0x03ed) is applied to the image geometry and
  thumbnails are skipped.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block,ExceptionInfo *exception)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  /* each resource: "8BIM" + short id + padded Pascal name + long length */
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;  /* Pascal name (count byte + text) is padded to even length */
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution.
        */
        if (count < 10)
          break;
        p=ReadResourceLong(p,&resolution);
        image->resolution.x=((double) resolution)/65536.0;  /* 16.16 fixed */
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->resolution.y=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->resolution.x/=2.54;
            image->resolution.y/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail (not a profile): skip over its data.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    if ((count & 0x01) != 0)
      p++;  /* resource data is padded to an even byte count */
  }
}
#if defined(MAGICKCORE_XML_DELEGATE)
/*
  ValidateXMPProfile() returns MagickTrue when the profile payload parses
  as well-formed XML (parser errors and warnings are suppressed).
*/
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  xmlDocPtr
    document;

  /*
    Parse XML profile.
  */
  document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int)
    GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR |
    XML_PARSE_NOWARNING);
  if (document == (xmlDocPtr) NULL)
    return(MagickFalse);
  xmlFreeDoc(document);
  return(MagickTrue);
}
#else
/*
  Without the XML delegate, every XMP profile is reported as invalid.
*/
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  return(MagickFalse);
}
#endif
/*
  SetImageProfileInternal() installs a named profile on the image, creating
  the profile splay-tree on first use.  An xmp profile that fails XML
  validation is rejected with a warning but the call still returns
  MagickTrue, so a bad XMP payload does not abort the caller.  Unless the
  call is recursive, the profile is mirrored into the "8bim" wrapper;
  conversely, an "8bim" profile is unpacked into the profiles it embeds.
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent],
    property[MagickPathExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((LocaleCompare(name,"xmp") == 0) &&
      (ValidateXMPProfile(profile) == MagickFalse))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "CorruptImageProfile","`%s'",name);
      return(MagickTrue);
    }
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  /* profile names are stored lower-case */
  (void) CopyMagickString(key,name,MagickPathExtent);
  LocaleLower(key);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if (status != MagickFalse)
    {
      if (LocaleCompare(name,"8bim") == 0)
        GetProfilesFromResourceBlock(image,profile,exception);
      else
        if (recursive == MagickFalse)
          WriteTo8BimProfile(image,name,profile);
    }
  /*
    Inject profile into image properties.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%s:*",name);
  (void) GetImageProperty(image,property,exception);
  return(status);
}
/* Public entry point: install a named profile (non-recursive). */
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=SetImageProfileInternal(image,name,profile,MagickFalse,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the EXIF resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/* Consume one byte from the buffer, or return EOF when it is exhausted. */
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  unsigned char
    byte;

  if (*length == 0)
    return(EOF);
  byte=**p;
  (*p)++;
  (*length)--;
  return((int) byte);
}
/* Read a 16-bit value in the requested byte order, sign-extended via the
   signed member of a union (same bit pattern, different interpretation). */
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    hi,
    lo;

  if (endian == LSBEndian)
    {
      hi=(unsigned int) buffer[1];
      lo=(unsigned int) buffer[0];
    }
  else
    {
      hi=(unsigned int) buffer[0];
      lo=(unsigned int) buffer[1];
    }
  quantum.unsigned_value=((hi << 8) | lo) & 0xffff;
  return((signed short) quantum.signed_value);
}
/* Read a 32-bit value in the requested byte order; the union reinterprets
   the unsigned bit pattern as a signed result. */
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  if (endian == LSBEndian)
    quantum.unsigned_value=((unsigned int) buffer[3] << 24) |
      ((unsigned int) buffer[2] << 16) | ((unsigned int) buffer[1] << 8) |
      (unsigned int) buffer[0];
  else
    quantum.unsigned_value=((unsigned int) buffer[0] << 24) |
      ((unsigned int) buffer[1] << 16) | ((unsigned int) buffer[2] << 8) |
      (unsigned int) buffer[3];
  return(quantum.signed_value);
}
/* Read a big-endian 32-bit value and advance; zero when too few bytes. */
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    result;

  if (*length < 4)
    return(0);
  result=ReadProfileLong(MSBEndian,*p);
  *p+=4;
  (*length)-=4;
  return(result);
}
/* Read a big-endian 16-bit value and advance; zero when too few bytes. */
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    result;

  if (*length < 2)
    return(0);
  result=ReadProfileShort(MSBEndian,*p);
  *p+=2;
  (*length)-=2;
  return(result);
}
/* Serialize the low 32 bits of value at p in the requested byte order. */
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 24);
      buffer[1]=(unsigned char) (value >> 16);
      buffer[2]=(unsigned char) (value >> 8);
      buffer[3]=(unsigned char) value;
    }
  (void) memcpy(p,buffer,4);
}
/* Serialize a 16-bit value at p in the requested byte order. */
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 8);
      buffer[1]=(unsigned char) value;
    }
  (void) memcpy(p,buffer,2);
}
/*
  Sync8BimProfile() rewrites the resolution resource (id 0x03ed, 16 bytes)
  inside an 8BIM profile so it reflects the image's current resolution and
  units.  Returns MagickFalse when the profile is malformed.
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    /* scan byte-by-byte for the "8BIM" signature */
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
      return(MagickFalse);
    id=ReadProfileMSBShort(&p,&length);
    count=(ssize_t) ReadProfileByte(&p,&length);  /* Pascal name length */
    if ((count >= (ssize_t) length) || (count < 0))
      return(MagickFalse);
    p+=count;
    length-=count;
    /* NOTE(review): when length reaches 0 here, *p below reads one byte
       past the remaining data -- confirm whether a length check is needed */
    if ((*p & 0x01) == 0)
      (void) ReadProfileByte(&p,&length);
    count=(ssize_t) ReadProfileMSBLong(&p,&length);
    if ((count > (ssize_t) length) || (count < 0))
      return(MagickFalse);
    if ((id == 0x3ED) && (count == 16))
      {
        /* resolution resource: two 16.16 fixed-point values plus units */
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54*
            65536.0),p);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*
            65536.0),p);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54*
            65536.0),p+8);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*
            65536.0),p+8);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
      }
    p+=count;
    length-=count;
  }
  return(MagickTrue);
}
/*
  SyncExifProfile() updates the EXIF resolution (0x011a/0x011b), orientation
  (0x0112), and resolution-unit (0x0128) tags in place so they agree with
  the current Image attributes.  Returns MagickFalse when the profile is
  too short or the TIFF header is malformed.
*/
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  if (length < 16)
    return(MagickFalse);
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  if ((id != 0x4949) && (id != 0x4D4D))
    {
      /* not at the TIFF header: scan for the "Exif\0\0" marker and retry */
      while (length != 0)
      {
        if (ReadProfileByte(&exif,&length) != 0x45)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x78)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x69)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  /* 0x4949 ("II") is little-endian TIFF, 0x4D4D ("MM") is big-endian */
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;
  else
    if (id == 0x4D4D)
      endian=MSBEndian;
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  /* remembers visited entries so cyclic IFD offsets cannot loop forever */
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        /* pop the next pending directory off the stack */
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      q=(unsigned char *) (directory+2+(12*entry));  /* 12-byte IFD entry */
      if (q > (exif+length-12))
        break; /* corrupt EXIF */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break; /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break; /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value fits inline in the entry */
      else
        {
          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ReadProfileLong(endian,q+8);
          if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
            continue;
          if (~length < number_bytes)
            continue; /* prevent overflow */
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution: rational numerator; denominator rewritten to 1 */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit (EXIF value is ImageMagick units+1) */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          /* descend into the sub-IFD; push the current directory first */
          offset=(ssize_t) ReadProfileLong(endian,p);
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              /* also queue the next IFD chained after this directory */
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}
/*
  SyncImageProfiles() pushes the current image resolution/orientation into
  the 8BIM and EXIF profiles; returns MagickFalse if either sync fails.
*/
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if ((profile != (StringInfo *) NULL) &&
      (Sync8BimProfile(image,profile) == MagickFalse))
    status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if ((profile != (StringInfo *) NULL) &&
      (SyncExifProfile(image,profile) == MagickFalse))
    status=MagickFalse;
  return(status);
}
|
hasGPU.c | //https://lc.llnl.gov/confluence/display/LC/Clang+OpenMP+4.5+with+GPU+support#space-menu-link-content
// Revised a bit
#include <stdio.h>
#include <omp.h>
int main()
{
    /* Report how many OpenMP threads the host parallel region uses. */
#pragma omp parallel
    {
#pragma omp master
        {
            printf ("Using %d out of max %d threads...\n", omp_get_num_threads(),
                    omp_get_max_threads());
        }
    }

    printf ("The number of target devices =%d\n", omp_get_num_devices());

    /* Test if GPU is available using OpenMP4.5 */
    int on_gpu = 0;
#pragma omp target map(from:on_gpu)
    {
        /* omp_is_initial_device() is true only when executing on the host. */
        if (!omp_is_initial_device())
            on_gpu = 1;
    }

    if (on_gpu)
        printf("### Able to use the GPU! ### \n");
    else
        printf("### Unable to use the GPU, using CPU! ###\n");

    return 0;
}
|
convolution_2x2_pack8_fp16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv2x2s1_weight_fp16_pack8_avx(const Mat& kernel, Mat& kernel_tm_pack8, int num_input, int num_output)
{
// src = kw-kh-inch-outch
// dst = 8b-8a-kw-kh-inch/8a-outch/8b
Mat weight_data_r2 = kernel.reshape(4, num_input, num_output);
kernel_tm_pack8.create(4, num_input / 8, num_output / 8, (size_t)2 * 64, 64);
for (int q = 0; q + 7 < num_output; q += 8)
{
const Mat k0 = weight_data_r2.channel(q);
const Mat k1 = weight_data_r2.channel(q + 1);
const Mat k2 = weight_data_r2.channel(q + 2);
const Mat k3 = weight_data_r2.channel(q + 3);
const Mat k4 = weight_data_r2.channel(q + 4);
const Mat k5 = weight_data_r2.channel(q + 5);
const Mat k6 = weight_data_r2.channel(q + 6);
const Mat k7 = weight_data_r2.channel(q + 7);
Mat g0 = kernel_tm_pack8.channel(q / 8);
for (int p = 0; p + 7 < num_input; p += 8)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k04 = k0.row(p + 4);
const float* k05 = k0.row(p + 5);
const float* k06 = k0.row(p + 6);
const float* k07 = k0.row(p + 7);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k14 = k1.row(p + 4);
const float* k15 = k1.row(p + 5);
const float* k16 = k1.row(p + 6);
const float* k17 = k1.row(p + 7);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k24 = k2.row(p + 4);
const float* k25 = k2.row(p + 5);
const float* k26 = k2.row(p + 6);
const float* k27 = k2.row(p + 7);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k34 = k3.row(p + 4);
const float* k35 = k3.row(p + 5);
const float* k36 = k3.row(p + 6);
const float* k37 = k3.row(p + 7);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k44 = k4.row(p + 4);
const float* k45 = k4.row(p + 5);
const float* k46 = k4.row(p + 6);
const float* k47 = k4.row(p + 7);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k54 = k5.row(p + 4);
const float* k55 = k5.row(p + 5);
const float* k56 = k5.row(p + 6);
const float* k57 = k5.row(p + 7);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k64 = k6.row(p + 4);
const float* k65 = k6.row(p + 5);
const float* k66 = k6.row(p + 6);
const float* k67 = k6.row(p + 7);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
const float* k74 = k7.row(p + 4);
const float* k75 = k7.row(p + 5);
const float* k76 = k7.row(p + 6);
const float* k77 = k7.row(p + 7);
unsigned short* g00 = (unsigned short*)g0.row(p / 8);
for (int k = 0; k < 4; k++)
{
g00[0] = float32_to_float16(k00[k]);
g00[1] = float32_to_float16(k10[k]);
g00[2] = float32_to_float16(k20[k]);
g00[3] = float32_to_float16(k30[k]);
g00[4] = float32_to_float16(k40[k]);
g00[5] = float32_to_float16(k50[k]);
g00[6] = float32_to_float16(k60[k]);
g00[7] = float32_to_float16(k70[k]);
g00 += 8;
g00[0] = float32_to_float16(k01[k]);
g00[1] = float32_to_float16(k11[k]);
g00[2] = float32_to_float16(k21[k]);
g00[3] = float32_to_float16(k31[k]);
g00[4] = float32_to_float16(k41[k]);
g00[5] = float32_to_float16(k51[k]);
g00[6] = float32_to_float16(k61[k]);
g00[7] = float32_to_float16(k71[k]);
g00 += 8;
g00[0] = float32_to_float16(k02[k]);
g00[1] = float32_to_float16(k12[k]);
g00[2] = float32_to_float16(k22[k]);
g00[3] = float32_to_float16(k32[k]);
g00[4] = float32_to_float16(k42[k]);
g00[5] = float32_to_float16(k52[k]);
g00[6] = float32_to_float16(k62[k]);
g00[7] = float32_to_float16(k72[k]);
g00 += 8;
g00[0] = float32_to_float16(k03[k]);
g00[1] = float32_to_float16(k13[k]);
g00[2] = float32_to_float16(k23[k]);
g00[3] = float32_to_float16(k33[k]);
g00[4] = float32_to_float16(k43[k]);
g00[5] = float32_to_float16(k53[k]);
g00[6] = float32_to_float16(k63[k]);
g00[7] = float32_to_float16(k73[k]);
g00 += 8;
g00[0] = float32_to_float16(k04[k]);
g00[1] = float32_to_float16(k14[k]);
g00[2] = float32_to_float16(k24[k]);
g00[3] = float32_to_float16(k34[k]);
g00[4] = float32_to_float16(k44[k]);
g00[5] = float32_to_float16(k54[k]);
g00[6] = float32_to_float16(k64[k]);
g00[7] = float32_to_float16(k74[k]);
g00 += 8;
g00[0] = float32_to_float16(k05[k]);
g00[1] = float32_to_float16(k15[k]);
g00[2] = float32_to_float16(k25[k]);
g00[3] = float32_to_float16(k35[k]);
g00[4] = float32_to_float16(k45[k]);
g00[5] = float32_to_float16(k55[k]);
g00[6] = float32_to_float16(k65[k]);
g00[7] = float32_to_float16(k75[k]);
g00 += 8;
g00[0] = float32_to_float16(k06[k]);
g00[1] = float32_to_float16(k16[k]);
g00[2] = float32_to_float16(k26[k]);
g00[3] = float32_to_float16(k36[k]);
g00[4] = float32_to_float16(k46[k]);
g00[5] = float32_to_float16(k56[k]);
g00[6] = float32_to_float16(k66[k]);
g00[7] = float32_to_float16(k76[k]);
g00 += 8;
g00[0] = float32_to_float16(k07[k]);
g00[1] = float32_to_float16(k17[k]);
g00[2] = float32_to_float16(k27[k]);
g00[3] = float32_to_float16(k37[k]);
g00[4] = float32_to_float16(k47[k]);
g00[5] = float32_to_float16(k57[k]);
g00[6] = float32_to_float16(k67[k]);
g00[7] = float32_to_float16(k77[k]);
g00 += 8;
}
}
}
}
static void conv2x2s1_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p).row(q);
// const float* kptr = (const float*)kernel + 4 * inch * p * 64;
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _mm256_loadu_ps(outptr0);
__m256 _sum1 = _mm256_loadu_ps(outptr0 + 8);
__m256 _r00 = _mm256_broadcast_ss(r0);
__m256 _r01 = _mm256_broadcast_ss(r0 + 1);
__m256 _r02 = _mm256_broadcast_ss(r0 + 2);
__m256 _r03 = _mm256_broadcast_ss(r0 + 3);
__m256 _r04 = _mm256_broadcast_ss(r0 + 4);
__m256 _r05 = _mm256_broadcast_ss(r0 + 5);
__m256 _r06 = _mm256_broadcast_ss(r0 + 6);
__m256 _r07 = _mm256_broadcast_ss(r0 + 7);
r0 += 8;
__m256 _k00 = loadfp16(kptr);
__m256 _k01 = loadfp16(kptr + 8);
__m256 _k02 = loadfp16(kptr + 16);
__m256 _k03 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k03, _r03, _sum0);
__m256 _k04 = loadfp16(kptr);
__m256 _k05 = loadfp16(kptr + 8);
__m256 _k06 = loadfp16(kptr + 16);
__m256 _k07 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k05, _r05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k06, _r06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k07, _r07, _sum0);
//========================================
_r00 = _mm256_broadcast_ss(r0);
_r01 = _mm256_broadcast_ss(r0 + 1);
_r02 = _mm256_broadcast_ss(r0 + 2);
_r03 = _mm256_broadcast_ss(r0 + 3);
_r04 = _mm256_broadcast_ss(r0 + 4);
_r05 = _mm256_broadcast_ss(r0 + 5);
_r06 = _mm256_broadcast_ss(r0 + 6);
_r07 = _mm256_broadcast_ss(r0 + 7);
r0 += 8;
_sum1 = _mm256_comp_fmadd_ps(_k00, _r00, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k02, _r02, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k03, _r03, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k04, _r04, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k05, _r05, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k06, _r06, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k07, _r07, _sum1);
_k00 = loadfp16(kptr);
_k01 = loadfp16(kptr + 8);
_k02 = loadfp16(kptr + 16);
_k03 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k03, _r03, _sum0);
_k04 = loadfp16(kptr);
_k05 = loadfp16(kptr + 8);
_k06 = loadfp16(kptr + 16);
_k07 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k05, _r05, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k06, _r06, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k07, _r07, _sum0);
_r00 = _mm256_broadcast_ss(r0);
_r01 = _mm256_broadcast_ss(r0 + 1);
_r02 = _mm256_broadcast_ss(r0 + 2);
_r03 = _mm256_broadcast_ss(r0 + 3);
_r04 = _mm256_broadcast_ss(r0 + 4);
_r05 = _mm256_broadcast_ss(r0 + 5);
_r06 = _mm256_broadcast_ss(r0 + 6);
_r07 = _mm256_broadcast_ss(r0 + 7);
_sum1 = _mm256_comp_fmadd_ps(_k00, _r00, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k02, _r02, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k03, _r03, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k04, _r04, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k05, _r05, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k06, _r06, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k07, _r07, _sum1);
//===============
__m256 _r10 = _mm256_broadcast_ss(r1);
__m256 _r11 = _mm256_broadcast_ss(r1 + 1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 2);
__m256 _r13 = _mm256_broadcast_ss(r1 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 4);
__m256 _r15 = _mm256_broadcast_ss(r1 + 5);
__m256 _r16 = _mm256_broadcast_ss(r1 + 6);
__m256 _r17 = _mm256_broadcast_ss(r1 + 7);
__m256 _k10 = loadfp16(kptr);
__m256 _k11 = loadfp16(kptr + 8);
__m256 _k12 = loadfp16(kptr + 16);
__m256 _k13 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k13, _r13, _sum0);
__m256 _k14 = loadfp16(kptr);
__m256 _k15 = loadfp16(kptr + 8);
__m256 _k16 = loadfp16(kptr + 16);
__m256 _k17 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_comp_fmadd_ps(_k14, _r14, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k15, _r15, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k16, _r16, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k17, _r17, _sum0);
//=======================================
r1 += 8;
_r10 = _mm256_broadcast_ss(r1);
_r11 = _mm256_broadcast_ss(r1 + 1);
_r12 = _mm256_broadcast_ss(r1 + 2);
_r13 = _mm256_broadcast_ss(r1 + 3);
_r14 = _mm256_broadcast_ss(r1 + 4);
_r15 = _mm256_broadcast_ss(r1 + 5);
_r16 = _mm256_broadcast_ss(r1 + 6);
_r17 = _mm256_broadcast_ss(r1 + 7);
_sum1 = _mm256_comp_fmadd_ps(_k10, _r10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k11, _r11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k13, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k14, _r14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k15, _r15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k16, _r16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k17, _r17, _sum1);
_k10 = loadfp16(kptr);
_k11 = loadfp16(kptr + 8);
_k12 = loadfp16(kptr + 16);
_k13 = loadfp16(kptr + 24);
kptr += 32;
_sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k13, _r13, _sum0);
_k14 = loadfp16(kptr);
_k15 = loadfp16(kptr + 8);
_k16 = loadfp16(kptr + 16);
_k17 = loadfp16(kptr + 24);
_sum0 = _mm256_comp_fmadd_ps(_k14, _r14, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k15, _r15, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k16, _r16, _sum0);
_sum0 = _mm256_comp_fmadd_ps(_k17, _r17, _sum0);
r1 += 8;
_r10 = _mm256_broadcast_ss(r1);
_r11 = _mm256_broadcast_ss(r1 + 1);
_r12 = _mm256_broadcast_ss(r1 + 2);
_r13 = _mm256_broadcast_ss(r1 + 3);
_r14 = _mm256_broadcast_ss(r1 + 4);
_r15 = _mm256_broadcast_ss(r1 + 5);
_r16 = _mm256_broadcast_ss(r1 + 6);
_r17 = _mm256_broadcast_ss(r1 + 7);
_sum1 = _mm256_comp_fmadd_ps(_k10, _r10, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k11, _r11, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k12, _r12, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k13, _r13, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k14, _r14, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k15, _r15, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k16, _r16, _sum1);
_sum1 = _mm256_comp_fmadd_ps(_k17, _r17, _sum1);
kptr -= 224;
_mm256_storeu_ps(outptr0, _sum0);
_mm256_storeu_ps(outptr0 + 8, _sum1);
outptr0 += 16;
}
for (; j < outw; j++)
{
__m256 _sum = _mm256_loadu_ps(outptr0);
__m256 _r00 = _mm256_broadcast_ss(r0);
__m256 _r01 = _mm256_broadcast_ss(r0 + 1);
__m256 _r02 = _mm256_broadcast_ss(r0 + 2);
__m256 _r03 = _mm256_broadcast_ss(r0 + 3);
__m256 _r04 = _mm256_broadcast_ss(r0 + 4);
__m256 _r05 = _mm256_broadcast_ss(r0 + 5);
__m256 _r06 = _mm256_broadcast_ss(r0 + 6);
__m256 _r07 = _mm256_broadcast_ss(r0 + 7);
__m256 _k00 = loadfp16(kptr);
__m256 _k01 = loadfp16(kptr + 8);
__m256 _k02 = loadfp16(kptr + 16);
__m256 _k03 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_comp_fmadd_ps(_k00, _r00, _sum);
_sum = _mm256_comp_fmadd_ps(_k01, _r01, _sum);
_sum = _mm256_comp_fmadd_ps(_k02, _r02, _sum);
_sum = _mm256_comp_fmadd_ps(_k03, _r03, _sum);
__m256 _k04 = loadfp16(kptr);
__m256 _k05 = loadfp16(kptr + 8);
__m256 _k06 = loadfp16(kptr + 16);
__m256 _k07 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_comp_fmadd_ps(_k04, _r04, _sum);
_sum = _mm256_comp_fmadd_ps(_k05, _r05, _sum);
_sum = _mm256_comp_fmadd_ps(_k06, _r06, _sum);
_sum = _mm256_comp_fmadd_ps(_k07, _r07, _sum);
//========================================
r0 += 8;
_r00 = _mm256_broadcast_ss(r0);
_r01 = _mm256_broadcast_ss(r0 + 1);
_r02 = _mm256_broadcast_ss(r0 + 2);
_r03 = _mm256_broadcast_ss(r0 + 3);
_r04 = _mm256_broadcast_ss(r0 + 4);
_r05 = _mm256_broadcast_ss(r0 + 5);
_r06 = _mm256_broadcast_ss(r0 + 6);
_r07 = _mm256_broadcast_ss(r0 + 7);
_k00 = loadfp16(kptr);
_k01 = loadfp16(kptr + 8);
_k02 = loadfp16(kptr + 16);
_k03 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_comp_fmadd_ps(_k00, _r00, _sum);
_sum = _mm256_comp_fmadd_ps(_k01, _r01, _sum);
_sum = _mm256_comp_fmadd_ps(_k02, _r02, _sum);
_sum = _mm256_comp_fmadd_ps(_k03, _r03, _sum);
_k04 = loadfp16(kptr);
_k05 = loadfp16(kptr + 8);
_k06 = loadfp16(kptr + 16);
_k07 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_comp_fmadd_ps(_k04, _r04, _sum);
_sum = _mm256_comp_fmadd_ps(_k05, _r05, _sum);
_sum = _mm256_comp_fmadd_ps(_k06, _r06, _sum);
_sum = _mm256_comp_fmadd_ps(_k07, _r07, _sum);
//===============
__m256 _r10 = _mm256_broadcast_ss(r1);
__m256 _r11 = _mm256_broadcast_ss(r1 + 1);
__m256 _r12 = _mm256_broadcast_ss(r1 + 2);
__m256 _r13 = _mm256_broadcast_ss(r1 + 3);
__m256 _r14 = _mm256_broadcast_ss(r1 + 4);
__m256 _r15 = _mm256_broadcast_ss(r1 + 5);
__m256 _r16 = _mm256_broadcast_ss(r1 + 6);
__m256 _r17 = _mm256_broadcast_ss(r1 + 7);
__m256 _k10 = loadfp16(kptr);
__m256 _k11 = loadfp16(kptr + 8);
__m256 _k12 = loadfp16(kptr + 16);
__m256 _k13 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_comp_fmadd_ps(_k10, _r10, _sum);
_sum = _mm256_comp_fmadd_ps(_k11, _r11, _sum);
_sum = _mm256_comp_fmadd_ps(_k12, _r12, _sum);
_sum = _mm256_comp_fmadd_ps(_k13, _r13, _sum);
__m256 _k14 = loadfp16(kptr);
__m256 _k15 = loadfp16(kptr + 8);
__m256 _k16 = loadfp16(kptr + 16);
__m256 _k17 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_comp_fmadd_ps(_k14, _r14, _sum);
_sum = _mm256_comp_fmadd_ps(_k15, _r15, _sum);
_sum = _mm256_comp_fmadd_ps(_k16, _r16, _sum);
_sum = _mm256_comp_fmadd_ps(_k17, _r17, _sum);
//=======================================
r1 += 8;
_r10 = _mm256_broadcast_ss(r1);
_r11 = _mm256_broadcast_ss(r1 + 1);
_r12 = _mm256_broadcast_ss(r1 + 2);
_r13 = _mm256_broadcast_ss(r1 + 3);
_r14 = _mm256_broadcast_ss(r1 + 4);
_r15 = _mm256_broadcast_ss(r1 + 5);
_r16 = _mm256_broadcast_ss(r1 + 6);
_r17 = _mm256_broadcast_ss(r1 + 7);
_k10 = loadfp16(kptr);
_k11 = loadfp16(kptr + 8);
_k12 = loadfp16(kptr + 16);
_k13 = loadfp16(kptr + 24);
kptr += 32;
_sum = _mm256_comp_fmadd_ps(_k10, _r10, _sum);
_sum = _mm256_comp_fmadd_ps(_k11, _r11, _sum);
_sum = _mm256_comp_fmadd_ps(_k12, _r12, _sum);
_sum = _mm256_comp_fmadd_ps(_k13, _r13, _sum);
_k14 = loadfp16(kptr);
_k15 = loadfp16(kptr + 8);
_k16 = loadfp16(kptr + 16);
_k17 = loadfp16(kptr + 24);
_sum = _mm256_comp_fmadd_ps(_k14, _r14, _sum);
_sum = _mm256_comp_fmadd_ps(_k15, _r15, _sum);
_sum = _mm256_comp_fmadd_ps(_k16, _r16, _sum);
_sum = _mm256_comp_fmadd_ps(_k17, _r17, _sum);
kptr -= 224;
_mm256_storeu_ps(outptr0, _sum);
outptr0 += 8;
}
r0 += 8;
r1 += 8;
}
}
}
}
|
xgboost_data.h | #ifndef XGBOOST_DATA_H
#define XGBOOST_DATA_H
/*!
* \file xgboost_data.h
* \brief the input data structure for gradient boosting
* \author Tianqi Chen: tianqi.tchen@gmail.com
*/
#include <cstdio>
#include <vector>
#include <climits>
#include <algorithm>
#include "../utils/xgboost_utils.h"
#include "../utils/xgboost_stream.h"
#include "../utils/xgboost_matrix_csr.h"
namespace xgboost{
namespace booster{
/*! \brief interger type used in boost */
typedef int bst_int;
/*! \brief unsigned interger type used in boost */
typedef unsigned bst_uint;
/*! \brief float type used in boost */
typedef float bst_float;
/*! \brief debug option for booster */
const bool bst_debug = false;
};
};
namespace xgboost{
namespace booster{
/**
* \brief This is a interface, defining the way to access features,
* by column or by row. This interface is used to make implementation
* of booster does not depend on how feature is stored.
*
* Why template instead of virtual class: for efficiency
* feature matrix is going to be used by most inner loop of the algorithm
*
* \tparam Derived type of actual implementation
* \sa FMatrixS: most of time FMatrixS is sufficient, refer to it if you find it confusing
*/
template<typename Derived>
struct FMatrix{
public:
/*! \brief exmaple iterator over one row */
struct RowIter{
/*!
* \brief move to next position
* \return whether there is element in next position
*/
inline bool Next(void);
/*! \return feature index in current position */
inline bst_uint findex(void) const;
/*! \return feature value in current position */
inline bst_float fvalue(void) const;
};
/*! \brief example iterator over one column */
struct ColIter{
/*!
* \brief move to next position
* \return whether there is element in next position
*/
inline bool Next(void);
/*! \return row index of current position */
inline bst_uint rindex(void) const;
/*! \return feature value in current position */
inline bst_float fvalue(void) const;
};
/*! \brief backward iterator over column */
struct ColBackIter : public ColIter {};
public:
/*!
* \brief get number of rows
* \return number of rows
*/
inline size_t NumRow(void) const;
/*!
* \brief get number of columns
* \return number of columns
*/
inline size_t NumCol(void) const;
/*!
* \brief get row iterator
* \param ridx row index
* \return row iterator
*/
inline RowIter GetRow(size_t ridx) const;
/*!
* \brief get number of column groups, this ise used together with GetRow( ridx, gid )
* \return number of column group
*/
inline unsigned NumColGroup(void) const{
return 1;
}
/*!
* \brief get row iterator, return iterator of specific column group
* \param ridx row index
* \param gid colmun group id
* \return row iterator, only iterates over features of specified column group
*/
inline RowIter GetRow(size_t ridx, unsigned gid) const;
/*! \return whether column access is enabled */
inline bool HaveColAccess(void) const;
/*!
* \brief get column iterator, the columns must be sorted by feature value
* \param ridx column index
* \return column iterator
*/
inline ColIter GetSortedCol(size_t ridx) const;
/*!
* \brief get column backward iterator, starts from biggest fvalue, and iterator back
* \param ridx column index
* \return reverse column iterator
*/
inline ColBackIter GetReverseSortedCol(size_t ridx) const;
};
};
};
namespace xgboost{
namespace booster{
/*!
* \brief feature matrix to store training instance, in sparse CSR format
*/
class FMatrixS : public FMatrix<FMatrixS>{
public:
/*! \brief one entry in a row */
struct REntry{
/*! \brief feature index */
bst_uint findex;
/*! \brief feature value */
bst_float fvalue;
/*! \brief constructor */
REntry(void){}
/*! \brief constructor */
REntry(bst_uint findex, bst_float fvalue) : findex(findex), fvalue(fvalue){}
inline static bool cmp_fvalue(const REntry &a, const REntry &b){
return a.fvalue < b.fvalue;
}
};
/*! \brief one row of sparse feature matrix */
struct Line{
/*! \brief array of feature index */
const REntry *data_;
/*! \brief size of the data */
bst_uint len;
/*! \brief get k-th element */
inline const REntry& operator[](unsigned i) const{
return data_[i];
}
};
/*! \brief row iterator */
struct RowIter{
const REntry *dptr_, *end_;
RowIter(const REntry* dptr, const REntry* end)
:dptr_(dptr), end_(end){}
inline bool Next(void){
if (dptr_ == end_) return false;
else{
++dptr_; return true;
}
}
inline bst_uint findex(void) const{
return dptr_->findex;
}
inline bst_float fvalue(void) const{
return dptr_->fvalue;
}
};
/*! \brief column iterator */
struct ColIter : public RowIter{
ColIter(const REntry* dptr, const REntry* end)
:RowIter(dptr, end){}
inline bst_uint rindex(void) const{
return this->findex();
}
};
/*! \brief reverse column iterator */
struct ColBackIter : public ColIter{
ColBackIter(const REntry* dptr, const REntry* end)
:ColIter(dptr, end){}
// shadows RowIter::Next
inline bool Next(void){
if (dptr_ == end_) return false;
else{
--dptr_; return true;
}
}
};
public:
/*! \brief constructor */
FMatrixS(void){ this->Clear(); }
/*! \brief get number of rows */
inline size_t NumRow(void) const{
return row_ptr_.size() - 1;
}
/*!
* \brief get number of nonzero entries
* \return number of nonzero entries
*/
inline size_t NumEntry(void) const{
return row_data_.size();
}
/*! \brief clear the storage */
inline void Clear(void){
row_ptr_.clear();
row_ptr_.push_back(0);
row_data_.clear();
col_ptr_.clear();
col_data_.clear();
}
/*! \brief get sparse part of current row */
inline Line operator[](size_t sidx) const{
Line sp;
utils::Assert(!bst_debug || sidx < this->NumRow(), "row id exceed bound");
sp.len = static_cast<bst_uint>(row_ptr_[sidx + 1] - row_ptr_[sidx]);
sp.data_ = &row_data_[row_ptr_[sidx]];
return sp;
}
/*!
* \brief add a row to the matrix, with data stored in STL container
* \param findex feature index
* \param fvalue feature value
* \param fstart start bound of feature
* \param fend end bound range of feature
* \return the row id added line
*/
inline size_t AddRow(const std::vector<bst_uint> &findex,
const std::vector<bst_float> &fvalue,
unsigned fstart = 0, unsigned fend = UINT_MAX){
utils::Assert(findex.size() == fvalue.size());
unsigned cnt = 0;
for (size_t i = 0; i < findex.size(); i++){
if (findex[i] < fstart || findex[i] >= fend) continue;
row_data_.push_back(REntry(findex[i], fvalue[i]));
cnt++;
}
row_ptr_.push_back(row_ptr_.back() + cnt);
return row_ptr_.size() - 2;
}
/*! \brief get row iterator*/
inline RowIter GetRow(size_t ridx) const{
utils::Assert(!bst_debug || ridx < this->NumRow(), "row id exceed bound");
return RowIter(&row_data_[row_ptr_[ridx]] - 1, &row_data_[row_ptr_[ridx + 1]] - 1);
}
/*! \brief get row iterator*/
inline RowIter GetRow(size_t ridx, unsigned gid) const{
utils::Assert(gid == 0, "FMatrixS only have 1 column group");
return FMatrixS::GetRow(ridx);
}
public:
/*! \return whether column access is enabled */
inline bool HaveColAccess(void) const{
return col_ptr_.size() != 0 && col_data_.size() == row_data_.size();
}
/*! \brief get number of colmuns */
inline size_t NumCol(void) const{
utils::Assert(this->HaveColAccess());
return col_ptr_.size() - 1;
}
/*! \brief get col iterator*/
inline ColIter GetSortedCol(size_t cidx) const{
utils::Assert(!bst_debug || cidx < this->NumCol(), "col id exceed bound");
return ColIter(&col_data_[col_ptr_[cidx]] - 1, &col_data_[col_ptr_[cidx + 1]] - 1);
}
/*! \brief get col iterator */
inline ColBackIter GetReverseSortedCol(size_t cidx) const{
utils::Assert(!bst_debug || cidx < this->NumCol(), "col id exceed bound");
return ColBackIter(&col_data_[col_ptr_[cidx + 1]], &col_data_[col_ptr_[cidx]]);
}
/*!
* \brief intialize the data so that we have both column and row major
* access, call this whenever we need column access
*/
inline void InitData(void){
utils::SparseCSRMBuilder<REntry> builder(col_ptr_, col_data_);
builder.InitBudget(0);
for (size_t i = 0; i < this->NumRow(); i++){
for (RowIter it = this->GetRow(i); it.Next();){
builder.AddBudget(it.findex());
}
}
builder.InitStorage();
for (size_t i = 0; i < this->NumRow(); i++){
for (RowIter it = this->GetRow(i); it.Next();){
builder.PushElem(it.findex(), REntry((bst_uint)i, it.fvalue()));
}
}
// sort columns
unsigned ncol = static_cast<unsigned>(this->NumCol());
#pragma omp parallel for schedule(static)
for (unsigned i = 0; i < ncol; i++){
std::sort(&col_data_[col_ptr_[i]], &col_data_[col_ptr_[i + 1]], REntry::cmp_fvalue);
}
}
/*!
* \brief save data to binary stream
* note: since we have size_t in ptr,
* the function is not consistent between 64bit and 32bit machine
* \param fo output stream
*/
inline void SaveBinary(utils::IStream &fo) const{
FMatrixS::SaveBinary(fo, row_ptr_, row_data_);
int col_access = this->HaveColAccess() ? 1 : 0;
fo.Write(&col_access, sizeof(int));
if (col_access != 0){
FMatrixS::SaveBinary(fo, col_ptr_, col_data_);
}
}
/*!
* \brief load data from binary stream
* note: since we have size_t in ptr,
* the function is not consistent between 64bit and 32bit machin
* \param fi input stream
*/
inline void LoadBinary(utils::IStream &fi){
FMatrixS::LoadBinary(fi, row_ptr_, row_data_);
int col_access;
fi.Read(&col_access, sizeof(int));
if (col_access != 0){
FMatrixS::LoadBinary(fi, col_ptr_, col_data_);
}else{
this->InitData();
}
}
/*!
* \brief load from text file
* \param fi input file pointer
*/
inline void LoadText(FILE *fi){
this->Clear();
int ninst;
while (fscanf(fi, "%d", &ninst) == 1){
std::vector<booster::bst_uint> findex;
std::vector<booster::bst_float> fvalue;
while (ninst--){
unsigned index; float value;
utils::Assert(fscanf(fi, "%u:%f", &index, &value) == 2, "load Text");
findex.push_back(index); fvalue.push_back(value);
}
this->AddRow(findex, fvalue);
}
// initialize column support as well
this->InitData();
}
private:
/*!
* \brief save data to binary stream
* \param fo output stream
* \param ptr pointer data
* \param data data content
*/
inline static void SaveBinary(utils::IStream &fo,
const std::vector<size_t> &ptr,
const std::vector<REntry> &data){
size_t nrow = ptr.size() - 1;
fo.Write(&nrow, sizeof(size_t));
fo.Write(&ptr[0], ptr.size() * sizeof(size_t));
if (data.size() != 0){
fo.Write(&data[0], data.size() * sizeof(REntry));
}
}
/*!
* \brief load data from binary stream
* \param fi input stream
* \param ptr pointer data
* \param data data content
*/
inline static void LoadBinary(utils::IStream &fi,
std::vector<size_t> &ptr,
std::vector<REntry> &data){
size_t nrow;
utils::Assert(fi.Read(&nrow, sizeof(size_t)) != 0, "Load FMatrixS");
ptr.resize(nrow + 1);
utils::Assert(fi.Read(&ptr[0], ptr.size() * sizeof(size_t)) != 0, "Load FMatrixS");
data.resize(ptr.back());
if (data.size() != 0){
utils::Assert(fi.Read(&data[0], data.size() * sizeof(REntry)) != 0, "Load FMatrixS");
}
}
public:
/*! \brief row pointer of CSR sparse storage */
std::vector<size_t> row_ptr_;
/*! \brief data in the row */
std::vector<REntry> row_data_;
/*! \brief column pointer of CSC format */
std::vector<size_t> col_ptr_;
/*! \brief column datas */
std::vector<REntry> col_data_;
};
};
};
#endif
|
known_hosts_fmt_plug.c | /* Quick-and-dirty cracker for ~/.ssh/known_hosts hashes (HashKnownHosts yes).
*
* Based on http://blog.tremily.us/posts/known_hosts/
*
* This software is Copyright (c) 2014, Dhiru Kholia <dhiru at openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Significant speedup Dec 2014, JimF. OMPSCALE was way off, and:
* NOTE Appears that salt and password are reversed?? With this info, salt was
* redone, to compute the first half of the HMAC, and double the speed.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_known_hosts;
#elif FMT_REGISTERS_H
john_register_one(&fmt_known_hosts);
#else
#include "sha.h"
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "base64.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "known_hosts"
#define FORMAT_TAG "$known_hosts$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define FORMAT_NAME "HashKnownHosts HMAC-SHA1"
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 20
#define BINARY_ENCODED_SIZE 28
#define PAD_SIZE 64
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests known_hosts_tests[] = {
{"$known_hosts$|1|yivSFSAv9mhGu/GPc14KpaPMSjE=|I9L3FH6RGefWIFb0Po74BVN3Fto=", "213.100.98.219"},
{"$known_hosts$|1|pgjIzNM77FYsBHLfKvvG9aWpKAA=|XbHqTCXG1JAV6fb2h2HT8MT7kGU=", "192.30.252.130"},
{"$known_hosts$|1|vAQX51f9EfXY33/j3upxFIlI1ds=|q+CzSLaa1EaSsAQzP/XRM/gaFQ4=", "192.30.252.128"},
{"$known_hosts$|1|F1E1KeoE/eEWhi10WpGv4OdiO6Y=|3988QV0VE8wmZL7suNrYQLITLCg=", "192.168.1.61"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static struct custom_salt {
SHA_CTX ipad_ctx;
SHA_CTX opad_ctx;
} *cur_salt;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
return 0;
p = q = ciphertext + TAG_LENGTH;
if (p[0] != '|' || p[2] != '|')
return 0;
p += 3;
q = strchr(p, '|');
if (q -p != BINARY_ENCODED_SIZE)
return 0;
p = strrchr(ciphertext, '|') + 1;
if (strlen(p) != BINARY_ENCODED_SIZE)
return 0;
return 1;
}
static void *get_salt(char *ciphertext)
{
char *p, *q;
unsigned char ipad[20], opad[20], salt[20 + 4 + 1];
int i;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
p = ciphertext + TAG_LENGTH + 3;
q = strchr(p, '|');
base64_decode(p, q - p, (char*)salt);
for (i = 0; i < 20; ++i) {
ipad[i] = salt[i] ^ 0x36;
opad[i] = salt[i] ^ 0x5C;
}
SHA1_Init(&cs.ipad_ctx);
SHA1_Update(&cs.ipad_ctx, ipad, 20);
SHA1_Update(&cs.ipad_ctx, "\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36\x36", 44);
SHA1_Init(&cs.opad_ctx);
SHA1_Update(&cs.opad_ctx, opad, 20);
SHA1_Update(&cs.opad_ctx, "\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C", 44);
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE + 1 + 4];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
p = strrchr(ciphertext, '|') + 1;
base64_decode((char*)p, BINARY_ENCODED_SIZE, (char*)out);
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
SHA_CTX ctx;
memcpy(&ctx, &cur_salt->ipad_ctx, sizeof(ctx));
SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
SHA1_Final((unsigned char*) crypt_out[index], &ctx);
memcpy(&ctx, &cur_salt->opad_ctx, sizeof(ctx));
SHA1_Update(&ctx, crypt_out[index], BINARY_SIZE);
SHA1_Final((unsigned char*) crypt_out[index], &ctx);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void known_hosts_set_key(char *key, int index)
{
int len = strlen(key);
memcpy(saved_key[index], key, len);
saved_key[index][len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_known_hosts = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
{ NULL },
{ FORMAT_TAG },
known_hosts_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
known_hosts_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
scheduled-clause.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
void main(int argc, char **argv) {
int i, n=16, chunk, a[n], suma=0;
if(argc<3){
fprintf(stderr,"\nFalta chunk \n");
exit(-1);
}
n = atoi(argv[1]); if (n>200) n=200;
chunk = atoi(argv[2]);
for(i=0; i<n; i++) a[i]=i;
#pragma omp parallel for firstprivate(suma) lastprivate(suma) schedule(dynamic,chunk)
for(i=0;i<n;i++){
suma=suma + a[i];
printf("thread %d suma a[%d]=%d suma=%d\n", omp_get_thread_num(), i, a[i], suma);
}
printf("Fuera de 'parallel for' suma=%d \n",suma);
}
|
stepper.c | #include "stepper.h"
#include <omp.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
// block_size, same for x/y
#ifndef BLOCK_SIZE
#define BLOCK_SIZE ((int) 100)
#endif
//ldoc on
/**
* ## Implementation
*
* ### Structure allocation
*/
central2d_t* central2d_init(float w, float h, int nx, int ny,
int nfield, flux_t flux, speed_t speed,
float cfl)
{
// We extend to a four cell buffer to avoid BC comm on odd time steps
int ng = 4;
central2d_t* sim = (central2d_t*) malloc(sizeof(central2d_t));
sim->nx = nx;
sim->ny = ny;
sim->ng = ng;
sim->nfield = nfield;
sim->dx = w/nx;
sim->dy = h/ny;
sim->flux = flux;
sim->speed = speed;
sim->cfl = cfl;
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
int nc = nx_all * ny_all;
int N = nfield * nc;
sim->u = (float*) malloc((4*N + 6*nx_all)* sizeof(float));
sim->v = sim->u + N;
sim->f = sim->u + 2*N;
sim->g = sim->u + 3*N;
sim->scratch = sim->u + 4*N;
return sim;
}
void central2d_free(central2d_t* sim)
{
free(sim->u);
free(sim);
}
int central2d_offset(central2d_t* sim, int k, int ix, int iy)
{
int nx = sim->nx, ny = sim->ny, ng = sim->ng;
int nx_all = nx + 2*ng;
int ny_all = ny + 2*ng;
return (k*ny_all+(ng+iy))*nx_all+(ng+ix);
}
/**
* ### Boundary conditions
*
* In finite volume methods, boundary conditions are typically applied by
* setting appropriate values in ghost cells. For our framework, we will
* apply periodic boundary conditions; that is, waves that exit one side
* of the domain will enter from the other side.
*
* We apply the conditions by assuming that the cells with coordinates
* `nghost <= ix <= nx+nghost` and `nghost <= iy <= ny+nghost` are
* "canonical", and setting the values for all other cells `(ix,iy)`
* to the corresponding canonical values `(ix+p*nx,iy+q*ny)` for some
* integers `p` and `q`.
*/
static inline
void copy_subgrid(float* restrict dst,
const float* restrict src,
int nx, int ny, int stride)
{
for (int iy = 0; iy < ny; ++iy)
for (int ix = 0; ix < nx; ++ix)
dst[iy*stride+ix] = src[iy*stride+ix];
}
void central2d_periodic(float* restrict u,
int nx, int ny, int ng, int nfield)
{
// Stride and number per field
int s = nx + 2*ng;
int field_stride = (ny+2*ng)*s;
// Offsets of left, right, top, and bottom data blocks and ghost blocks
int l = nx, lg = 0;
int r = ng, rg = nx+ng;
int b = ny*s, bg = 0;
int t = ng*s, tg = (nx+ng)*s;
// Copy data into ghost cells on each side
for (int k = 0; k < nfield; ++k) {
float* uk = u + k*field_stride;
copy_subgrid(uk+lg, uk+l, ng, ny+2*ng, s);
copy_subgrid(uk+rg, uk+r, ng, ny+2*ng, s);
copy_subgrid(uk+tg, uk+t, nx+2*ng, ng, s);
copy_subgrid(uk+bg, uk+b, nx+2*ng, ng, s);
}
}
/**
* ### Derivatives with limiters
*
* In order to advance the time step, we also need to estimate
* derivatives of the fluxes and the solution values at each cell.
* In order to maintain stability, we apply a limiter here.
*
 * The minmod limiter *looks* like it should be expensive to compute,
* since superficially it seems to require a number of branches.
* We do something a little tricky, getting rid of the condition
* on the sign of the arguments using the `copysign` instruction.
* If the compiler does the "right" thing with `max` and `min`
* for floating point arguments (translating them to branch-free
* intrinsic operations), this implementation should be relatively fast.
*/
// Branch-free computation of minmod of two numbers times 2s
static inline
float xmin2s(float s, float a, float b) {
float sa = copysignf(s, a);
float sb = copysignf(s, b);
float abs_a = fabsf(a);
float abs_b = fabsf(b);
float min_abs = (abs_a < abs_b ? abs_a : abs_b);
return (sa+sb) * min_abs;
}
// Limited combined slope estimate
static inline
float limdiff(float um, float u0, float up) {
const float theta = 2.0;
const float quarter = 0.25;
float du1 = u0-um; // Difference to left
float du2 = up-u0; // Difference to right
float duc = up-um; // Twice centered difference
return xmin2s( quarter, xmin2s(theta, du1, du2), duc );
}
// Compute limited derivs
static inline
void limited_deriv1(float* restrict du,
const float* restrict u,
int ncell)
{
for (int i = 0; i < ncell; ++i)
du[i] = limdiff(u[i-1], u[i], u[i+1]);
}
// Compute limited derivs across stride
static inline
void limited_derivk(float* restrict du,
const float* restrict u,
int ncell, int stride)
{
assert(stride > 0);
for (int i = 0; i < ncell; ++i)
du[i] = limdiff(u[i-stride], u[i], u[i+stride]);
}
/**
* ### Advancing a time step
*
* Take one step of the numerical scheme. This consists of two pieces:
* a first-order corrector computed at a half time step, which is used
* to obtain new $F$ and $G$ values; and a corrector step that computes
* the solution at the full step. For full details, we refer to the
* [Jiang and Tadmor paper][jt].
*
* The `compute_step` function takes two arguments: the `io` flag
* which is the time step modulo 2 (0 if even, 1 if odd); and the `dt`
* flag, which actually determines the time step length. We need
* to know the even-vs-odd distinction because the Jiang-Tadmor
* scheme alternates between a primary grid (on even steps) and a
* staggered grid (on odd steps). This means that the data at $(i,j)$
* in an even step and the data at $(i,j)$ in an odd step represent
* values at different locations in space, offset by half a space step
* in each direction. Every other step, we shift things back by one
* mesh cell in each direction, essentially resetting to the primary
* indexing scheme.
*
* We're slightly tricky in the corrector in that we write
* $$
* v(i,j) = (s(i+1,j) + s(i,j)) - (d(i+1,j)-d(i,j))
* $$
* where $s(i,j)$ comprises the $u$ and $x$-derivative terms in the
* update formula, and $d(i,j)$ the $y$-derivative terms. This cuts
* the arithmetic cost a little (not that it's that big to start).
* It also makes it more obvious that we only need four rows worth
* of scratch space.
*/
// Predictor half-step
static
void central2d_predict(float* restrict v,
float* restrict scratch,
const float* restrict u,
const float* restrict f,
const float* restrict g,
float dtcdx2, float dtcdy2,
int nx, int ny, int nfield)
{
float* restrict fx = scratch;
float* restrict gy = scratch+nx;
for (int k = 0; k < nfield; ++k) {
for (int iy = 1; iy < ny-1; ++iy) {
int offset = (k*ny+iy)*nx+1;
limited_deriv1(fx+1, f+offset, nx-2);
limited_derivk(gy+1, g+offset, nx-2, nx);
for (int ix = 1; ix < nx-1; ++ix) {
int offset = (k*ny+iy)*nx+ix;
v[offset] = u[offset] - dtcdx2 * fx[ix] - dtcdy2 * gy[ix];
}
}
}
}
// Corrector
static
void central2d_correct_sd(float* restrict s,
float* restrict d,
const float* restrict ux,
const float* restrict uy,
const float* restrict u,
const float* restrict f,
const float* restrict g,
float dtcdx2, float dtcdy2,
int xlo, int xhi)
{
for (int ix = xlo; ix < xhi; ++ix)
s[ix] =
0.2500f * (u [ix] + u [ix+1]) +
0.0625f * (ux[ix] - ux[ix+1]) +
dtcdx2 * (f [ix] - f [ix+1]);
for (int ix = xlo; ix < xhi; ++ix)
d[ix] =
0.0625f * (uy[ix] + uy[ix+1]) +
dtcdy2 * (g [ix] + g [ix+1]);
}
// Corrector
//
// Assemble the corrector update one row at a time.  For each field,
// s holds the u/x-derivative terms of the update formula and d the
// y-derivative terms (see the comment above central2d_predict); the
// (s0,d0)/(s1,d1) pairs are rotated through the row loop so each
// row's terms are computed once and shared between the two staggered
// cells that use them.  Scratch must provide 6*nx floats.
static
void central2d_correct(float* restrict v,
                       float* restrict scratch,
                       const float* restrict u,
                       const float* restrict f,
                       const float* restrict g,
                       float dtcdx2, float dtcdy2,
                       int xlo, int xhi, int ylo, int yhi,
                       int nx, int ny, int nfield)
{
    assert(0 <= xlo && xlo < xhi && xhi <= nx);
    assert(0 <= ylo && ylo < yhi && yhi <= ny);

    /* Scratch layout: x- and y-derivatives of u, then two rows each
     * of the s and d partial sums. */
    float* restrict ux = scratch;
    float* restrict uy = scratch + nx;
    float* restrict s0 = scratch + 2*nx;
    float* restrict d0 = scratch + 3*nx;
    float* restrict s1 = scratch + 4*nx;
    float* restrict d1 = scratch + 5*nx;

    for (int k = 0; k < nfield; ++k) {
        /* Per-field base pointers. */
        float* restrict vk = v + k*ny*nx;
        const float* restrict uk = u + k*ny*nx;
        const float* restrict fk = f + k*ny*nx;
        const float* restrict gk = g + k*ny*nx;

        /* Prime s1/d1 with the terms for row ylo. */
        limited_deriv1(ux+1, uk+ylo*nx+1, nx-2);
        limited_derivk(uy+1, uk+ylo*nx+1, nx-2, nx);
        central2d_correct_sd(s1, d1, ux, uy,
                             uk + ylo*nx, fk + ylo*nx, gk + ylo*nx,
                             dtcdx2, dtcdy2, xlo, xhi);

        for (int iy = ylo; iy < yhi; ++iy) {
            /* Rotate: last iteration's row-(iy+1) terms become this
             * iteration's row-iy terms. */
            float* tmp;
            tmp = s0; s0 = s1; s1 = tmp;
            tmp = d0; d0 = d1; d1 = tmp;

            /* Compute terms for row iy+1. */
            limited_deriv1(ux+1, uk+(iy+1)*nx+1, nx-2);
            limited_derivk(uy+1, uk+(iy+1)*nx+1, nx-2, nx);
            central2d_correct_sd(s1, d1, ux, uy,
                                 uk + (iy+1)*nx, fk + (iy+1)*nx, gk + (iy+1)*nx,
                                 dtcdx2, dtcdy2, xlo, xhi);

            /* Combine adjacent rows' terms into the corrected value. */
            for (int ix = xlo; ix < xhi; ++ix)
                vk[iy*nx+ix] = (s1[ix]+s0[ix])-(d1[ix]-d0[ix]);
        }
    }
}
/* One predictor/corrector step of the central scheme.  Reads the
 * current state from u and writes the updated state into v, using f
 * and g as flux workspace.  io selects the half step (0: main grid ->
 * staggered grid, 1: staggered -> main), shifting the output window
 * by one cell in each direction.
 *
 * NOTE(review): the `speed` parameter is not used in this function. */
static
void central2d_step(float* restrict u, float* restrict v,
                    float* restrict scratch,
                    float* restrict f,
                    float* restrict g,
                    int io, int nx, int ny, int ng,
                    int nfield, flux_t flux, speed_t speed,
                    float dt, float dx, float dy)
{
    int nx_all = nx + 2*ng;
    int ny_all = ny + 2*ng;

    /* dt/(2*dx) and dt/(2*dy) factors used by the stencils. */
    float dtcdx2 = 0.5 * dt / dx;
    float dtcdy2 = 0.5 * dt / dy;

    /* Fluxes at the current cell values. */
    flux(f, g, u, nx_all * ny_all, nx_all * ny_all);

    central2d_predict(v, scratch, u, f, g, dtcdx2, dtcdy2,
                      nx_all, ny_all, nfield);

    // Flux values of f and g at half step
    for (int iy = 1; iy < ny_all-1; ++iy) {
        int jj = iy*nx_all+1;
        flux(f+jj, g+jj, v+jj, nx_all-2, nx_all * ny_all);
    }

    /* On the odd half step (io == 1) the output is shifted by (1,1)
     * cells so the result lands back on the main grid. */
    central2d_correct(v+io*(nx_all+1), scratch, u, f, g, dtcdx2, dtcdy2,
                      ng-io, nx+ng-io,
                      ng-io, ny+ng-io,
                      nx_all, ny_all, nfield);
}
/**
* ### Advance a fixed time
*
* The `run` method advances from time 0 (initial conditions) to time
* `tfinal`. Note that `run` can be called repeatedly; for example,
* we might want to advance for a period of time, write out a picture,
* advance more, and write another picture. In this sense, `tfinal`
* should be interpreted as an offset from the time represented by
* the simulator at the start of the call, rather than as an absolute time.
*
* We always take an even number of steps so that the solution
* at the end lives on the main grid instead of the staggered grid.
*/
/* Copy a bh-by-bw window of each of nfield fields from the global
 * array u (row stride `stride`, per-field stride `field_stride`) into
 * the block-local array bu (row stride `bstride`, per-field stride
 * `bfield_stride`). */
void do_copy_in(float* restrict u, float* restrict bu,
                int stride, int bstride, int bh, int bw, int nfield, int field_stride, int bfield_stride)
{
    for (int k = 0; k < nfield; ++k){
        float* src = u + k*field_stride;
        float* dst = bu + k*bfield_stride;
        for (int row = 0; row < bh; ++row){
            for (int col = 0; col < bw; ++col)
                dst[row*bstride + col] = src[row*stride + col];
        }
    }
}
/* Inverse of do_copy_in: copy a bh-by-bw window of each of nfield
 * fields from the block-local array bu (row stride `bstride`,
 * per-field stride `bfield_stride`) back into the global array u
 * (row stride `stride`, per-field stride `field_stride`). */
void do_copy_out(float* restrict u, float* restrict bu,
                 int stride, int bstride, int bh, int bw, int nfield, int field_stride, int bfield_stride)
{
    for (int k = 0; k < nfield; ++k){
        float* dst = u + k*field_stride;
        float* src = bu + k*bfield_stride;
        for (int row = 0; row < bh; ++row){
            for (int col = 0; col < bw; ++col)
                dst[row*stride + col] = src[row*bstride + col];
        }
    }
}
/* Advance the solution through time tfinal using a blocked, OpenMP-
 * parallel time stepper.  Each block copies its patch of u (plus
 * ghost cells) into thread-private storage, takes two half steps
 * locally, and writes its interior back into the staging buffer uk,
 * which is published to u once all blocks finish.  Returns the total
 * number of half steps taken.
 *
 * NOTE(review): malloc results are unchecked, and uk is reallocated
 * on every outer iteration -- both could be improved. */
static
int central2d_xrun(float* restrict u, float* restrict v,
                   float* restrict scratch,
                   float* restrict f,
                   float* restrict g,
                   int nx, int ny, int ng,
                   int nfield, flux_t flux, speed_t speed,
                   float tfinal, float dx, float dy, float cfl)
{
    /* Same block count is used for both directions, which assumes
     * nx == ny -- TODO confirm. */
    const int M = (nx%BLOCK_SIZE ? nx/BLOCK_SIZE + 1: nx/BLOCK_SIZE); // number of blocks in x/y direction
    printf("M: %d\n", M);   /* NOTE(review): debug output left in */
    int nstep = 0;
    int nx_all = nx + 2*ng;   /* grid extents including ghost cells */
    int ny_all = ny + 2*ng;
    int nc = nx_all * ny_all; /* cells per field */
    int N = nc * nfield;      /* floats per state array */
    bool done = false;
    float t = 0;
    //int max_threads = omp_get_max_threads();
    while (!done) {
        /* Tiny floor keeps the CFL division finite for a quiescent state. */
        float cxy[2] = {1.0e-15f, 1.0e-15f};
        central2d_periodic(u, nx, ny, ng, nfield);
        speed(cxy, u, nx_all * ny_all, nx_all * ny_all);
        float dt = cfl / fmaxf(cxy[0]/dx, cxy[1]/dy);
        /* Shorten the final pair of half steps to land exactly on tfinal. */
        if (t + 2*dt >= tfinal) {
            dt = (tfinal-t)/2;
            done = true;
        }
        /* Staging buffer written disjointly by the blocks.
         * NOTE(review): only N floats appear to be used, but the size
         * mirrors the 4*N + scratch layout of bu -- confirm intent. */
        float* uk = (float*) malloc((4*N + 6*nx_all)* sizeof(float)); // copy out blocking values to uk
        /**
         * Use openmp to parallize the for loop
         *
         *
         */
        #pragma omp parallel for
        // blocking of the full u
        for (int bj = 0; bj < M; ++bj){
            const int j = bj * BLOCK_SIZE;
            int bny = (j + BLOCK_SIZE > ny ? ny-j : BLOCK_SIZE);   /* rows in this block */
            int bny_all = bny + 2*ng;
            for (int bi = 0; bi < M; ++bi){
                const int i = bi * BLOCK_SIZE;
                int bnx = (i + BLOCK_SIZE > nx ? nx-i : BLOCK_SIZE);   /* cols in this block */
                int bnx_all = bnx + 2*ng;
                /* Pad the block to a square of side bnxy so the step
                 * kernels always see a square patch. */
                int bnxy = bnx_all > bny_all ? bnx_all : bny_all;
                int bnc = bnxy * bnxy;
                int bN = nfield * bnc;
                /* One allocation per block: u, v, f, g states plus
                 * 6*bnxy floats of stencil scratch. */
                float* bu = (float*) malloc((4*bN + 6*bnxy)* sizeof(float));
                float* bv = bu + bN;
                float* bf = bu + 2*bN;
                float* bg = bu + 3*bN;
                float* bscratch = bu + 4*bN;
                // copy u into the blocking u, w/ ghost cells of blocking u
                // offset when the segment is not square
                int jj = bny < bnx ? BLOCK_SIZE - bny : 0;
                int ii = bnx < bny ? BLOCK_SIZE - bnx : 0;
                /* NOTE(review): row-stride argument here is ny_all while
                 * the copy-out below uses nx_all; consistent only when
                 * nx == ny -- confirm. */
                do_copy_in(u + (j-jj)*nx_all + (i-ii), bu, ny_all, bnxy, bnxy, bnxy, nfield, nc, bnc);
                // 2 time step update
                central2d_step(bu, bv, bscratch, bf, bg,
                               0, bnxy-2*ng+4, bnxy-2*ng+4, ng-2,
                               nfield, flux, speed,
                               dt, dx, dy);
                central2d_step(bv, bu, bscratch, bf, bg,
                               1, bnxy-2*ng, bnxy-2*ng, ng,
                               nfield, flux, speed,
                               dt, dx, dy);
                // copy blocking u out of the original u, w/o ghost cells of blocking u
                do_copy_out(uk + (ng+j)*nx_all + ng + i, bu + (ng+jj)*bnxy + ng + ii, nx_all, bnxy, bny, bnx, nfield, nc, bnc);
                free(bu);
            }
        }
        /* Publish the staged interior back into u. */
        do_copy_out(u + ng*nx_all + ng, uk + ng*nx_all + ng, ny_all, ny_all, ny, nx, nfield, nc, nc);
        free(uk);
        t += 2*dt;
        nstep += 2;
    }
    return nstep;
}
/* Public entry point: advance the simulation by tfinal time units
 * (an offset from the simulator's current state, not an absolute
 * time; see the comment block above).  Returns the number of half
 * steps taken, which is always even so the result lives on the
 * main grid. */
int central2d_run(central2d_t* sim, float tfinal)
{
    return central2d_xrun(sim->u, sim->v, sim->scratch,
                          sim->f, sim->g,
                          sim->nx, sim->ny, sim->ng,
                          sim->nfield, sim->flux, sim->speed,
                          tfinal, sim->dx, sim->dy, sim->cfl);
}
|
omp_func.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* Demo program: query and print the OpenMP runtime environment from
 * the master thread of a parallel region.  Returns 0. */
int main(int argc, char *argv[])
{
    (void)argc;   /* command-line arguments are unused */
    (void)argv;
    int nthreads, tid, procs, maxt, inpar, dynamic, nested;
    /* Start parallel region; nthreads and tid are thread-private so
     * each thread can hold its own id. */
    #pragma omp parallel private(nthreads, tid)
    {
        /* Obtain this thread's number. */
        tid = omp_get_thread_num();
        /* Only the master thread (id 0) queries and prints the
         * environment, so the shared result variables are written by
         * a single thread. */
        if (tid == 0)
        {
            printf("Thread %d getting environment info...\n", tid);
            /* Getting environment information. */
            procs = omp_get_num_procs();
            nthreads = omp_get_num_threads();
            maxt = omp_get_max_threads();
            inpar = omp_in_parallel();
            dynamic = omp_get_dynamic();
            nested = omp_get_nested();
            /* Print environment information.  (Bug fix: this used to
             * reference an undeclared identifier `threads` instead of
             * `nthreads`, which failed to compile.) */
            printf("Number of processors = %d\n", procs);
            printf("Number of threads = %d\n", nthreads);
            printf("Max threads = %d\n", maxt);
            printf("In parallel? = %d\n", inpar);
            printf("Dynamic threads enabled? = %d\n", dynamic);
            printf("Nested parallelism enabled? = %d\n", nested);
        }
    }
    return 0;
}
|
OMPResultVector.h | /** \file OMPResultVector.h*/
#ifndef OMP_RESULTVECTOR_H_
#define OMP_RESULTVECTOR_H_
/* One integer quadruplet -- the element type stored in the vector. */
typedef struct int4 {
    int elem[4];
} int4;
//! A vector structure abstraction.
/** The OMPResultVector holds integer quadruplets and has a specific size. It has abstraction of pop and push (thus it works like a stack)
 * and it supports atomic inserts.
 */
typedef struct OMPResultVector
{
    int m_size;          /* number of occupied slots; may transiently
                          * exceed the capacity during a contended
                          * atomic push (see push_backtsrv) */
    int4 m_data[1000];   /* fixed-capacity backing store */
} OMPResultVector;
//! Insert an element to the vector.
/** Pushes a quadruplet to the back of the vector if there is enough space.
 * Not thread-safe; see push_backtsrv for the atomic variant.
 * Returns the index at which the element was stored, or -1 if the
 * vector is full (the size is restored in that case).
 \param v The vector structure
 \param element The element to insert
 */
int push_backrv(OMPResultVector* v, const int4* element) {
    /* Derive the capacity from the array itself instead of repeating
     * the magic constant 1000 from the struct declaration. */
    const int capacity = (int)(sizeof v->m_data / sizeof v->m_data[0]);
    int previousSize = v->m_size;
    (v->m_size)++;
    if (previousSize < capacity) {
        v->m_data[previousSize] = *element;
        return previousSize;
    } else {
        /* Full: undo the speculative size increment. */
        --(v->m_size);
        return -1;
    }
}
//! Insert an element to the vector in a thread-safe way.
/** Pushes a quadruplet to the back of the vector if there is enough space. Uses atomic operations to make it thread-safe.
 * Returns the index at which the element was stored, or -1 if the
 * vector is full (the reservation is atomically rolled back).
 \param v The vector structure
 \param element The element to insert
 */
int push_backtsrv(OMPResultVector* v, const int4* element) {
    /* Derive the capacity from the array itself instead of repeating
     * the magic constant 1000 from the struct declaration. */
    const int capacity = (int)(sizeof v->m_data / sizeof v->m_data[0]);
    int previousSize;
    /* Atomically reserve a slot: fetch the old size and increment. */
    #pragma omp atomic capture
    previousSize = (v->m_size)++;
    if (previousSize < capacity) {
        v->m_data[previousSize] = *element;
        return previousSize;
    } else {
        /* Over-reserved: atomically give the slot back. */
        #pragma omp atomic
        --(v->m_size);
        return -1;
    }
}
//! Extract an element from the vector.
/** Pops the most recently pushed quadruplet from the back of the
 * vector, if any.  Returns 1 on success, -1 if the vector is empty.
 * Not thread-safe.
 \param v The vector structure
 \param cell The pointer to the item in which the extracted value is written
 */
int pop_backrv(OMPResultVector* v, int4* cell) {
    if (v->m_size <= 0)
        return -1;
    int oldSize = (v->m_size)--;
    *cell = v->m_data[oldSize - 1];
    return 1;
}
//! Set the vector to empty.
/** Resets the element count to zero; the stored data is not cleared. */
void resetrv(OMPResultVector* v) {
    v->m_size = 0;
}
//! Get number of elements in vector.
/** Returns m_size as-is; not synchronized with concurrent pushes. */
int sizerv(OMPResultVector* v) {
    return v->m_size;
}
#endif
|
dense_distances.c | /* Generated by Cython 0.15.1 on Fri Jan 6 16:49:30 2012 */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#else
#include <stddef.h> /* For offsetof */
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#if PY_VERSION_HEX < 0x02040000
#define METH_COEXIST 0
#define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type)
#define PyDict_Contains(d,o) PySequence_Contains(d,o)
#endif
#if PY_VERSION_HEX < 0x02050000
typedef int Py_ssize_t;
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
#define PY_FORMAT_SIZE_T ""
#define PyInt_FromSsize_t(z) PyInt_FromLong(z)
#define PyInt_AsSsize_t(o) __Pyx_PyInt_AsInt(o)
#define PyNumber_Index(o) PyNumber_Int(o)
#define PyIndex_Check(o) PyNumber_Check(o)
#define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
#endif
#if PY_VERSION_HEX < 0x02060000
#define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
#define PyVarObject_HEAD_INIT(type, size) \
PyObject_HEAD_INIT(type) size,
#define PyType_Modified(t)
typedef struct {
void *buf;
PyObject *obj;
Py_ssize_t len;
Py_ssize_t itemsize;
int readonly;
int ndim;
char *format;
Py_ssize_t *shape;
Py_ssize_t *strides;
Py_ssize_t *suboffsets;
void *internal;
} Py_buffer;
#define PyBUF_SIMPLE 0
#define PyBUF_WRITABLE 0x0001
#define PyBUF_FORMAT 0x0004
#define PyBUF_ND 0x0008
#define PyBUF_STRIDES (0x0010 | PyBUF_ND)
#define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
#define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
#define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
#endif
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#endif
#if PY_MAJOR_VERSION >= 3
#define Py_TPFLAGS_CHECKTYPES 0
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_VERSION_HEX < 0x02060000
#define PyBytesObject PyStringObject
#define PyBytes_Type PyString_Type
#define PyBytes_Check PyString_Check
#define PyBytes_CheckExact PyString_CheckExact
#define PyBytes_FromString PyString_FromString
#define PyBytes_FromStringAndSize PyString_FromStringAndSize
#define PyBytes_FromFormat PyString_FromFormat
#define PyBytes_DecodeEscape PyString_DecodeEscape
#define PyBytes_AsString PyString_AsString
#define PyBytes_AsStringAndSize PyString_AsStringAndSize
#define PyBytes_Size PyString_Size
#define PyBytes_AS_STRING PyString_AS_STRING
#define PyBytes_GET_SIZE PyString_GET_SIZE
#define PyBytes_Repr PyString_Repr
#define PyBytes_Concat PyString_Concat
#define PyBytes_ConcatAndDel PyString_ConcatAndDel
#endif
#if PY_VERSION_HEX < 0x02060000
#define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type)
#define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type)
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_VERSION_HEX < 0x03020000
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300)
#define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b)
#define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value)
#define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b)
#else
#define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0)))
#define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1)))
#define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1)))
#endif
#if PY_MAJOR_VERSION >= 3
#define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#endif
#if PY_VERSION_HEX < 0x02050000
#define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n)))
#define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
#define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n)))
#else
#define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n))
#define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
#define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n))
#endif
#if PY_VERSION_HEX < 0x02050000
#define __Pyx_NAMESTR(n) ((char *)(n))
#define __Pyx_DOCSTR(n) ((char *)(n))
#else
#define __Pyx_NAMESTR(n) (n)
#define __Pyx_DOCSTR(n) (n)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#define __PYX_HAVE__dense_distances
#define __PYX_HAVE_API__dense_distances
#include "stdio.h"
#include "stdlib.h"
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
/* inline attribute */
#ifndef CYTHON_INLINE
#if defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
/* unused attribute */
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || defined(__INTEL_COMPILER)
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
/* Type Conversion Predeclarations */
#define __Pyx_PyBytes_FromUString(s) PyBytes_FromString((char*)s)
#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) PyBytes_AsString(s))
#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*);
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#ifdef __GNUC__
/* Test for GCC > 2.95 */
#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* __GNUC__ > 2 ... */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ > 2 ... */
#else /* __GNUC__ */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"dense_distances.pyx",
"numpy.pxd",
};
/* "numpy.pxd":719
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "numpy.pxd":720
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "numpy.pxd":721
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "numpy.pxd":722
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "numpy.pxd":726
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "numpy.pxd":727
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "numpy.pxd":728
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "numpy.pxd":729
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "numpy.pxd":733
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "numpy.pxd":734
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "numpy.pxd":743
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "numpy.pxd":744
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "numpy.pxd":745
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "numpy.pxd":747
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "numpy.pxd":748
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "numpy.pxd":749
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "numpy.pxd":751
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "numpy.pxd":752
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "numpy.pxd":754
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "numpy.pxd":755
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "numpy.pxd":756
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* "dense_distances.pyx":12
* DTYPE = np.float
* # corresponding compile-time type
* ctypedef np.float_t DTYPE_t # <<<<<<<<<<<<<<
*
*
*/
typedef __pyx_t_5numpy_float_t __pyx_t_15dense_distances_DTYPE_t;
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
/*--- Type declarations ---*/
/* "numpy.pxd":758
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "numpy.pxd":759
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "numpy.pxd":760
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "numpy.pxd":762
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#define __Pyx_RefNannySetupContext(name) __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif /* CYTHON_REFNANNY */
static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name, PyObject* kw_name); /*proto*/
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/
static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact); /*proto*/
/* Run-time type information about structs used with buffers */
struct __Pyx_StructField_;
typedef struct {
const char* name; /* for error messages only */
struct __Pyx_StructField_* fields;
size_t size; /* sizeof(type) */
char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject */
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1)
#include <string.h>
void __pyx_init_nan(void);
static float __PYX_NAN;
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
Py_ssize_t __Pyx_zeros[] = {0, 0};
Py_ssize_t __Pyx_minusones[] = {-1, -1};
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(_WIN32) && defined(__cplusplus) && CYTHON_CCOMPLEX
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
#if CYTHON_CCOMPLEX
#define __Pyx_c_eqf(a, b) ((a)==(b))
#define __Pyx_c_sumf(a, b) ((a)+(b))
#define __Pyx_c_difff(a, b) ((a)-(b))
#define __Pyx_c_prodf(a, b) ((a)*(b))
#define __Pyx_c_quotf(a, b) ((a)/(b))
#define __Pyx_c_negf(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zerof(z) ((z)==(float)0)
#define __Pyx_c_conjf(z) (::std::conj(z))
#if 1
#define __Pyx_c_absf(z) (::std::abs(z))
#define __Pyx_c_powf(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zerof(z) ((z)==0)
#define __Pyx_c_conjf(z) (conjf(z))
#if 1
#define __Pyx_c_absf(z) (cabsf(z))
#define __Pyx_c_powf(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq(a, b) ((a)==(b))
#define __Pyx_c_sum(a, b) ((a)+(b))
#define __Pyx_c_diff(a, b) ((a)-(b))
#define __Pyx_c_prod(a, b) ((a)*(b))
#define __Pyx_c_quot(a, b) ((a)/(b))
#define __Pyx_c_neg(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero(z) ((z)==(double)0)
#define __Pyx_c_conj(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs(z) (::std::abs(z))
#define __Pyx_c_pow(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero(z) ((z)==0)
#define __Pyx_c_conj(z) (conj(z))
#if 1
#define __Pyx_c_abs(z) (cabs(z))
#define __Pyx_c_pow(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *);
static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *);
static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *);
static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject *);
static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject *);
static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject *);
static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *);
static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *);
static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *);
static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject *);
static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *);
static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *);
static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject *);
static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *);
static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *);
static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *);
static int __Pyx_check_binary_version(void);
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/
static PyObject *__Pyx_ImportModule(const char *name); /*proto*/
static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno,
int __pyx_lineno, const char *__pyx_filename); /*proto*/
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'libc.stdlib' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *); /*proto*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *, PyObject *); /*proto*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *, PyObject *, PyObject *); /*proto*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *, PyObject *, PyObject *, PyObject *, PyObject *); /*proto*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *, PyObject *); /*proto*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *); /*proto*/
/* Module declarations from 'cython.cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'dense_distances' */
static __pyx_t_15dense_distances_DTYPE_t __pyx_f_15dense_distances_chisquare(__pyx_t_15dense_distances_DTYPE_t, __pyx_t_15dense_distances_DTYPE_t); /*proto*/
static __pyx_t_15dense_distances_DTYPE_t __pyx_f_15dense_distances_euclidean(__pyx_t_15dense_distances_DTYPE_t, __pyx_t_15dense_distances_DTYPE_t); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t = { "DTYPE_t", NULL, sizeof(__pyx_t_15dense_distances_DTYPE_t), 'R' };
#define __Pyx_MODULE_NAME "dense_distances"
int __pyx_module_is_main_dense_distances = 0;
/* Implementation of 'dense_distances' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_RuntimeError;
static char __pyx_k_1[] = "Dimension mismatch";
static char __pyx_k_2[] = "Matrix dimension mismatch";
static char __pyx_k_3[] = "ndarray is not C contiguous";
static char __pyx_k_5[] = "ndarray is not Fortran contiguous";
static char __pyx_k_7[] = "Non-native byte order not supported";
static char __pyx_k_9[] = "unknown dtype code in numpy.pxd (%d)";
static char __pyx_k_10[] = "Format string allocated too short, see comment in numpy.pxd";
static char __pyx_k_13[] = "Format string allocated too short.";
static char __pyx_k__B[] = "B";
static char __pyx_k__H[] = "H";
static char __pyx_k__I[] = "I";
static char __pyx_k__L[] = "L";
static char __pyx_k__O[] = "O";
static char __pyx_k__Q[] = "Q";
static char __pyx_k__b[] = "b";
static char __pyx_k__d[] = "d";
static char __pyx_k__f[] = "f";
static char __pyx_k__g[] = "g";
static char __pyx_k__h[] = "h";
static char __pyx_k__i[] = "i";
static char __pyx_k__l[] = "l";
static char __pyx_k__m[] = "m";
static char __pyx_k__q[] = "q";
static char __pyx_k__x[] = "x";
static char __pyx_k__Zd[] = "Zd";
static char __pyx_k__Zf[] = "Zf";
static char __pyx_k__Zg[] = "Zg";
static char __pyx_k__m1[] = "m1";
static char __pyx_k__m2[] = "m2";
static char __pyx_k__np[] = "np";
static char __pyx_k__x1[] = "x1";
static char __pyx_k__x2[] = "x2";
static char __pyx_k__DTYPE[] = "DTYPE";
static char __pyx_k__float[] = "float";
static char __pyx_k__numpy[] = "numpy";
static char __pyx_k__range[] = "range";
static char __pyx_k__zeros[] = "zeros";
static char __pyx_k____main__[] = "__main__";
static char __pyx_k____test__[] = "__test__";
static char __pyx_k__ValueError[] = "ValueError";
static char __pyx_k__RuntimeError[] = "RuntimeError";
static char __pyx_k__m2m_chisquare[] = "m2m_chisquare";
static char __pyx_k__m2m_euclidean[] = "m2m_euclidean";
static char __pyx_k__v2m_chisquare[] = "v2m_chisquare";
static char __pyx_k__v2m_euclidean[] = "v2m_euclidean";
static char __pyx_k__v2v_chisquare[] = "v2v_chisquare";
static char __pyx_k__v2v_euclidean[] = "v2v_euclidean";
static char __pyx_k__gram_chisquare[] = "gram_chisquare";
static char __pyx_k__gram_euclidean[] = "gram_euclidean";
static char __pyx_k__dense_distances[] = "dense_distances";
static PyObject *__pyx_kp_s_1;
static PyObject *__pyx_kp_u_10;
static PyObject *__pyx_kp_u_13;
static PyObject *__pyx_kp_s_2;
static PyObject *__pyx_kp_u_3;
static PyObject *__pyx_kp_u_5;
static PyObject *__pyx_kp_u_7;
static PyObject *__pyx_kp_u_9;
static PyObject *__pyx_n_s__DTYPE;
static PyObject *__pyx_n_s__RuntimeError;
static PyObject *__pyx_n_s__ValueError;
static PyObject *__pyx_n_s____main__;
static PyObject *__pyx_n_s____test__;
static PyObject *__pyx_n_s__dense_distances;
static PyObject *__pyx_n_s__float;
static PyObject *__pyx_n_s__gram_chisquare;
static PyObject *__pyx_n_s__gram_euclidean;
static PyObject *__pyx_n_s__m;
static PyObject *__pyx_n_s__m1;
static PyObject *__pyx_n_s__m2;
static PyObject *__pyx_n_s__m2m_chisquare;
static PyObject *__pyx_n_s__m2m_euclidean;
static PyObject *__pyx_n_s__np;
static PyObject *__pyx_n_s__numpy;
static PyObject *__pyx_n_s__range;
static PyObject *__pyx_n_s__v2m_chisquare;
static PyObject *__pyx_n_s__v2m_euclidean;
static PyObject *__pyx_n_s__v2v_chisquare;
static PyObject *__pyx_n_s__v2v_euclidean;
static PyObject *__pyx_n_s__x;
static PyObject *__pyx_n_s__x1;
static PyObject *__pyx_n_s__x2;
static PyObject *__pyx_n_s__zeros;
static PyObject *__pyx_int_15;
static PyObject *__pyx_k_tuple_4;
static PyObject *__pyx_k_tuple_6;
static PyObject *__pyx_k_tuple_8;
static PyObject *__pyx_k_tuple_11;
static PyObject *__pyx_k_tuple_12;
static PyObject *__pyx_k_tuple_14;
/* "dense_distances.pyx":15
*
*
* cdef DTYPE_t chisquare(DTYPE_t x, DTYPE_t y) nogil: # <<<<<<<<<<<<<<
* cdef DTYPE_t d
* if (x + y) > 0:
*/
/* Per-coordinate symmetric chi-square term (from dense_distances.pyx:15).
 * Returns (x - y)^2 / (x + y) when x + y > 0, and 0.0 otherwise — the
 * guard avoids a division by zero when both coordinates sum to zero. */
static __pyx_t_15dense_distances_DTYPE_t __pyx_f_15dense_distances_chisquare(__pyx_t_15dense_distances_DTYPE_t __pyx_v_x, __pyx_t_15dense_distances_DTYPE_t __pyx_v_y) {
  __pyx_t_15dense_distances_DTYPE_t __pyx_v_total = __pyx_v_x + __pyx_v_y;
  if (__pyx_v_total > 0.0) {
    __pyx_t_15dense_distances_DTYPE_t __pyx_v_delta = __pyx_v_x - __pyx_v_y;
    return (__pyx_v_delta * __pyx_v_delta) / __pyx_v_total;
  }
  /* degenerate bin pair: define the distance contribution as zero */
  return 0.0;
}
/* "dense_distances.pyx":24
*
*
* cdef DTYPE_t euclidean(DTYPE_t x, DTYPE_t y) nogil: # <<<<<<<<<<<<<<
* return (x - y) * (x - y)
*
*/
/* Per-coordinate squared-Euclidean term (from dense_distances.pyx:24):
 * simply (x - y)^2. */
static __pyx_t_15dense_distances_DTYPE_t __pyx_f_15dense_distances_euclidean(__pyx_t_15dense_distances_DTYPE_t __pyx_v_x, __pyx_t_15dense_distances_DTYPE_t __pyx_v_y) {
  const __pyx_t_15dense_distances_DTYPE_t __pyx_v_delta = __pyx_v_x - __pyx_v_y;
  return __pyx_v_delta * __pyx_v_delta;
}
/* "dense_distances.pyx":28
*
*
* def v2v_euclidean(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2): # <<<<<<<<<<<<<<
* cdef int d = len(x1)
* cdef int _d = len(x2)
*/
/* Python-callable wrapper for dense_distances.v2v_euclidean(x1, x2).
 * Accepts two 1-D NumPy arrays of DTYPE_t (positionally or by keyword),
 * acquires typed buffer views on both, asserts equal lengths, accumulates
 * euclidean(x1[i], x2[i]) over all i, and returns the sum as a Python float.
 * Both buffers are released on every exit path; on error the pending
 * exception is saved and restored around the buffer release. */
static PyObject *__pyx_pf_15dense_distances_v2v_euclidean(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15dense_distances_v2v_euclidean = {__Pyx_NAMESTR("v2v_euclidean"), (PyCFunction)__pyx_pf_15dense_distances_v2v_euclidean, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)};
static PyObject *__pyx_pf_15dense_distances_v2v_euclidean(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_x1 = 0;
PyArrayObject *__pyx_v_x2 = 0;
int __pyx_v_d;
int __pyx_v__d;
int __pyx_v_i;
__pyx_t_15dense_distances_DTYPE_t __pyx_v_dist;
Py_buffer __pyx_bstruct_x1;
Py_ssize_t __pyx_bstride_0_x1 = 0;
Py_ssize_t __pyx_bshape_0_x1 = 0;
Py_buffer __pyx_bstruct_x2;
Py_ssize_t __pyx_bstride_0_x2 = 0;
Py_ssize_t __pyx_bshape_0_x2 = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x1,&__pyx_n_s__x2,0};
__Pyx_RefNannySetupContext("v2v_euclidean");
__pyx_self = __pyx_self;
/* --- argument unpacking (METH_VARARGS|METH_KEYWORDS): exactly 2 args --- */
{
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 0:
values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x1);
if (likely(values[0])) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x2);
if (likely(values[1])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("v2v_euclidean", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "v2v_euclidean") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_x1 = ((PyArrayObject *)values[0]);
__pyx_v_x2 = ((PyArrayObject *)values[1]);
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("v2v_euclidean", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("dense_distances.v2v_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* --- type checks and buffer acquisition (1-D strided DTYPE_t views) --- */
__pyx_bstruct_x1.buf = NULL;
__pyx_bstruct_x2.buf = NULL;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x1), __pyx_ptype_5numpy_ndarray, 1, "x1", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x2), __pyx_ptype_5numpy_ndarray, 1, "x2", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x1, (PyObject*)__pyx_v_x1, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_bstride_0_x1 = __pyx_bstruct_x1.strides[0];
__pyx_bshape_0_x1 = __pyx_bstruct_x1.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x2, (PyObject*)__pyx_v_x2, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_bstride_0_x2 = __pyx_bstruct_x2.strides[0];
__pyx_bshape_0_x2 = __pyx_bstruct_x2.shape[0];
/* "dense_distances.pyx":29
 *
 * def v2v_euclidean(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2):
 *     cdef int d = len(x1)             # <<<<<<<<<<<<<<
 *     cdef int _d = len(x2)
 *     assert d == _d, "Dimension mismatch"
 */
__pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x1)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_d = __pyx_t_1;
/* "dense_distances.pyx":30
 * def v2v_euclidean(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2):
 *     cdef int d = len(x1)
 *     cdef int _d = len(x2)             # <<<<<<<<<<<<<<
 *     assert d == _d, "Dimension mismatch"
 *     cdef int i
 */
__pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x2)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v__d = __pyx_t_1;
/* length check: raises AssertionError("Dimension mismatch") unless equal */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!(__pyx_v_d == __pyx_v__d))) {
PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_kp_s_1));
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#endif
/* --- accumulation loop: dist += euclidean(x1[i], x2[i]) for i in range(d) --- */
__pyx_v_dist = 0.0;
__pyx_t_2 = __pyx_v_d;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* negative-index wrap is generated boilerplate; i >= 0 here */
__pyx_t_4 = __pyx_v_i;
if (__pyx_t_4 < 0) __pyx_t_4 += __pyx_bshape_0_x1;
__pyx_t_5 = __pyx_v_i;
if (__pyx_t_5 < 0) __pyx_t_5 += __pyx_bshape_0_x2;
__pyx_v_dist = (__pyx_v_dist + __pyx_f_15dense_distances_euclidean((*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_x1.buf, __pyx_t_4, __pyx_bstride_0_x1)), (*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_x2.buf, __pyx_t_5, __pyx_bstride_0_x2))));
}
/* box the accumulated C double as the Python return value */
__Pyx_XDECREF(__pyx_r);
__pyx_t_6 = PyFloat_FromDouble(__pyx_v_dist); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* error path: release buffers with the pending exception saved/restored */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_x1);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_x2);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("dense_distances.v2v_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_x1);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_x2);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "dense_distances.pyx":39
*
*
* def v2v_chisquare(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2): # <<<<<<<<<<<<<<
* cdef int d = len(x1)
* cdef int _d = len(x2)
*/
/* Python-callable wrapper for dense_distances.v2v_chisquare(x1, x2).
 * Identical structure to the v2v_euclidean wrapper but accumulates the
 * per-coordinate chisquare() term instead: parses two 1-D DTYPE_t arrays,
 * checks equal lengths, sums chisquare(x1[i], x2[i]) over i, and returns a
 * Python float. Buffers are released on all exit paths. */
static PyObject *__pyx_pf_15dense_distances_1v2v_chisquare(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15dense_distances_1v2v_chisquare = {__Pyx_NAMESTR("v2v_chisquare"), (PyCFunction)__pyx_pf_15dense_distances_1v2v_chisquare, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)};
static PyObject *__pyx_pf_15dense_distances_1v2v_chisquare(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_x1 = 0;
PyArrayObject *__pyx_v_x2 = 0;
int __pyx_v_d;
int __pyx_v__d;
int __pyx_v_i;
__pyx_t_15dense_distances_DTYPE_t __pyx_v_dist;
Py_buffer __pyx_bstruct_x1;
Py_ssize_t __pyx_bstride_0_x1 = 0;
Py_ssize_t __pyx_bshape_0_x1 = 0;
Py_buffer __pyx_bstruct_x2;
Py_ssize_t __pyx_bstride_0_x2 = 0;
Py_ssize_t __pyx_bshape_0_x2 = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x1,&__pyx_n_s__x2,0};
__Pyx_RefNannySetupContext("v2v_chisquare");
__pyx_self = __pyx_self;
/* --- argument unpacking (METH_VARARGS|METH_KEYWORDS): exactly 2 args --- */
{
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 0:
values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x1);
if (likely(values[0])) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x2);
if (likely(values[1])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("v2v_chisquare", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "v2v_chisquare") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_x1 = ((PyArrayObject *)values[0]);
__pyx_v_x2 = ((PyArrayObject *)values[1]);
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("v2v_chisquare", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("dense_distances.v2v_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* --- type checks and buffer acquisition (1-D strided DTYPE_t views) --- */
__pyx_bstruct_x1.buf = NULL;
__pyx_bstruct_x2.buf = NULL;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x1), __pyx_ptype_5numpy_ndarray, 1, "x1", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x2), __pyx_ptype_5numpy_ndarray, 1, "x2", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x1, (PyObject*)__pyx_v_x1, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_bstride_0_x1 = __pyx_bstruct_x1.strides[0];
__pyx_bshape_0_x1 = __pyx_bstruct_x1.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x2, (PyObject*)__pyx_v_x2, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_bstride_0_x2 = __pyx_bstruct_x2.strides[0];
__pyx_bshape_0_x2 = __pyx_bstruct_x2.shape[0];
/* "dense_distances.pyx":40
 *
 * def v2v_chisquare(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2):
 *     cdef int d = len(x1)             # <<<<<<<<<<<<<<
 *     cdef int _d = len(x2)
 *     assert d == _d, "Dimension mismatch"
 */
__pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x1)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_d = __pyx_t_1;
/* "dense_distances.pyx":41
 * def v2v_chisquare(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2):
 *     cdef int d = len(x1)
 *     cdef int _d = len(x2)             # <<<<<<<<<<<<<<
 *     assert d == _d, "Dimension mismatch"
 *     cdef int i
 */
__pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x2)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v__d = __pyx_t_1;
/* length check: raises AssertionError("Dimension mismatch") unless equal */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!(__pyx_v_d == __pyx_v__d))) {
PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_kp_s_1));
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#endif
/* --- accumulation loop: dist += chisquare(x1[i], x2[i]) for i in range(d) --- */
__pyx_v_dist = 0.0;
__pyx_t_2 = __pyx_v_d;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* negative-index wrap is generated boilerplate; i >= 0 here */
__pyx_t_4 = __pyx_v_i;
if (__pyx_t_4 < 0) __pyx_t_4 += __pyx_bshape_0_x1;
__pyx_t_5 = __pyx_v_i;
if (__pyx_t_5 < 0) __pyx_t_5 += __pyx_bshape_0_x2;
__pyx_v_dist = (__pyx_v_dist + __pyx_f_15dense_distances_chisquare((*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_x1.buf, __pyx_t_4, __pyx_bstride_0_x1)), (*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_x2.buf, __pyx_t_5, __pyx_bstride_0_x2))));
}
/* box the accumulated C double as the Python return value */
__Pyx_XDECREF(__pyx_r);
__pyx_t_6 = PyFloat_FromDouble(__pyx_v_dist); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* error path: release buffers with the pending exception saved/restored */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_x1);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_x2);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("dense_distances.v2v_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_x1);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_x2);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "dense_distances.pyx":50
*
*
* def v2m_euclidean(np.ndarray[DTYPE_t, ndim=1] x, np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<<
* """ Euclidean distances between vector x and row vectors in m
* """
*/
static PyObject *__pyx_pf_15dense_distances_2v2m_euclidean(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_15dense_distances_2v2m_euclidean[] = " Euclidean distances between vector x and row vectors in m\n ";
static PyMethodDef __pyx_mdef_15dense_distances_2v2m_euclidean = {__Pyx_NAMESTR("v2m_euclidean"), (PyCFunction)__pyx_pf_15dense_distances_2v2m_euclidean, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_15dense_distances_2v2m_euclidean)};
static PyObject *__pyx_pf_15dense_distances_2v2m_euclidean(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_x = 0;
PyArrayObject *__pyx_v_m = 0;
int __pyx_v_d;
int __pyx_v_n;
int __pyx_v__d;
PyArrayObject *__pyx_v_res = 0;
int __pyx_v_i;
int __pyx_v_j;
Py_buffer __pyx_bstruct_res;
Py_ssize_t __pyx_bstride_0_res = 0;
Py_ssize_t __pyx_bshape_0_res = 0;
Py_buffer __pyx_bstruct_m;
Py_ssize_t __pyx_bstride_0_m = 0;
Py_ssize_t __pyx_bstride_1_m = 0;
Py_ssize_t __pyx_bshape_0_m = 0;
Py_ssize_t __pyx_bshape_1_m = 0;
Py_buffer __pyx_bstruct_x;
Py_ssize_t __pyx_bstride_0_x = 0;
Py_ssize_t __pyx_bshape_0_x = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyArrayObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__m,0};
__Pyx_RefNannySetupContext("v2m_euclidean");
__pyx_self = __pyx_self;
{
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 0:
values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x);
if (likely(values[0])) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m);
if (likely(values[1])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("v2m_euclidean", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "v2m_euclidean") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_x = ((PyArrayObject *)values[0]);
__pyx_v_m = ((PyArrayObject *)values[1]);
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("v2m_euclidean", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("dense_distances.v2m_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_bstruct_res.buf = NULL;
__pyx_bstruct_x.buf = NULL;
__pyx_bstruct_m.buf = NULL;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m), __pyx_ptype_5numpy_ndarray, 1, "m", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_bstride_0_x = __pyx_bstruct_x.strides[0];
__pyx_bshape_0_x = __pyx_bstruct_x.shape[0];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m, (PyObject*)__pyx_v_m, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_bstride_0_m = __pyx_bstruct_m.strides[0]; __pyx_bstride_1_m = __pyx_bstruct_m.strides[1];
__pyx_bshape_0_m = __pyx_bstruct_m.shape[0]; __pyx_bshape_1_m = __pyx_bstruct_m.shape[1];
/* "dense_distances.pyx":53
* """ Euclidean distances between vector x and row vectors in m
* """
* cdef int d = len(x) # <<<<<<<<<<<<<<
* cdef int n = m.shape[0]
* cdef int _d = m.shape[1]
*/
__pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_d = __pyx_t_1;
/* "dense_distances.pyx":54
* """
* cdef int d = len(x)
* cdef int n = m.shape[0] # <<<<<<<<<<<<<<
* cdef int _d = m.shape[1]
* assert _d == d, "Matrix dimension mismatch"
*/
__pyx_v_n = (__pyx_v_m->dimensions[0]);
/* "dense_distances.pyx":55
* cdef int d = len(x)
* cdef int n = m.shape[0]
* cdef int _d = m.shape[1] # <<<<<<<<<<<<<<
* assert _d == d, "Matrix dimension mismatch"
* cdef np.ndarray[DTYPE_t, ndim=1] res = np.zeros((n,))
*/
__pyx_v__d = (__pyx_v_m->dimensions[1]);
/* "dense_distances.pyx":56
* cdef int n = m.shape[0]
* cdef int _d = m.shape[1]
* assert _d == d, "Matrix dimension mismatch" # <<<<<<<<<<<<<<
* cdef np.ndarray[DTYPE_t, ndim=1] res = np.zeros((n,))
* cdef int i
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!(__pyx_v__d == __pyx_v_d))) {
PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_kp_s_2));
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#endif
/* "dense_distances.pyx":57
* cdef int _d = m.shape[1]
* assert _d == d, "Matrix dimension mismatch"
* cdef np.ndarray[DTYPE_t, ndim=1] res = np.zeros((n,)) # <<<<<<<<<<<<<<
* cdef int i
* cdef int j
*/
__pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_4));
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_2));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_4));
__Pyx_GIVEREF(((PyObject *)__pyx_t_4));
__pyx_t_4 = 0;
__pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_5 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_res, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
__pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_res.buf = NULL;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 57; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
} else {__pyx_bstride_0_res = __pyx_bstruct_res.strides[0];
__pyx_bshape_0_res = __pyx_bstruct_res.shape[0];
}
}
__pyx_t_5 = 0;
__pyx_v_res = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "dense_distances.pyx":60
* cdef int i
* cdef int j
* for i in range(n): # <<<<<<<<<<<<<<
* for j in range(d):
* res[i] += euclidean(m[i, j], x[j])
*/
__pyx_t_6 = __pyx_v_n;
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) {
__pyx_v_i = __pyx_t_7;
/* "dense_distances.pyx":61
* cdef int j
* for i in range(n):
* for j in range(d): # <<<<<<<<<<<<<<
* res[i] += euclidean(m[i, j], x[j])
* return res
*/
__pyx_t_8 = __pyx_v_d;
for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) {
__pyx_v_j = __pyx_t_9;
/* "dense_distances.pyx":62
* for i in range(n):
* for j in range(d):
* res[i] += euclidean(m[i, j], x[j]) # <<<<<<<<<<<<<<
* return res
*
*/
__pyx_t_10 = __pyx_v_i;
__pyx_t_11 = __pyx_v_j;
if (__pyx_t_10 < 0) __pyx_t_10 += __pyx_bshape_0_m;
if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_bshape_1_m;
__pyx_t_12 = __pyx_v_j;
if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_bshape_0_x;
__pyx_t_13 = __pyx_v_i;
if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_res;
*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_13, __pyx_bstride_0_res) += __pyx_f_15dense_distances_euclidean((*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m.buf, __pyx_t_10, __pyx_bstride_0_m, __pyx_t_11, __pyx_bstride_1_m)), (*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_x.buf, __pyx_t_12, __pyx_bstride_0_x)));
}
}
/* "dense_distances.pyx":63
* for j in range(d):
* res[i] += euclidean(m[i, j], x[j])
* return res # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_res));
__pyx_r = ((PyObject *)__pyx_v_res);
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_res);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_m);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_x);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("dense_distances.v2m_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_res);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_m);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_x);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_res);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "dense_distances.pyx":66
*
*
* def v2m_chisquare(np.ndarray[DTYPE_t, ndim=1] x, np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<<
* """ Chisquare distances between vector x and row vectors in m
* """
*/
/* Cython-generated wrapper for dense_distances.pyx:v2m_chisquare(x, m).
 * Accumulates chisquare(m[i, j], x[j]) over j into res[i] for every row i of m,
 * returning a new float64 ndarray `res` of length m.shape[0].
 * NOTE(review): machine-generated code — fix defects in the .pyx source and
 * regenerate with Cython rather than hand-editing this file. */
static PyObject *__pyx_pf_15dense_distances_3v2m_chisquare(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_15dense_distances_3v2m_chisquare[] = " Chisquare distances between vector x and row vectors in m\n ";
static PyMethodDef __pyx_mdef_15dense_distances_3v2m_chisquare = {__Pyx_NAMESTR("v2m_chisquare"), (PyCFunction)__pyx_pf_15dense_distances_3v2m_chisquare, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_15dense_distances_3v2m_chisquare)};
static PyObject *__pyx_pf_15dense_distances_3v2m_chisquare(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_x = 0;
  PyArrayObject *__pyx_v_m = 0;
  int __pyx_v_d;
  int __pyx_v_n;
  int __pyx_v__d;
  PyArrayObject *__pyx_v_res = 0;
  int __pyx_v_i;
  int __pyx_v_j;
  /* Py_buffer structs + cached strides/shapes for the typed ndarray
   * arguments and the result; filled by __Pyx_GetBufferAndValidate below. */
  Py_buffer __pyx_bstruct_res;
  Py_ssize_t __pyx_bstride_0_res = 0;
  Py_ssize_t __pyx_bshape_0_res = 0;
  Py_buffer __pyx_bstruct_m;
  Py_ssize_t __pyx_bstride_0_m = 0;
  Py_ssize_t __pyx_bstride_1_m = 0;
  Py_ssize_t __pyx_bshape_0_m = 0;
  Py_ssize_t __pyx_bshape_1_m = 0;
  Py_buffer __pyx_bstruct_x;
  Py_ssize_t __pyx_bstride_0_x = 0;
  Py_ssize_t __pyx_bshape_0_x = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyArrayObject *__pyx_t_5 = NULL;
  int __pyx_t_6;
  int __pyx_t_7;
  int __pyx_t_8;
  int __pyx_t_9;
  int __pyx_t_10;
  int __pyx_t_11;
  int __pyx_t_12;
  int __pyx_t_13;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__x,&__pyx_n_s__m,0};
  __Pyx_RefNannySetupContext("v2m_chisquare");
  __pyx_self = __pyx_self;
  /* Unpack the two required arguments (x, m) from positional args and/or
   * keywords; any mismatch jumps to __pyx_L5_argtuple_error / __pyx_L3_error. */
  {
    PyObject* values[2] = {0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case 0:
        values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__x);
        if (likely(values[0])) kw_args--;
        else goto __pyx_L5_argtuple_error;
        case 1:
        values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m);
        if (likely(values[1])) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("v2m_chisquare", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "v2m_chisquare") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
    }
    __pyx_v_x = ((PyArrayObject *)values[0]);
    __pyx_v_m = ((PyArrayObject *)values[1]);
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("v2m_chisquare", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("dense_distances.v2m_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* NULL buf markers let the cleanup paths safely release only the buffers
   * that were actually acquired. */
  __pyx_bstruct_res.buf = NULL;
  __pyx_bstruct_x.buf = NULL;
  __pyx_bstruct_m.buf = NULL;
  /* Type-check the ndarray arguments, then acquire PEP 3118 buffer views:
   * x as 1-D DTYPE_t, m as 2-D DTYPE_t (strided access, format validated). */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 1, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m), __pyx_ptype_5numpy_ndarray, 1, "m", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_x, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_bstride_0_x = __pyx_bstruct_x.strides[0];
  __pyx_bshape_0_x = __pyx_bstruct_x.shape[0];
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m, (PyObject*)__pyx_v_m, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_bstride_0_m = __pyx_bstruct_m.strides[0]; __pyx_bstride_1_m = __pyx_bstruct_m.strides[1];
  __pyx_bshape_0_m = __pyx_bstruct_m.shape[0]; __pyx_bshape_1_m = __pyx_bstruct_m.shape[1];
  /* "dense_distances.pyx":69
 * """ Chisquare distances between vector x and row vectors in m
 * """
 * cdef int d = len(x) # <<<<<<<<<<<<<<
 * cdef int n = m.shape[0]
 * cdef int _d = m.shape[1]
 */
  __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_v_d = __pyx_t_1;
  /* "dense_distances.pyx":70
 * """
 * cdef int d = len(x)
 * cdef int n = m.shape[0] # <<<<<<<<<<<<<<
 * cdef int _d = m.shape[1]
 * assert _d == d, "Matrix dimension mismatch"
 */
  __pyx_v_n = (__pyx_v_m->dimensions[0]);
  /* "dense_distances.pyx":71
 * cdef int d = len(x)
 * cdef int n = m.shape[0]
 * cdef int _d = m.shape[1] # <<<<<<<<<<<<<<
 * assert _d == d, "Matrix dimension mismatch"
 * cdef np.ndarray[DTYPE_t, ndim=1] res = np.zeros((n,))
 */
  __pyx_v__d = (__pyx_v_m->dimensions[1]);
  /* "dense_distances.pyx":72
 * cdef int n = m.shape[0]
 * cdef int _d = m.shape[1]
 * assert _d == d, "Matrix dimension mismatch" # <<<<<<<<<<<<<<
 * cdef np.ndarray[DTYPE_t, ndim=1] res = np.zeros((n,))
 * cdef int i
 */
  /* Dimension-mismatch assert; compiled out when CYTHON_WITHOUT_ASSERTIONS
   * is defined, in which case a mismatched x/m silently reads out of bounds. */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(!(__pyx_v__d == __pyx_v_d))) {
    PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_kp_s_2));
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 72; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  #endif
  /* "dense_distances.pyx":73
 * cdef int _d = m.shape[1]
 * assert _d == d, "Matrix dimension mismatch"
 * cdef np.ndarray[DTYPE_t, ndim=1] res = np.zeros((n,)) # <<<<<<<<<<<<<<
 * cdef int i
 * cdef int j
 */
  /* Build the tuple ((n,),) and call np.zeros to allocate the result vector. */
  __pyx_t_2 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(((PyObject *)__pyx_t_4));
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __pyx_t_2 = 0;
  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(((PyObject *)__pyx_t_2));
  PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_t_4));
  __Pyx_GIVEREF(((PyObject *)__pyx_t_4));
  __pyx_t_4 = 0;
  __pyx_t_4 = PyObject_Call(__pyx_t_3, ((PyObject *)__pyx_t_2), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
  if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_5 = ((PyArrayObject *)__pyx_t_4);
  /* Acquire a writable 1-D buffer view on the freshly created res array. */
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_res, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {
      __pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_res.buf = NULL;
      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    } else {__pyx_bstride_0_res = __pyx_bstruct_res.strides[0];
      __pyx_bshape_0_res = __pyx_bstruct_res.shape[0];
    }
  }
  __pyx_t_5 = 0;
  __pyx_v_res = ((PyArrayObject *)__pyx_t_4);
  __pyx_t_4 = 0;
  /* "dense_distances.pyx":76
 * cdef int i
 * cdef int j
 * for i in range(n): # <<<<<<<<<<<<<<
 * for j in range(d):
 * res[i] += chisquare(m[i, j], x[j])
 */
  __pyx_t_6 = __pyx_v_n;
  for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) {
    __pyx_v_i = __pyx_t_7;
    /* "dense_distances.pyx":77
 * cdef int j
 * for i in range(n):
 * for j in range(d): # <<<<<<<<<<<<<<
 * res[i] += chisquare(m[i, j], x[j])
 * return res
 */
    __pyx_t_8 = __pyx_v_d;
    for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) {
      __pyx_v_j = __pyx_t_9;
      /* "dense_distances.pyx":78
 * for i in range(n):
 * for j in range(d):
 * res[i] += chisquare(m[i, j], x[j]) # <<<<<<<<<<<<<<
 * return res
 *
 */
      /* Negative-index wraparound adjustments (i/j are always >= 0 here,
       * so these are no-ops kept by the code generator). */
      __pyx_t_10 = __pyx_v_i;
      __pyx_t_11 = __pyx_v_j;
      if (__pyx_t_10 < 0) __pyx_t_10 += __pyx_bshape_0_m;
      if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_bshape_1_m;
      __pyx_t_12 = __pyx_v_j;
      if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_bshape_0_x;
      __pyx_t_13 = __pyx_v_i;
      if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_res;
      *__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_13, __pyx_bstride_0_res) += __pyx_f_15dense_distances_chisquare((*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m.buf, __pyx_t_10, __pyx_bstride_0_m, __pyx_t_11, __pyx_bstride_1_m)), (*__Pyx_BufPtrStrided1d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_x.buf, __pyx_t_12, __pyx_bstride_0_x)));
    }
  }
  /* "dense_distances.pyx":79
 * for j in range(d):
 * res[i] += chisquare(m[i, j], x[j])
 * return res # <<<<<<<<<<<<<<
 *
 *
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(((PyObject *)__pyx_v_res));
  __pyx_r = ((PyObject *)__pyx_v_res);
  goto __pyx_L0;
  /* Unreachable fallthrough emitted by the code generator. */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  /* Error path: save the pending exception, release all acquired buffer
   * views (releasing can trigger Python code), then restore the exception
   * and add this frame to the traceback. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
    __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res);
    __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m);
    __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x);
    __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
  __Pyx_AddTraceback("dense_distances.v2m_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  goto __pyx_L2;
  /* Success path: release buffer views without an exception in flight. */
  __pyx_L0:;
  __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res);
  __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m);
  __Pyx_SafeReleaseBuffer(&__pyx_bstruct_x);
  __pyx_L2:;
  __Pyx_XDECREF((PyObject *)__pyx_v_res);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "dense_distances.pyx":82
*
*
* def m2m_euclidean(np.ndarray[DTYPE_t, ndim=2] m1, np.ndarray[DTYPE_t, ndim=2] m2): # <<<<<<<<<<<<<<
* """ Parallelized Euclidean distances between row vectors in m1 and m2
* """
*/
/* Cython-generated wrapper for dense_distances.pyx:m2m_euclidean(m1, m2).
 * Accumulates euclidean(m1[i, k], m2[j, k]) over k into res[i, j] for all
 * row pairs, returning a new float64 ndarray of shape (m1.shape[0],
 * m2.shape[0]).  The outer i-loop is a Cython prange: the GIL is released
 * and, when compiled with OpenMP, rows are distributed across threads.
 * NOTE(review): machine-generated code — fix defects in the .pyx source and
 * regenerate with Cython rather than hand-editing this file. */
static PyObject *__pyx_pf_15dense_distances_4m2m_euclidean(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_15dense_distances_4m2m_euclidean[] = " Parallelized Euclidean distances between row vectors in m1 and m2\n ";
static PyMethodDef __pyx_mdef_15dense_distances_4m2m_euclidean = {__Pyx_NAMESTR("m2m_euclidean"), (PyCFunction)__pyx_pf_15dense_distances_4m2m_euclidean, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_15dense_distances_4m2m_euclidean)};
static PyObject *__pyx_pf_15dense_distances_4m2m_euclidean(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyArrayObject *__pyx_v_m1 = 0;
  PyArrayObject *__pyx_v_m2 = 0;
  int __pyx_v_n1;
  int __pyx_v_d;
  int __pyx_v_n2;
  int __pyx_v__d;
  PyArrayObject *__pyx_v_res = 0;
  int __pyx_v_i;
  int __pyx_v_j;
  int __pyx_v_k;
  /* Py_buffer structs + cached strides/shapes for the typed ndarray
   * arguments and the 2-D result; filled by __Pyx_GetBufferAndValidate. */
  Py_buffer __pyx_bstruct_res;
  Py_ssize_t __pyx_bstride_0_res = 0;
  Py_ssize_t __pyx_bstride_1_res = 0;
  Py_ssize_t __pyx_bshape_0_res = 0;
  Py_ssize_t __pyx_bshape_1_res = 0;
  Py_buffer __pyx_bstruct_m1;
  Py_ssize_t __pyx_bstride_0_m1 = 0;
  Py_ssize_t __pyx_bstride_1_m1 = 0;
  Py_ssize_t __pyx_bshape_0_m1 = 0;
  Py_ssize_t __pyx_bshape_1_m1 = 0;
  Py_buffer __pyx_bstruct_m2;
  Py_ssize_t __pyx_bstride_0_m2 = 0;
  Py_ssize_t __pyx_bstride_1_m2 = 0;
  Py_ssize_t __pyx_bshape_0_m2 = 0;
  Py_ssize_t __pyx_bshape_1_m2 = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyArrayObject *__pyx_t_5 = NULL;
  int __pyx_t_6;
  int __pyx_t_7;
  int __pyx_t_8;
  int __pyx_t_9;
  int __pyx_t_10;
  int __pyx_t_11;
  int __pyx_t_12;
  int __pyx_t_13;
  int __pyx_t_14;
  int __pyx_t_15;
  int __pyx_t_16;
  int __pyx_t_17;
  int __pyx_t_18;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__m1,&__pyx_n_s__m2,0};
  __Pyx_RefNannySetupContext("m2m_euclidean");
  __pyx_self = __pyx_self;
  /* Unpack the two required arguments (m1, m2) from positional args and/or
   * keywords; any mismatch jumps to __pyx_L5_argtuple_error / __pyx_L3_error. */
  {
    PyObject* values[2] = {0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case 0:
        values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m1);
        if (likely(values[0])) kw_args--;
        else goto __pyx_L5_argtuple_error;
        case 1:
        values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m2);
        if (likely(values[1])) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("m2m_euclidean", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "m2m_euclidean") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
    }
    __pyx_v_m1 = ((PyArrayObject *)values[0]);
    __pyx_v_m2 = ((PyArrayObject *)values[1]);
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("m2m_euclidean", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("dense_distances.m2m_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* NULL buf markers let the cleanup paths safely release only the buffers
   * that were actually acquired. */
  __pyx_bstruct_res.buf = NULL;
  __pyx_bstruct_m1.buf = NULL;
  __pyx_bstruct_m2.buf = NULL;
  /* Type-check the ndarray arguments, then acquire PEP 3118 buffer views:
   * both m1 and m2 as 2-D DTYPE_t (strided access, format validated). */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m1), __pyx_ptype_5numpy_ndarray, 1, "m1", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m2), __pyx_ptype_5numpy_ndarray, 1, "m2", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m1, (PyObject*)__pyx_v_m1, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_bstride_0_m1 = __pyx_bstruct_m1.strides[0]; __pyx_bstride_1_m1 = __pyx_bstruct_m1.strides[1];
  __pyx_bshape_0_m1 = __pyx_bstruct_m1.shape[0]; __pyx_bshape_1_m1 = __pyx_bstruct_m1.shape[1];
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m2, (PyObject*)__pyx_v_m2, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_bstride_0_m2 = __pyx_bstruct_m2.strides[0]; __pyx_bstride_1_m2 = __pyx_bstruct_m2.strides[1];
  __pyx_bshape_0_m2 = __pyx_bstruct_m2.shape[0]; __pyx_bshape_1_m2 = __pyx_bstruct_m2.shape[1];
  /* "dense_distances.pyx":85
 * """ Parallelized Euclidean distances between row vectors in m1 and m2
 * """
 * cdef int n1 = m1.shape[0] # <<<<<<<<<<<<<<
 * cdef int d = m1.shape[1]
 * cdef int n2 = m2.shape[0]
 */
  __pyx_v_n1 = (__pyx_v_m1->dimensions[0]);
  /* "dense_distances.pyx":86
 * """
 * cdef int n1 = m1.shape[0]
 * cdef int d = m1.shape[1] # <<<<<<<<<<<<<<
 * cdef int n2 = m2.shape[0]
 * cdef int _d = m2.shape[1]
 */
  __pyx_v_d = (__pyx_v_m1->dimensions[1]);
  /* "dense_distances.pyx":87
 * cdef int n1 = m1.shape[0]
 * cdef int d = m1.shape[1]
 * cdef int n2 = m2.shape[0] # <<<<<<<<<<<<<<
 * cdef int _d = m2.shape[1]
 * assert _d == d, "Matrix dimension mismatch"
 */
  __pyx_v_n2 = (__pyx_v_m2->dimensions[0]);
  /* "dense_distances.pyx":88
 * cdef int d = m1.shape[1]
 * cdef int n2 = m2.shape[0]
 * cdef int _d = m2.shape[1] # <<<<<<<<<<<<<<
 * assert _d == d, "Matrix dimension mismatch"
 * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n1, n2))
 */
  __pyx_v__d = (__pyx_v_m2->dimensions[1]);
  /* "dense_distances.pyx":89
 * cdef int n2 = m2.shape[0]
 * cdef int _d = m2.shape[1]
 * assert _d == d, "Matrix dimension mismatch" # <<<<<<<<<<<<<<
 * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n1, n2))
 * cdef int i
 */
  /* Dimension-mismatch assert; compiled out when CYTHON_WITHOUT_ASSERTIONS
   * is defined, in which case mismatched m1/m2 silently read out of bounds. */
  #ifndef CYTHON_WITHOUT_ASSERTIONS
  if (unlikely(!(__pyx_v__d == __pyx_v_d))) {
    PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_kp_s_2));
    {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  #endif
  /* "dense_distances.pyx":90
 * cdef int _d = m2.shape[1]
 * assert _d == d, "Matrix dimension mismatch"
 * cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n1, n2)) # <<<<<<<<<<<<<<
 * cdef int i
 * cdef int j
 */
  /* Build the tuple ((n1, n2),) and call np.zeros to allocate the result. */
  __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = PyInt_FromLong(__pyx_v_n1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_3 = PyInt_FromLong(__pyx_v_n2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(((PyObject *)__pyx_t_4));
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_3);
  __pyx_t_1 = 0;
  __pyx_t_3 = 0;
  __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(((PyObject *)__pyx_t_3));
  PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_4));
  __Pyx_GIVEREF(((PyObject *)__pyx_t_4));
  __pyx_t_4 = 0;
  __pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
  if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_5 = ((PyArrayObject *)__pyx_t_4);
  /* Acquire a writable 2-D buffer view on the freshly created res array. */
  {
    __Pyx_BufFmt_StackElem __pyx_stack[1];
    if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_res, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
      __pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_res.buf = NULL;
      {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    } else {__pyx_bstride_0_res = __pyx_bstruct_res.strides[0]; __pyx_bstride_1_res = __pyx_bstruct_res.strides[1];
      __pyx_bshape_0_res = __pyx_bstruct_res.shape[0]; __pyx_bshape_1_res = __pyx_bstruct_res.shape[1];
    }
  }
  __pyx_t_5 = 0;
  __pyx_v_res = ((PyArrayObject *)__pyx_t_4);
  __pyx_t_4 = 0;
  /* "dense_distances.pyx":94
 * cdef int j
 * cdef int k
 * for i in prange(n1, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<<
 * for j in range(n2):
 * for k in range(d):
 */
  /* prange expansion: release the GIL (Py_UNBLOCK_THREADS), then run the
   * outer i-loop under OpenMP when _OPENMP is defined; per-iteration temps
   * are thread-private and j/k are initialized to a sentinel per iteration. */
  {
    #ifdef WITH_THREAD
    PyThreadState *_save = NULL;
    #endif
    Py_UNBLOCK_THREADS
    /*try:*/ {
      __pyx_t_6 = __pyx_v_n1;
      /* Generated step-size guard; constant-folds to a no-op (step is 1). */
      if (1 == 0) abort();
      {
        __pyx_t_8 = (__pyx_t_6 - 0) / 1;
        if (__pyx_t_8 > 0)
        {
          __pyx_v_i = 0;
          #ifdef _OPENMP
          #pragma omp parallel private(__pyx_t_16, __pyx_t_11, __pyx_t_9, __pyx_t_10, __pyx_t_15, __pyx_t_14, __pyx_t_18, __pyx_t_13, __pyx_t_12, __pyx_t_17)
          #endif /* _OPENMP */
          {
            #ifdef _OPENMP
            #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) schedule(runtime)
            #endif /* _OPENMP */
            for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){
              {
                __pyx_v_i = 0 + 1 * __pyx_t_7;
                /* Initialize private variables to invalid values */
                __pyx_v_j = ((int)0xbad0bad0);
                __pyx_v_k = ((int)0xbad0bad0);
                /* "dense_distances.pyx":95
 * cdef int k
 * for i in prange(n1, nogil=True, schedule='runtime'):
 * for j in range(n2): # <<<<<<<<<<<<<<
 * for k in range(d):
 * res[i,j] += euclidean(m1[i, k], m2[j, k])
 */
                __pyx_t_9 = __pyx_v_n2;
                for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
                  __pyx_v_j = __pyx_t_10;
                  /* "dense_distances.pyx":96
 * for i in prange(n1, nogil=True, schedule='runtime'):
 * for j in range(n2):
 * for k in range(d): # <<<<<<<<<<<<<<
 * res[i,j] += euclidean(m1[i, k], m2[j, k])
 * return res
 */
                  __pyx_t_11 = __pyx_v_d;
                  for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
                    __pyx_v_k = __pyx_t_12;
                    /* "dense_distances.pyx":97
 * for j in range(n2):
 * for k in range(d):
 * res[i,j] += euclidean(m1[i, k], m2[j, k]) # <<<<<<<<<<<<<<
 * return res
 *
 */
                    /* Negative-index wraparound adjustments (indices are
                     * always >= 0 here, so these are no-ops). */
                    __pyx_t_13 = __pyx_v_i;
                    __pyx_t_14 = __pyx_v_k;
                    if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_m1;
                    if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_bshape_1_m1;
                    __pyx_t_15 = __pyx_v_j;
                    __pyx_t_16 = __pyx_v_k;
                    if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_bshape_0_m2;
                    if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_1_m2;
                    __pyx_t_17 = __pyx_v_i;
                    __pyx_t_18 = __pyx_v_j;
                    if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_0_res;
                    if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_bshape_1_res;
                    /* Safe without atomics: each thread owns distinct rows i of res. */
                    *__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_17, __pyx_bstride_0_res, __pyx_t_18, __pyx_bstride_1_res) += __pyx_f_15dense_distances_euclidean((*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m1.buf, __pyx_t_13, __pyx_bstride_0_m1, __pyx_t_14, __pyx_bstride_1_m1)), (*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m2.buf, __pyx_t_15, __pyx_bstride_0_m2, __pyx_t_16, __pyx_bstride_1_m2)));
                  }
                }
              }
            }
          }
        }
      }
    }
    /* "dense_distances.pyx":94
 * cdef int j
 * cdef int k
 * for i in prange(n1, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<<
 * for j in range(n2):
 * for k in range(d):
 */
    /* Re-acquire the GIL after the nogil parallel section. */
    /*finally:*/ {
      Py_BLOCK_THREADS
    }
  }
  /* "dense_distances.pyx":98
 * for k in range(d):
 * res[i,j] += euclidean(m1[i, k], m2[j, k])
 * return res # <<<<<<<<<<<<<<
 *
 *
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(((PyObject *)__pyx_v_res));
  __pyx_r = ((PyObject *)__pyx_v_res);
  goto __pyx_L0;
  /* Unreachable fallthrough emitted by the code generator. */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  /* Error path: save the pending exception, release all acquired buffer
   * views (releasing can trigger Python code), then restore the exception
   * and add this frame to the traceback. */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
    __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
    __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res);
    __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m1);
    __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m2);
    __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
  __Pyx_AddTraceback("dense_distances.m2m_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  goto __pyx_L2;
  /* Success path: release buffer views without an exception in flight. */
  __pyx_L0:;
  __Pyx_SafeReleaseBuffer(&__pyx_bstruct_res);
  __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m1);
  __Pyx_SafeReleaseBuffer(&__pyx_bstruct_m2);
  __pyx_L2:;
  __Pyx_XDECREF((PyObject *)__pyx_v_res);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "dense_distances.pyx":101
*
*
* def m2m_chisquare(np.ndarray[DTYPE_t, ndim=2] m1, np.ndarray[DTYPE_t, ndim=2] m2): # <<<<<<<<<<<<<<
* """ Parallelized Chisquare distances between row vectors in m1 and m2
* """
*/
static PyObject *__pyx_pf_15dense_distances_5m2m_chisquare(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_15dense_distances_5m2m_chisquare[] = " Parallelized Chisquare distances between row vectors in m1 and m2\n ";
static PyMethodDef __pyx_mdef_15dense_distances_5m2m_chisquare = {__Pyx_NAMESTR("m2m_chisquare"), (PyCFunction)__pyx_pf_15dense_distances_5m2m_chisquare, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_15dense_distances_5m2m_chisquare)};
static PyObject *__pyx_pf_15dense_distances_5m2m_chisquare(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyArrayObject *__pyx_v_m1 = 0;
PyArrayObject *__pyx_v_m2 = 0;
int __pyx_v_n1;
int __pyx_v_d;
int __pyx_v_n2;
int __pyx_v__d;
PyArrayObject *__pyx_v_res = 0;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
Py_buffer __pyx_bstruct_res;
Py_ssize_t __pyx_bstride_0_res = 0;
Py_ssize_t __pyx_bstride_1_res = 0;
Py_ssize_t __pyx_bshape_0_res = 0;
Py_ssize_t __pyx_bshape_1_res = 0;
Py_buffer __pyx_bstruct_m1;
Py_ssize_t __pyx_bstride_0_m1 = 0;
Py_ssize_t __pyx_bstride_1_m1 = 0;
Py_ssize_t __pyx_bshape_0_m1 = 0;
Py_ssize_t __pyx_bshape_1_m1 = 0;
Py_buffer __pyx_bstruct_m2;
Py_ssize_t __pyx_bstride_0_m2 = 0;
Py_ssize_t __pyx_bstride_1_m2 = 0;
Py_ssize_t __pyx_bshape_0_m2 = 0;
Py_ssize_t __pyx_bshape_1_m2 = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyArrayObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_t_18;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__m1,&__pyx_n_s__m2,0};
__Pyx_RefNannySetupContext("m2m_chisquare");
__pyx_self = __pyx_self;
{
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 0:
values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m1);
if (likely(values[0])) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__m2);
if (likely(values[1])) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("m2m_chisquare", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "m2m_chisquare") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_m1 = ((PyArrayObject *)values[0]);
__pyx_v_m2 = ((PyArrayObject *)values[1]);
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("m2m_chisquare", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("dense_distances.m2m_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_bstruct_res.buf = NULL;
__pyx_bstruct_m1.buf = NULL;
__pyx_bstruct_m2.buf = NULL;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m1), __pyx_ptype_5numpy_ndarray, 1, "m1", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m2), __pyx_ptype_5numpy_ndarray, 1, "m2", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m1, (PyObject*)__pyx_v_m1, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_bstride_0_m1 = __pyx_bstruct_m1.strides[0]; __pyx_bstride_1_m1 = __pyx_bstruct_m1.strides[1];
__pyx_bshape_0_m1 = __pyx_bstruct_m1.shape[0]; __pyx_bshape_1_m1 = __pyx_bstruct_m1.shape[1];
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m2, (PyObject*)__pyx_v_m2, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_bstride_0_m2 = __pyx_bstruct_m2.strides[0]; __pyx_bstride_1_m2 = __pyx_bstruct_m2.strides[1];
__pyx_bshape_0_m2 = __pyx_bstruct_m2.shape[0]; __pyx_bshape_1_m2 = __pyx_bstruct_m2.shape[1];
/* "dense_distances.pyx":104
* """ Parallelized Chisquare distances between row vectors in m1 and m2
* """
* cdef int n1 = m1.shape[0] # <<<<<<<<<<<<<<
* cdef int d = m1.shape[1]
* cdef int n2 = m2.shape[0]
*/
__pyx_v_n1 = (__pyx_v_m1->dimensions[0]);
/* "dense_distances.pyx":105
* """
* cdef int n1 = m1.shape[0]
* cdef int d = m1.shape[1] # <<<<<<<<<<<<<<
* cdef int n2 = m2.shape[0]
* cdef int _d = m2.shape[1]
*/
__pyx_v_d = (__pyx_v_m1->dimensions[1]);
/* "dense_distances.pyx":106
* cdef int n1 = m1.shape[0]
* cdef int d = m1.shape[1]
* cdef int n2 = m2.shape[0] # <<<<<<<<<<<<<<
* cdef int _d = m2.shape[1]
* assert _d == d, "Matrix dimension mismatch"
*/
__pyx_v_n2 = (__pyx_v_m2->dimensions[0]);
/* "dense_distances.pyx":107
* cdef int d = m1.shape[1]
* cdef int n2 = m2.shape[0]
* cdef int _d = m2.shape[1] # <<<<<<<<<<<<<<
* assert _d == d, "Matrix dimension mismatch"
* cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n1, n2))
*/
__pyx_v__d = (__pyx_v_m2->dimensions[1]);
/* "dense_distances.pyx":108
* cdef int n2 = m2.shape[0]
* cdef int _d = m2.shape[1]
* assert _d == d, "Matrix dimension mismatch" # <<<<<<<<<<<<<<
* cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n1, n2))
* cdef int i
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!(__pyx_v__d == __pyx_v_d))) {
PyErr_SetObject(PyExc_AssertionError, ((PyObject *)__pyx_kp_s_2));
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#endif
/* "dense_distances.pyx":109
* cdef int _d = m2.shape[1]
* assert _d == d, "Matrix dimension mismatch"
* cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n1, n2)) # <<<<<<<<<<<<<<
* cdef int i
* cdef int j
*/
__pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyInt_FromLong(__pyx_v_n1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyInt_FromLong(__pyx_v_n2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_4));
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_3));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_4));
__Pyx_GIVEREF(((PyObject *)__pyx_t_4));
__pyx_t_4 = 0;
__pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_5 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_res, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
__pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_res.buf = NULL;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
} else {__pyx_bstride_0_res = __pyx_bstruct_res.strides[0]; __pyx_bstride_1_res = __pyx_bstruct_res.strides[1];
__pyx_bshape_0_res = __pyx_bstruct_res.shape[0]; __pyx_bshape_1_res = __pyx_bstruct_res.shape[1];
}
}
__pyx_t_5 = 0;
__pyx_v_res = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "dense_distances.pyx":113
* cdef int j
* cdef int k
* for i in prange(n1, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<<
* for j in range(n2):
* for k in range(d):
*/
{
#ifdef WITH_THREAD
PyThreadState *_save = NULL;
#endif
Py_UNBLOCK_THREADS
/*try:*/ {
__pyx_t_6 = __pyx_v_n1;
if (1 == 0) abort();
{
__pyx_t_8 = (__pyx_t_6 - 0) / 1;
if (__pyx_t_8 > 0)
{
__pyx_v_i = 0;
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_16, __pyx_t_11, __pyx_t_9, __pyx_t_10, __pyx_t_15, __pyx_t_14, __pyx_t_18, __pyx_t_13, __pyx_t_12, __pyx_t_17)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) schedule(runtime)
#endif /* _OPENMP */
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){
{
__pyx_v_i = 0 + 1 * __pyx_t_7;
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_k = ((int)0xbad0bad0);
/* "dense_distances.pyx":114
* cdef int k
* for i in prange(n1, nogil=True, schedule='runtime'):
* for j in range(n2): # <<<<<<<<<<<<<<
* for k in range(d):
* res[i,j] += chisquare(m1[i, k], m2[j, k])
*/
__pyx_t_9 = __pyx_v_n2;
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
__pyx_v_j = __pyx_t_10;
/* "dense_distances.pyx":115
* for i in prange(n1, nogil=True, schedule='runtime'):
* for j in range(n2):
* for k in range(d): # <<<<<<<<<<<<<<
* res[i,j] += chisquare(m1[i, k], m2[j, k])
* return res
*/
__pyx_t_11 = __pyx_v_d;
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_k = __pyx_t_12;
/* "dense_distances.pyx":116
* for j in range(n2):
* for k in range(d):
* res[i,j] += chisquare(m1[i, k], m2[j, k]) # <<<<<<<<<<<<<<
* return res
*
*/
__pyx_t_13 = __pyx_v_i;
__pyx_t_14 = __pyx_v_k;
if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_m1;
if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_bshape_1_m1;
__pyx_t_15 = __pyx_v_j;
__pyx_t_16 = __pyx_v_k;
if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_bshape_0_m2;
if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_1_m2;
__pyx_t_17 = __pyx_v_i;
__pyx_t_18 = __pyx_v_j;
if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_0_res;
if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_bshape_1_res;
*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_17, __pyx_bstride_0_res, __pyx_t_18, __pyx_bstride_1_res) += __pyx_f_15dense_distances_chisquare((*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m1.buf, __pyx_t_13, __pyx_bstride_0_m1, __pyx_t_14, __pyx_bstride_1_m1)), (*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m2.buf, __pyx_t_15, __pyx_bstride_0_m2, __pyx_t_16, __pyx_bstride_1_m2)));
}
}
}
}
}
}
}
}
/* "dense_distances.pyx":113
* cdef int j
* cdef int k
* for i in prange(n1, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<<
* for j in range(n2):
* for k in range(d):
*/
/*finally:*/ {
Py_BLOCK_THREADS
}
}
/* "dense_distances.pyx":117
* for k in range(d):
* res[i,j] += chisquare(m1[i, k], m2[j, k])
* return res # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_res));
__pyx_r = ((PyObject *)__pyx_v_res);
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_res);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_m1);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_m2);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("dense_distances.m2m_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_res);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_m1);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_m2);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_res);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "dense_distances.pyx":120
*
*
* def gram_euclidean(np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<<
* """ Parallelized Euclidean distances between all row vectors of m
* """
*/
/* NOTE(review): Cython-generated code — do not edit by hand; regenerate from
 * dense_distances.pyx. Implements gram_euclidean(m): fills the strict lower
 * triangle of an (n x n) result with accumulated euclidean(m[i,k], m[j,k])
 * terms for j < i, then mirrors each entry to res[j,i] to make the matrix
 * symmetric. Parallelized over i with an OpenMP prange (GIL released). */
static PyObject *__pyx_pf_15dense_distances_6gram_euclidean(PyObject *__pyx_self, PyObject *__pyx_v_m); /*proto*/
static char __pyx_doc_15dense_distances_6gram_euclidean[] = " Parallelized Euclidean distances between all row vectors of m\n ";
static PyMethodDef __pyx_mdef_15dense_distances_6gram_euclidean = {__Pyx_NAMESTR("gram_euclidean"), (PyCFunction)__pyx_pf_15dense_distances_6gram_euclidean, METH_O, __Pyx_DOCSTR(__pyx_doc_15dense_distances_6gram_euclidean)};
static PyObject *__pyx_pf_15dense_distances_6gram_euclidean(PyObject *__pyx_self, PyObject *__pyx_v_m) {
int __pyx_v_n;
int __pyx_v_d;
PyArrayObject *__pyx_v_res = 0;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
/* Buffer views + cached strides/shapes for res and m. */
Py_buffer __pyx_bstruct_res;
Py_ssize_t __pyx_bstride_0_res = 0;
Py_ssize_t __pyx_bstride_1_res = 0;
Py_ssize_t __pyx_bshape_0_res = 0;
Py_ssize_t __pyx_bshape_1_res = 0;
Py_buffer __pyx_bstruct_m;
Py_ssize_t __pyx_bstride_0_m = 0;
Py_ssize_t __pyx_bstride_1_m = 0;
Py_ssize_t __pyx_bshape_0_m = 0;
Py_ssize_t __pyx_bshape_1_m = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyArrayObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("gram_euclidean");
__pyx_self = __pyx_self;
__pyx_bstruct_res.buf = NULL;
__pyx_bstruct_m.buf = NULL;
/* METH_O entry point: single argument m, type-checked as np.ndarray,
 * then viewed as a strided 2-D DTYPE_t buffer. */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m), __pyx_ptype_5numpy_ndarray, 1, "m", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m, (PyObject*)__pyx_v_m, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_bstride_0_m = __pyx_bstruct_m.strides[0]; __pyx_bstride_1_m = __pyx_bstruct_m.strides[1];
__pyx_bshape_0_m = __pyx_bstruct_m.shape[0]; __pyx_bshape_1_m = __pyx_bstruct_m.shape[1];
/* "dense_distances.pyx":123
* """ Parallelized Euclidean distances between all row vectors of m
* """
* cdef int n = m.shape[0] # <<<<<<<<<<<<<<
* cdef int d = m.shape[1]
* cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n, n))
*/
__pyx_v_n = (((PyArrayObject *)__pyx_v_m)->dimensions[0]);
/* "dense_distances.pyx":124
* """
* cdef int n = m.shape[0]
* cdef int d = m.shape[1] # <<<<<<<<<<<<<<
* cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n, n))
* cdef int i
*/
__pyx_v_d = (((PyArrayObject *)__pyx_v_m)->dimensions[1]);
/* "dense_distances.pyx":125
* cdef int n = m.shape[0]
* cdef int d = m.shape[1]
* cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n, n)) # <<<<<<<<<<<<<<
* cdef int i
* cdef int j
*/
__pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_4));
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_3));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_4));
__Pyx_GIVEREF(((PyObject *)__pyx_t_4));
__pyx_t_4 = 0;
__pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_5 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_res, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
__pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_res.buf = NULL;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
} else {__pyx_bstride_0_res = __pyx_bstruct_res.strides[0]; __pyx_bstride_1_res = __pyx_bstruct_res.strides[1];
__pyx_bshape_0_res = __pyx_bstruct_res.shape[0]; __pyx_bshape_1_res = __pyx_bstruct_res.shape[1];
}
}
__pyx_t_5 = 0;
__pyx_v_res = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "dense_distances.pyx":129
* cdef int j
* cdef int k
* for i in prange(n, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<<
* for j in range(i):
* for k in range(d):
*/
/* GIL released around the OpenMP-parallel loop; iteration i writes only
 * row i and column i of res (j < i), so workers do not collide. */
{
#ifdef WITH_THREAD
PyThreadState *_save = NULL;
#endif
Py_UNBLOCK_THREADS
/*try:*/ {
__pyx_t_6 = __pyx_v_n;
if (1 == 0) abort();
{
__pyx_t_8 = (__pyx_t_6 - 0) / 1;
if (__pyx_t_8 > 0)
{
__pyx_v_i = 0;
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_16, __pyx_t_11, __pyx_t_9, __pyx_t_10, __pyx_t_15, __pyx_t_14, __pyx_t_19, __pyx_t_18, __pyx_t_13, __pyx_t_12, __pyx_t_17, __pyx_t_20)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) schedule(runtime)
#endif /* _OPENMP */
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){
{
__pyx_v_i = 0 + 1 * __pyx_t_7;
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_k = ((int)0xbad0bad0);
/* "dense_distances.pyx":130
* cdef int k
* for i in prange(n, nogil=True, schedule='runtime'):
* for j in range(i): # <<<<<<<<<<<<<<
* for k in range(d):
* res[i,j] += euclidean(m[i, k], m[j, k])
*/
__pyx_t_9 = __pyx_v_i;
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
__pyx_v_j = __pyx_t_10;
/* "dense_distances.pyx":131
* for i in prange(n, nogil=True, schedule='runtime'):
* for j in range(i):
* for k in range(d): # <<<<<<<<<<<<<<
* res[i,j] += euclidean(m[i, k], m[j, k])
* res[j,i] = res[i,j]
*/
__pyx_t_11 = __pyx_v_d;
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_k = __pyx_t_12;
/* "dense_distances.pyx":132
* for j in range(i):
* for k in range(d):
* res[i,j] += euclidean(m[i, k], m[j, k]) # <<<<<<<<<<<<<<
* res[j,i] = res[i,j]
* return res
*/
/* Negative-index wraparound checks are dead here (indices >= 0) but
 * emitted unconditionally by Cython's buffer indexing. */
__pyx_t_13 = __pyx_v_i;
__pyx_t_14 = __pyx_v_k;
if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_m;
if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_bshape_1_m;
__pyx_t_15 = __pyx_v_j;
__pyx_t_16 = __pyx_v_k;
if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_bshape_0_m;
if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_1_m;
__pyx_t_17 = __pyx_v_i;
__pyx_t_18 = __pyx_v_j;
if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_0_res;
if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_bshape_1_res;
*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_17, __pyx_bstride_0_res, __pyx_t_18, __pyx_bstride_1_res) += __pyx_f_15dense_distances_euclidean((*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m.buf, __pyx_t_13, __pyx_bstride_0_m, __pyx_t_14, __pyx_bstride_1_m)), (*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m.buf, __pyx_t_15, __pyx_bstride_0_m, __pyx_t_16, __pyx_bstride_1_m)));
}
/* "dense_distances.pyx":133
* for k in range(d):
* res[i,j] += euclidean(m[i, k], m[j, k])
* res[j,i] = res[i,j] # <<<<<<<<<<<<<<
* return res
*
*/
/* Mirror the finished lower-triangle entry into the upper triangle. */
__pyx_t_11 = __pyx_v_i;
__pyx_t_12 = __pyx_v_j;
if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_bshape_0_res;
if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_bshape_1_res;
__pyx_t_19 = __pyx_v_j;
__pyx_t_20 = __pyx_v_i;
if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_bshape_0_res;
if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_bshape_1_res;
*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_19, __pyx_bstride_0_res, __pyx_t_20, __pyx_bstride_1_res) = (*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_11, __pyx_bstride_0_res, __pyx_t_12, __pyx_bstride_1_res));
}
}
}
}
}
}
}
/* "dense_distances.pyx":129
* cdef int j
* cdef int k
* for i in prange(n, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<<
* for j in range(i):
* for k in range(d):
*/
/*finally:*/ {
Py_BLOCK_THREADS
}
}
/* "dense_distances.pyx":134
* res[i,j] += euclidean(m[i, k], m[j, k])
* res[j,i] = res[i,j]
* return res # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_res));
__pyx_r = ((PyObject *)__pyx_v_res);
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* Error path: release buffers while preserving the pending exception. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_res);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_m);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("dense_distances.gram_euclidean", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_res);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_m);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_res);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "dense_distances.pyx":137
*
*
* def gram_chisquare(np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<<
* """ Parallelized Chisquare distances between all row vectors of m
* """
*/
/* NOTE(review): Cython-generated code — do not edit by hand; regenerate from
 * dense_distances.pyx. Implements gram_chisquare(m): identical structure to
 * gram_euclidean above, but accumulates chisquare(m[i,k], m[j,k]) terms into
 * the strict lower triangle of res and mirrors them to keep res symmetric.
 * Parallelized over i with an OpenMP prange (GIL released). */
static PyObject *__pyx_pf_15dense_distances_7gram_chisquare(PyObject *__pyx_self, PyObject *__pyx_v_m); /*proto*/
static char __pyx_doc_15dense_distances_7gram_chisquare[] = " Parallelized Chisquare distances between all row vectors of m\n ";
static PyMethodDef __pyx_mdef_15dense_distances_7gram_chisquare = {__Pyx_NAMESTR("gram_chisquare"), (PyCFunction)__pyx_pf_15dense_distances_7gram_chisquare, METH_O, __Pyx_DOCSTR(__pyx_doc_15dense_distances_7gram_chisquare)};
static PyObject *__pyx_pf_15dense_distances_7gram_chisquare(PyObject *__pyx_self, PyObject *__pyx_v_m) {
int __pyx_v_n;
int __pyx_v_d;
PyArrayObject *__pyx_v_res = 0;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
/* Buffer views + cached strides/shapes for res and m. */
Py_buffer __pyx_bstruct_res;
Py_ssize_t __pyx_bstride_0_res = 0;
Py_ssize_t __pyx_bstride_1_res = 0;
Py_ssize_t __pyx_bshape_0_res = 0;
Py_ssize_t __pyx_bshape_1_res = 0;
Py_buffer __pyx_bstruct_m;
Py_ssize_t __pyx_bstride_0_m = 0;
Py_ssize_t __pyx_bstride_1_m = 0;
Py_ssize_t __pyx_bshape_0_m = 0;
Py_ssize_t __pyx_bshape_1_m = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyArrayObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("gram_chisquare");
__pyx_self = __pyx_self;
__pyx_bstruct_res.buf = NULL;
__pyx_bstruct_m.buf = NULL;
/* METH_O entry point: single argument m, type-checked as np.ndarray,
 * then viewed as a strided 2-D DTYPE_t buffer. */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_m), __pyx_ptype_5numpy_ndarray, 1, "m", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_m, (PyObject*)__pyx_v_m, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_bstride_0_m = __pyx_bstruct_m.strides[0]; __pyx_bstride_1_m = __pyx_bstruct_m.strides[1];
__pyx_bshape_0_m = __pyx_bstruct_m.shape[0]; __pyx_bshape_1_m = __pyx_bstruct_m.shape[1];
/* "dense_distances.pyx":140
* """ Parallelized Chisquare distances between all row vectors of m
* """
* cdef int n = m.shape[0] # <<<<<<<<<<<<<<
* cdef int d = m.shape[1]
* cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n, n))
*/
__pyx_v_n = (((PyArrayObject *)__pyx_v_m)->dimensions[0]);
/* "dense_distances.pyx":141
* """
* cdef int n = m.shape[0]
* cdef int d = m.shape[1] # <<<<<<<<<<<<<<
* cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n, n))
* cdef int i
*/
__pyx_v_d = (((PyArrayObject *)__pyx_v_m)->dimensions[1]);
/* "dense_distances.pyx":142
* cdef int n = m.shape[0]
* cdef int d = m.shape[1]
* cdef np.ndarray[DTYPE_t, ndim=2] res = np.zeros((n, n)) # <<<<<<<<<<<<<<
* cdef int i
* cdef int j
*/
__pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__zeros); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = PyInt_FromLong(__pyx_v_n); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_4));
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_3));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_4));
__Pyx_GIVEREF(((PyObject *)__pyx_t_4));
__pyx_t_4 = 0;
__pyx_t_4 = PyObject_Call(__pyx_t_2, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_5 = ((PyArrayObject *)__pyx_t_4);
{
__Pyx_BufFmt_StackElem __pyx_stack[1];
if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_bstruct_res, (PyObject*)__pyx_t_5, &__Pyx_TypeInfo_nn___pyx_t_15dense_distances_DTYPE_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
__pyx_v_res = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_bstruct_res.buf = NULL;
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
} else {__pyx_bstride_0_res = __pyx_bstruct_res.strides[0]; __pyx_bstride_1_res = __pyx_bstruct_res.strides[1];
__pyx_bshape_0_res = __pyx_bstruct_res.shape[0]; __pyx_bshape_1_res = __pyx_bstruct_res.shape[1];
}
}
__pyx_t_5 = 0;
__pyx_v_res = ((PyArrayObject *)__pyx_t_4);
__pyx_t_4 = 0;
/* "dense_distances.pyx":146
* cdef int j
* cdef int k
* for i in prange(n, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<<
* for j in range(i):
* for k in range(d):
*/
/* GIL released around the OpenMP-parallel loop; iteration i writes only
 * row i and column i of res (j < i), so workers do not collide. */
{
#ifdef WITH_THREAD
PyThreadState *_save = NULL;
#endif
Py_UNBLOCK_THREADS
/*try:*/ {
__pyx_t_6 = __pyx_v_n;
if (1 == 0) abort();
{
__pyx_t_8 = (__pyx_t_6 - 0) / 1;
if (__pyx_t_8 > 0)
{
__pyx_v_i = 0;
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_16, __pyx_t_11, __pyx_t_9, __pyx_t_10, __pyx_t_15, __pyx_t_14, __pyx_t_19, __pyx_t_18, __pyx_t_13, __pyx_t_12, __pyx_t_17, __pyx_t_20)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) schedule(runtime)
#endif /* _OPENMP */
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_8; __pyx_t_7++){
{
__pyx_v_i = 0 + 1 * __pyx_t_7;
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_k = ((int)0xbad0bad0);
/* "dense_distances.pyx":147
* cdef int k
* for i in prange(n, nogil=True, schedule='runtime'):
* for j in range(i): # <<<<<<<<<<<<<<
* for k in range(d):
* res[i,j] += chisquare(m[i, k], m[j, k])
*/
__pyx_t_9 = __pyx_v_i;
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
__pyx_v_j = __pyx_t_10;
/* "dense_distances.pyx":148
* for i in prange(n, nogil=True, schedule='runtime'):
* for j in range(i):
* for k in range(d): # <<<<<<<<<<<<<<
* res[i,j] += chisquare(m[i, k], m[j, k])
* res[j,i] = res[i,j]
*/
__pyx_t_11 = __pyx_v_d;
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_k = __pyx_t_12;
/* "dense_distances.pyx":149
* for j in range(i):
* for k in range(d):
* res[i,j] += chisquare(m[i, k], m[j, k]) # <<<<<<<<<<<<<<
* res[j,i] = res[i,j]
* return res
*/
/* Negative-index wraparound checks are dead here (indices >= 0) but
 * emitted unconditionally by Cython's buffer indexing. */
__pyx_t_13 = __pyx_v_i;
__pyx_t_14 = __pyx_v_k;
if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_bshape_0_m;
if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_bshape_1_m;
__pyx_t_15 = __pyx_v_j;
__pyx_t_16 = __pyx_v_k;
if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_bshape_0_m;
if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_bshape_1_m;
__pyx_t_17 = __pyx_v_i;
__pyx_t_18 = __pyx_v_j;
if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_bshape_0_res;
if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_bshape_1_res;
*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_17, __pyx_bstride_0_res, __pyx_t_18, __pyx_bstride_1_res) += __pyx_f_15dense_distances_chisquare((*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m.buf, __pyx_t_13, __pyx_bstride_0_m, __pyx_t_14, __pyx_bstride_1_m)), (*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_m.buf, __pyx_t_15, __pyx_bstride_0_m, __pyx_t_16, __pyx_bstride_1_m)));
}
/* "dense_distances.pyx":150
* for k in range(d):
* res[i,j] += chisquare(m[i, k], m[j, k])
* res[j,i] = res[i,j] # <<<<<<<<<<<<<<
* return res
*/
/* Mirror the finished lower-triangle entry into the upper triangle. */
__pyx_t_11 = __pyx_v_i;
__pyx_t_12 = __pyx_v_j;
if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_bshape_0_res;
if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_bshape_1_res;
__pyx_t_19 = __pyx_v_j;
__pyx_t_20 = __pyx_v_i;
if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_bshape_0_res;
if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_bshape_1_res;
*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_19, __pyx_bstride_0_res, __pyx_t_20, __pyx_bstride_1_res) = (*__Pyx_BufPtrStrided2d(__pyx_t_15dense_distances_DTYPE_t *, __pyx_bstruct_res.buf, __pyx_t_11, __pyx_bstride_0_res, __pyx_t_12, __pyx_bstride_1_res));
}
}
}
}
}
}
}
/* "dense_distances.pyx":146
* cdef int j
* cdef int k
* for i in prange(n, nogil=True, schedule='runtime'): # <<<<<<<<<<<<<<
* for j in range(i):
* for k in range(d):
*/
/*finally:*/ {
Py_BLOCK_THREADS
}
}
/* "dense_distances.pyx":151
* res[i,j] += chisquare(m[i, k], m[j, k])
* res[j,i] = res[i,j]
* return res # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_res));
__pyx_r = ((PyObject *)__pyx_v_res);
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* Error path: release buffers while preserving the pending exception. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_res);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_m);
__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
__Pyx_AddTraceback("dense_distances.gram_chisquare", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_res);
__Pyx_SafeReleaseBuffer(&__pyx_bstruct_m);
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_res);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":190
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fullfill the PEP.
*/
static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pf_5numpy_7ndarray___getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_copy_shape;
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_v_hasfields;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
char *__pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getbuffer__");
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "numpy.pxd":196
* # of flags
*
* if info == NULL: return # <<<<<<<<<<<<<<
*
* cdef int copy_shape, i, ndim
*/
__pyx_t_1 = (__pyx_v_info == NULL);
if (__pyx_t_1) {
__pyx_r = 0;
goto __pyx_L0;
goto __pyx_L5;
}
__pyx_L5:;
/* "numpy.pxd":199
*
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
*/
__pyx_v_endian_detector = 1;
/* "numpy.pxd":200
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
*
* ndim = PyArray_NDIM(self)
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "numpy.pxd":202
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
* ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_v_ndim = PyArray_NDIM(((PyArrayObject *)__pyx_v_self));
/* "numpy.pxd":204
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
__pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t)));
if (__pyx_t_1) {
/* "numpy.pxd":205
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* copy_shape = 1 # <<<<<<<<<<<<<<
* else:
* copy_shape = 0
*/
__pyx_v_copy_shape = 1;
goto __pyx_L6;
}
/*else*/ {
/* "numpy.pxd":207
* copy_shape = 1
* else:
* copy_shape = 0 # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
*/
__pyx_v_copy_shape = 0;
}
__pyx_L6:;
/* "numpy.pxd":209
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS);
if (__pyx_t_1) {
/* "numpy.pxd":210
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not C contiguous")
*
*/
__pyx_t_2 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_C_CONTIGUOUS));
__pyx_t_3 = __pyx_t_2;
} else {
__pyx_t_3 = __pyx_t_1;
}
if (__pyx_t_3) {
/* "numpy.pxd":211
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_4), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L7;
}
__pyx_L7:;
/* "numpy.pxd":213
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
__pyx_t_3 = ((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS);
if (__pyx_t_3) {
/* "numpy.pxd":214
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not Fortran contiguous")
*
*/
__pyx_t_1 = (!PyArray_CHKFLAGS(((PyArrayObject *)__pyx_v_self), NPY_F_CONTIGUOUS));
__pyx_t_2 = __pyx_t_1;
} else {
__pyx_t_2 = __pyx_t_3;
}
if (__pyx_t_2) {
/* "numpy.pxd":215
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_6), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L8;
}
__pyx_L8:;
/* "numpy.pxd":217
* raise ValueError(u"ndarray is not Fortran contiguous")
*
* info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
* info.ndim = ndim
* if copy_shape:
*/
__pyx_v_info->buf = PyArray_DATA(((PyArrayObject *)__pyx_v_self));
/* "numpy.pxd":218
*
* info.buf = PyArray_DATA(self)
* info.ndim = ndim # <<<<<<<<<<<<<<
* if copy_shape:
* # Allocate new buffer for strides and shape info.
*/
__pyx_v_info->ndim = __pyx_v_ndim;
/* "numpy.pxd":219
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
if (__pyx_v_copy_shape) {
/* "numpy.pxd":222
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<<
* info.shape = info.strides + ndim
* for i in range(ndim):
*/
__pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
/* "numpy.pxd":223
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim # <<<<<<<<<<<<<<
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
*/
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "numpy.pxd":224
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim
* for i in range(ndim): # <<<<<<<<<<<<<<
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i]
*/
__pyx_t_5 = __pyx_v_ndim;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "numpy.pxd":225
* info.shape = info.strides + ndim
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
*/
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]);
/* "numpy.pxd":226
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
*/
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(((PyArrayObject *)__pyx_v_self))[__pyx_v_i]);
}
goto __pyx_L9;
}
/*else*/ {
/* "numpy.pxd":228
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<<
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
*/
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(((PyArrayObject *)__pyx_v_self)));
/* "numpy.pxd":229
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
*/
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(((PyArrayObject *)__pyx_v_self)));
}
__pyx_L9:;
/* "numpy.pxd":230
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self)
*/
__pyx_v_info->suboffsets = NULL;
/* "numpy.pxd":231
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
* info.readonly = not PyArray_ISWRITEABLE(self)
*
*/
__pyx_v_info->itemsize = PyArray_ITEMSIZE(((PyArrayObject *)__pyx_v_self));
/* "numpy.pxd":232
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
*
* cdef int t
*/
__pyx_v_info->readonly = (!PyArray_ISWRITEABLE(((PyArrayObject *)__pyx_v_self)));
/* "numpy.pxd":235
*
* cdef int t
* cdef char* f = NULL # <<<<<<<<<<<<<<
* cdef dtype descr = self.descr
* cdef list stack
*/
__pyx_v_f = NULL;
/* "numpy.pxd":236
* cdef int t
* cdef char* f = NULL
* cdef dtype descr = self.descr # <<<<<<<<<<<<<<
* cdef list stack
* cdef int offset
*/
__Pyx_INCREF(((PyObject *)((PyArrayObject *)__pyx_v_self)->descr));
__pyx_v_descr = ((PyArrayObject *)__pyx_v_self)->descr;
/* "numpy.pxd":240
* cdef int offset
*
* cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<<
*
* if not hasfields and not copy_shape:
*/
__pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
/* "numpy.pxd":242
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
__pyx_t_2 = (!__pyx_v_hasfields);
if (__pyx_t_2) {
__pyx_t_3 = (!__pyx_v_copy_shape);
__pyx_t_1 = __pyx_t_3;
} else {
__pyx_t_1 = __pyx_t_2;
}
if (__pyx_t_1) {
/* "numpy.pxd":244
* if not hasfields and not copy_shape:
* # do not call releasebuffer
* info.obj = None # <<<<<<<<<<<<<<
* else:
* # need to call releasebuffer
*/
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = Py_None;
goto __pyx_L12;
}
/*else*/ {
/* "numpy.pxd":247
* else:
* # need to call releasebuffer
* info.obj = self # <<<<<<<<<<<<<<
*
* if not hasfields:
*/
__Pyx_INCREF(__pyx_v_self);
__Pyx_GIVEREF(__pyx_v_self);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = __pyx_v_self;
}
__pyx_L12:;
/* "numpy.pxd":249
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == '>' and little_endian) or
*/
__pyx_t_1 = (!__pyx_v_hasfields);
if (__pyx_t_1) {
/* "numpy.pxd":250
*
* if not hasfields:
* t = descr.type_num # <<<<<<<<<<<<<<
* if ((descr.byteorder == '>' and little_endian) or
* (descr.byteorder == '<' and not little_endian)):
*/
__pyx_v_t = __pyx_v_descr->type_num;
/* "numpy.pxd":251
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == '>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_1 = (__pyx_v_descr->byteorder == '>');
if (__pyx_t_1) {
__pyx_t_2 = __pyx_v_little_endian;
} else {
__pyx_t_2 = __pyx_t_1;
}
if (!__pyx_t_2) {
/* "numpy.pxd":252
* t = descr.type_num
* if ((descr.byteorder == '>' and little_endian) or
* (descr.byteorder == '<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
*/
__pyx_t_1 = (__pyx_v_descr->byteorder == '<');
if (__pyx_t_1) {
__pyx_t_3 = (!__pyx_v_little_endian);
__pyx_t_7 = __pyx_t_3;
} else {
__pyx_t_7 = __pyx_t_1;
}
__pyx_t_1 = __pyx_t_7;
} else {
__pyx_t_1 = __pyx_t_2;
}
if (__pyx_t_1) {
/* "numpy.pxd":253
* if ((descr.byteorder == '>' and little_endian) or
* (descr.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_t_4 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_8), NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L14;
}
__pyx_L14:;
/* "numpy.pxd":254
* (descr.byteorder == '<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
*/
__pyx_t_1 = (__pyx_v_t == NPY_BYTE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__b;
goto __pyx_L15;
}
/* "numpy.pxd":255
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
*/
__pyx_t_1 = (__pyx_v_t == NPY_UBYTE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__B;
goto __pyx_L15;
}
/* "numpy.pxd":256
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
*/
__pyx_t_1 = (__pyx_v_t == NPY_SHORT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__h;
goto __pyx_L15;
}
/* "numpy.pxd":257
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
*/
__pyx_t_1 = (__pyx_v_t == NPY_USHORT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__H;
goto __pyx_L15;
}
/* "numpy.pxd":258
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
*/
__pyx_t_1 = (__pyx_v_t == NPY_INT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__i;
goto __pyx_L15;
}
/* "numpy.pxd":259
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
*/
__pyx_t_1 = (__pyx_v_t == NPY_UINT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__I;
goto __pyx_L15;
}
/* "numpy.pxd":260
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
*/
__pyx_t_1 = (__pyx_v_t == NPY_LONG);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__l;
goto __pyx_L15;
}
/* "numpy.pxd":261
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
*/
__pyx_t_1 = (__pyx_v_t == NPY_ULONG);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__L;
goto __pyx_L15;
}
/* "numpy.pxd":262
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
*/
__pyx_t_1 = (__pyx_v_t == NPY_LONGLONG);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__q;
goto __pyx_L15;
}
/* "numpy.pxd":263
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
*/
__pyx_t_1 = (__pyx_v_t == NPY_ULONGLONG);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__Q;
goto __pyx_L15;
}
/* "numpy.pxd":264
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
*/
__pyx_t_1 = (__pyx_v_t == NPY_FLOAT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__f;
goto __pyx_L15;
}
/* "numpy.pxd":265
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
*/
__pyx_t_1 = (__pyx_v_t == NPY_DOUBLE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__d;
goto __pyx_L15;
}
/* "numpy.pxd":266
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
*/
__pyx_t_1 = (__pyx_v_t == NPY_LONGDOUBLE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__g;
goto __pyx_L15;
}
/* "numpy.pxd":267
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
*/
__pyx_t_1 = (__pyx_v_t == NPY_CFLOAT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__Zf;
goto __pyx_L15;
}
/* "numpy.pxd":268
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O"
*/
__pyx_t_1 = (__pyx_v_t == NPY_CDOUBLE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__Zd;
goto __pyx_L15;
}
/* "numpy.pxd":269
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f = "O"
* else:
*/
__pyx_t_1 = (__pyx_v_t == NPY_CLONGDOUBLE);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__Zg;
goto __pyx_L15;
}
/* "numpy.pxd":270
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_1 = (__pyx_v_t == NPY_OBJECT);
if (__pyx_t_1) {
__pyx_v_f = __pyx_k__O;
goto __pyx_L15;
}
/*else*/ {
/* "numpy.pxd":272
* elif t == NPY_OBJECT: f = "O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* info.format = f
* return
*/
__pyx_t_4 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_8 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_8));
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_4));
PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_t_8));
__Pyx_GIVEREF(((PyObject *)__pyx_t_8));
__pyx_t_8 = 0;
__pyx_t_8 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_4), NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(((PyObject *)__pyx_t_4)); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L15:;
/* "numpy.pxd":273
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f # <<<<<<<<<<<<<<
* return
* else:
*/
__pyx_v_info->format = __pyx_v_f;
/* "numpy.pxd":274
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f
* return # <<<<<<<<<<<<<<
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
*/
__pyx_r = 0;
goto __pyx_L0;
goto __pyx_L13;
}
/*else*/ {
/* "numpy.pxd":276
* return
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
* info.format[0] = '^' # Native data types, manual alignment
* offset = 0
*/
__pyx_v_info->format = ((char *)malloc(255));
/* "numpy.pxd":277
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = '^' # Native data types, manual alignment # <<<<<<<<<<<<<<
* offset = 0
* f = _util_dtypestring(descr, info.format + 1,
*/
(__pyx_v_info->format[0]) = '^';
/* "numpy.pxd":278
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = '^' # Native data types, manual alignment
* offset = 0 # <<<<<<<<<<<<<<
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
*/
__pyx_v_offset = 0;
/* "numpy.pxd":281
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
* &offset) # <<<<<<<<<<<<<<
* f[0] = 0 # Terminate format string
*
*/
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_9;
/* "numpy.pxd":282
* info.format + _buffer_format_string_len,
* &offset)
* f[0] = 0 # Terminate format string # <<<<<<<<<<<<<<
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
*/
(__pyx_v_f[0]) = 0;
}
__pyx_L13:;
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":284
 * f[0] = 0 # Terminate format string
 *
 * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
 * if PyArray_HASFIELDS(self):
 * stdlib.free(info.format)
 */
/*
 * NOTE(review): Cython-GENERATED counterpart to __getbuffer__ above.
 * Do not hand-edit -- regenerate from the .pyx/.pxd sources instead.
 * Frees the malloc'd format string (structured dtypes) and the
 * strides/shape block (allocated when sizeof(npy_intp) != sizeof(Py_ssize_t);
 * info->shape points into the same allocation, so only strides is freed).
 */
static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pf_5numpy_7ndarray_1__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__");
/* "numpy.pxd":285
 *
 * def __releasebuffer__(ndarray self, Py_buffer* info):
 * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
 * stdlib.free(info.format)
 * if sizeof(npy_intp) != sizeof(Py_ssize_t):
 */
__pyx_t_1 = PyArray_HASFIELDS(((PyArrayObject *)__pyx_v_self));
if (__pyx_t_1) {
/* "numpy.pxd":286
 * def __releasebuffer__(ndarray self, Py_buffer* info):
 * if PyArray_HASFIELDS(self):
 * stdlib.free(info.format) # <<<<<<<<<<<<<<
 * if sizeof(npy_intp) != sizeof(Py_ssize_t):
 * stdlib.free(info.strides)
 */
free(__pyx_v_info->format);
goto __pyx_L5;
}
__pyx_L5:;
/* "numpy.pxd":287
 * if PyArray_HASFIELDS(self):
 * stdlib.free(info.format)
 * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
 * stdlib.free(info.strides)
 * # info.shape was stored after info.strides in the same block
 */
__pyx_t_1 = ((sizeof(npy_intp)) != (sizeof(Py_ssize_t)));
if (__pyx_t_1) {
/* "numpy.pxd":288
 * stdlib.free(info.format)
 * if sizeof(npy_intp) != sizeof(Py_ssize_t):
 * stdlib.free(info.strides) # <<<<<<<<<<<<<<
 * # info.shape was stored after info.strides in the same block
 *
 */
free(__pyx_v_info->strides);
goto __pyx_L6;
}
__pyx_L6:;
__Pyx_RefNannyFinishContext();
}
/* "numpy.pxd":764
 * ctypedef npy_cdouble complex_t
 *
 * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(1, <void*>a)
 *
 */
/*
 * NOTE(review): Cython-GENERATED thin wrapper (do not hand-edit).
 * Forwards one object to numpy's variadic PyArray_MultiIterNew for
 * broadcasting. Returns a new reference, or NULL (here 0) with an
 * exception set on failure. The `__pyx_r = Py_None` after the first
 * `goto __pyx_L0` is unreachable boilerplate emitted by Cython.
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1");
/* "numpy.pxd":765
 *
 * cdef inline object PyArray_MultiIterNew1(a):
 * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew2(a, b):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 765; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":767
 * return PyArray_MultiIterNew(1, <void*>a)
 *
 * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(2, <void*>a, <void*>b)
 *
 */
/*
 * NOTE(review): Cython-GENERATED thin wrapper (do not hand-edit).
 * Two-argument variant of the PyArray_MultiIterNew forwarding helpers;
 * returns a new reference, or 0 with an exception set on failure.
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2");
/* "numpy.pxd":768
 *
 * cdef inline object PyArray_MultiIterNew2(a, b):
 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew3(a, b, c):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 768; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":770
 * return PyArray_MultiIterNew(2, <void*>a, <void*>b)
 *
 * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
 *
 */
/*
 * NOTE(review): Cython-GENERATED thin wrapper (do not hand-edit).
 * Three-argument variant of the PyArray_MultiIterNew forwarding helpers;
 * returns a new reference, or 0 with an exception set on failure.
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3");
/* "numpy.pxd":771
 *
 * cdef inline object PyArray_MultiIterNew3(a, b, c):
 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 771; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":773
 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
 *
 * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
 *
 */
/*
 * NOTE(review): Cython-GENERATED thin wrapper (do not hand-edit).
 * Four-argument variant of the PyArray_MultiIterNew forwarding helpers;
 * returns a new reference, or 0 with an exception set on failure.
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4");
/* "numpy.pxd":774
 *
 * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
 *
 * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 774; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":776
 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
 *
 * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
 *
 */
/*
 * NOTE(review): Cython-GENERATED thin wrapper (do not hand-edit).
 * Five-argument variant of the PyArray_MultiIterNew forwarding helpers;
 * returns a new reference, or 0 with an exception set on failure.
 */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5");
/* "numpy.pxd":777
 *
 * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
 *
 * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 777; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":779
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
/* Cython-generated helper (expanded from numpy.pxd's _util_dtypestring).
 * Recursively renders the buffer-protocol format string for a structured
 * dtype `descr` into the caller-provided buffer [f, end); *offset tracks
 * the running byte offset within the parent struct and is advanced as
 * fields are emitted.  Returns the advanced write pointer, or NULL with a
 * Python exception set on failure.
 * NOTE(review): machine-generated code -- fix numpy.pxd and regenerate
 * rather than editing this function by hand. */
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
long __pyx_t_10;
char *__pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_util_dtypestring");
/* "numpy.pxd":786
 * cdef int delta_offset
 * cdef tuple i
 * cdef int endian_detector = 1             # <<<<<<<<<<<<<<
 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
 * cdef tuple fields
 */
__pyx_v_endian_detector = 1;
/* "numpy.pxd":787
 * cdef tuple i
 * cdef int endian_detector = 1
 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)             # <<<<<<<<<<<<<<
 * cdef tuple fields
 * 
 */
/* Runtime endianness probe: the first byte of the int 1 is nonzero only
 * on a little-endian host. */
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "numpy.pxd":790
 * cdef tuple fields
 * 
 * for childname in descr.names:             # <<<<<<<<<<<<<<
 * fields = descr.fields[childname]
 * child, new_offset = fields
 */
if (unlikely(((PyObject *)__pyx_v_descr->names) == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 790; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_1 = ((PyObject *)__pyx_v_descr->names); __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
/* Iterate over descr.names (a tuple of field names); __pyx_t_2 is the
 * running tuple index. */
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++;
__Pyx_XDECREF(__pyx_v_childname);
__pyx_v_childname = __pyx_t_3;
__pyx_t_3 = 0;
/* "numpy.pxd":791
 * 
 * for childname in descr.names:
 * fields = descr.fields[childname]             # <<<<<<<<<<<<<<
 * child, new_offset = fields
 * 
 */
__pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (!__pyx_t_3) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 791; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_XDECREF(((PyObject *)__pyx_v_fields));
__pyx_v_fields = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "numpy.pxd":792
 * for childname in descr.names:
 * fields = descr.fields[childname]
 * child, new_offset = fields             # <<<<<<<<<<<<<<
 * 
 * if (end - f) - (new_offset - offset[0]) < 15:
 */
/* Unpack the (sub-dtype, byte-offset) pair for this field. */
if (likely(PyTuple_CheckExact(((PyObject *)__pyx_v_fields)))) {
PyObject* sequence = ((PyObject *)__pyx_v_fields);
if (unlikely(PyTuple_GET_SIZE(sequence) != 2)) {
if (PyTuple_GET_SIZE(sequence) > 2) __Pyx_RaiseTooManyValuesError(2);
else __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(sequence));
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
} else {
__Pyx_UnpackTupleError(((PyObject *)__pyx_v_fields), 2);
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 792; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_XDECREF(((PyObject *)__pyx_v_child));
__pyx_v_child = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_v_new_offset);
__pyx_v_new_offset = __pyx_t_4;
__pyx_t_4 = 0;
/* "numpy.pxd":794
 * child, new_offset = fields
 * 
 * if (end - f) - (new_offset - offset[0]) < 15:             # <<<<<<<<<<<<<<
 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
 * 
 */
/* Bounds check: the remaining buffer must hold the padding bytes plus the
 * longest single format code; the arithmetic is done on boxed Python ints
 * because new_offset is a Python object. */
__pyx_t_4 = PyInt_FromLong((__pyx_v_end - __pyx_v_f)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyNumber_Subtract(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = PyObject_RichCompare(__pyx_t_3, __pyx_int_15, Py_LT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
/* "numpy.pxd":795
 * 
 * if (end - f) - (new_offset - offset[0]) < 15:
 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")             # <<<<<<<<<<<<<<
 * 
 * if ((child.byteorder == '>' and little_endian) or
 */
__pyx_t_5 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_11), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
/* "numpy.pxd":797
 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
 * 
 * if ((child.byteorder == '>' and little_endian) or             # <<<<<<<<<<<<<<
 * (child.byteorder == '<' and not little_endian)):
 * raise ValueError(u"Non-native byte order not supported")
 */
/* Reject non-native byte order: big-endian data on a little-endian host,
 * or little-endian data on a big-endian host. */
__pyx_t_6 = (__pyx_v_child->byteorder == '>');
if (__pyx_t_6) {
__pyx_t_7 = __pyx_v_little_endian;
} else {
__pyx_t_7 = __pyx_t_6;
}
if (!__pyx_t_7) {
/* "numpy.pxd":798
 * 
 * if ((child.byteorder == '>' and little_endian) or
 * (child.byteorder == '<' and not little_endian)):             # <<<<<<<<<<<<<<
 * raise ValueError(u"Non-native byte order not supported")
 * # One could encode it in the format string and have Cython
 */
__pyx_t_6 = (__pyx_v_child->byteorder == '<');
if (__pyx_t_6) {
__pyx_t_8 = (!__pyx_v_little_endian);
__pyx_t_9 = __pyx_t_8;
} else {
__pyx_t_9 = __pyx_t_6;
}
__pyx_t_6 = __pyx_t_9;
} else {
__pyx_t_6 = __pyx_t_7;
}
if (__pyx_t_6) {
/* "numpy.pxd":799
 * if ((child.byteorder == '>' and little_endian) or
 * (child.byteorder == '<' and not little_endian)):
 * raise ValueError(u"Non-native byte order not supported")             # <<<<<<<<<<<<<<
 * # One could encode it in the format string and have Cython
 * # complain instead, BUT: < and > in format strings also imply
 */
__pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_k_tuple_12), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
/* "numpy.pxd":809
 * 
 * # Output padding bytes
 * while offset[0] < new_offset:             # <<<<<<<<<<<<<<
 * f[0] = 120 # "x"; pad byte
 * f += 1
 */
while (1) {
__pyx_t_5 = PyInt_FromLong((__pyx_v_offset[0])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_t_5, __pyx_v_new_offset, Py_LT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 809; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (!__pyx_t_6) break;
/* "numpy.pxd":810
 * # Output padding bytes
 * while offset[0] < new_offset:
 * f[0] = 120 # "x"; pad byte             # <<<<<<<<<<<<<<
 * f += 1
 * offset[0] += 1
 */
(__pyx_v_f[0]) = 120;
/* "numpy.pxd":811
 * while offset[0] < new_offset:
 * f[0] = 120 # "x"; pad byte
 * f += 1             # <<<<<<<<<<<<<<
 * offset[0] += 1
 * 
 */
__pyx_v_f = (__pyx_v_f + 1);
/* "numpy.pxd":812
 * f[0] = 120 # "x"; pad byte
 * f += 1
 * offset[0] += 1             # <<<<<<<<<<<<<<
 * 
 * offset[0] += child.itemsize
 */
__pyx_t_10 = 0;
(__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1);
}
/* "numpy.pxd":814
 * offset[0] += 1
 * 
 * offset[0] += child.itemsize             # <<<<<<<<<<<<<<
 * 
 * if not PyDataType_HASFIELDS(child):
 */
__pyx_t_10 = 0;
(__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize);
/* "numpy.pxd":816
 * offset[0] += child.itemsize
 * 
 * if not PyDataType_HASFIELDS(child):             # <<<<<<<<<<<<<<
 * t = child.type_num
 * if end - f < 5:
 */
__pyx_t_6 = (!PyDataType_HASFIELDS(__pyx_v_child));
if (__pyx_t_6) {
/* "numpy.pxd":817
 * 
 * if not PyDataType_HASFIELDS(child):
 * t = child.type_num             # <<<<<<<<<<<<<<
 * if end - f < 5:
 * raise RuntimeError(u"Format string allocated too short.")
 */
__pyx_t_3 = PyInt_FromLong(__pyx_v_child->type_num); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 817; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_v_t);
__pyx_v_t = __pyx_t_3;
__pyx_t_3 = 0;
/* "numpy.pxd":818
 * if not PyDataType_HASFIELDS(child):
 * t = child.type_num
 * if end - f < 5:             # <<<<<<<<<<<<<<
 * raise RuntimeError(u"Format string allocated too short.")
 * 
 */
__pyx_t_6 = ((__pyx_v_end - __pyx_v_f) < 5);
if (__pyx_t_6) {
/* "numpy.pxd":819
 * t = child.type_num
 * if end - f < 5:
 * raise RuntimeError(u"Format string allocated too short.")             # <<<<<<<<<<<<<<
 * 
 * # Until ticket #99 is fixed, use integers to avoid warnings
 */
__pyx_t_3 = PyObject_Call(__pyx_builtin_RuntimeError, ((PyObject *)__pyx_k_tuple_14), NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L10;
}
__pyx_L10:;
/* Scalar leaf: map the numpy type number to its buffer-format character
 * (ASCII codes written as integers).  Each `t == NPY_X` comparison is
 * performed at the Python level on boxed ints; a match writes the code
 * and jumps to __pyx_L11, after which f is advanced by one. */
/* "numpy.pxd":822
 * 
 * # Until ticket #99 is fixed, use integers to avoid warnings
 * if t == NPY_BYTE: f[0] = 98 #"b"             # <<<<<<<<<<<<<<
 * elif t == NPY_UBYTE: f[0] = 66 #"B"
 * elif t == NPY_SHORT: f[0] = 104 #"h"
 */
__pyx_t_3 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 822; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L11;
}
/* "numpy.pxd":823
 * # Until ticket #99 is fixed, use integers to avoid warnings
 * if t == NPY_BYTE: f[0] = 98 #"b"
 * elif t == NPY_UBYTE: f[0] = 66 #"B"             # <<<<<<<<<<<<<<
 * elif t == NPY_SHORT: f[0] = 104 #"h"
 * elif t == NPY_USHORT: f[0] = 72 #"H"
 */
__pyx_t_5 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L11;
}
/* "numpy.pxd":824
 * if t == NPY_BYTE: f[0] = 98 #"b"
 * elif t == NPY_UBYTE: f[0] = 66 #"B"
 * elif t == NPY_SHORT: f[0] = 104 #"h"             # <<<<<<<<<<<<<<
 * elif t == NPY_USHORT: f[0] = 72 #"H"
 * elif t == NPY_INT: f[0] = 105 #"i"
 */
__pyx_t_3 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 104;
goto __pyx_L11;
}
/* "numpy.pxd":825
 * elif t == NPY_UBYTE: f[0] = 66 #"B"
 * elif t == NPY_SHORT: f[0] = 104 #"h"
 * elif t == NPY_USHORT: f[0] = 72 #"H"             # <<<<<<<<<<<<<<
 * elif t == NPY_INT: f[0] = 105 #"i"
 * elif t == NPY_UINT: f[0] = 73 #"I"
 */
__pyx_t_5 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 825; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L11;
}
/* "numpy.pxd":826
 * elif t == NPY_SHORT: f[0] = 104 #"h"
 * elif t == NPY_USHORT: f[0] = 72 #"H"
 * elif t == NPY_INT: f[0] = 105 #"i"             # <<<<<<<<<<<<<<
 * elif t == NPY_UINT: f[0] = 73 #"I"
 * elif t == NPY_LONG: f[0] = 108 #"l"
 */
__pyx_t_3 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 105;
goto __pyx_L11;
}
/* "numpy.pxd":827
 * elif t == NPY_USHORT: f[0] = 72 #"H"
 * elif t == NPY_INT: f[0] = 105 #"i"
 * elif t == NPY_UINT: f[0] = 73 #"I"             # <<<<<<<<<<<<<<
 * elif t == NPY_LONG: f[0] = 108 #"l"
 * elif t == NPY_ULONG: f[0] = 76 #"L"
 */
__pyx_t_5 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L11;
}
/* "numpy.pxd":828
 * elif t == NPY_INT: f[0] = 105 #"i"
 * elif t == NPY_UINT: f[0] = 73 #"I"
 * elif t == NPY_LONG: f[0] = 108 #"l"             # <<<<<<<<<<<<<<
 * elif t == NPY_ULONG: f[0] = 76 #"L"
 * elif t == NPY_LONGLONG: f[0] = 113 #"q"
 */
__pyx_t_3 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 108;
goto __pyx_L11;
}
/* "numpy.pxd":829
 * elif t == NPY_UINT: f[0] = 73 #"I"
 * elif t == NPY_LONG: f[0] = 108 #"l"
 * elif t == NPY_ULONG: f[0] = 76 #"L"             # <<<<<<<<<<<<<<
 * elif t == NPY_LONGLONG: f[0] = 113 #"q"
 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
 */
__pyx_t_5 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L11;
}
/* "numpy.pxd":830
 * elif t == NPY_LONG: f[0] = 108 #"l"
 * elif t == NPY_ULONG: f[0] = 76 #"L"
 * elif t == NPY_LONGLONG: f[0] = 113 #"q"             # <<<<<<<<<<<<<<
 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
 * elif t == NPY_FLOAT: f[0] = 102 #"f"
 */
__pyx_t_3 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 113;
goto __pyx_L11;
}
/* "numpy.pxd":831
 * elif t == NPY_ULONG: f[0] = 76 #"L"
 * elif t == NPY_LONGLONG: f[0] = 113 #"q"
 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"             # <<<<<<<<<<<<<<
 * elif t == NPY_FLOAT: f[0] = 102 #"f"
 * elif t == NPY_DOUBLE: f[0] = 100 #"d"
 */
__pyx_t_5 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L11;
}
/* "numpy.pxd":832
 * elif t == NPY_LONGLONG: f[0] = 113 #"q"
 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
 * elif t == NPY_FLOAT: f[0] = 102 #"f"             # <<<<<<<<<<<<<<
 * elif t == NPY_DOUBLE: f[0] = 100 #"d"
 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
 */
__pyx_t_3 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 102;
goto __pyx_L11;
}
/* "numpy.pxd":833
 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
 * elif t == NPY_FLOAT: f[0] = 102 #"f"
 * elif t == NPY_DOUBLE: f[0] = 100 #"d"             # <<<<<<<<<<<<<<
 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
 */
__pyx_t_5 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 100;
goto __pyx_L11;
}
/* "numpy.pxd":834
 * elif t == NPY_FLOAT: f[0] = 102 #"f"
 * elif t == NPY_DOUBLE: f[0] = 100 #"d"
 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"             # <<<<<<<<<<<<<<
 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
 */
__pyx_t_3 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 103;
goto __pyx_L11;
}
/* Complex types emit two characters ("Zf"/"Zd"/"Zg"); f is bumped once
 * here and once more by the shared `f += 1` after the dispatch. */
/* "numpy.pxd":835
 * elif t == NPY_DOUBLE: f[0] = 100 #"d"
 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf             # <<<<<<<<<<<<<<
 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
 * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
 */
__pyx_t_5 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 102;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L11;
}
/* "numpy.pxd":836
 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd             # <<<<<<<<<<<<<<
 * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
 * elif t == NPY_OBJECT: f[0] = 79 #"O"
 */
__pyx_t_3 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 100;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L11;
}
/* "numpy.pxd":837
 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
 * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg             # <<<<<<<<<<<<<<
 * elif t == NPY_OBJECT: f[0] = 79 #"O"
 * else:
 */
__pyx_t_5 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_5, Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 103;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L11;
}
/* "numpy.pxd":838
 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
 * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
 * elif t == NPY_OBJECT: f[0] = 79 #"O"             # <<<<<<<<<<<<<<
 * else:
 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
 */
__pyx_t_3 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 79;
goto __pyx_L11;
}
/*else*/ {
/* "numpy.pxd":840
 * elif t == NPY_OBJECT: f[0] = 79 #"O"
 * else:
 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)             # <<<<<<<<<<<<<<
 * f += 1
 * else:
 */
__pyx_t_5 = PyNumber_Remainder(((PyObject *)__pyx_kp_u_9), __pyx_v_t); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_5));
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_3));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_t_5));
__Pyx_GIVEREF(((PyObject *)__pyx_t_5));
__pyx_t_5 = 0;
__pyx_t_5 = PyObject_Call(__pyx_builtin_ValueError, ((PyObject *)__pyx_t_3), NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L11:;
/* "numpy.pxd":841
 * else:
 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
 * f += 1             # <<<<<<<<<<<<<<
 * else:
 * # Cython ignores struct boundary information ("T{...}"),
 */
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L9;
}
/*else*/ {
/* "numpy.pxd":845
 * # Cython ignores struct boundary information ("T{...}"),
 * # so don't output it
 * f = _util_dtypestring(child, f, end, offset)             # <<<<<<<<<<<<<<
 * return f
 * 
 */
/* Nested structured field: recurse; NULL return propagates the error. */
__pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_11;
}
__pyx_L9:;
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "numpy.pxd":846
 * # so don't output it
 * f = _util_dtypestring(child, f, end, offset)
 * return f             # <<<<<<<<<<<<<<
 * 
 * 
 */
__pyx_r = __pyx_v_f;
goto __pyx_L0;
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "numpy.pxd":961
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
/* Install `base` as arr.base, managing reference counts (expanded from
 * numpy.pxd's set_array_base).  Py_None means "no base" and is stored as
 * NULL.  The reference on the new base is taken BEFORE the old one is
 * released, so the swap stays safe even when base is the current base. */
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
  PyObject *new_base;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("set_array_base");
  if (__pyx_v_base == Py_None) {
    new_base = NULL;                       /* None => no base object */
  } else {
    Py_INCREF(__pyx_v_base);               /* incref first (see note above) */
    new_base = ((PyObject *)__pyx_v_base);
  }
  Py_XDECREF(__pyx_v_arr->base);           /* drop the previous base, if any */
  __pyx_v_arr->base = new_base;
  __Pyx_RefNannyFinishContext();
}
/* "numpy.pxd":971
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/* Return arr.base as a new reference, or None when the array owns its
 * own data (base == NULL).  Expanded from numpy.pxd's get_array_base. */
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
  PyObject *__pyx_r = NULL;
  PyObject *result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("get_array_base");
  /* Select the object to hand back, then take the caller's reference. */
  result = (__pyx_v_arr->base != NULL) ? ((PyObject *)__pyx_v_arr->base) : Py_None;
  __Pyx_INCREF(result);
  __pyx_r = result;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Module-level method table.  Empty (sentinel only): this module's
 * functions are created with PyCFunction_NewEx and attached as module
 * attributes inside the init function instead. */
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
/* Python 3 module definition (Python 2 uses Py_InitModule4 instead). */
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
__Pyx_NAMESTR("dense_distances"),
0, /* m_doc */
-1, /* m_size: -1 = module keeps state in globals, no sub-interpreters */
__pyx_methods /* m_methods */,
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
/* Table of interned string constants used by the module; materialized
 * once by __Pyx_InitStrings() during module init.  Entry fields:
 * target slot, C literal, byte length, encoding flags, is_unicode,
 * is_str, intern flag. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0},
{&__pyx_kp_u_10, __pyx_k_10, sizeof(__pyx_k_10), 0, 1, 0, 0},
{&__pyx_kp_u_13, __pyx_k_13, sizeof(__pyx_k_13), 0, 1, 0, 0},
{&__pyx_kp_s_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 0, 1, 0},
{&__pyx_kp_u_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 1, 0, 0},
{&__pyx_kp_u_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 1, 0, 0},
{&__pyx_kp_u_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 1, 0, 0},
{&__pyx_kp_u_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 1, 0, 0},
{&__pyx_n_s__DTYPE, __pyx_k__DTYPE, sizeof(__pyx_k__DTYPE), 0, 0, 1, 1},
{&__pyx_n_s__RuntimeError, __pyx_k__RuntimeError, sizeof(__pyx_k__RuntimeError), 0, 0, 1, 1},
{&__pyx_n_s__ValueError, __pyx_k__ValueError, sizeof(__pyx_k__ValueError), 0, 0, 1, 1},
{&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1},
{&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1},
{&__pyx_n_s__dense_distances, __pyx_k__dense_distances, sizeof(__pyx_k__dense_distances), 0, 0, 1, 1},
{&__pyx_n_s__float, __pyx_k__float, sizeof(__pyx_k__float), 0, 0, 1, 1},
{&__pyx_n_s__gram_chisquare, __pyx_k__gram_chisquare, sizeof(__pyx_k__gram_chisquare), 0, 0, 1, 1},
{&__pyx_n_s__gram_euclidean, __pyx_k__gram_euclidean, sizeof(__pyx_k__gram_euclidean), 0, 0, 1, 1},
{&__pyx_n_s__m, __pyx_k__m, sizeof(__pyx_k__m), 0, 0, 1, 1},
{&__pyx_n_s__m1, __pyx_k__m1, sizeof(__pyx_k__m1), 0, 0, 1, 1},
{&__pyx_n_s__m2, __pyx_k__m2, sizeof(__pyx_k__m2), 0, 0, 1, 1},
{&__pyx_n_s__m2m_chisquare, __pyx_k__m2m_chisquare, sizeof(__pyx_k__m2m_chisquare), 0, 0, 1, 1},
{&__pyx_n_s__m2m_euclidean, __pyx_k__m2m_euclidean, sizeof(__pyx_k__m2m_euclidean), 0, 0, 1, 1},
{&__pyx_n_s__np, __pyx_k__np, sizeof(__pyx_k__np), 0, 0, 1, 1},
{&__pyx_n_s__numpy, __pyx_k__numpy, sizeof(__pyx_k__numpy), 0, 0, 1, 1},
{&__pyx_n_s__range, __pyx_k__range, sizeof(__pyx_k__range), 0, 0, 1, 1},
{&__pyx_n_s__v2m_chisquare, __pyx_k__v2m_chisquare, sizeof(__pyx_k__v2m_chisquare), 0, 0, 1, 1},
{&__pyx_n_s__v2m_euclidean, __pyx_k__v2m_euclidean, sizeof(__pyx_k__v2m_euclidean), 0, 0, 1, 1},
{&__pyx_n_s__v2v_chisquare, __pyx_k__v2v_chisquare, sizeof(__pyx_k__v2v_chisquare), 0, 0, 1, 1},
{&__pyx_n_s__v2v_euclidean, __pyx_k__v2v_euclidean, sizeof(__pyx_k__v2v_euclidean), 0, 0, 1, 1},
{&__pyx_n_s__x, __pyx_k__x, sizeof(__pyx_k__x), 0, 0, 1, 1},
{&__pyx_n_s__x1, __pyx_k__x1, sizeof(__pyx_k__x1), 0, 0, 1, 1},
{&__pyx_n_s__x2, __pyx_k__x2, sizeof(__pyx_k__x2), 0, 0, 1, 1},
{&__pyx_n_s__zeros, __pyx_k__zeros, sizeof(__pyx_k__zeros), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
/* Cache the builtins this module uses (range, ValueError, RuntimeError)
 * in module-level globals so lookups are done once at import time.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetName(__pyx_b, __pyx_n_s__range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_ValueError = __Pyx_GetName(__pyx_b, __pyx_n_s__ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_RuntimeError = __Pyx_GetName(__pyx_b, __pyx_n_s__RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
return 0;
__pyx_L1_error:;
return -1;
}
/* Build the constant argument tuples used by `raise ...` statements in
 * numpy.pxd's buffer-protocol code, caching them in module globals.
 * Each tuple holds one pre-interned message string from the string
 * table.  Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants");
/* "numpy.pxd":211
 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
 * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
 * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
 *
 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 */
__pyx_k_tuple_4 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 211; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_k_tuple_4));
__Pyx_INCREF(((PyObject *)__pyx_kp_u_3));
PyTuple_SET_ITEM(__pyx_k_tuple_4, 0, ((PyObject *)__pyx_kp_u_3));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_3));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_4));
/* "numpy.pxd":215
 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
 * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
 * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
 *
 * info.buf = PyArray_DATA(self)
 */
__pyx_k_tuple_6 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_k_tuple_6));
__Pyx_INCREF(((PyObject *)__pyx_kp_u_5));
PyTuple_SET_ITEM(__pyx_k_tuple_6, 0, ((PyObject *)__pyx_kp_u_5));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_5));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_6));
/* "numpy.pxd":253
 * if ((descr.byteorder == '>' and little_endian) or
 * (descr.byteorder == '<' and not little_endian)):
 * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
 * if t == NPY_BYTE: f = "b"
 * elif t == NPY_UBYTE: f = "B"
 */
__pyx_k_tuple_8 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 253; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_k_tuple_8));
__Pyx_INCREF(((PyObject *)__pyx_kp_u_7));
PyTuple_SET_ITEM(__pyx_k_tuple_8, 0, ((PyObject *)__pyx_kp_u_7));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_8));
/* "numpy.pxd":795
 *
 * if (end - f) - (new_offset - offset[0]) < 15:
 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
 *
 * if ((child.byteorder == '>' and little_endian) or
 */
__pyx_k_tuple_11 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_k_tuple_11));
__Pyx_INCREF(((PyObject *)__pyx_kp_u_10));
PyTuple_SET_ITEM(__pyx_k_tuple_11, 0, ((PyObject *)__pyx_kp_u_10));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_10));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_11));
/* "numpy.pxd":799
 * if ((child.byteorder == '>' and little_endian) or
 * (child.byteorder == '<' and not little_endian)):
 * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
 * # One could encode it in the format string and have Cython
 * # complain instead, BUT: < and > in format strings also imply
 */
__pyx_k_tuple_12 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_k_tuple_12));
__Pyx_INCREF(((PyObject *)__pyx_kp_u_7));
PyTuple_SET_ITEM(__pyx_k_tuple_12, 0, ((PyObject *)__pyx_kp_u_7));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_7));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_12));
/* "numpy.pxd":819
 * t = child.type_num
 * if end - f < 5:
 * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
 *
 * # Until ticket #99 is fixed, use integers to avoid warnings
 */
__pyx_k_tuple_14 = PyTuple_New(1); if (unlikely(!__pyx_k_tuple_14)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 819; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_k_tuple_14));
__Pyx_INCREF(((PyObject *)__pyx_kp_u_13));
PyTuple_SET_ITEM(__pyx_k_tuple_14, 0, ((PyObject *)__pyx_kp_u_13));
__Pyx_GIVEREF(((PyObject *)__pyx_kp_u_13));
__Pyx_GIVEREF(((PyObject *)__pyx_k_tuple_14));
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
/* One-time initialization of process-wide helpers: the NaN constant,
 * the GIL/thread machinery, the interned string table, and the cached
 * integer constant 15.  Returns 0 on success, -1 on failure. */
static int __Pyx_InitGlobals(void) {
/* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
 a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
 a quiet NaN. */
memset(&__PYX_NAN, 0xFF, sizeof(__PYX_NAN));
PyEval_InitThreads();
if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__pyx_int_15 = PyInt_FromLong(15); if (unlikely(!__pyx_int_15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
return 0;
__pyx_L1_error:;
return -1;
}
/* Module entry point, invoked by the interpreter on `import
 * dense_distances`.  Creates the module object, initializes cached
 * globals/builtins/constants, imports the numpy C types, and executes
 * the module-level Python code (the `import numpy`, `DTYPE = np.float`
 * and `def` statements from dense_distances.pyx).  On Python 2 the
 * function returns void; on Python 3 it returns the module or NULL. */
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initdense_distances(void); /*proto*/
PyMODINIT_FUNC initdense_distances(void)
#else
PyMODINIT_FUNC PyInit_dense_distances(void); /*proto*/
PyMODINIT_FUNC PyInit_dense_distances(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_dense_distances(void)");
if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#ifdef __pyx_binding_PyCFunctionType_USED
if (__pyx_binding_PyCFunctionType_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4(__Pyx_NAMESTR("dense_distances"), __pyx_methods, 0, 0, PYTHON_API_VERSION);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
#if PY_MAJOR_VERSION < 3
Py_INCREF(__pyx_m);
#endif
__pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME));
if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
/*--- Initialize various global constants etc. ---*/
if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_module_is_main_dense_distances) {
if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
}
/*--- Builtin init code ---*/
if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Constants init code ---*/
if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Global init code ---*/
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
/*--- Type import code ---*/
/* Import numpy's C-level type objects, checking struct sizes match */
__pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 857; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Variable import code ---*/
/*--- Function import code ---*/
/*--- Execution code ---*/
/* "dense_distances.pyx":4
 * #cython: boundscheck=False
 *
 * import numpy as np # <<<<<<<<<<<<<<
 * cimport numpy as np
 * cimport cython
 */
__pyx_t_1 = __Pyx_Import(((PyObject *)__pyx_n_s__numpy), 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "dense_distances.pyx":10
 *
 * # data type of numpy arrays (double precision)
 * DTYPE = np.float # <<<<<<<<<<<<<<
 * # corresponding compile-time type
 * ctypedef np.float_t DTYPE_t
 */
__pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__np); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetAttr(__pyx_t_1, __pyx_n_s__float); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__DTYPE, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 10; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "dense_distances.pyx":28
 *
 *
 * def v2v_euclidean(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2): # <<<<<<<<<<<<<<
 * cdef int d = len(x1)
 * cdef int _d = len(x2)
 */
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_v2v_euclidean, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__v2v_euclidean, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "dense_distances.pyx":39
 *
 *
 * def v2v_chisquare(np.ndarray[DTYPE_t, ndim=1] x1, np.ndarray[DTYPE_t, ndim=1] x2): # <<<<<<<<<<<<<<
 * cdef int d = len(x1)
 * cdef int _d = len(x2)
 */
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_1v2v_chisquare, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__v2v_chisquare, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "dense_distances.pyx":50
 *
 *
 * def v2m_euclidean(np.ndarray[DTYPE_t, ndim=1] x, np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<<
 * """ Euclidean distances between vector x and row vectors in m
 * """
 */
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_2v2m_euclidean, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__v2m_euclidean, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "dense_distances.pyx":66
 *
 *
 * def v2m_chisquare(np.ndarray[DTYPE_t, ndim=1] x, np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<<
 * """ Chisquare distances between vector x and row vectors in m
 * """
 */
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_3v2m_chisquare, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__v2m_chisquare, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "dense_distances.pyx":82
 *
 *
 * def m2m_euclidean(np.ndarray[DTYPE_t, ndim=2] m1, np.ndarray[DTYPE_t, ndim=2] m2): # <<<<<<<<<<<<<<
 * """ Parallelized Euclidean distances between row vectors in m1 and m2
 * """
 */
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_4m2m_euclidean, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__m2m_euclidean, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "dense_distances.pyx":101
 *
 *
 * def m2m_chisquare(np.ndarray[DTYPE_t, ndim=2] m1, np.ndarray[DTYPE_t, ndim=2] m2): # <<<<<<<<<<<<<<
 * """ Parallelized Chisquare distances between row vectors in m1 and m2
 * """
 */
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_5m2m_chisquare, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__m2m_chisquare, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "dense_distances.pyx":120
 *
 *
 * def gram_euclidean(np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<<
 * """ Parallelized Euclidean distances between all row vectors of m
 * """
 */
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_6gram_euclidean, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__gram_euclidean, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "dense_distances.pyx":137
 *
 *
 * def gram_chisquare(np.ndarray[DTYPE_t, ndim=2] m): # <<<<<<<<<<<<<<
 * """ Parallelized Chisquare distances between all row vectors of m
 * """
 */
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_15dense_distances_7gram_chisquare, NULL, __pyx_n_s__dense_distances); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyObject_SetAttr(__pyx_m, __pyx_n_s__gram_chisquare, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 137; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "dense_distances.pyx":1
 * #cython: cdivision=True # <<<<<<<<<<<<<<
 * #cython: boundscheck=False
 *
 */
/* create the (empty) __test__ dict for doctest support */
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(((PyObject *)__pyx_t_2));
if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_2)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
/* "numpy.pxd":971
 * arr.base = baseptr
 *
 * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
 * if arr.base is NULL:
 * return None
 */
goto __pyx_L0;
/* error path: release temporaries, report traceback, drop the module */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
if (__pyx_m) {
__Pyx_AddTraceback("init dense_distances", __pyx_clineno, __pyx_lineno, __pyx_filename);
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init dense_distances");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
/* Runtime support code */
#if CYTHON_REFNANNY
/* Import the refnanny debugging API from module `modname` via its
 * RefNannyAPI capsule attribute.  Returns the API struct pointer, or
 * NULL (with a Python exception set) when the module or attribute is
 * missing.  Only compiled in refnanny-instrumented builds. */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule((char *)modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif /* CYTHON_REFNANNY */
/* Look up `name` as an attribute of `dict` (a module/namespace object);
 * on failure, fall back to the builtins module __pyx_b after clearing
 * the first error.  Returns a new reference on success, or NULL with a
 * NameError set when neither lookup succeeds. */
static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
    PyObject *value = PyObject_GetAttr(dict, name);
    if (value)
        return value;
    if (dict != __pyx_b) {
        /* primary namespace missed: retry against builtins */
        PyErr_Clear();
        value = PyObject_GetAttr(__pyx_b, name);
        if (value)
            return value;
    }
    PyErr_SetObject(PyExc_NameError, name);
    return NULL;
}
/* Raise TypeError for a call with the wrong number of positional
 * arguments.  `exact` selects the "exactly N" wording; otherwise the
 * message says "at least num_min" or "at most num_max" depending on
 * whether too few or too many arguments were supplied. */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    int too_few = (num_found < num_min);
    Py_ssize_t num_expected = too_few ? num_min : num_max;
    const char *more_or_less;
    if (exact) {
        more_or_less = "exactly";
    } else {
        more_or_less = too_few ? "at least" : "at most";
    }
    PyErr_Format(PyExc_TypeError,
        "%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)",
        func_name, more_or_less, num_expected,
        (num_expected == 1) ? "" : "s", num_found);
}
/* Raise TypeError for a keyword argument that was also supplied
 * positionally.  The %U specifier (unicode object) is only available on
 * Python 3; Python 2 extracts the char* from the str object instead. */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AS_STRING(kw_name));
#endif
}
/* Distribute the entries of the `kwds` dict over the `values` slots
 * according to the expected argument names in `argnames` (NULL-
 * terminated array of interned name pointers).  Names at index >=
 * num_pos_args are keyword-capable; a match below that index means the
 * argument was already given positionally.  Unknown keywords go into
 * `kwds2` (the **kwargs dict) when provided, otherwise raise TypeError.
 * Returns 0 on success, -1 with an exception set on error. */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
/* fast path: interned names, so pointer identity usually suffices */
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
} else {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) {
#else
if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) {
#endif
goto invalid_keyword_type;
} else {
/* slow path: compare by content for non-identical string objects */
for (name = first_kw_arg; *name; name++) {
#if PY_MAJOR_VERSION >= 3
if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
PyUnicode_Compare(**name, key) == 0) break;
#else
if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
_PyString_Eq(**name, key)) break;
#endif
}
if (*name) {
values[name-argnames] = value;
} else {
/* unexpected keyword found */
for (name=argnames; name != first_kw_arg; name++) {
if (**name == key) goto arg_passed_twice;
#if PY_MAJOR_VERSION >= 3
if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice;
#else
if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
_PyString_Eq(**name, key)) goto arg_passed_twice;
#endif
}
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
}
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, **name);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%s() got an unexpected keyword argument '%s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* Check that argument `obj` (named `name` in error messages) is an
 * instance of `type` — exactly that type when `exact` is set, otherwise
 * any subclass — optionally accepting None.  Returns 1 when the check
 * passes, 0 with TypeError (or SystemError for a NULL type) set. */
static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
    const char *name, int exact)
{
    int matches;
    if (!type) {
        PyErr_Format(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (none_allowed && obj == Py_None)
        return 1;
    matches = exact ? (Py_TYPE(obj) == type)
                    : (PyObject_TypeCheck(obj, type) != 0);
    if (matches)
        return 1;
    PyErr_Format(PyExc_TypeError,
        "Argument '%s' has incorrect type (expected %s, got %s)",
        name, type->tp_name, Py_TYPE(obj)->tp_name);
    return 0;
}
/* Runtime endianness probe: store 1 in an unsigned int and inspect its
 * lowest-addressed byte.  Returns nonzero on little-endian hosts. */
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
    union {
        unsigned int whole;
        unsigned char bytes[sizeof(unsigned int)];
    } probe;
    probe.whole = 1;
    return probe.bytes[0] != 0;
}
/* Parser state for checking a PEP 3118 buffer format string against a
 * Cython type description tree. */
typedef struct {
__Pyx_StructField root;           /* synthetic field wrapping the whole dtype */
__Pyx_BufFmt_StackElem* head;     /* top of the struct-nesting stack */
size_t fmt_offset;                /* byte offset reached in the format string */
size_t new_count, enc_count;      /* pending repeat count / consumed count */
int is_complex;                   /* nonzero while inside a 'Z'-prefixed code */
char enc_type;                    /* type char currently being accumulated */
char new_packmode;                /* packing mode seen in the format string */
char enc_packmode;                /* packing mode of the accumulated chunk */
} __Pyx_BufFmt_Context;
/* Initialize a format-checking context over `stack` (caller-allocated)
 * for the expected buffer element type `type`.  Packing defaults to
 * native alignment ('@').  Single-field structs are unwrapped so the
 * head points at the innermost leaf type. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
/* descend through struct ('S') wrappers to the first leaf field */
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
/* Parse a decimal repeat count at *ts (struct-style buffer format
 * strings, e.g. the "16" in "16d"), advancing *ts past the digits
 * consumed.
 *
 * Returns the parsed non-negative count, or -1 when *ts does not start
 * with a digit; on failure *ts is left unchanged.
 */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        /* BUG FIX: the continuation test was `*t < '9'`, which rejects
         * the digit '9' anywhere after the first character — "19" parsed
         * as 1 and left "9" unconsumed, corrupting the rest of the
         * format string.  '9' must be accepted like any other digit. */
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
/* Report an unrecognized character in a buffer format string by setting
 * a ValueError; callers return an error sentinel afterwards. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
/* Return a human-readable description of a buffer format type code for
 * use in mismatch error messages.  `is_complex` selects the complex
 * variant for the floating-point codes 'f'/'d'/'g'; it is ignored for
 * every other code.  Unknown codes yield a generic description. */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    static const struct { char code; const char *desc; } plain[] = {
        {'b', "'char'"},
        {'B', "'unsigned char'"},
        {'h', "'short'"},
        {'H', "'unsigned short'"},
        {'i', "'int'"},
        {'I', "'unsigned int'"},
        {'l', "'long'"},
        {'L', "'unsigned long'"},
        {'q', "'long long'"},
        {'Q', "'unsigned long long'"},
        {'T', "a struct"},
        {'O', "Python object"},
        {'P', "a pointer"},
        {0,   "end"},
    };
    int i;
    /* floating-point codes depend on the complex flag */
    if (ch == 'f') return is_complex ? "'complex float'" : "'float'";
    if (ch == 'd') return is_complex ? "'complex double'" : "'double'";
    if (ch == 'g') return is_complex ? "'complex long double'" : "'long double'";
    for (i = 0; i < (int)(sizeof(plain) / sizeof(plain[0])); ++i) {
        if (plain[i].code == ch)
            return plain[i].desc;
    }
    return "unparseable format string";
}
/* Size in bytes of a type code under standard (machine-independent)
 * packing, as used when the format string selects '<', '>', '!' or '='
 * mode.  Complex floats occupy twice the scalar size.  Returns 0 with a
 * ValueError set for 'g' (no standard size) or unknown codes. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* Size in bytes of a type code under native ('@'/'^') packing, i.e. the
 * compiler's own sizeof for the corresponding C type.  Complex floats
 * occupy twice the scalar size.  Returns 0 with a ValueError set for
 * unknown codes. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
/* Alignment-probe structs: sizeof(probe) - sizeof(member) yields the
 * compiler's alignment requirement for each member type (classic
 * char-padding trick, used by __Pyx_BufFmt_TypeCharToAlignment). */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
/* Native alignment requirement (in bytes) for a type code, derived from
 * the __Pyx_st_* probe structs above.  Used only for '@' pack mode.
 * Returns 0 with a ValueError set for unknown codes. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* Classify a type code into a type group for dtype comparison:
 * 'I' signed integer, 'U' unsigned integer, 'R' real float, 'C' complex
 * float, 'O' Python object, 'P' pointer.  Returns 0 with a ValueError
 * set for unknown codes. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'h': case 'i': case 'l': case 'q': return 'I';
case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U';
case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R');
case 'O': return 'O';
case 'P': return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
/* Set a ValueError describing a buffer dtype mismatch at the parser's
 * current position.  Three cases: past the end of the expected type
 * (head == NULL), at the top-level type (head is the root), or inside a
 * named struct field (includes the parent.field path in the message). */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
/* Consume the accumulated run of `enc_count` elements of type char
 * `enc_type` from the format string, matching each element against the
 * expected field at the top of the struct stack and advancing
 * fmt_offset (with '@'-mode alignment padding).  Pushes/pops the stack
 * when entering/leaving nested structs.  Returns 0 on success, -1 with
 * a ValueError set on any size/group/offset mismatch. */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset;
if (ctx->enc_type == 0) return 0;
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
/* native mode: pad fmt_offset up to the type's natural alignment */
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
/* special case -- treat as struct rather than complex number */
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %"PY_FORMAT_SIZE_T"d but %"PY_FORMAT_SIZE_T"d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
--ctx->enc_count; /* Consume from buffer string */
/* Done checking, move to next field, pushing or popping struct stack if needed */
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break; /* breaks both loops as ctx->enc_count == 0 */
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue; /* empty struct */
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
/* Parse and validate a PEP-3118 buffer format string `ts` against the dtype
 * recorded in `ctx`.  Returns the position just past the parsed portion on
 * success, or NULL with a Python exception set on mismatch.  Recurses for
 * 'T{...}' substructs; '}' returns to the recursive caller. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
  int got_Z = 0; /* saw a 'Z' (complex) prefix for the next type code */
  while (1) {
    switch(*ts) {
      case 0:
        /* End of string: flush the pending type chunk and require that the
           struct stack unwound completely. */
        if (ctx->enc_type != 0 && ctx->head == NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        if (ctx->head != NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        return ts;
      case ' ':
      case 10:   /* LF */
      case 13:   /* CR */
        ++ts;
        break;
      case '<':
        if (!__Pyx_IsLittleEndian()) {
          PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '>':
      case '!':
        if (__Pyx_IsLittleEndian()) {
          PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '=':
      case '@':
      case '^':
        ctx->new_packmode = *ts++;
        break;
      case 'T': /* substruct */
        {
          const char* ts_after_sub;
          size_t i, struct_count = ctx->new_count;
          ctx->new_count = 1;
          ++ts;
          if (*ts != '{') {
            PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
            return NULL;
          }
          ++ts;
          ts_after_sub = ts;
          /* Re-parse the substruct body once per repeat count. */
          for (i = 0; i != struct_count; ++i) {
            ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
            if (!ts_after_sub) return NULL;
          }
          ts = ts_after_sub;
        }
        break;
      case '}': /* end of substruct; either repeat or move on */
        ++ts;
        return ts;
      case 'x':
        /* Pad bytes: flush the pending chunk, then just advance the offset. */
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->fmt_offset += ctx->new_count;
        ctx->new_count = 1;
        ctx->enc_count = 0;
        ctx->enc_type = 0;
        ctx->enc_packmode = ctx->new_packmode;
        ++ts;
        break;
      case 'Z':
        /* Complex prefix: only valid before a float type code. */
        got_Z = 1;
        ++ts;
        if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
          __Pyx_BufFmt_RaiseUnexpectedChar('Z');
          return NULL;
        } /* fall through */
      case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
      case 'l': case 'L': case 'q': case 'Q':
      case 'f': case 'd': case 'g':
      case 'O':
        if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
            ctx->enc_packmode == ctx->new_packmode) {
          /* Continue pooling same type */
          ctx->enc_count += ctx->new_count;
        } else {
          /* New type */
          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
          ctx->enc_count = ctx->new_count;
          ctx->enc_packmode = ctx->new_packmode;
          ctx->enc_type = *ts;
          ctx->is_complex = got_Z;
        }
        ++ts;
        ctx->new_count = 1;
        got_Z = 0;
        break;
      case ':':
        /* ':name:' field annotations are skipped entirely. */
        ++ts;
        while(*ts != ':') ++ts;
        ++ts;
        break;
      default:
        {
          /* A digit sequence sets the repeat count for the next type code. */
          int number = __Pyx_BufFmt_ParseNumber(&ts);
          if (number == -1) { /* First char was not a digit */
            PyErr_Format(PyExc_ValueError,
                         "Does not understand character buffer dtype format string ('%c')", *ts);
            return NULL;
          }
          ctx->new_count = (size_t)number;
        }
    }
  }
}
/* Reset a Py_buffer to a harmless empty state: no data pointer, no owning
 * object, and shape/stride/suboffset arrays pointing at shared static dummies
 * so consumers can index them without NULL checks. */
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
  buf->obj = NULL;
  buf->buf = NULL;
  buf->shape = __Pyx_zeros;
  buf->strides = __Pyx_zeros;
  buf->suboffsets = __Pyx_minusones;
}
/* Acquire a buffer view from `obj` and validate it against `dtype`:
 * dimensionality must equal `nd`, the format string must match (skipped when
 * `cast` is non-zero), and the itemsize must equal dtype->size.
 * On success fills `buf` and returns 0; on failure zeroes `buf`, leaves a
 * Python exception set, and returns -1.  Py_None/NULL yield an empty
 * (zeroed) buffer and success. */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) {
  if (obj == Py_None || obj == NULL) {
    __Pyx_ZeroBuffer(buf);
    return 0;
  }
  buf->buf = NULL;
  if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
  if (buf->ndim != nd) {
    PyErr_Format(PyExc_ValueError,
                 "Buffer has wrong number of dimensions (expected %d, got %d)",
                 nd, buf->ndim);
    goto fail;
  }
  if (!cast) {
    /* Full format-string check against the expected dtype layout. */
    __Pyx_BufFmt_Context ctx;
    __Pyx_BufFmt_Init(&ctx, stack, dtype);
    if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
  }
  /* NOTE(review): the (unsigned) cast narrows Py_ssize_t on LP64; harmless
   * for any realistic itemsize, but confirm if huge itemsizes are possible. */
  if ((unsigned)buf->itemsize != dtype->size) {
    PyErr_Format(PyExc_ValueError,
      "Item size of buffer (%"PY_FORMAT_SIZE_T"d byte%s) does not match size of '%s' (%"PY_FORMAT_SIZE_T"d byte%s)",
      buf->itemsize, (buf->itemsize > 1) ? "s" : "",
      dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
    goto fail;
  }
  /* Substitute the shared dummy so consumers may index suboffsets blindly;
   * __Pyx_SafeReleaseBuffer undoes this. */
  if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
  return 0;
fail:;
  __Pyx_ZeroBuffer(buf);
  return -1;
}
/* Release a buffer previously filled by __Pyx_GetBufferAndValidate.
 * A NULL data pointer means the buffer was never acquired (or was zeroed),
 * so there is nothing to release. */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
  if (info->buf == NULL)
    return;
  /* Undo the dummy suboffsets installed at acquisition time. */
  if (info->suboffsets == __Pyx_minusones)
    info->suboffsets = NULL;
  __Pyx_ReleaseBuffer(info);
}
/* Install (type, value, tb) as the thread's current exception, stealing all
 * three references, and drop whatever exception was previously set. */
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
  PyThreadState *tstate = PyThreadState_GET();
  PyObject *old_type = tstate->curexc_type;
  PyObject *old_value = tstate->curexc_value;
  PyObject *old_tb = tstate->curexc_traceback;
  tstate->curexc_type = type;
  tstate->curexc_value = value;
  tstate->curexc_traceback = tb;
  /* Release the old objects only after the new ones are installed. */
  Py_XDECREF(old_type);
  Py_XDECREF(old_value);
  Py_XDECREF(old_tb);
}
/* Move the thread's current exception into (*type, *value, *tb), clearing it
 * from the thread state.  Ownership of the references transfers to the
 * caller; any of the three slots may come back NULL. */
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
  PyThreadState *tstate = PyThreadState_GET();
  *type = tstate->curexc_type;
  *value = tstate->curexc_value;
  *tb = tstate->curexc_traceback;
  tstate->curexc_type = NULL;
  tstate->curexc_value = NULL;
  tstate->curexc_traceback = NULL;
}
/* Return 1 when `obj` is an instance of `type` (or a subtype); otherwise set
 * a TypeError (or SystemError for a missing type object) and return 0. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
  if (unlikely(!type)) {
    PyErr_Format(PyExc_SystemError, "Missing type object");
    return 0;
  }
  if (unlikely(!PyObject_TypeCheck(obj, type))) {
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
  }
  return 1;
}
#if PY_MAJOR_VERSION < 3
/* Python 2 implementation of `raise type, value, tb`.
 * Takes borrowed references, owns temporaries internally, and transfers
 * three references to __Pyx_ErrRestore on success.  `cause` is accepted only
 * for signature compatibility with the Python 3 variant. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
  /* cause is unused */
  Py_XINCREF(type);
  Py_XINCREF(value);
  Py_XINCREF(tb);
  /* First, check the traceback argument, replacing None with NULL. */
  if (tb == Py_None) {
    Py_DECREF(tb);
    tb = 0;
  }
  else if (tb != NULL && !PyTraceBack_Check(tb)) {
    PyErr_SetString(PyExc_TypeError,
      "raise: arg 3 must be a traceback or None");
    goto raise_error;
  }
  /* Next, replace a missing value with None */
  if (value == NULL) {
    value = Py_None;
    Py_INCREF(value);
  }
#if PY_VERSION_HEX < 0x02050000
  if (!PyClass_Check(type))
#else
  if (!PyType_Check(type))
#endif
  {
    /* Raising an instance. The value should be a dummy. */
    if (value != Py_None) {
      PyErr_SetString(PyExc_TypeError,
        "instance exception may not have a separate value");
      goto raise_error;
    }
    /* Normalize to raise <class>, <instance> */
    Py_DECREF(value);
    value = type;
#if PY_VERSION_HEX < 0x02050000
    if (PyInstance_Check(type)) {
      type = (PyObject*) ((PyInstanceObject*)type)->in_class;
      Py_INCREF(type);
    }
    else {
      type = 0;
      PyErr_SetString(PyExc_TypeError,
        "raise: exception must be an old-style class or instance");
      goto raise_error;
    }
#else
    type = (PyObject*) Py_TYPE(type);
    Py_INCREF(type);
    if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
      PyErr_SetString(PyExc_TypeError,
        "raise: exception class must be a subclass of BaseException");
      goto raise_error;
    }
#endif
  }
  /* __Pyx_ErrRestore steals our three references. */
  __Pyx_ErrRestore(type, value, tb);
  return;
raise_error:
  Py_XDECREF(value);
  Py_XDECREF(type);
  Py_XDECREF(tb);
  return;
}
#else /* Python 3+ */
/* Python 3 implementation of `raise type[, value[, tb]] [from cause]`.
 * All arguments are borrowed references.  Sets the thread's current
 * exception via PyErr_SetObject, then installs `tb` if given.
 *
 * Fixes over the previous revision:
 *  - PyObject_CallObject(type, NULL) can fail; the old code passed the NULL
 *    result straight into PyException_SetCause, crashing the interpreter.
 *  - an instance created here was never released (reference leak), since
 *    PyErr_SetObject takes its own references. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject *owned_instance = NULL;  /* instance we create locally; released on exit */
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        /* `raise instance`: value must not also be given. */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (!PyExceptionClass_Check(type)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
    if (cause) {
        PyObject *fixed_cause;
        if (PyExceptionClass_Check(cause)) {
            /* `from ExcClass`: instantiate it to get a concrete cause. */
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        }
        else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                "exception causes must derive from "
                "BaseException");
            goto bad;
        }
        if (!value) {
            /* BUGFIX: instantiation can fail; never hand NULL to
             * PyException_SetCause. */
            value = PyObject_CallObject(type, NULL);
            if (!value) {
                Py_DECREF(fixed_cause);
                goto bad;
            }
            owned_instance = value;  /* BUGFIX: was leaked before */
        }
        PyException_SetCause(value, fixed_cause);  /* steals fixed_cause */
    }
    PyErr_SetObject(type, value);  /* takes its own references */
    if (tb) {
        /* Attach the explicit traceback, replacing whatever SetObject left. */
        PyThreadState *tstate = PyThreadState_GET();
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}
#endif
/* Raise ValueError: the iterable yielded only `index` items during unpack. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %"PY_FORMAT_SIZE_T"d value%s to unpack",
                 index, (index == 1) ? "" : "s");
}
/* Raise ValueError: the iterable yielded more than `expected` items. */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %"PY_FORMAT_SIZE_T"d)", expected);
}
/* Raise TypeError: attempted to iterate/unpack None. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* Raise the appropriate unpacking error for tuple `t` when `index` items
 * were expected: None is not iterable, a short tuple needs more values,
 * anything else had too many. */
static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
    if (t == Py_None) {
        __Pyx_RaiseNoneNotIterableError();
        return;
    }
    if (PyTuple_GET_SIZE(t) < index) {
        __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t));
        return;
    }
    __Pyx_RaiseTooManyValuesError(index);
}
#if PY_MAJOR_VERSION < 3
/* Python 2 shim for buffer acquisition: prefer the new buffer protocol when
 * the runtime has it (>= 2.6); otherwise only numpy ndarrays are supported
 * via their generated __getbuffer__ implementation. */
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
  #if PY_VERSION_HEX >= 0x02060000
  if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
  #endif
  if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pf_5numpy_7ndarray___getbuffer__(obj, view, flags);
  else {
    PyErr_Format(PyExc_TypeError, "'%100s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
    return -1;
  }
}
/* Python 2 shim for buffer release, mirroring __Pyx_GetBuffer above.
 * Drops the owning reference held in view->obj and clears it. */
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
  PyObject* obj = view->obj;
  if (obj) {
    #if PY_VERSION_HEX >= 0x02060000
    if (PyObject_CheckBuffer(obj)) {PyBuffer_Release(view); return;}
    #endif
    if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pf_5numpy_7ndarray_1__releasebuffer__(obj, view);
    Py_DECREF(obj);
    view->obj = NULL;
  }
}
#endif
/* Call the builtin __import__(name, globals, {}, from_list[, level]).
 * Returns a new module reference, or NULL with an exception set.
 * Note: the `bad:` label is reached on success as well — it performs the
 * common cleanup of temporaries, and `module` is still NULL on failure. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) {
    PyObject *py_import = 0;
    PyObject *empty_list = 0;
    PyObject *module = 0;
    PyObject *global_dict = 0;
    PyObject *empty_dict = 0;
    PyObject *list;
    py_import = __Pyx_GetAttrString(__pyx_b, "__import__");
    if (!py_import)
        goto bad;
    if (from_list)
        list = from_list;
    else {
        /* __import__ requires a real (possibly empty) fromlist. */
        empty_list = PyList_New(0);
        if (!empty_list)
            goto bad;
        list = empty_list;
    }
    global_dict = PyModule_GetDict(__pyx_m);  /* borrowed reference */
    if (!global_dict)
        goto bad;
    empty_dict = PyDict_New();
    if (!empty_dict)
        goto bad;
    #if PY_VERSION_HEX >= 0x02050000
    {
        PyObject *py_level = PyInt_FromLong(level);
        if (!py_level)
            goto bad;
        module = PyObject_CallFunctionObjArgs(py_import,
            name, global_dict, empty_dict, list, py_level, NULL);
        Py_DECREF(py_level);
    }
    #else
    if (level>0) {
        PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4.");
        goto bad;
    }
    module = PyObject_CallFunctionObjArgs(py_import,
        name, global_dict, empty_dict, list, NULL);
    #endif
bad:
    Py_XDECREF(empty_list);
    Py_XDECREF(py_import);
    Py_XDECREF(empty_dict);
    return module;
}
/* Build a float complex from real/imag parts.  Three variants selected at
 * preprocessor time: C++ std::complex, C99 _Complex, or the struct fallback. */
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      return ::std::complex< float >(x, y);
    }
  #else
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      return x + y*(__pyx_t_float_complex)_Complex_I;
    }
  #endif
#else
    static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
      __pyx_t_float_complex z;
      z.real = x;
      z.imag = y;
      return z;
    }
#endif
#if CYTHON_CCOMPLEX
#else
/* Component-wise equality of two float-complex values. */
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    return a.real == b.real && a.imag == b.imag;
}
/* Component-wise sum a + b. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex res;
    res.real = a.real + b.real;
    res.imag = a.imag + b.imag;
    return res;
}
/* Component-wise difference a - b. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex res;
    res.real = a.real - b.real;
    res.imag = a.imag - b.imag;
    return res;
}
/* Complex product: (ar + i*ai)(br + i*bi) = (ar*br - ai*bi) + i(ar*bi + ai*br). */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex res;
    res.real = a.real * b.real - a.imag * b.imag;
    res.imag = a.real * b.imag + a.imag * b.real;
    return res;
}
/* Complex quotient a / b via the textbook formula.  No guard against a zero
 * divisor: b == 0 produces inf/nan, matching C division semantics. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    float d = b.real * b.real + b.imag * b.imag;
    __pyx_t_float_complex res;
    res.real = (a.real * b.real + a.imag * b.imag) / d;
    res.imag = (a.imag * b.real - a.real * b.imag) / d;
    return res;
}
/* Component-wise negation -a. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) {
    __pyx_t_float_complex res;
    res.real = -a.real;
    res.imag = -a.imag;
    return res;
}
/* True iff both components are exactly zero. */
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) {
    return a.real == 0 && a.imag == 0;
}
/* Complex conjugate: negate the imaginary part. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) {
    __pyx_t_float_complex res;
    res.real = a.real;
    res.imag = -a.imag;
    return res;
}
#if 1
/* Magnitude |z|.  Prefers hypotf when available (better intermediate
 * overflow/underflow behavior than sqrt of the sum of squares). */
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
    return sqrtf(z.real*z.real + z.imag*z.imag);
#else
    return hypotf(z.real, z.imag);
#endif
}
/* Complex power a**b for float complex.
 * Small integer exponents (0..4, and their negatives via inversion) use
 * repeated multiplication; everything else goes through the polar form
 * a = r*e^(i*theta), a**b = e^(b * ln a).
 * Note: a == 0 with a negative integer exponent divides by zero (inf/nan),
 * matching the original behavior. */
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
    __pyx_t_float_complex z;
    float r, lnr, theta, z_r, z_theta;
    if (b.imag == 0 && b.real == (int)b.real) {
        if (b.real < 0) {
            /* a**-n == (1/a)**n: invert a, negate the exponent. */
            float denom = a.real * a.real + a.imag * a.imag;
            a.real = a.real / denom;
            a.imag = -a.imag / denom;
            b.real = -b.real;
        }
        switch ((int)b.real) {
            case 0:
                z.real = 1;
                z.imag = 0;
                return z;
            case 1:
                return a;
            case 2:
                /* BUGFIX: the product was computed twice (z assigned, then the
                 * same __Pyx_c_prodf(a, a) recomputed for the return). */
                z = __Pyx_c_prodf(a, a);
                return z;
            case 3:
                z = __Pyx_c_prodf(a, a);
                return __Pyx_c_prodf(z, a);
            case 4:
                z = __Pyx_c_prodf(a, a);
                return __Pyx_c_prodf(z, z);
        }
    }
    /* General case in polar coordinates. */
    if (a.imag == 0) {
        if (a.real == 0) {
            return a;
        }
        r = a.real;
        theta = 0;
    } else {
        r = __Pyx_c_absf(a);
        theta = atan2f(a.imag, a.real);
    }
    lnr = logf(r);
    z_r = expf(lnr * b.real - theta * b.imag);
    z_theta = theta * b.real + lnr * b.imag;
    z.real = z_r * cosf(z_theta);
    z.imag = z_r * sinf(z_theta);
    return z;
}
#endif
#endif
/* Build a double complex from real/imag parts; same variant selection as the
 * float version above (C++ std::complex, C99 _Complex, struct fallback). */
#if CYTHON_CCOMPLEX
  #ifdef __cplusplus
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      return ::std::complex< double >(x, y);
    }
  #else
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      return x + y*(__pyx_t_double_complex)_Complex_I;
    }
  #endif
#else
    static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
      __pyx_t_double_complex z;
      z.real = x;
      z.imag = y;
      return z;
    }
#endif
#if CYTHON_CCOMPLEX
#else
/* Component-wise equality of two double-complex values. */
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    return a.real == b.real && a.imag == b.imag;
}
/* Component-wise sum a + b. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex res;
    res.real = a.real + b.real;
    res.imag = a.imag + b.imag;
    return res;
}
/* Component-wise difference a - b. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex res;
    res.real = a.real - b.real;
    res.imag = a.imag - b.imag;
    return res;
}
/* Complex product: (ar + i*ai)(br + i*bi) = (ar*br - ai*bi) + i(ar*bi + ai*br). */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex res;
    res.real = a.real * b.real - a.imag * b.imag;
    res.imag = a.real * b.imag + a.imag * b.real;
    return res;
}
/* Complex quotient a / b via the textbook formula.  No guard against a zero
 * divisor: b == 0 produces inf/nan, matching C division semantics. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    double d = b.real * b.real + b.imag * b.imag;
    __pyx_t_double_complex res;
    res.real = (a.real * b.real + a.imag * b.imag) / d;
    res.imag = (a.imag * b.real - a.real * b.imag) / d;
    return res;
}
/* Component-wise negation -a. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) {
    __pyx_t_double_complex res;
    res.real = -a.real;
    res.imag = -a.imag;
    return res;
}
/* True iff both components are exactly zero. */
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) {
    return a.real == 0 && a.imag == 0;
}
/* Complex conjugate: negate the imaginary part. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) {
    __pyx_t_double_complex res;
    res.real = a.real;
    res.imag = -a.imag;
    return res;
}
#if 1
/* Magnitude |z|.  Prefers hypot when available (better intermediate
 * overflow/underflow behavior than sqrt of the sum of squares). */
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
    return sqrt(z.real*z.real + z.imag*z.imag);
#else
    return hypot(z.real, z.imag);
#endif
}
/* Complex power a**b for double complex.
 * Small integer exponents (0..4, and their negatives via inversion) use
 * repeated multiplication; everything else goes through the polar form
 * a = r*e^(i*theta), a**b = e^(b * ln a).
 * Note: a == 0 with a negative integer exponent divides by zero (inf/nan),
 * matching the original behavior. */
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) {
    __pyx_t_double_complex z;
    double r, lnr, theta, z_r, z_theta;
    if (b.imag == 0 && b.real == (int)b.real) {
        if (b.real < 0) {
            /* a**-n == (1/a)**n: invert a, negate the exponent. */
            double denom = a.real * a.real + a.imag * a.imag;
            a.real = a.real / denom;
            a.imag = -a.imag / denom;
            b.real = -b.real;
        }
        switch ((int)b.real) {
            case 0:
                z.real = 1;
                z.imag = 0;
                return z;
            case 1:
                return a;
            case 2:
                /* BUGFIX: the product was computed twice (z assigned, then the
                 * same __Pyx_c_prod(a, a) recomputed for the return). */
                z = __Pyx_c_prod(a, a);
                return z;
            case 3:
                z = __Pyx_c_prod(a, a);
                return __Pyx_c_prod(z, a);
            case 4:
                z = __Pyx_c_prod(a, a);
                return __Pyx_c_prod(z, z);
        }
    }
    /* General case in polar coordinates. */
    if (a.imag == 0) {
        if (a.real == 0) {
            return a;
        }
        r = a.real;
        theta = 0;
    } else {
        r = __Pyx_c_abs(a);
        theta = atan2(a.imag, a.real);
    }
    lnr = log(r);
    z_r = exp(lnr * b.real - theta * b.imag);
    z_theta = theta * b.real + lnr * b.imag;
    z.real = z_r * cos(z_theta);
    z.imag = z_r * sin(z_theta);
    return z;
}
#endif
#endif
/* Convert a Python integer to unsigned char.  On range error raise
 * OverflowError and return (unsigned char)-1; callers disambiguate the
 * sentinel via PyErr_Occurred(). */
static CYTHON_INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) {
    const unsigned char neg_one = (unsigned char)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (sizeof(unsigned char) < sizeof(long)) {
        /* A long holds every unsigned char: convert wide, then range-check. */
        long v = __Pyx_PyInt_AsLong(x);
        if (unlikely(v != (long)(unsigned char)v)) {
            /* Don't mask an error already raised by __Pyx_PyInt_AsLong. */
            if (!unlikely(v == -1 && PyErr_Occurred())) {
                PyErr_SetString(PyExc_OverflowError,
                    (is_unsigned && unlikely(v < 0)) ?
                    "can't convert negative value to unsigned char" :
                    "value too large to convert to unsigned char");
            }
            return (unsigned char)-1;
        }
        return (unsigned char)v;
    }
    return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x);
}
/* Convert a Python integer to unsigned short.  On range error raise
 * OverflowError and return (unsigned short)-1; callers disambiguate the
 * sentinel via PyErr_Occurred(). */
static CYTHON_INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) {
    const unsigned short neg_one = (unsigned short)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (sizeof(unsigned short) < sizeof(long)) {
        /* A long holds every unsigned short: convert wide, then range-check. */
        long v = __Pyx_PyInt_AsLong(x);
        if (unlikely(v != (long)(unsigned short)v)) {
            /* Don't mask an error already raised by __Pyx_PyInt_AsLong. */
            if (!unlikely(v == -1 && PyErr_Occurred())) {
                PyErr_SetString(PyExc_OverflowError,
                    (is_unsigned && unlikely(v < 0)) ?
                    "can't convert negative value to unsigned short" :
                    "value too large to convert to unsigned short");
            }
            return (unsigned short)-1;
        }
        return (unsigned short)v;
    }
    return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x);
}
/* Convert a Python integer to unsigned int.  On range error raise
 * OverflowError and return (unsigned int)-1; callers disambiguate the
 * sentinel via PyErr_Occurred(). */
static CYTHON_INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) {
    const unsigned int neg_one = (unsigned int)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (sizeof(unsigned int) < sizeof(long)) {
        /* A long holds every unsigned int here: convert wide, then range-check. */
        long v = __Pyx_PyInt_AsLong(x);
        if (unlikely(v != (long)(unsigned int)v)) {
            /* Don't mask an error already raised by __Pyx_PyInt_AsLong. */
            if (!unlikely(v == -1 && PyErr_Occurred())) {
                PyErr_SetString(PyExc_OverflowError,
                    (is_unsigned && unlikely(v < 0)) ?
                    "can't convert negative value to unsigned int" :
                    "value too large to convert to unsigned int");
            }
            return (unsigned int)-1;
        }
        return (unsigned int)v;
    }
    return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x);
}
/* Convert a Python integer to char.  On range error raise OverflowError and
 * return (char)-1; callers disambiguate the sentinel via PyErr_Occurred(). */
static CYTHON_INLINE char __Pyx_PyInt_AsChar(PyObject* x) {
    const char neg_one = (char)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (sizeof(char) < sizeof(long)) {
        /* A long holds every char: convert wide, then range-check. */
        long v = __Pyx_PyInt_AsLong(x);
        if (unlikely(v != (long)(char)v)) {
            /* Don't mask an error already raised by __Pyx_PyInt_AsLong. */
            if (!unlikely(v == -1 && PyErr_Occurred())) {
                PyErr_SetString(PyExc_OverflowError,
                    (is_unsigned && unlikely(v < 0)) ?
                    "can't convert negative value to char" :
                    "value too large to convert to char");
            }
            return (char)-1;
        }
        return (char)v;
    }
    return (char)__Pyx_PyInt_AsLong(x);
}
/* Convert a Python integer to short.  On range error raise OverflowError and
 * return (short)-1; callers disambiguate the sentinel via PyErr_Occurred(). */
static CYTHON_INLINE short __Pyx_PyInt_AsShort(PyObject* x) {
    const short neg_one = (short)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (sizeof(short) < sizeof(long)) {
        /* A long holds every short: convert wide, then range-check. */
        long v = __Pyx_PyInt_AsLong(x);
        if (unlikely(v != (long)(short)v)) {
            /* Don't mask an error already raised by __Pyx_PyInt_AsLong. */
            if (!unlikely(v == -1 && PyErr_Occurred())) {
                PyErr_SetString(PyExc_OverflowError,
                    (is_unsigned && unlikely(v < 0)) ?
                    "can't convert negative value to short" :
                    "value too large to convert to short");
            }
            return (short)-1;
        }
        return (short)v;
    }
    return (short)__Pyx_PyInt_AsLong(x);
}
/* Convert a Python integer to int.  On range error raise OverflowError and
 * return (int)-1; callers disambiguate the sentinel via PyErr_Occurred(). */
static CYTHON_INLINE int __Pyx_PyInt_AsInt(PyObject* x) {
    const int neg_one = (int)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (sizeof(int) < sizeof(long)) {
        /* A long holds every int here: convert wide, then range-check. */
        long v = __Pyx_PyInt_AsLong(x);
        if (unlikely(v != (long)(int)v)) {
            /* Don't mask an error already raised by __Pyx_PyInt_AsLong. */
            if (!unlikely(v == -1 && PyErr_Occurred())) {
                PyErr_SetString(PyExc_OverflowError,
                    (is_unsigned && unlikely(v < 0)) ?
                    "can't convert negative value to int" :
                    "value too large to convert to int");
            }
            return (int)-1;
        }
        return (int)v;
    }
    return (int)__Pyx_PyInt_AsLong(x);
}
/* Convert a Python integer to signed char.  On range error raise
 * OverflowError and return (signed char)-1; callers disambiguate the
 * sentinel via PyErr_Occurred(). */
static CYTHON_INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) {
    const signed char neg_one = (signed char)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (sizeof(signed char) < sizeof(long)) {
        /* A long holds every signed char: convert wide, then range-check. */
        long v = __Pyx_PyInt_AsLong(x);
        if (unlikely(v != (long)(signed char)v)) {
            /* Don't mask an error already raised by __Pyx_PyInt_AsLong. */
            if (!unlikely(v == -1 && PyErr_Occurred())) {
                PyErr_SetString(PyExc_OverflowError,
                    (is_unsigned && unlikely(v < 0)) ?
                    "can't convert negative value to signed char" :
                    "value too large to convert to signed char");
            }
            return (signed char)-1;
        }
        return (signed char)v;
    }
    return (signed char)__Pyx_PyInt_AsSignedLong(x);
}
/* Convert a Python integer to signed short.  On range error raise
 * OverflowError and return (signed short)-1; callers disambiguate the
 * sentinel via PyErr_Occurred(). */
static CYTHON_INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) {
    const signed short neg_one = (signed short)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (sizeof(signed short) < sizeof(long)) {
        /* A long holds every signed short: convert wide, then range-check. */
        long v = __Pyx_PyInt_AsLong(x);
        if (unlikely(v != (long)(signed short)v)) {
            /* Don't mask an error already raised by __Pyx_PyInt_AsLong. */
            if (!unlikely(v == -1 && PyErr_Occurred())) {
                PyErr_SetString(PyExc_OverflowError,
                    (is_unsigned && unlikely(v < 0)) ?
                    "can't convert negative value to signed short" :
                    "value too large to convert to signed short");
            }
            return (signed short)-1;
        }
        return (signed short)v;
    }
    return (signed short)__Pyx_PyInt_AsSignedLong(x);
}
/* Convert a Python integer to signed int.  On range error raise
 * OverflowError and return (signed int)-1; callers disambiguate the
 * sentinel via PyErr_Occurred(). */
static CYTHON_INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) {
    const signed int neg_one = (signed int)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (sizeof(signed int) < sizeof(long)) {
        /* A long holds every signed int here: convert wide, then range-check. */
        long v = __Pyx_PyInt_AsLong(x);
        if (unlikely(v != (long)(signed int)v)) {
            /* Don't mask an error already raised by __Pyx_PyInt_AsLong. */
            if (!unlikely(v == -1 && PyErr_Occurred())) {
                PyErr_SetString(PyExc_OverflowError,
                    (is_unsigned && unlikely(v < 0)) ?
                    "can't convert negative value to signed int" :
                    "value too large to convert to signed int");
            }
            return (signed int)-1;
        }
        return (signed int)v;
    }
    return (signed int)__Pyx_PyInt_AsSignedLong(x);
}
/* NOTE(review): despite the name, this returns C `int` and mirrors
 * __Pyx_PyInt_AsInt exactly — there is no `long double` anywhere in the body.
 * Presumably a code-generator artifact; confirm against callers' expected
 * return type before changing the signature. */
static CYTHON_INLINE int __Pyx_PyInt_AsLongDouble(PyObject* x) {
    const int neg_one = (int)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (sizeof(int) < sizeof(long)) {
        long val = __Pyx_PyInt_AsLong(x);
        if (unlikely(val != (long)(int)val)) {
            if (!unlikely(val == -1 && PyErr_Occurred())) {
                PyErr_SetString(PyExc_OverflowError,
                    (is_unsigned && unlikely(val < 0)) ?
                    "can't convert negative value to int" :
                    "value too large to convert to int");
            }
            return (int)-1;
        }
        return (int)val;
    }
    return (int)__Pyx_PyInt_AsLong(x);
}
/* Convert a Python int/long (or any object coercible via __Pyx_PyNumber_Int)
 * to unsigned long.  On error raises (or propagates) and returns
 * (unsigned long)-1; callers disambiguate via PyErr_Occurred(). */
static CYTHON_INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
    const unsigned long neg_one = (unsigned long)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
    if (likely(PyInt_Check(x))) {
        long val = PyInt_AS_LONG(x);
        if (is_unsigned && unlikely(val < 0)) {
            PyErr_SetString(PyExc_OverflowError,
                            "can't convert negative value to unsigned long");
            return (unsigned long)-1;
        }
        return (unsigned long)val;
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Py_SIZE < 0 marks a negative PyLong. */
            if (unlikely(Py_SIZE(x) < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to unsigned long");
                return (unsigned long)-1;
            }
            return (unsigned long)PyLong_AsUnsignedLong(x);
        } else {
            return (unsigned long)PyLong_AsLong(x);
        }
    } else {
        /* Coerce to an int object, then retry once. */
        unsigned long val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (unsigned long)-1;
        val = __Pyx_PyInt_AsUnsignedLong(tmp);
        Py_DECREF(tmp);
        return val;
    }
}
/* Convert a Python int/long (or any object coercible via __Pyx_PyNumber_Int)
 * to unsigned PY_LONG_LONG.  On error raises (or propagates) and returns
 * (unsigned PY_LONG_LONG)-1; callers disambiguate via PyErr_Occurred(). */
static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) {
    const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
    if (likely(PyInt_Check(x))) {
        long val = PyInt_AS_LONG(x);
        if (is_unsigned && unlikely(val < 0)) {
            PyErr_SetString(PyExc_OverflowError,
                            "can't convert negative value to unsigned PY_LONG_LONG");
            return (unsigned PY_LONG_LONG)-1;
        }
        return (unsigned PY_LONG_LONG)val;
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Py_SIZE < 0 marks a negative PyLong. */
            if (unlikely(Py_SIZE(x) < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to unsigned PY_LONG_LONG");
                return (unsigned PY_LONG_LONG)-1;
            }
            return (unsigned PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
        } else {
            return (unsigned PY_LONG_LONG)PyLong_AsLongLong(x);
        }
    } else {
        /* Coerce to an int object, then retry once. */
        unsigned PY_LONG_LONG val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (unsigned PY_LONG_LONG)-1;
        val = __Pyx_PyInt_AsUnsignedLongLong(tmp);
        Py_DECREF(tmp);
        return val;
    }
}
/* Convert a Python int/long (or any object coercible via __Pyx_PyNumber_Int)
 * to long.  On error raises (or propagates) and returns (long)-1; callers
 * disambiguate via PyErr_Occurred(). */
static CYTHON_INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
    const long neg_one = (long)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
    if (likely(PyInt_Check(x))) {
        long val = PyInt_AS_LONG(x);
        if (is_unsigned && unlikely(val < 0)) {
            PyErr_SetString(PyExc_OverflowError,
                            "can't convert negative value to long");
            return (long)-1;
        }
        return (long)val;
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Py_SIZE < 0 marks a negative PyLong. */
            if (unlikely(Py_SIZE(x) < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to long");
                return (long)-1;
            }
            return (long)PyLong_AsUnsignedLong(x);
        } else {
            return (long)PyLong_AsLong(x);
        }
    } else {
        /* Coerce to an int object, then retry once. */
        long val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (long)-1;
        val = __Pyx_PyInt_AsLong(tmp);
        Py_DECREF(tmp);
        return val;
    }
}
/* Convert a Python int/long (or any object coercible via __Pyx_PyNumber_Int)
 * to PY_LONG_LONG.  On error raises (or propagates) and returns
 * (PY_LONG_LONG)-1; callers disambiguate via PyErr_Occurred(). */
static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
    const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
    if (likely(PyInt_Check(x))) {
        long val = PyInt_AS_LONG(x);
        if (is_unsigned && unlikely(val < 0)) {
            PyErr_SetString(PyExc_OverflowError,
                            "can't convert negative value to PY_LONG_LONG");
            return (PY_LONG_LONG)-1;
        }
        return (PY_LONG_LONG)val;
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Py_SIZE < 0 marks a negative PyLong. */
            if (unlikely(Py_SIZE(x) < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to PY_LONG_LONG");
                return (PY_LONG_LONG)-1;
            }
            return (PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
        } else {
            return (PY_LONG_LONG)PyLong_AsLongLong(x);
        }
    } else {
        /* Coerce to an int object, then retry once. */
        PY_LONG_LONG val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (PY_LONG_LONG)-1;
        val = __Pyx_PyInt_AsLongLong(tmp);
        Py_DECREF(tmp);
        return val;
    }
}
/* Convert a Python int/long (or any object coercible via __Pyx_PyNumber_Int)
 * to signed long.  On error raises (or propagates) and returns
 * (signed long)-1; callers disambiguate via PyErr_Occurred(). */
static CYTHON_INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
    const signed long neg_one = (signed long)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
    if (likely(PyInt_Check(x))) {
        long val = PyInt_AS_LONG(x);
        if (is_unsigned && unlikely(val < 0)) {
            PyErr_SetString(PyExc_OverflowError,
                            "can't convert negative value to signed long");
            return (signed long)-1;
        }
        return (signed long)val;
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Py_SIZE < 0 marks a negative PyLong. */
            if (unlikely(Py_SIZE(x) < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to signed long");
                return (signed long)-1;
            }
            return (signed long)PyLong_AsUnsignedLong(x);
        } else {
            return (signed long)PyLong_AsLong(x);
        }
    } else {
        /* Coerce to an int object, then retry once. */
        signed long val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (signed long)-1;
        val = __Pyx_PyInt_AsSignedLong(tmp);
        Py_DECREF(tmp);
        return val;
    }
}
/* Convert a Python int/long (or any object coercible via __Pyx_PyNumber_Int)
 * to signed PY_LONG_LONG.  On error raises (or propagates) and returns
 * (signed PY_LONG_LONG)-1; callers disambiguate via PyErr_Occurred(). */
static CYTHON_INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) {
    const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_VERSION_HEX < 0x03000000
    if (likely(PyInt_Check(x))) {
        long val = PyInt_AS_LONG(x);
        if (is_unsigned && unlikely(val < 0)) {
            PyErr_SetString(PyExc_OverflowError,
                            "can't convert negative value to signed PY_LONG_LONG");
            return (signed PY_LONG_LONG)-1;
        }
        return (signed PY_LONG_LONG)val;
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
            /* Py_SIZE < 0 marks a negative PyLong. */
            if (unlikely(Py_SIZE(x) < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to signed PY_LONG_LONG");
                return (signed PY_LONG_LONG)-1;
            }
            return (signed PY_LONG_LONG)PyLong_AsUnsignedLongLong(x);
        } else {
            return (signed PY_LONG_LONG)PyLong_AsLongLong(x);
        }
    } else {
        /* Coerce to an int object, then retry once. */
        signed PY_LONG_LONG val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (signed PY_LONG_LONG)-1;
        val = __Pyx_PyInt_AsSignedLongLong(tmp);
        Py_DECREF(tmp);
        return val;
    }
}
/* Emit a warning (not an error) when the Python version this module was
 * compiled against differs from the runtime Python in its major or first
 * minor digit.  Returns the warning call's result, or 0 on match.
 * NOTE(review): the 4-byte buffers truncate "3.10"+ to "3.1", so two-digit
 * minor versions compare ambiguously (e.g. 3.10 vs 3.11) — confirm the
 * supported version range before relying on this check. */
static int __Pyx_check_binary_version(void) {
    char ctversion[4], rtversion[4];
    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
    /* [0] = major digit, [2] = first minor digit (the '.' sits at [1]). */
    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
        char message[200];
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %s",
                      ctversion, __Pyx_MODULE_NAME, rtversion);
        #if PY_VERSION_HEX < 0x02050000
        return PyErr_Warn(NULL, message);
        #else
        return PyErr_WarnEx(NULL, message, 1);
        #endif
    }
    return 0;
}
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
/* Import `module_name.class_name` and verify it is a type object of the
 * expected basicsize.  `strict` demands an exact size match; otherwise a
 * larger runtime type only produces a warning (binary-compat heuristic).
 * Returns a new reference to the type, or NULL with an exception set. */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
    size_t size, int strict)
{
    PyObject *py_module = 0;
    PyObject *result = 0;
    PyObject *py_name = 0;
    char warning[200];
    py_module = __Pyx_ImportModule(module_name);
    if (!py_module)
        goto bad;
    #if PY_MAJOR_VERSION < 3
    py_name = PyString_FromString(class_name);
    #else
    py_name = PyUnicode_FromString(class_name);
    #endif
    if (!py_name)
        goto bad;
    result = PyObject_GetAttr(py_module, py_name);
    Py_DECREF(py_name);
    py_name = 0;
    Py_DECREF(py_module);
    py_module = 0;
    if (!result)
        goto bad;
    if (!PyType_Check(result)) {
        PyErr_Format(PyExc_TypeError,
            "%s.%s is not a type object",
            module_name, class_name);
        goto bad;
    }
    if (!strict && ((PyTypeObject *)result)->tp_basicsize > (Py_ssize_t)size) {
        /* Non-strict: a grown type is tolerated with a warning only. */
        PyOS_snprintf(warning, sizeof(warning),
            "%s.%s size changed, may indicate binary incompatibility",
            module_name, class_name);
        #if PY_VERSION_HEX < 0x02050000
        if (PyErr_Warn(NULL, warning) < 0) goto bad;
        #else
        if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
        #endif
    }
    else if (((PyTypeObject *)result)->tp_basicsize != (Py_ssize_t)size) {
        PyErr_Format(PyExc_ValueError,
            "%s.%s has the wrong size, try recompiling",
            module_name, class_name);
        goto bad;
    }
    return (PyTypeObject *)result;
bad:
    Py_XDECREF(py_module);
    Py_XDECREF(result);
    return NULL;
}
#endif
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
/* Import the module named `name` via PyImport_Import.
 * Returns a new module reference, or NULL with an exception set. */
static PyObject *__Pyx_ImportModule(const char *name) {
    PyObject *py_name = 0;
    PyObject *py_module = 0;
    #if PY_MAJOR_VERSION < 3
    py_name = PyString_FromString(name);
    #else
    py_name = PyUnicode_FromString(name);
    #endif
    if (!py_name)
        goto bad;
    py_module = PyImport_Import(py_name);
    Py_DECREF(py_name);
    return py_module;
bad:
    Py_XDECREF(py_name);
    return 0;
}
#endif
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
/* Append a synthetic frame (funcname at __pyx_filename:__pyx_lineno, plus the
 * C source location when __pyx_clineno is set) to the current traceback, so
 * exceptions raised from generated C report Cython source positions.
 * Best-effort: on any internal failure it just cleans up and returns. */
static void __Pyx_AddTraceback(const char *funcname, int __pyx_clineno,
                               int __pyx_lineno, const char *__pyx_filename) {
    PyObject *py_srcfile = 0;
    PyObject *py_funcname = 0;
    PyObject *py_globals = 0;
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    #if PY_MAJOR_VERSION < 3
    py_srcfile = PyString_FromString(__pyx_filename);
    #else
    py_srcfile = PyUnicode_FromString(__pyx_filename);
    #endif
    if (!py_srcfile) goto bad;
    if (__pyx_clineno) {
        /* Include the C file/line in the displayed function name. */
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
        #else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
        #endif
    }
    else {
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
        #else
        py_funcname = PyUnicode_FromString(funcname);
        #endif
    }
    if (!py_funcname) goto bad;
    py_globals = PyModule_GetDict(__pyx_m);  /* borrowed reference */
    if (!py_globals) goto bad;
    /* Build an empty code object that only carries filename/name/lineno. */
    py_code = PyCode_New(
        0,            /*int argcount,*/
        #if PY_MAJOR_VERSION >= 3
        0,            /*int kwonlyargcount,*/
        #endif
        0,            /*int nlocals,*/
        0,            /*int stacksize,*/
        0,            /*int flags,*/
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile,   /*PyObject *filename,*/
        py_funcname,  /*PyObject *name,*/
        __pyx_lineno, /*int firstlineno,*/
        __pyx_empty_bytes  /*PyObject *lnotab*/
    );
    if (!py_code) goto bad;
    py_frame = PyFrame_New(
        PyThreadState_GET(), /*PyThreadState *tstate,*/
        py_code,             /*PyCodeObject *code,*/
        py_globals,          /*PyObject *globals,*/
        0                    /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    py_frame->f_lineno = __pyx_lineno;
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
/* Construct/intern every Python string constant in the NULL-terminated
 * string table t, storing each resulting object through t->p.
 * Returns 0 on success, -1 as soon as any construction fails (with the
 * Python exception left set). */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
    while (t->p) {
        #if PY_MAJOR_VERSION < 3
        if (t->is_unicode) {
            /* t->n counts the terminating NUL, hence the n - 1 length. */
            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
        } else if (t->intern) {
            *t->p = PyString_InternFromString(t->s);
        } else {
            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
        }
        #else  /* Python 3+ has unicode identifiers */
        if (t->is_unicode | t->is_str) {
            if (t->intern) {
                *t->p = PyUnicode_InternFromString(t->s);
            } else if (t->encoding) {
                /* Non-UTF-8 literal: decode with the recorded codec name. */
                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
            } else {
                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
            }
        } else {
            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
        }
        #endif
        if (!*t->p)
            return -1;  /* earlier entries stay allocated; module init aborts */
        ++t;
    }
    return 0;
}
/* Type Conversion Functions */
/* Truth test with a fast path for the three singletons whose truth value
   is known statically; otherwise defer to the full protocol (which may
   invoke __bool__/__len__ and can fail with -1). */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    if (x == Py_True)
        return 1;
    if (x == Py_False || x == Py_None)
        return 0;
    return PyObject_IsTrue(x);
}
/* Coerce x to a Python integer, mirroring int(x):
 * already-integer inputs are returned with a new reference; otherwise the
 * type's nb_int (or, on Python 2, nb_long) slot is called and its result is
 * type-checked.  Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
  PyNumberMethods *m;
  const char *name = NULL;  /* which slot produced the result, for error text */
  PyObject *res = NULL;
#if PY_VERSION_HEX < 0x03000000
  if (PyInt_Check(x) || PyLong_Check(x))
#else
  if (PyLong_Check(x))
#endif
      /* Fast path: already an integer — just add a reference. */
      return Py_INCREF(x), x;
  m = Py_TYPE(x)->tp_as_number;
#if PY_VERSION_HEX < 0x03000000
  if (m && m->nb_int) {
    name = "int";
    res = PyNumber_Int(x);
  }
  else if (m && m->nb_long) {
    name = "long";
    res = PyNumber_Long(x);
  }
#else
  if (m && m->nb_int) {
    name = "int";
    res = PyNumber_Long(x);
  }
#endif
  if (res) {
    /* Defend against __int__/__long__ returning a non-integer. */
#if PY_VERSION_HEX < 0x03000000
    if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
    if (!PyLong_Check(res)) {
#endif
      PyErr_Format(PyExc_TypeError,
                   "__%s__ returned non-%s (type %.200s)",
                   name, name, Py_TYPE(res)->tp_name);
      Py_DECREF(res);
      return NULL;
    }
  }
  else if (!PyErr_Occurred()) {
    /* No conversion slot at all: raise a generic TypeError. */
    PyErr_SetString(PyExc_TypeError,
                    "an integer is required");
  }
  return res;
}
/* Convert b to Py_ssize_t via the __index__ protocol.
   Returns -1 on failure (caller disambiguates with PyErr_Occurred(),
   since -1 is also a valid value). */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
    Py_ssize_t result = -1;
    PyObject* idx = PyNumber_Index(b);
    if (idx) {
        result = PyInt_AsSsize_t(idx);
        Py_DECREF(idx);
    }
    return result;
}
/* Convert a C size_t into a Python integer object (new reference). */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
#if PY_VERSION_HEX < 0x02050000
   /* Python < 2.5 lacks PyInt_FromSize_t: values that fit a long go the
    * cheap route; larger ones are rebuilt from raw bytes. */
   if (ival <= LONG_MAX)
       return PyInt_FromLong((long)ival);
   else {
       unsigned char *bytes = (unsigned char *) &ival;
       /* Runtime endianness probe: first byte of int 1 is 1 iff little-endian. */
       int one = 1; int little = (int)*(unsigned char*)&one;
       return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
   }
#else
   return PyInt_FromSize_t(ival);
#endif
}
/* Convert a Python integer to size_t, routing through the widest unsigned
   type and then narrowing with an explicit range check.
   Returns (size_t)-1 on error with a Python exception set. */
static CYTHON_INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) {
    const unsigned PY_LONG_LONG wide = __Pyx_PyInt_AsUnsignedLongLong(x);
    if (unlikely(wide == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) {
        return (size_t)-1;  /* conversion itself already failed */
    }
    if (unlikely((unsigned PY_LONG_LONG)(size_t)wide != wide)) {
        /* Value survives the wide conversion but does not fit a size_t. */
        PyErr_SetString(PyExc_OverflowError,
                        "value too large to convert to size_t");
        return (size_t)-1;
    }
    return (size_t)wide;
}
#endif /* Py_PYTHON_H */
|
GB_unop__identity_fp32_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_uint16)
// op(A') function: GB (_unop_tran__identity_fp32_uint16)
// C type: float
// A type: uint16_t
// cast: float cij = (float) aij
// unaryop: cij = aij
// type of the entries of the input matrix A
#define GB_ATYPE \
uint16_t

// type of the entries of the output matrix C
#define GB_CTYPE \
float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]

// access the p-th entry of the output array
#define GB_CX(p) Cx [p]

// unary operator (identity: the cast does all the work)
#define GB_OP(z, x) \
z = x ;

// casting from the A type to the C type
#define GB_CAST(z, aij) \
float z = (float) aij ;

// cij = op (aij): load, cast, store in one step
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: uint16_t -> float requires a cast, so memcpy cannot be used)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator with a uint16 -> float
// typecast to every (present) entry of A.  Cx and Ax may be aliased because
// each iteration reads and writes only position p.
GrB_Info GB (_unop_apply__identity_fp32_uint16)
(
    float *Cx,                  // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;   // declared outside the loop for the OpenMP pragma
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                // entry present: cast and store
                uint16_t a_p = Ax [p] ;
                Cx [p] = (float) a_p ;
            }
        }
    }
    else
    {
        // dense/sparse case: every slot 0..anz-1 holds a value
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t a_p = Ax [p] ;
            Cx [p] = (float) a_p ;
        }
        #endif
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose, typecast, and apply the unary operator.
 * The kernel body is shared across all generated operators via textual
 * inclusion of GB_unop_transpose.c, which expands the GB_* macros defined
 * earlier in this file. */
GrB_Info GB (_unop_tran__identity_fp32_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
tmul.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <ParTI.h>
#include <stdlib.h>
#include "sptensor.h"
#include <string.h>
#include <limits.h>
#include <numa.h>
/** All combined:
* 0: COOY + SPA
* 1: COOY + HTA
* 2: HTY + SPA
* 3: HTY + HTA
* 4: HTY + HTA on HM
**/
int sptSparseTensorMulTensor(sptSparseTensor *Z, sptSparseTensor * const X, sptSparseTensor *const Y, sptIndex num_cmodes, sptIndex * cmodes_X, sptIndex * cmodes_Y, int tk, int output_sorting, int placement)
{
// Experiment modes
int experiment_modes;
sscanf(getenv("EXPERIMENT_MODES"), "%d", &experiment_modes);
//0: COOY + SPA
if(experiment_modes == 0){
int result;
/// The number of threads
sptIndex nmodes_X = X->nmodes;
sptIndex nmodes_Y = Y->nmodes;
sptTimer timer;
double total_time = 0;
sptNewTimer(&timer, 0);
if(num_cmodes >= X->nmodes) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns * SpTns", "shape mismatch");
}
for(sptIndex m = 0; m < num_cmodes; ++m) {
if(X->ndims[cmodes_X[m]] != Y->ndims[cmodes_Y[m]]) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns * SpTns", "shape mismatch");
}
}
sptStartTimer(timer);
/// Shuffle X indices and sort X as the order of free modes -> contract modes; mode_order also separate all the modes to free and contract modes separately.
sptIndex * mode_order_X = (sptIndex *)malloc(nmodes_X * sizeof(sptIndex));
sptIndex ci = nmodes_X - num_cmodes, fi = 0;
for(sptIndex m = 0; m < nmodes_X; ++m) {
if(sptInArray(cmodes_X, num_cmodes, m) == -1) {
mode_order_X[fi] = m;
++ fi;
}
}
sptAssert(fi == nmodes_X - num_cmodes);
/// Copy the contract modes while keeping the contraction mode order
for(sptIndex m = 0; m < num_cmodes; ++m) {
mode_order_X[ci] = cmodes_X[m];
++ ci;
}
sptAssert(ci == nmodes_X);
/// Shuffle tensor indices according to mode_order_X
sptSparseTensorShuffleModes(X, mode_order_X);
// printf("Permuted X:\n");
// sptAssert(sptDumpSparseTensor(X, 0, stdout) == 0);
for(sptIndex m = 0; m < nmodes_X; ++m) mode_order_X[m] = m; // reset mode_order
sptSparseTensorSortIndex(X, 1, tk);
sptStopTimer(timer);
double X_time = sptElapsedTime(timer);
total_time += X_time;
sptStartTimer(timer);
/// Shuffle Y indices and sort Y as the order of free modes -> contract modes
//sptAssert(sptDumpSparseTensor(Y, 0, stdout) == 0);
sptIndex * mode_order_Y = (sptIndex *)malloc(nmodes_Y * sizeof(sptIndex));
ci = 0;
fi = num_cmodes;
for(sptIndex m = 0; m < nmodes_Y; ++m) {
if(sptInArray(cmodes_Y, num_cmodes, m) == -1) { // m is not a contraction mode
mode_order_Y[fi] = m;
++ fi;
}
}
sptAssert(fi == nmodes_Y);
/// Copy the contract modes while keeping the contraction mode order
for(sptIndex m = 0; m < num_cmodes; ++m) {
mode_order_Y[ci] = cmodes_Y[m];
++ ci;
}
sptAssert(ci == num_cmodes);
/// Shuffle tensor indices according to mode_order_Y
sptSparseTensorShuffleModes(Y, mode_order_Y);
// printf("Permuted Y:\n");
for(sptIndex m = 0; m < nmodes_Y; ++m) mode_order_Y[m] = m; // reset mode_order
sptSparseTensorSortIndex(Y, 1, tk);
sptStopTimer(timer);
total_time += sptElapsedTime(timer);
printf("[Input Processing]: %.6f s\n", sptElapsedTime(timer) + X_time );
//printf("Sorted X:\n");
//sptAssert(sptDumpSparseTensor(X, 0, stdout) == 0);
//printf("Sorted Y:\n");
//sptAssert(sptDumpSparseTensor(Y, 0, stdout) == 0);
/// Set fidx_X: indexing the combined free indices and fidx_Y: indexing the combined contract indices
sptNnzIndexVector fidx_X, fidx_Y;
//sptStartTimer(timer);
/// Set indices for free modes, use X
sptSparseTensorSetIndices(X, mode_order_X, nmodes_X - num_cmodes, &fidx_X);
/// Set indices for contract modes, use Y
sptSparseTensorSetIndices(Y, mode_order_Y, num_cmodes, &fidx_Y);
//sptStopTimer(timer);
//sptPrintElapsedTime(timer, "Set fidx X,Y");
//sptPrintElapsedTime(timer, "Set fidx X");
//printf("fidx_X: \n");
//sptDumpNnzIndexVector(&fidx_X, stdout);
//printf("fidx_Y: \n");
//sptDumpNnzIndexVector(&fidx_Y, stdout);
free(mode_order_X);
free(mode_order_Y);
/// Allocate the output tensor
sptIndex nmodes_Z = nmodes_X + nmodes_Y - 2 * num_cmodes;
sptIndex *ndims_buf = malloc(nmodes_Z * sizeof *ndims_buf);
spt_CheckOSError(!ndims_buf, "CPU SpTns * SpTns");
for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
ndims_buf[m] = X->ndims[m];
}
for(sptIndex m = num_cmodes; m < nmodes_Y; ++m) {
ndims_buf[(m - num_cmodes) + nmodes_X - num_cmodes] = Y->ndims[m];
}
/// Each thread with a local Z_tmp
sptSparseTensor *Z_tmp = malloc(tk * sizeof (sptSparseTensor));
for (int i = 0; i < tk; i++){
result = sptNewSparseTensor(&(Z_tmp[i]), nmodes_Z, ndims_buf);
}
//free(ndims_buf);
spt_CheckError(result, "CPU SpTns * SpTns", NULL);
sptTimer timer_SPA;
double time_prep = 0;
double time_free_mode = 0;
double time_spa = 0;
double time_accumulate_z = 0;
sptNewTimer(&timer_SPA, 0);
sptStartTimer(timer);
// For the progress
int fx_counter = fidx_X.len;
#pragma omp parallel for schedule(static) num_threads(tk) shared(fidx_X, fidx_Y, nmodes_X, nmodes_Y, num_cmodes, Z_tmp, fx_counter)
for(sptNnzIndex fx_ptr = 0; fx_ptr < fidx_X.len - 1; ++fx_ptr) { // Loop fiber pointers of X
int tid = omp_get_thread_num();
//Print the progress
fx_counter--;
//if (fx_counter % 1 == 0) printf("Progress: %d\/%d\n", fx_counter, fidx_X.len);
sptNnzIndex fx_begin = fidx_X.data[fx_ptr];
sptNnzIndex fx_end = fidx_X.data[fx_ptr+1];
if (tid == 0){
sptStartTimer(timer_SPA);
}
/// Allocate the SPA buffer
sptIndex nmodes_spa = nmodes_Y - num_cmodes;
sptIndexVector * spa_inds = (sptIndexVector*)malloc(nmodes_spa * sizeof(sptIndexVector));
sptValueVector spa_vals;
for(sptIndex m = 0; m < nmodes_spa; ++m)
sptNewIndexVector(&spa_inds[m], 0, 0);
sptNewValueVector(&spa_vals, 0, 0);
/// Allocate a small index buffer
sptIndexVector inds_buf;
sptNewIndexVector(&inds_buf, (nmodes_Y - num_cmodes), (nmodes_Y - num_cmodes));
//printf("\nzX: [%lu, %lu]\n", fx_begin, fx_end);
if (tid == 0){
sptStopTimer(timer_SPA);
time_prep += sptElapsedTime(timer_SPA);
sptStartTimer(timer_SPA);
}
/// zX has common free indices
for(sptNnzIndex zX = fx_begin; zX < fx_end; ++ zX) { // Loop nnzs inside a X fiber
if (tid == 0) {
sptStartTimer(timer_SPA);
}
sptValue valX = X->values.data[zX];
sptIndexVector cmode_index_X;
sptNewIndexVector(&cmode_index_X, num_cmodes, num_cmodes);
for(sptIndex i = 0; i < num_cmodes; ++i){
cmode_index_X.data[i] = X->inds[nmodes_X - num_cmodes + i].data[zX];
//printf("\ncmode_index_X[%lu]: %lu", i, cmode_index_X[i]);
}
sptNnzIndex fy_begin = -1;
sptNnzIndex fy_end = -1;
unsigned int current_idx = 0;
for(sptIndex j = 0; j < fidx_Y.len; j++){
for(sptIndex i = 0; i< num_cmodes; i++){
if(cmode_index_X.data[i] != Y->inds[i].data[fidx_Y.data[j]]) break;
if(i == (num_cmodes - 1)){
fy_begin = fidx_Y.data[j];
fy_end = fidx_Y.data[j+1];
break;
}
//printf("\ni: %lu, current_idx: %lu, Y->inds[i].data[fidx_Y.data[current_idx]]: %lu\n", i, current_idx, Y->inds[i].data[fidx_Y.data[current_idx]]);
}
if (fy_begin != -1 || fy_end != -1) break;
}
if (tid == 0){
sptStopTimer(timer_SPA);
time_free_mode += sptElapsedTime(timer_SPA);
}
if (fy_begin == -1 || fy_end == -1) continue;
//printf("zX: %lu, valX: %.2f, cmode_index_X[0]: %u, zY: [%lu, %lu]\n", zX, valX, cmode_index_X.data[0], fy_begin, fy_end);
if (tid == 0){
sptStartTimer(timer_SPA);
}
/// zY has common contraction indices
char tmp[32];
char index_str[128];
long int tmp_key;
for(sptNnzIndex zY = fy_begin; zY < fy_end; ++ zY) { // Loop nnzs inside a Y fiber
for(sptIndex m = 0; m < nmodes_spa; ++m)
inds_buf.data[m] = Y->inds[m + num_cmodes].data[zY];
//printf("inds_buf:\n");
//sptDumpIndexVector(&inds_buf, stdout);
long int found = sptInIndexVector(spa_inds, nmodes_spa, spa_inds[0].len, &inds_buf);
if( found == -1) {
for(sptIndex m = 0; m < nmodes_spa; ++m)
sptAppendIndexVector(&spa_inds[m], Y->inds[m + num_cmodes].data[zY]);
sptAppendValueVector(&spa_vals, Y->values.data[zY] * valX);
} else {
spa_vals.data[found] += Y->values.data[zY] * valX;
}
} // End Loop nnzs inside a Y fiber
//printf("spa_inds:\n");
//for(sptIndex m = 0; m < nmodes_spa; ++m) {
// printf("[m%u]:\n", m);
// sptDumpIndexVector(&spa_inds[m], stdout);
//}
//printf("spa_vals:\n");
//sptDumpValueVector(&spa_vals, stdout);
if (tid == 0){
sptStopTimer(timer_SPA);
time_spa += sptElapsedTime(timer_SPA);
}
} // End Loop nnzs inside a X fiber
if (tid == 0){
sptStartTimer(timer_SPA);
}
/// Write back to Z
Z_tmp[tid].nnz += spa_vals.len;
for(sptIndex i = 0; i < spa_vals.len; ++i) {
for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
sptAppendIndexVector(&Z_tmp[tid].inds[m], X->inds[m].data[fx_begin]);
}
}
for(sptIndex m = 0; m < nmodes_spa; ++m)
sptAppendIndexVectorWithVector(&Z_tmp[tid].inds[m + (nmodes_X - num_cmodes)], &spa_inds[m]);
sptAppendValueVectorWithVector(&Z_tmp[tid].values, &spa_vals);
//printf("Z:\n");
//sptDumpSparseTensor(&Z_tmp[tid], 0, stdout);
/// Free SPA buffer
for(sptIndex m = 0; m < nmodes_spa; ++m){
sptFreeIndexVector(&(spa_inds[m]));
}
sptFreeValueVector(&spa_vals);
if (tid == 0){
sptStopTimer(timer_SPA);
time_accumulate_z += sptElapsedTime(timer_SPA);
}
} // End Loop fiber pointers of X
sptStopTimer(timer);
double main_computation = sptElapsedTime(timer);
total_time += main_computation;
double spa_total = time_prep + time_free_mode + time_spa + time_accumulate_z;
printf("[Index Search]: %.6f s\n", (time_free_mode + time_prep)/spa_total * main_computation);
printf("[Accumulation]: %.6f s\n", (time_spa + time_accumulate_z)/spa_total * main_computation);
sptStartTimer(timer);
/// Append Z_tmp to Z
//Calculate the indecies of Z
unsigned long long* Z_tmp_start = (unsigned long long*) malloc( (tk + 1) * sizeof(unsigned long long));
unsigned long long Z_total_size = 0;
Z_tmp_start[0] = 0;
for(int i = 0; i < tk; i++){
Z_tmp_start[i + 1] = Z_tmp[i].nnz + Z_tmp_start[i];
Z_total_size += Z_tmp[i].nnz;
}
result = sptNewSparseTensorWithSize(Z, nmodes_Z, ndims_buf, Z_total_size);
#pragma omp parallel for schedule(static) num_threads(tk) shared(Z, nmodes_Z, Z_tmp_start)
for(int i = 0; i < tk; i++){
int tid = omp_get_thread_num();
if(Z_tmp[tid].nnz > 0){
for(sptIndex m = 0; m < nmodes_Z; ++m)
sptAppendIndexVectorWithVectorStartFromNuma(&Z->inds[m], &Z_tmp[tid].inds[m], Z_tmp_start[tid]);
sptAppendValueVectorWithVectorStartFromNuma(&Z->values, &Z_tmp[tid].values, Z_tmp_start[tid]);
}
}
sptStopTimer(timer);
total_time += sptPrintElapsedTime(timer, "Writeback");
sptStartTimer(timer);
sptSparseTensorSortIndex(Z, 1, tk);
sptStopTimer(timer);
total_time += sptPrintElapsedTime(timer, "Output Sorting");
printf("[Total time]: %.6f s\n", total_time);
printf("\n");
}
//1: COOY + HTA
if(experiment_modes == 1){
int result;
/// The number of threads
sptIndex nmodes_X = X->nmodes;
sptIndex nmodes_Y = Y->nmodes;
sptTimer timer;
double total_time = 0;
sptNewTimer(&timer, 0);
if(num_cmodes >= X->nmodes) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns * SpTns", "shape mismatch");
}
for(sptIndex m = 0; m < num_cmodes; ++m) {
if(X->ndims[cmodes_X[m]] != Y->ndims[cmodes_Y[m]]) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns * SpTns", "shape mismatch");
}
}
sptStartTimer(timer);
/// Shuffle X indices and sort X as the order of free modes -> contract modes; mode_order also separate all the modes to free and contract modes separately.
sptIndex * mode_order_X = (sptIndex *)malloc(nmodes_X * sizeof(sptIndex));
sptIndex ci = nmodes_X - num_cmodes, fi = 0;
for(sptIndex m = 0; m < nmodes_X; ++m) {
if(sptInArray(cmodes_X, num_cmodes, m) == -1) {
mode_order_X[fi] = m;
++ fi;
}
}
sptAssert(fi == nmodes_X - num_cmodes);
/// Copy the contract modes while keeping the contraction mode order
for(sptIndex m = 0; m < num_cmodes; ++m) {
mode_order_X[ci] = cmodes_X[m];
++ ci;
}
sptAssert(ci == nmodes_X);
/// Shuffle tensor indices according to mode_order_X
sptSparseTensorShuffleModes(X, mode_order_X);
// printf("Permuted X:\n");
// sptAssert(sptDumpSparseTensor(X, 0, stdout) == 0);
for(sptIndex m = 0; m < nmodes_X; ++m) mode_order_X[m] = m; // reset mode_order
sptSparseTensorSortIndex(X, 1, tk);
sptStopTimer(timer);
double X_time = sptElapsedTime(timer);
total_time += X_time;
sptStartTimer(timer);
/// Shuffle Y indices and sort Y as the order of free modes -> contract modes
//sptAssert(sptDumpSparseTensor(Y, 0, stdout) == 0);
sptIndex * mode_order_Y = (sptIndex *)malloc(nmodes_Y * sizeof(sptIndex));
ci = 0;
fi = num_cmodes;
for(sptIndex m = 0; m < nmodes_Y; ++m) {
if(sptInArray(cmodes_Y, num_cmodes, m) == -1) { // m is not a contraction mode
mode_order_Y[fi] = m;
++ fi;
}
}
sptAssert(fi == nmodes_Y);
/// Copy the contract modes while keeping the contraction mode order
for(sptIndex m = 0; m < num_cmodes; ++m) {
mode_order_Y[ci] = cmodes_Y[m];
++ ci;
}
sptAssert(ci == num_cmodes);
/// Shuffle tensor indices according to mode_order_Y
sptSparseTensorShuffleModes(Y, mode_order_Y);
// printf("Permuted Y:\n");
for(sptIndex m = 0; m < nmodes_Y; ++m) mode_order_Y[m] = m; // reset mode_order
sptSparseTensorSortIndex(Y, 1, tk);
sptStopTimer(timer);
total_time += sptElapsedTime(timer);
printf("[Input Processing]: %.6f s\n", X_time + sptElapsedTime(timer));
//printf("Sorted X:\n");
//sptAssert(sptDumpSparseTensor(X, 0, stdout) == 0);
//printf("Sorted Y:\n");
//sptAssert(sptDumpSparseTensor(Y, 0, stdout) == 0);
/// Set fidx_X: indexing the combined free indices and fidx_Y: indexing the combined contract indices
sptNnzIndexVector fidx_X, fidx_Y;
//sptStartTimer(timer);
/// Set indices for free modes, use X
sptSparseTensorSetIndices(X, mode_order_X, nmodes_X - num_cmodes, &fidx_X);
/// Set indices for contract modes, use Y
sptSparseTensorSetIndices(Y, mode_order_Y, num_cmodes, &fidx_Y);
//sptStopTimer(timer);
//sptPrintElapsedTime(timer, "Set fidx X,Y");
//sptPrintElapsedTime(timer, "Set fidx X");
//printf("fidx_X: \n");
//sptDumpNnzIndexVector(&fidx_X, stdout);
//printf("fidx_Y: \n");
//sptDumpNnzIndexVector(&fidx_Y, stdout);
free(mode_order_X);
free(mode_order_Y);
/// Allocate the output tensor
sptIndex nmodes_Z = nmodes_X + nmodes_Y - 2 * num_cmodes;
sptIndex *ndims_buf = malloc(nmodes_Z * sizeof *ndims_buf);
spt_CheckOSError(!ndims_buf, "CPU SpTns * SpTns");
for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
ndims_buf[m] = X->ndims[m];
}
for(sptIndex m = num_cmodes; m < nmodes_Y; ++m) {
ndims_buf[(m - num_cmodes) + nmodes_X - num_cmodes] = Y->ndims[m];
}
/// Each thread with a local Z_tmp
sptSparseTensor *Z_tmp = malloc(tk * sizeof (sptSparseTensor));
for (int i = 0; i < tk; i++){
result = sptNewSparseTensor(&(Z_tmp[i]), nmodes_Z, ndims_buf);
}
//free(ndims_buf);
spt_CheckError(result, "CPU SpTns * SpTns", NULL);
sptTimer timer_SPA;
double time_prep = 0;
double time_free_mode = 0;
double time_spa = 0;
double time_accumulate_z = 0;
sptNewTimer(&timer_SPA, 0);
sptStartTimer(timer);
// For the progress
int fx_counter = fidx_X.len;
#pragma omp parallel for schedule(static) num_threads(tk) shared(fidx_X, fidx_Y, nmodes_X, nmodes_Y, num_cmodes, Z_tmp, fx_counter)
for(sptNnzIndex fx_ptr = 0; fx_ptr < fidx_X.len - 1; ++fx_ptr) { // Loop fiber pointers of X
int tid = omp_get_thread_num();
//Print the progress
fx_counter--;
//if (fx_counter % 1 == 0) printf("Progress: %d\/%d\n", fx_counter, fidx_X.len);
if (tid == 0){
sptStartTimer(timer_SPA);
}
sptNnzIndex fx_begin = fidx_X.data[fx_ptr];
sptNnzIndex fx_end = fidx_X.data[fx_ptr+1];
sptIndex nmodes_spa = nmodes_Y - num_cmodes;
long int nnz_counter = 0;
/// Calculate key range for hashtable
sptIndex* inds_buf = (sptIndex*)malloc((nmodes_spa + 1) * sizeof(sptIndex));
sptIndex current_idx = 0;
for(sptIndex i = 0; i < nmodes_spa + 1; i++) inds_buf[i] = 1;
for(sptIndex i = 0; i < nmodes_spa;i++){
for(sptIndex j = i; j < nmodes_spa;j++)
inds_buf[i] = inds_buf[i] * Y->ndims[j + num_cmodes];
}
/// Create a hashtable for SPAs
table_t *ht;
const unsigned int ht_size = 10000;
ht = htCreate(ht_size);
if (tid == 0){
sptStopTimer(timer_SPA);
time_prep += sptElapsedTime(timer_SPA);
}
/// zX has common free indices
for(sptNnzIndex zX = fx_begin; zX < fx_end; ++ zX) { // Loop nnzs inside a X fiber
if (tid == 0){
sptStartTimer(timer_SPA);
}
sptValue valX = X->values.data[zX];
sptIndexVector cmode_index_X;
sptNewIndexVector(&cmode_index_X, num_cmodes, num_cmodes);
for(sptIndex i = 0; i < num_cmodes; ++i){
cmode_index_X.data[i] = X->inds[nmodes_X - num_cmodes + i].data[zX];
//printf("\ncmode_index_X[%lu]: %lu", i, cmode_index_X[i]);
}
sptNnzIndex fy_begin = -1;
sptNnzIndex fy_end = -1;
for(sptIndex j = 0; j < fidx_Y.len; j++){
for(sptIndex i = 0; i< num_cmodes; i++){
if(cmode_index_X.data[i] != Y->inds[i].data[fidx_Y.data[j]]) break;
if(i == (num_cmodes - 1)){
fy_begin = fidx_Y.data[j];
fy_end = fidx_Y.data[j+1];
break;
}
//printf("\ni: %lu, current_idx: %lu, Y->inds[i].data[fidx_Y.data[current_idx]]: %lu\n", i, current_idx, Y->inds[i].data[fidx_Y.data[current_idx]]);
}
if (fy_begin != -1 || fy_end != -1) break;
}
if (tid == 0){
sptStopTimer(timer_SPA);
time_free_mode += sptElapsedTime(timer_SPA);
}
if (fy_begin == -1 || fy_end == -1) continue;
//printf("zX: %lu, valX: %.2f, cmode_index_X[0]: %u, zY: [%lu, %lu]\n", zX, valX, cmode_index_X.data[0], fy_begin, fy_end);
if (tid == 0) sptStartTimer(timer_SPA);
/// zY has common contraction indices
for(sptNnzIndex zY = fy_begin; zY < fy_end; ++ zY) { // Loop nnzs inside a Y fiber
long int tmp_key = 0;
for(sptIndex m = 0; m < nmodes_spa; ++m)
tmp_key += Y->inds[m + num_cmodes].data[zY] * inds_buf[m + 1];
sptValue val = htGet(ht, tmp_key);
if(val == LONG_MIN)
htInsert(ht, tmp_key, Y->values.data[zY] * valX);
else
htUpdate(ht, tmp_key, val + (Y->values.data[zY] * valX));
//printf("val: %f\n", val);
}
if (tid == 0){
sptStopTimer(timer_SPA);
time_spa += sptElapsedTime(timer_SPA);
}
} // End Loop nnzs inside a X fiber
if (tid == 0){
sptStartTimer(timer_SPA);
}
/// Write back to Z
for(int i = 0; i < ht->size; i++){
node_t *temp = ht->list[i];
while(temp){
long int idx_tmp = temp->key;
nnz_counter++;
for(sptIndex m = 0; m < nmodes_spa; ++m) {
//printf("idx_tmp: %lu, m: %d, (idx_tmp inds_buf[m])/inds_buf[m+1]): %d\n", idx_tmp, m, (idx_tmp%inds_buf[m])/inds_buf[m+1]);
sptAppendIndexVector(&Z_tmp[tid].inds[m + (nmodes_X - num_cmodes)], (idx_tmp%inds_buf[m])/inds_buf[m+1]);
}
//printf("val: %f\n", temp->val);
sptAppendValueVector(&Z_tmp[tid].values, temp->val);
node_t* pre = temp;
temp = temp->next;
free(pre);
}
}
Z_tmp[tid].nnz += nnz_counter;
for(sptIndex i = 0; i < nnz_counter; ++i) {
for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
sptAppendIndexVector(&Z_tmp[tid].inds[m], X->inds[m].data[fx_begin]);
}
}
// release spa hashtable
htFree(ht);
if (tid == 0){
sptStopTimer(timer_SPA);
time_accumulate_z += sptElapsedTime(timer_SPA);
}
} // End Loop fiber pointers of X
sptStopTimer(timer);
double main_computation = sptElapsedTime(timer);
total_time += main_computation;
double spa_total = time_prep + time_free_mode + time_spa + time_accumulate_z;
printf("[Index Search]: %.2f s\n", (time_free_mode + time_prep)/spa_total * main_computation);
printf("[Accumulation]: %.2f s\n", (time_spa + time_accumulate_z)/spa_total * main_computation);
sptStartTimer(timer);
/// Append Z_tmp to Z
//Calculate the indecies of Z
unsigned long long* Z_tmp_start = (unsigned long long*) malloc( (tk + 1) * sizeof(unsigned long long));
unsigned long long Z_total_size = 0;
Z_tmp_start[0] = 0;
for(int i = 0; i < tk; i++){
Z_tmp_start[i + 1] = Z_tmp[i].nnz + Z_tmp_start[i];
Z_total_size += Z_tmp[i].nnz;
}
result = sptNewSparseTensorWithSize(Z, nmodes_Z, ndims_buf, Z_total_size);
#pragma omp parallel for schedule(static) num_threads(tk) shared(Z, nmodes_Z, Z_tmp_start)
for(int i = 0; i < tk; i++){
int tid = omp_get_thread_num();
if(Z_tmp[tid].nnz > 0){
for(sptIndex m = 0; m < nmodes_Z; ++m)
sptAppendIndexVectorWithVectorStartFromNuma(&Z->inds[m], &Z_tmp[tid].inds[m], Z_tmp_start[tid]);
sptAppendValueVectorWithVectorStartFromNuma(&Z->values, &Z_tmp[tid].values, Z_tmp_start[tid]);
}
}
sptStopTimer(timer);
total_time += sptPrintElapsedTime(timer, "Writeback");
sptStartTimer(timer);
sptSparseTensorSortIndex(Z, 1, tk);
sptStopTimer(timer);
total_time += sptPrintElapsedTime(timer, "Output Sorting");
printf("[Total time]: %.6f s\n", total_time);
printf("\n");
}
//2: HTY + SPA
if(experiment_modes == 2){
int result;
/// The number of threads
sptIndex nmodes_X = X->nmodes;
sptIndex nmodes_Y = Y->nmodes;
sptTimer timer;
double total_time = 0;
sptNewTimer(&timer, 0);
if(num_cmodes >= X->nmodes) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns * SpTns", "shape mismatch");
}
for(sptIndex m = 0; m < num_cmodes; ++m) {
if(X->ndims[cmodes_X[m]] != Y->ndims[cmodes_Y[m]]) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns * SpTns", "shape mismatch");
}
}
sptStartTimer(timer);
/// Shuffle X indices and sort X as the order of free modes -> contract modes; mode_order also separate all the modes to free and contract modes separately.
sptIndex * mode_order_X = (sptIndex *)malloc(nmodes_X * sizeof(sptIndex));
sptIndex ci = nmodes_X - num_cmodes, fi = 0;
for(sptIndex m = 0; m < nmodes_X; ++m) {
if(sptInArray(cmodes_X, num_cmodes, m) == -1) {
mode_order_X[fi] = m;
++ fi;
}
}
sptAssert(fi == nmodes_X - num_cmodes);
/// Copy the contract modes while keeping the contraction mode order
for(sptIndex m = 0; m < num_cmodes; ++m) {
mode_order_X[ci] = cmodes_X[m];
++ ci;
}
sptAssert(ci == nmodes_X);
/// Shuffle tensor indices according to mode_order_X
sptSparseTensorShuffleModes(X, mode_order_X);
// printf("Permuted X:\n");
// sptAssert(sptDumpSparseTensor(X, 0, stdout) == 0);
for(sptIndex m = 0; m < nmodes_X; ++m) mode_order_X[m] = m; // reset mode_order
// sptSparseTensorSortIndexCmode(X, 1, 1, 1, 2);
sptSparseTensorSortIndex(X, 1, tk);
sptStopTimer(timer);
double X_time = sptElapsedTime(timer);
total_time += X_time;
sptStartTimer(timer);
//sptAssert(sptDumpSparseTensor(Y, 0, stdout) == 0);
sptIndex * mode_order_Y = (sptIndex *)malloc(nmodes_Y * sizeof(sptIndex));
ci = 0;
fi = num_cmodes;
for(sptIndex m = 0; m < nmodes_Y; ++m) {
if(sptInArray(cmodes_Y, num_cmodes, m) == -1) { // m is not a contraction mode
mode_order_Y[fi] = m;
++ fi;
}
}
/// Copy the contract modes while keeping the contraction mode order
for(sptIndex m = 0; m < num_cmodes; ++m) {
mode_order_Y[ci] = cmodes_Y[m];
++ ci;
}
/// Convert Y into a hashtable
/// Create a hashtable
table_t *Y_ht;
unsigned int Y_ht_size = Y->nnz;
Y_ht = tensor_htCreate(Y_ht_size);
// omp lock
omp_lock_t *locks = (omp_lock_t *)malloc(Y_ht_size*sizeof(omp_lock_t));
for(size_t i = 0; i < Y_ht_size; i++) omp_init_lock(&locks[i]);
/// Calculate key range for Y hashtable
sptIndex* Y_cmode_inds = (sptIndex*)malloc((num_cmodes + 1) * sizeof(sptIndex));
for(sptIndex i = 0; i < num_cmodes + 1; i++) Y_cmode_inds[i] = 1;
for(sptIndex i = 0; i < num_cmodes;i++){
for(sptIndex j = i; j < num_cmodes;j++)
Y_cmode_inds[i] = Y_cmode_inds[i] * Y->ndims[mode_order_Y[j]];
}
sptIndex Y_num_fmodes = nmodes_Y - num_cmodes;
sptIndex* Y_fmode_inds = (sptIndex*)malloc((Y_num_fmodes + 1) * sizeof(sptIndex));
for(sptIndex i = 0; i < Y_num_fmodes + 1; i++) Y_fmode_inds[i] = 1;
for(sptIndex i = 0; i < Y_num_fmodes;i++){
for(sptIndex j = i; j < Y_num_fmodes;j++)
Y_fmode_inds[i] = Y_fmode_inds[i] * Y->ndims[mode_order_Y[j + num_cmodes]];
}
sptNnzIndex Y_nnz = Y->nnz;
#pragma omp parallel for schedule(static) num_threads(tk) shared(Y_ht, Y_num_fmodes, mode_order_Y, num_cmodes, Y_cmode_inds, Y_fmode_inds)
for(sptNnzIndex i = 0; i < Y_nnz; i++){
/// Contract modes of Y
unsigned long long key_cmodes = 0;
for(sptIndex m = 0; m < num_cmodes; ++m)
key_cmodes += Y->inds[mode_order_Y[m]].data[i] * Y_cmode_inds[m + 1];
/// Free modes of Y
unsigned long long key_fmodes = 0;
for(sptIndex m = 0; m < Y_num_fmodes; ++m)
key_fmodes += Y->inds[mode_order_Y[m+num_cmodes]].data[i] * Y_fmode_inds[m + 1];
unsigned pos = tensor_htHashCode(key_cmodes);
omp_set_lock(&locks[pos]);
tensor_value Y_val = tensor_htGet(Y_ht, key_cmodes);
//printf("Y_val.len: %d\n", Y_val.len);
if(Y_val.len == 0) {
tensor_htInsert(Y_ht, key_cmodes, key_fmodes, Y->values.data[i]);
}
else {
tensor_htUpdate(Y_ht, key_cmodes, key_fmodes, Y->values.data[i]);
//for(int i = 0; i < Y_val.len; i++)
// printf("key_FM: %lu, Y_val: %f\n", Y_val.key_FM[i], Y_val.val[i]);
}
omp_unset_lock(&locks[pos]);
//sprintf("i: %d, key_cmodes: %lu, key_fmodes: %lu\n", i, key_cmodes, key_fmodes);
}
// Release omp lock
for(size_t i = 0; i < Y_ht_size; i++) omp_destroy_lock(&locks[i]);
sptStopTimer(timer);
total_time += sptElapsedTime(timer);
printf("[Input Processing]: %.6f s\n", sptElapsedTime(timer) + X_time );
/// Set fidx_X: indexing the combined free indices
sptNnzIndexVector fidx_X;
/// Set indices for free modes, use X
sptSparseTensorSetIndices(X, mode_order_X, nmodes_X - num_cmodes, &fidx_X);
//printf("fidx_X: \n");
//sptDumpNnzIndexVector(&fidx_X, stdout);
/// Allocate the output tensor
sptIndex nmodes_Z = nmodes_X + nmodes_Y - 2 * num_cmodes;
sptIndex *ndims_buf = malloc(nmodes_Z * sizeof *ndims_buf);
spt_CheckOSError(!ndims_buf, "CPU SpTns * SpTns");
for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
ndims_buf[m] = X->ndims[m];
}
/// For non-sorted Y
for(sptIndex m = num_cmodes; m < nmodes_Y; ++m) {
ndims_buf[(m - num_cmodes) + nmodes_X - num_cmodes] = Y->ndims[mode_order_Y[m]];
}
free(mode_order_X);
free(mode_order_Y);
/// Each thread with a local Z_tmp
sptSparseTensor *Z_tmp = malloc(tk * sizeof (sptSparseTensor));
for (int i = 0; i < tk; i++){
result = sptNewSparseTensor(&(Z_tmp[i]), nmodes_Z, ndims_buf);
}
//free(ndims_buf);
spt_CheckError(result, "CPU SpTns * SpTns", NULL);
sptTimer timer_SPA;
double time_prep = 0;
double time_free_mode = 0;
double time_spa = 0;
double time_accumulate_z = 0;
sptNewTimer(&timer_SPA, 0);
sptStartTimer(timer);
// For the progress
int fx_counter = fidx_X.len;
#pragma omp parallel for schedule(static) num_threads(tk) shared(fidx_X, nmodes_X, nmodes_Y, num_cmodes, Y_fmode_inds, Y_ht, Y_cmode_inds, fx_counter)
for(sptNnzIndex fx_ptr = 0; fx_ptr < fidx_X.len - 1; ++fx_ptr) { // Loop fiber pointers of X
int tid = omp_get_thread_num();
//Print the progress
fx_counter--;
//if (fx_counter % 100 == 0) printf("Progress: %d\/%d\n", fx_counter, fidx_X.len);
sptNnzIndex fx_begin = fidx_X.data[fx_ptr];
sptNnzIndex fx_end = fidx_X.data[fx_ptr+1];
if (tid == 0){
sptStartTimer(timer_SPA);
}
/// Allocate the SPA buffer
sptIndex nmodes_spa = nmodes_Y - num_cmodes;
sptIndexVector * spa_inds = (sptIndexVector*)malloc(nmodes_spa * sizeof(sptIndexVector));
sptValueVector spa_vals;
for(sptIndex m = 0; m < nmodes_spa; ++m)
sptNewIndexVector(&spa_inds[m], 0, 0);
sptNewValueVector(&spa_vals, 0, 0);
/// Allocate a small index buffer
sptIndexVector inds_buf;
sptNewIndexVector(&inds_buf, (nmodes_Y - num_cmodes), (nmodes_Y - num_cmodes));
//printf("\nzX: [%lu, %lu]\n", fx_begin, fx_end);
if (tid == 0){
sptStopTimer(timer_SPA);
time_prep += sptElapsedTime(timer_SPA);
}
/// zX has common free indices
for(sptNnzIndex zX = fx_begin; zX < fx_end; ++ zX) { // Loop nnzs inside a X fiber
if (tid == 0) {
sptStartTimer(timer_SPA);
}
sptValue valX = X->values.data[zX];
sptIndexVector cmode_index_X;
sptNewIndexVector(&cmode_index_X, num_cmodes, num_cmodes);
for(sptIndex i = 0; i < num_cmodes; ++i){
cmode_index_X.data[i] = X->inds[nmodes_X - num_cmodes + i].data[zX];
//printf("\ncmode_index_X[%lu]: %lu\n", i, cmode_index_X.data[i]);
}
unsigned long long key_cmodes = 0;
for(sptIndex m = 0; m < num_cmodes; ++m)
key_cmodes += cmode_index_X.data[m] * Y_cmode_inds[m + 1];
//printf("key_cmodes: %d\n", key_cmodes);
tensor_value Y_val = tensor_htGet(Y_ht, key_cmodes);
//printf("Y_val.len: %d\n", Y_val.len);
unsigned int my_len = Y_val.len;
if (tid == 0){
sptStopTimer(timer_SPA);
time_free_mode += sptElapsedTime(timer_SPA);
}
if(my_len == 0) continue;
if (tid == 0) sptStartTimer(timer_SPA);
for(int i = 0; i < my_len; i++){
unsigned long long fmode = Y_val.key_FM[i];
float result = Y_val.val[i] * valX;
for(sptIndex m = 0; m < nmodes_spa; ++m)
inds_buf.data[m] = (fmode%Y_fmode_inds[m])/Y_fmode_inds[m+1];
//printf("inds_buf:\n");
//sptDumpIndexVector(&inds_buf, stdout);
long int found = sptInIndexVector(spa_inds, nmodes_spa, spa_inds[0].len, &inds_buf);
if( found == -1) {
for(sptIndex m = 0; m < nmodes_spa; ++m)
sptAppendIndexVector(&spa_inds[m], (fmode%Y_fmode_inds[m])/Y_fmode_inds[m+1]);
sptAppendValueVector(&spa_vals, result);
} else {
spa_vals.data[found] += result;
}
}
if (tid == 0){
sptStopTimer(timer_SPA);
time_spa += sptElapsedTime(timer_SPA);
}
} // End Loop nnzs inside a X fiber
if (tid == 0) sptStartTimer(timer_SPA);
/// Write back to Z
Z_tmp[tid].nnz += spa_vals.len;
for(sptIndex i = 0; i < spa_vals.len; ++i) {
for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
sptAppendIndexVector(&Z_tmp[tid].inds[m], X->inds[m].data[fx_begin]);
}
}
for(sptIndex m = 0; m < nmodes_spa; ++m)
sptAppendIndexVectorWithVector(&Z_tmp[tid].inds[m + (nmodes_X - num_cmodes)], &spa_inds[m]);
sptAppendValueVectorWithVector(&Z_tmp[tid].values, &spa_vals);
//printf("Z:\n");
//sptDumpSparseTensor(&Z_tmp[tid], 0, stdout);
/// Free SPA buffer
for(sptIndex m = 0; m < nmodes_spa; ++m){
sptFreeIndexVector(&(spa_inds[m]));
}
sptFreeValueVector(&spa_vals);
if (tid == 0){
sptStopTimer(timer_SPA);
time_accumulate_z += sptElapsedTime(timer_SPA);
}
}
sptStopTimer(timer);
double main_computation = sptElapsedTime(timer);
total_time += main_computation;
double spa_total = time_prep + time_free_mode + time_spa + time_accumulate_z;
printf("[Index Search]: %.6f s\n", (time_free_mode + time_prep)/spa_total * main_computation);
printf("[Accumulation]: %.6f s\n", (time_spa + time_accumulate_z)/spa_total * main_computation);
sptStartTimer(timer);
/// Append Z_tmp to Z
//Calculate the indecies of Z
unsigned long long* Z_tmp_start = (unsigned long long*) malloc( (tk + 1) * sizeof(unsigned long long));
unsigned long long Z_total_size = 0;
Z_tmp_start[0] = 0;
for(int i = 0; i < tk; i++){
Z_tmp_start[i + 1] = Z_tmp[i].nnz + Z_tmp_start[i];
Z_total_size += Z_tmp[i].nnz;
//printf("Z_tmp_start[i + 1]: %lu, i: %d\n", Z_tmp_start[i + 1], i);
}
//printf("%d\n", Z_total_size);
result = sptNewSparseTensorWithSize(Z, nmodes_Z, ndims_buf, Z_total_size);
#pragma omp parallel for schedule(static) num_threads(tk) shared(Z, nmodes_Z, Z_tmp_start)
for(int i = 0; i < tk; i++){
int tid = omp_get_thread_num();
if(Z_tmp[tid].nnz > 0){
for(sptIndex m = 0; m < nmodes_Z; ++m)
sptAppendIndexVectorWithVectorStartFromNuma(&Z->inds[m], &Z_tmp[tid].inds[m], Z_tmp_start[tid]);
sptAppendValueVectorWithVectorStartFromNuma(&Z->values, &Z_tmp[tid].values, Z_tmp_start[tid]);
//sptDumpSparseTensor(&Z_tmp[tid], 0, stdout);
}
}
// for(int i = 0; i < tk; i++)
// sptFreeSparseTensor(&Z_tmp[i]);
sptStopTimer(timer);
total_time += sptPrintElapsedTime(timer, "Writeback");
sptStartTimer(timer);
sptSparseTensorSortIndex(Z, 1, tk);
sptStopTimer(timer);
total_time += sptPrintElapsedTime(timer, "Output Sorting");
printf("[Total time]: %.6f s\n", total_time);
printf("\n");
//sptFreeTimer(timer);
//sptFreeNnzIndexVector(&fidx_X);
return 0;
}
//3: HTY + HTA — hash Y by its contraction indices (HTY), then for every free
// fiber of X accumulate products in a per-fiber hashtable accumulator (HTA),
// finally merging the per-thread partial outputs into Z.
if(experiment_modes == 3){
    int result;
    sptIndex nmodes_X = X->nmodes;
    sptIndex nmodes_Y = Y->nmodes;
    sptTimer timer;
    double total_time = 0;
    sptNewTimer(&timer, 0);
    /* At least one free mode must remain in X after contraction. */
    if(num_cmodes >= X->nmodes) {
        spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns * SpTns", "shape mismatch");
    }
    /* Paired contraction modes of X and Y must have equal dimensions. */
    for(sptIndex m = 0; m < num_cmodes; ++m) {
        if(X->ndims[cmodes_X[m]] != Y->ndims[cmodes_Y[m]]) {
            spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns * SpTns", "shape mismatch");
        }
    }
    sptStartTimer(timer);
    /// Shuffle X indices and sort X as the order of free modes -> contract modes;
    /// mode_order_X separates all the modes into free and contract groups.
    sptIndex * mode_order_X = (sptIndex *)malloc(nmodes_X * sizeof(sptIndex));
    sptIndex ci = nmodes_X - num_cmodes, fi = 0;
    for(sptIndex m = 0; m < nmodes_X; ++m) {
        if(sptInArray(cmodes_X, num_cmodes, m) == -1) {
            mode_order_X[fi] = m;
            ++ fi;
        }
    }
    sptAssert(fi == nmodes_X - num_cmodes);
    /// Copy the contract modes while keeping the contraction mode order
    for(sptIndex m = 0; m < num_cmodes; ++m) {
        mode_order_X[ci] = cmodes_X[m];
        ++ ci;
    }
    sptAssert(ci == nmodes_X);
    /// Shuffle tensor indices according to mode_order_X, then sort so that
    /// nonzeros sharing the same free indices become contiguous fibers.
    sptSparseTensorShuffleModes(X, mode_order_X);
    for(sptIndex m = 0; m < nmodes_X; ++m) mode_order_X[m] = m; // reset mode_order
    sptSparseTensorSortIndex(X, 1, tk);
    sptStopTimer(timer);
    double X_time = sptElapsedTime(timer);
    total_time += X_time;
    sptStartTimer(timer);
    /// mode_order_Y: contraction modes first, then free modes (Y is not shuffled).
    sptIndex * mode_order_Y = (sptIndex *)malloc(nmodes_Y * sizeof(sptIndex));
    ci = 0;
    fi = num_cmodes;
    for(sptIndex m = 0; m < nmodes_Y; ++m) {
        if(sptInArray(cmodes_Y, num_cmodes, m) == -1) { // m is not a contraction mode
            mode_order_Y[fi] = m;
            ++ fi;
        }
    }
    /// Copy the contract modes while keeping the contraction mode order
    for(sptIndex m = 0; m < num_cmodes; ++m) {
        mode_order_Y[ci] = cmodes_Y[m];
        ++ ci;
    }
    /// Hash Y: key = linearized contraction indices; each entry holds a list of
    /// (linearized free indices, value) pairs for that contraction key.
    table_t *Y_ht;
    unsigned int Y_ht_size = Y->nnz;
    Y_ht = tensor_htCreate(Y_ht_size);
    /// One lock per hash bucket so threads can insert into Y_ht concurrently.
    omp_lock_t *locks = (omp_lock_t *)malloc(Y_ht_size*sizeof(omp_lock_t));
    for(size_t i = 0; i < Y_ht_size; i++) omp_init_lock(&locks[i]);
    /// Y_cmode_inds[i] = product of contraction-mode dims from position i on;
    /// used as mixed-radix weights to linearize contraction indices into a key.
    sptIndex* Y_cmode_inds = (sptIndex*)malloc((num_cmodes + 1) * sizeof(sptIndex));
    for(sptIndex i = 0; i < num_cmodes + 1; i++) Y_cmode_inds[i] = 1;
    for(sptIndex i = 0; i < num_cmodes;i++){
        for(sptIndex j = i; j < num_cmodes;j++)
            Y_cmode_inds[i] = Y_cmode_inds[i] * Y->ndims[mode_order_Y[j]];
    }
    sptIndex Y_num_fmodes = nmodes_Y - num_cmodes;
    /// Same mixed-radix weights for the free modes of Y.
    sptIndex* Y_fmode_inds = (sptIndex*)malloc((Y_num_fmodes + 1) * sizeof(sptIndex));
    for(sptIndex i = 0; i < Y_num_fmodes + 1; i++) Y_fmode_inds[i] = 1;
    for(sptIndex i = 0; i < Y_num_fmodes;i++){
        for(sptIndex j = i; j < Y_num_fmodes;j++)
            Y_fmode_inds[i] = Y_fmode_inds[i] * Y->ndims[mode_order_Y[j + num_cmodes]];
    }
    sptNnzIndex Y_nnz = Y->nnz;
    #pragma omp parallel for schedule(static) num_threads(tk) shared(Y_ht, Y_num_fmodes, mode_order_Y, num_cmodes, Y_cmode_inds, Y_fmode_inds)
    for(sptNnzIndex i = 0; i < Y_nnz; i++){
        /// Linearize the contraction indices of this Y nonzero.
        unsigned long long key_cmodes = 0;
        for(sptIndex m = 0; m < num_cmodes; ++m)
            key_cmodes += Y->inds[mode_order_Y[m]].data[i] * Y_cmode_inds[m + 1];
        /// Linearize its free indices.
        unsigned long long key_fmodes = 0;
        for(sptIndex m = 0; m < Y_num_fmodes; ++m)
            key_fmodes += Y->inds[mode_order_Y[m+num_cmodes]].data[i] * Y_fmode_inds[m + 1];
        unsigned pos = tensor_htHashCode(key_cmodes);
        /// Serialize updates to this bucket only.
        omp_set_lock(&locks[pos]);
        tensor_value Y_val = tensor_htGet(Y_ht, key_cmodes);
        if(Y_val.len == 0) {
            tensor_htInsert(Y_ht, key_cmodes, key_fmodes, Y->values.data[i]);
        }
        else {
            tensor_htUpdate(Y_ht, key_cmodes, key_fmodes, Y->values.data[i]);
        }
        omp_unset_lock(&locks[pos]);
    }
    for(size_t i = 0; i < Y_ht_size; i++) omp_destroy_lock(&locks[i]);
    free(locks); // fix: lock array was leaked
    sptStopTimer(timer);
    total_time += sptElapsedTime(timer);
    printf("[Input Processing]: %.6f s\n", sptElapsedTime(timer) + X_time);
    /// fidx_X: start offsets of each combined-free-index fiber of the sorted X.
    sptNnzIndexVector fidx_X;
    /// Set indices for free modes, use X
    sptSparseTensorSetIndices(X, mode_order_X, nmodes_X - num_cmodes, &fidx_X);
    /// Allocate the output tensor: free modes of X followed by free modes of Y.
    sptIndex nmodes_Z = nmodes_X + nmodes_Y - 2 * num_cmodes;
    sptIndex *ndims_buf = malloc(nmodes_Z * sizeof *ndims_buf);
    spt_CheckOSError(!ndims_buf, "CPU SpTns * SpTns");
    for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
        ndims_buf[m] = X->ndims[m];
    }
    /// For non-sorted Y
    for(sptIndex m = num_cmodes; m < nmodes_Y; ++m) {
        ndims_buf[(m - num_cmodes) + nmodes_X - num_cmodes] = Y->ndims[mode_order_Y[m]];
    }
    free(mode_order_X);
    free(mode_order_Y);
    /// Each thread accumulates into a private Z_tmp[tid]; merged into Z later.
    /// NOTE(review): Z_tmp's inner vectors are intentionally not freed (the
    /// free was disabled upstream); left as-is to avoid touching the NUMA
    /// writeback path.
    sptSparseTensor *Z_tmp = malloc(tk * sizeof (sptSparseTensor));
    for (int i = 0; i < tk; i++){
        result = sptNewSparseTensor(&(Z_tmp[i]), nmodes_Z, ndims_buf);
    }
    spt_CheckError(result, "CPU SpTns * SpTns", NULL);
    sptTimer timer_SPA;
    double time_prep = 0;
    double time_free_mode = 0;
    double time_spa = 0;
    double time_accumulate_z = 0;
    sptNewTimer(&timer_SPA, 0);
    sptStartTimer(timer);
    // For the progress
    int fx_counter = fidx_X.len;
    #pragma omp parallel for schedule(static) num_threads(tk) shared(fidx_X, nmodes_X, nmodes_Y, num_cmodes, Y_fmode_inds, Y_ht, Y_cmode_inds)
    for(sptNnzIndex fx_ptr = 0; fx_ptr < fidx_X.len - 1; ++fx_ptr) { // Loop fiber pointers of X
        int tid = omp_get_thread_num();
        fx_counter--; // progress counter only; the unsynchronized update is benign
        if (tid == 0){
            sptStartTimer(timer_SPA);
        }
        sptNnzIndex fx_begin = fidx_X.data[fx_ptr];
        sptNnzIndex fx_end = fidx_X.data[fx_ptr+1];
        /// hashtable size of the per-fiber sparse accumulator
        const unsigned int ht_size = 10000;
        sptIndex nmodes_spa = nmodes_Y - num_cmodes;
        long int nnz_counter = 0;
        /// SPA keyed by linearized Y free indices, valued by partial sums.
        table_t *ht;
        ht = htCreate(ht_size);
        if (tid == 0){
            sptStopTimer(timer_SPA);
            time_prep += sptElapsedTime(timer_SPA);
        }
        for(sptNnzIndex zX = fx_begin; zX < fx_end; ++ zX) { // Loop nnzs inside a X fiber
            sptValue valX = X->values.data[zX];
            if (tid == 0) {
                sptStartTimer(timer_SPA);
            }
            /// Gather this nonzero's contraction indices (the trailing modes of X).
            sptIndexVector cmode_index_X;
            sptNewIndexVector(&cmode_index_X, num_cmodes, num_cmodes);
            for(sptIndex i = 0; i < num_cmodes; ++i){
                cmode_index_X.data[i] = X->inds[nmodes_X - num_cmodes + i].data[zX];
            }
            unsigned long long key_cmodes = 0;
            for(sptIndex m = 0; m < num_cmodes; ++m)
                key_cmodes += cmode_index_X.data[m] * Y_cmode_inds[m + 1];
            sptFreeIndexVector(&cmode_index_X); // fix: was leaked once per nonzero
            tensor_value Y_val = tensor_htGet(Y_ht, key_cmodes);
            unsigned int my_len = Y_val.len;
            if (tid == 0){
                sptStopTimer(timer_SPA);
                time_free_mode += sptElapsedTime(timer_SPA);
            }
            if(my_len == 0) continue; // no Y nonzeros share this contraction key
            if (tid == 0) {
                sptStartTimer(timer_SPA);
            }
            for(unsigned int i = 0; i < my_len; i++){
                unsigned long long fmode = Y_val.key_FM[i];
                /// htGet reports "missing" via a LONG_MIN sentinel cast to sptValue.
                sptValue spa_val = htGet(ht, fmode);
                float result = Y_val.val[i] * valX;
                if(spa_val == LONG_MIN) {
                    htInsert(ht, fmode, result);
                    nnz_counter++;
                }
                else
                    htUpdate(ht, fmode, spa_val + result);
            }
            if (tid == 0){
                sptStopTimer(timer_SPA);
                time_spa += sptElapsedTime(timer_SPA);
            }
        } // End Loop nnzs inside a X fiber
        if (tid == 0) {
            sptStartTimer(timer_SPA);
        }
        /// Drain the SPA into Z_tmp[tid]: decode each linearized free-mode key
        /// back into per-mode indices and free the chain nodes as we go.
        for(int i = 0; i < ht->size; i++){
            node_t *temp = ht->list[i];
            while(temp){
                unsigned long long idx_tmp = temp->key;
                for(sptIndex m = 0; m < nmodes_spa; ++m) {
                    sptAppendIndexVector(&Z_tmp[tid].inds[m + (nmodes_X - num_cmodes)], (idx_tmp%Y_fmode_inds[m])/Y_fmode_inds[m+1]);
                }
                sptAppendValueVector(&Z_tmp[tid].values, temp->val);
                node_t* pre = temp;
                temp = temp->next;
                free(pre);
            }
        }
        Z_tmp[tid].nnz += nnz_counter;
        /// Replicate this fiber's X free-mode indices for every emitted nonzero.
        for(long int i = 0; i < nnz_counter; ++i) {
            for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
                sptAppendIndexVector(&Z_tmp[tid].inds[m], X->inds[m].data[fx_begin]);
            }
        }
        htFree(ht);
        if (tid == 0){
            sptStopTimer(timer_SPA);
            time_accumulate_z += sptElapsedTime(timer_SPA);
        }
    }
    sptStopTimer(timer);
    double main_computation = sptElapsedTime(timer);
    total_time += main_computation;
    /// Apportion the measured wall time across phases using the ratios
    /// observed on thread 0's private timer.
    double spa_total = time_prep + time_free_mode + time_spa + time_accumulate_z;
    printf("[Index Search]: %.6f s\n", (time_free_mode + time_prep)/spa_total * main_computation);
    printf("[Accumulation]: %.6f s\n", (time_spa + time_accumulate_z)/spa_total * main_computation);
    sptStartTimer(timer);
    /// Append Z_tmp to Z: exclusive prefix sum of per-thread nnz counts gives
    /// each thread's write offset into the preallocated Z.
    unsigned long long* Z_tmp_start = (unsigned long long*) malloc( (tk + 1) * sizeof(unsigned long long));
    unsigned long long Z_total_size = 0;
    Z_tmp_start[0] = 0;
    for(int i = 0; i < tk; i++){
        Z_tmp_start[i + 1] = Z_tmp[i].nnz + Z_tmp_start[i];
        Z_total_size += Z_tmp[i].nnz;
    }
    result = sptNewSparseTensorWithSize(Z, nmodes_Z, ndims_buf, Z_total_size);
    #pragma omp parallel for schedule(static) num_threads(tk) shared(Z, nmodes_Z, Z_tmp_start)
    for(int i = 0; i < tk; i++){
        int tid = omp_get_thread_num();
        if(Z_tmp[tid].nnz > 0){
            for(sptIndex m = 0; m < nmodes_Z; ++m)
                sptAppendIndexVectorWithVectorStartFromNuma(&Z->inds[m], &Z_tmp[tid].inds[m], Z_tmp_start[tid]);
            sptAppendValueVectorWithVectorStartFromNuma(&Z->values, &Z_tmp[tid].values, Z_tmp_start[tid]);
        }
    }
    sptStopTimer(timer);
    total_time += sptPrintElapsedTime(timer, "Writeback");
    sptStartTimer(timer);
    if(output_sorting == 1){
        sptSparseTensorSortIndex(Z, 1, tk);
    }
    sptStopTimer(timer);
    total_time += sptPrintElapsedTime(timer, "Output Sorting");
    /// fix: these helper buffers were leaked.
    free(Y_cmode_inds);
    free(Y_fmode_inds);
    free(Z_tmp_start);
    printf("[Total time]: %.6f s\n", total_time);
    printf("\n");
}
//4: HTY + HTA on HM — same hashed-Y + hashtable-accumulator algorithm as mode 3,
// but on heterogeneous memory (HM): structures are placed on the DRAM NUMA node
// while it has room, spilling to the Optane node, using a runtime estimate of
// free DRAM parsed from `numactl -H`.
if(experiment_modes == 4){
int result;
int dram_node;
int optane_node;
// NOTE(review): getenv() returns NULL when the variable is unset, which makes
// sscanf crash — confirm DRAM_NODE/OPTANE_NODE are always exported.
sscanf(getenv("DRAM_NODE"), "%d", &dram_node);
sscanf(getenv("OPTANE_NODE"), "%d", &optane_node);
int numa_node = dram_node;
sptIndex nmodes_X = X->nmodes;
sptIndex nmodes_Y = Y->nmodes;
sptTimer timer;
double total_time = 0;
sptNewTimer(&timer, 0);
// At least one free mode must remain in X after contraction.
if(num_cmodes >= X->nmodes) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns * SpTns", "shape mismatch");
}
// Paired contraction modes of X and Y must have equal dimensions.
for(sptIndex m = 0; m < num_cmodes; ++m) {
if(X->ndims[cmodes_X[m]] != Y->ndims[cmodes_Y[m]]) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns * SpTns", "shape mismatch");
}
}
sptStartTimer(timer);
// Build mode_order_X = [free modes..., contraction modes...] for X.
sptIndex * mode_order_X = (sptIndex *)malloc(nmodes_X * sizeof(sptIndex));
sptIndex ci = nmodes_X - num_cmodes, fi = 0;
for(sptIndex m = 0; m < nmodes_X; ++m) {
if(sptInArray(cmodes_X, num_cmodes, m) == -1) {
mode_order_X[fi] = m;
++ fi;
}
}
sptAssert(fi == nmodes_X - num_cmodes);
/// Copy the contract modes while keeping the contraction mode order
for(sptIndex m = 0; m < num_cmodes; ++m) {
mode_order_X[ci] = cmodes_X[m];
++ ci;
}
sptAssert(ci == nmodes_X);
/// Shuffle tensor indices according to mode_order_X, then sort so nonzeros
/// sharing the same free indices form contiguous fibers.
sptSparseTensorShuffleModes(X, mode_order_X);
for(sptIndex m = 0; m < nmodes_X; ++m) mode_order_X[m] = m; // reset mode_order
sptSparseTensorSortIndex(X, 1, tk);
sptStopTimer(timer);
double X_time = sptElapsedTime(timer);
total_time += X_time;
sptStartTimer(timer);
// Probe free DRAM on NUMA node 0 by parsing the "node 0 free: <MB>" line of
// `numactl -H`; tmp_dram_size ends up in bytes.
unsigned long long tmp_dram_size = 0;
FILE *fp;
char *s;
char path[1035];
unsigned long long i1, i2, i3, i4, i5, i6, i7, i8;
fp = popen("numactl -H", "r");
while (fgets(path, sizeof(path), fp) != NULL) {
s = strstr(path, "node 0 free:");
if (s != NULL)
if (2 == sscanf(s, "%*[^0123456789]%llu%*[^0123456789]%llu", &i1, &i2)){
tmp_dram_size = i2 * 1024 * 1024;
//printf("test: %llu B\n", dram_cap);
break;
}
}
pclose(fp);
// Upper-bound estimate of the Y hashtable footprint: per-node bookkeeping
// size times roughly 2*nnz entries.
unsigned int node_size = sizeof(unsigned long long) + sizeof(unsigned int) + sizeof(unsigned int) + sizeof(unsigned long long*) + sizeof(sptValue*) + sizeof(tensor_node_t*);
unsigned long long Y_upper_size = node_size * (Y->nnz + Y->nnz);
// NOTE(review): numa_node == dram_node at this point, so both branches prefer
// the same node; the else branch presumably was meant to use optane_node.
if (Y_upper_size < tmp_dram_size) numa_set_preferred(dram_node);
else numa_set_preferred(numa_node);
// Build mode_order_Y = [contraction modes..., free modes...]; Y itself is not shuffled.
sptIndex * mode_order_Y = (sptIndex *)malloc(nmodes_Y * sizeof(sptIndex));
ci = 0;
fi = num_cmodes;
for(sptIndex m = 0; m < nmodes_Y; ++m) {
if(sptInArray(cmodes_Y, num_cmodes, m) == -1) {
mode_order_Y[fi] = m;
++ fi;
}
}
sptAssert(fi == nmodes_Y);
for(sptIndex m = 0; m < num_cmodes; ++m) {
mode_order_Y[ci] = cmodes_Y[m];
++ ci;
}
sptAssert(ci == num_cmodes);
// Hash Y: key = linearized contraction indices; value = list of
// (linearized free indices, value) pairs.
table_t *Y_ht;
unsigned int Y_ht_size = Y->nnz;
Y_ht = tensor_htCreate(Y_ht_size);
// One lock per hash bucket for concurrent inserts.
// NOTE(review): this lock array is never free()d (leak).
omp_lock_t *locks = (omp_lock_t *)malloc(Y_ht_size*sizeof(omp_lock_t));
for(size_t i = 0; i < Y_ht_size; i++) {
omp_init_lock(&locks[i]);
}
// Y_cmode_inds[i] = product of contraction-mode dims from i on: mixed-radix
// weights used to linearize contraction indices into one integer key.
sptIndex* Y_cmode_inds = (sptIndex*)malloc((num_cmodes + 1) * sizeof(sptIndex));
for(sptIndex i = 0; i < num_cmodes + 1; i++) Y_cmode_inds[i] = 1;
for(sptIndex i = 0; i < num_cmodes;i++){
for(sptIndex j = i; j < num_cmodes;j++)
Y_cmode_inds[i] = Y_cmode_inds[i] * Y->ndims[mode_order_Y[j]];
}
sptIndex Y_num_fmodes = nmodes_Y - num_cmodes;
// Same mixed-radix weights for the free modes of Y.
sptIndex* Y_fmode_inds = (sptIndex*)malloc((Y_num_fmodes + 1) * sizeof(sptIndex));
//sptIndex* Y_fmode_inds = (sptIndex*) numa_alloc_onnode((Y_num_fmodes + 1) * sizeof(sptIndex), numa_node);
for(sptIndex i = 0; i < Y_num_fmodes + 1; i++) Y_fmode_inds[i] = 1;
for(sptIndex i = 0; i < Y_num_fmodes;i++){
for(sptIndex j = i; j < Y_num_fmodes;j++)
Y_fmode_inds[i] = Y_fmode_inds[i] * Y->ndims[mode_order_Y[j + num_cmodes]];
}
sptNnzIndex Y_nnz = Y->nnz;
// Tracks (approximately) the longest entry list in Y_ht; used below to bound
// per-fiber SPA memory.
unsigned int Y_free_upper = 0;
#pragma omp parallel for schedule(static) num_threads(tk) shared(Y_ht, Y_num_fmodes, mode_order_Y, num_cmodes, Y_cmode_inds, Y_fmode_inds)
for(sptNnzIndex i = 0; i < Y_nnz; i++){
// Linearize this nonzero's contraction and free indices.
unsigned long long key_cmodes = 0;
for(sptIndex m = 0; m < num_cmodes; ++m)
key_cmodes += Y->inds[mode_order_Y[m]].data[i] * Y_cmode_inds[m + 1];
unsigned long long key_fmodes = 0;
for(sptIndex m = 0; m < Y_num_fmodes; ++m)
key_fmodes += Y->inds[mode_order_Y[m+num_cmodes]].data[i] * Y_fmode_inds[m + 1];
unsigned pos = tensor_htHashCode(key_cmodes);
omp_set_lock(&locks[pos]);
tensor_value Y_val = tensor_htGet(Y_ht, key_cmodes);
unsigned int Y_len = Y_val.len;
if(Y_len == 0) {
tensor_htInsert(Y_ht, key_cmodes, key_fmodes, Y->values.data[i]);
}
else {
tensor_htUpdate(Y_ht, key_cmodes, key_fmodes, Y->values.data[i]);
// NOTE(review): Y_free_upper is updated under per-bucket locks only, so
// cross-bucket updates race; treated as an approximate upper bound.
if (Y_len >= Y_free_upper) Y_free_upper = Y_len + 1;
}
omp_unset_lock(&locks[pos]);
}
for(size_t i = 0; i < Y_ht_size; i++) {
omp_destroy_lock(&locks[i]);
}
sptStopTimer(timer);
total_time += sptElapsedTime(timer);
printf("[Input Processing]: %.2f s\n", sptElapsedTime(timer) + X_time );
sptStartTimer(timer);
/// Set fidx_X: indexing the combined free indices;
sptNnzIndexVector fidx_X;
/// Set indices for free modes, use X
sptSparseTensorSetIndices(X, mode_order_X, nmodes_X - num_cmodes, &fidx_X);
// Output tensor dims: free modes of X followed by free modes of Y.
sptIndex nmodes_Z = nmodes_X + nmodes_Y - 2 * num_cmodes;
sptIndex *ndims_buf = malloc(nmodes_Z * sizeof *ndims_buf);
spt_CheckOSError(!ndims_buf, "CPU SpTns * SpTns");
for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
ndims_buf[m] = X->ndims[m];
}
/// For non-sorted Y
for(sptIndex m = num_cmodes; m < nmodes_Y; ++m) {
ndims_buf[(m - num_cmodes) + nmodes_X - num_cmodes] = Y->ndims[mode_order_Y[m]];
}
free(mode_order_X);
free(mode_order_Y);
// Two per-thread output buffers: one pinned to DRAM, one to Optane; each
// fiber's results go to whichever the capacity heuristic below selects.
sptSparseTensor *Z_tmp_dram = numa_alloc_onnode(tk * sizeof (sptSparseTensor), dram_node);
sptSparseTensor *Z_tmp_optane = numa_alloc_onnode(tk * sizeof (sptSparseTensor), optane_node);
for (int i = 0; i < tk; i++){
result = sptNewSparseTensorNuma(&(Z_tmp_dram[i]), nmodes_Z, ndims_buf, dram_node);
result = sptNewSparseTensorNuma(&(Z_tmp_optane[i]), nmodes_Z, ndims_buf, optane_node);
}
//free(ndims_buf);
spt_CheckError(result, "CPU SpTns * SpTns", NULL);
// Re-probe free DRAM to set the budget (dram_cap); dram_cur tracks the bytes
// currently reserved by SPA buffers and DRAM-resident output.
unsigned long long dram_cur = 0;
unsigned long long dram_cap = 0;
unsigned long long Z_mem = 0;
fp = popen("numactl -H", "r"); // Open the command for reading
while (fgets(path, sizeof(path), fp) != NULL) { // Read the output a line at a time
s = strstr(path, "node 0 free:"); // Find the free-memory line for node 0
if (s != NULL) // s points at the matched line
if (2 == sscanf(s, "%*[^0123456789]%llu%*[^0123456789]%llu", &i1, &i2)){
// Keep ~10% headroom below the reported free memory.
dram_cap = i2 * 1024 * 1024 / 1.1; // Should be changed into: memory of the current system - X - Y_ht
break;
}
}
pclose(fp);
sptTimer timer_SPA;
double time_prep = 0;
double time_free_mode = 0;
double time_spa = 0;
double time_accumulate_z = 0;
sptNewTimer(&timer_SPA, 0);
// For the progress
int fx_counter = fidx_X.len;
#pragma omp parallel for schedule(static) num_threads(tk) shared(fidx_X, nmodes_X, nmodes_Y, num_cmodes, Z_tmp_dram, Z_tmp_optane, Y_fmode_inds, Y_ht, Y_cmode_inds, dram_cap, dram_cur, Z_mem, fx_counter)
for(sptNnzIndex fx_ptr = 0; fx_ptr < fidx_X.len - 1; ++fx_ptr) { // Loop fiber pointers of X
int tid = omp_get_thread_num();
fx_counter--; // progress counter only; benign data race
if (tid == 0){
sptStartTimer(timer_SPA);
}
sptNnzIndex fx_begin = fidx_X.data[fx_ptr];
sptNnzIndex fx_end = fidx_X.data[fx_ptr+1];
/// The total number and memory of SPA for one x fiber.
unsigned long long num_SPA_upper = 0;
unsigned long long mem_SPA_upper = 0;
unsigned long long mem_SPA_cur = 0;
bool SPA_in_dram = false;
/// The total memory of Z_tmp
unsigned long long Z_tmp_mem = 0;
/// hashtable size
const unsigned int ht_size = 10000;
sptIndex nmodes_spa = nmodes_Y - num_cmodes;
long int nnz_counter = 0;
sptIndex current_idx = 0;
// Upper-bound SPA footprint for this fiber: one node per possible product
// (longest Y entry list times fiber length) plus the table skeleton.
// NOTE(review): dram_cur is read and updated by all threads without
// atomics; the budget check is heuristic, not exact.
mem_SPA_upper = (Y_free_upper + fx_end - fx_begin) * sizeof(node_t) + sizeof(node_t*) * ht_size + sizeof(table_t);
if(mem_SPA_upper + dram_cur <= dram_cap) { // spa in dram
dram_cur += mem_SPA_upper;
SPA_in_dram = true;
}
table_t *ht;
ht = htCreate(ht_size);
mem_SPA_cur = sizeof( node_t*)*ht_size + sizeof( table_t);
if (tid == 0){
sptStopTimer(timer_SPA);
time_prep += sptElapsedTime(timer_SPA);
}
for(sptNnzIndex zX = fx_begin; zX < fx_end; ++ zX) { // Loop nnzs inside a X fiber
if (tid == 0){
sptStartTimer(timer_SPA);
}
sptValue valX = X->values.data[zX];
// Gather this nonzero's contraction indices (trailing modes of X).
// NOTE(review): cmode_index_X is never freed — per-nonzero leak.
sptIndexVector cmode_index_X;
sptNewIndexVector(&cmode_index_X, num_cmodes, num_cmodes);
for(sptIndex i = 0; i < num_cmodes; ++i){
cmode_index_X.data[i] = X->inds[nmodes_X - num_cmodes + i].data[zX];
}
unsigned long long key_cmodes = 0;
for(sptIndex m = 0; m < num_cmodes; ++m)
key_cmodes += cmode_index_X.data[m] * Y_cmode_inds[m + 1];
tensor_value Y_val = tensor_htGet(Y_ht, key_cmodes);
unsigned int my_len = Y_val.len;
if (tid == 0){
sptStopTimer(timer_SPA);
time_free_mode += sptElapsedTime(timer_SPA);
}
if(my_len == 0) continue; // no Y nonzeros share this contraction key
if (tid == 0){
sptStartTimer(timer_SPA);
}
for(int i = 0; i < my_len; i++){
unsigned long long fmode = Y_val.key_FM[i];
// htGet reports "missing" via a LONG_MIN sentinel cast to sptValue.
sptValue spa_val = htGet(ht, fmode);
float result = Y_val.val[i] * valX;
if(spa_val == LONG_MIN) {
htInsert(ht, fmode, result);
mem_SPA_cur += sizeof(node_t);
nnz_counter++;
}
else
htUpdate(ht, fmode, spa_val + result);
}
if (tid == 0){
sptStopTimer(timer_SPA);
time_spa += sptElapsedTime(timer_SPA);
}
}
if (tid == 0){
sptStartTimer(timer_SPA);
}
// Replace the upper-bound reservation with the SPA's actual footprint.
if(SPA_in_dram) dram_cur = dram_cur - mem_SPA_upper + mem_SPA_cur;
Z_tmp_mem = nnz_counter * (nmodes_Z * sizeof(sptIndex) + sizeof(sptValue));
Z_mem += Z_tmp_mem;
// Place this fiber's output in DRAM if it fits the budget; every 7th thread
// (tid % 7 == 0) is forced to Optane regardless, spreading the load.
if(Z_tmp_mem + dram_cur <= dram_cap && (tid % 7 != 0)){
dram_cur += Z_tmp_mem;
// Drain the SPA into the DRAM-resident buffer, decoding each linearized
// free-mode key back into per-mode indices and freeing nodes as we go.
for(int i = 0; i < ht->size; i++){
node_t *temp = ht->list[i];
while(temp){
unsigned long long idx_tmp = temp->key;
for(sptIndex m = 0; m < nmodes_spa; ++m) {
sptAppendIndexVectorNuma(&Z_tmp_dram[tid].inds[m + (nmodes_X - num_cmodes)], (idx_tmp%Y_fmode_inds[m])/Y_fmode_inds[m+1]);
}
sptAppendValueVectorNuma(&Z_tmp_dram[tid].values, temp->val);
node_t* pre = temp;
temp = temp->next;
free(pre);
}
}
Z_tmp_dram[tid].nnz += nnz_counter;
// Replicate this fiber's X free-mode indices for every emitted nonzero.
for(sptIndex i = 0; i < nnz_counter; ++i) {
for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
sptAppendIndexVectorNuma(&Z_tmp_dram[tid].inds[m], X->inds[m].data[fx_begin]);
}
}
}
else{ // append elements to Z_tmp_optane in Optane
for(int i = 0; i < ht->size; i++){
node_t *temp = ht->list[i];
while(temp){
unsigned long long idx_tmp = temp->key;
for(sptIndex m = 0; m < nmodes_spa; ++m) {
sptAppendIndexVectorNuma(&Z_tmp_optane[tid].inds[m + (nmodes_X - num_cmodes)], (idx_tmp%Y_fmode_inds[m])/Y_fmode_inds[m+1]);
}
sptAppendValueVectorNuma(&Z_tmp_optane[tid].values, temp->val);
node_t* pre = temp;
temp = temp->next;
free(pre);
}
}
Z_tmp_optane[tid].nnz += nnz_counter;
for(sptIndex i = 0; i < nnz_counter; ++i) {
for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
sptAppendIndexVectorNuma(&Z_tmp_optane[tid].inds[m], X->inds[m].data[fx_begin]);
}
}
}
htFree(ht);
// Release this fiber's SPA reservation from the DRAM budget.
if(SPA_in_dram) dram_cur -= mem_SPA_cur;
if (tid == 0){
sptStopTimer(timer_SPA);
time_accumulate_z += sptElapsedTime(timer_SPA);
}
} // End Loop fiber pointers of X
sptStopTimer(timer);
double main_computation = sptElapsedTime(timer);
total_time += main_computation;
// Apportion the wall-clock main computation across phases by the ratios
// measured on thread 0's private timer.
double spa_total = time_prep + time_free_mode + time_spa + time_accumulate_z;
printf("[Index Search]: %.2f s\n", (time_free_mode + time_prep)/spa_total * main_computation);
printf("[Accumulation]: %.2f s\n", (time_spa + time_accumulate_z)/spa_total * main_computation);
sptStartTimer(timer);
// NOTE(review): numa_node is already dram_node, so this assignment is a
// no-op; presumably the else case was meant to select optane_node.
if(Z_mem + dram_cur < dram_cap) numa_node = dram_node;
// Exclusive prefix sum over per-thread nnz counts (DRAM + Optane halves)
// gives each thread's write offset into the preallocated Z.
unsigned long long* Z_tmp_start = (unsigned long long*) malloc( (tk + 1) * sizeof(unsigned long long));
unsigned long long Z_total_size = 0;
Z_tmp_start[0] = 0;
for(int i = 0; i < tk; i++){
Z_tmp_start[i + 1] = Z_tmp_dram[i].nnz + Z_tmp_optane[i].nnz + Z_tmp_start[i];
Z_total_size += Z_tmp_dram[i].nnz + Z_tmp_optane[i].nnz;
}
result = sptNewSparseTensorWithSizeNuma(Z, nmodes_Z, ndims_buf, numa_node, Z_total_size);
#pragma omp parallel for schedule(static) num_threads(tk) shared(Z_tmp_dram, Z_tmp_optane, Z, nmodes_Z, Z_tmp_start)
for(int i = 0; i < tk; i++){
int tid = omp_get_thread_num();
// Each thread copies its DRAM half first, then its Optane half right after.
if(Z_tmp_dram[tid].nnz > 0){
for(sptIndex m = 0; m < nmodes_Z; ++m)
sptAppendIndexVectorWithVectorStartFromNuma(&Z->inds[m], &Z_tmp_dram[tid].inds[m], Z_tmp_start[tid]);
sptAppendValueVectorWithVectorStartFromNuma(&Z->values, &Z_tmp_dram[tid].values, Z_tmp_start[tid]);
}
if(Z_tmp_optane[tid].nnz > 0){
for(sptIndex m = 0; m < nmodes_Z; ++m)
sptAppendIndexVectorWithVectorStartFromNuma(&Z->inds[m], &Z_tmp_optane[tid].inds[m], Z_tmp_start[tid] + Z_tmp_dram[tid].nnz);
sptAppendValueVectorWithVectorStartFromNuma(&Z->values, &Z_tmp_optane[tid].values, Z_tmp_start[tid] + Z_tmp_dram[tid].nnz);
}
}
sptStopTimer(timer);
total_time += sptPrintElapsedTime(timer, "Writeback");
sptStartTimer(timer);
sptSparseTensorSortIndex(Z, 1, tk);
sptStopTimer(timer);
total_time += sptPrintElapsedTime(timer, "Output Sorting");
printf("[Total time]: %.2f s\n", total_time);
printf("\n");
}
if(experiment_modes == 5){
int result;
int dram_node;
int optane_node;
sscanf(getenv("DRAM_NODE"), "%d", &dram_node);
sscanf(getenv("OPTANE_NODE"), "%d", &optane_node);
int numa_node = dram_node;
sptIndex nmodes_X = X->nmodes;
sptIndex nmodes_Y = Y->nmodes;
sptTimer timer;
double total_time = 0;
sptNewTimer(&timer, 0);
if(num_cmodes >= X->nmodes) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns * SpTns", "shape mismatch");
}
for(sptIndex m = 0; m < num_cmodes; ++m) {
if(X->ndims[cmodes_X[m]] != Y->ndims[cmodes_Y[m]]) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "CPU SpTns * SpTns", "shape mismatch");
}
}
sptStartTimer(timer);
sptIndex * mode_order_X = (sptIndex *)malloc(nmodes_X * sizeof(sptIndex));
sptIndex ci = nmodes_X - num_cmodes, fi = 0;
for(sptIndex m = 0; m < nmodes_X; ++m) {
if(sptInArray(cmodes_X, num_cmodes, m) == -1) {
mode_order_X[fi] = m;
++ fi;
}
}
sptAssert(fi == nmodes_X - num_cmodes);
/// Copy the contract modes while keeping the contraction mode order
for(sptIndex m = 0; m < num_cmodes; ++m) {
mode_order_X[ci] = cmodes_X[m];
++ ci;
}
sptAssert(ci == nmodes_X);
/// Shuffle tensor indices according to mode_order_X
sptSparseTensorShuffleModes(X, mode_order_X);
for(sptIndex m = 0; m < nmodes_X; ++m) mode_order_X[m] = m; // reset mode_order
sptSparseTensorSortIndex(X, 1, tk);
sptStopTimer(timer);
double X_time = sptElapsedTime(timer);
total_time += X_time;
sptStartTimer(timer);
unsigned long long tmp_dram_size = 0;
FILE *fp;
char *s;
char path[1035];
unsigned long long i1, i2, i3, i4, i5, i6, i7, i8;
fp = popen("numactl -H", "r");
while (fgets(path, sizeof(path), fp) != NULL) {
s = strstr(path, "node 0 free:");
if (s != NULL)
if (2 == sscanf(s, "%*[^0123456789]%llu%*[^0123456789]%llu", &i1, &i2)){
tmp_dram_size = i2 * 1024 * 1024;
//printf("test: %llu B\n", dram_cap);
break;
}
}
pclose(fp);
unsigned int node_size = sizeof(unsigned long long) + sizeof(unsigned int) + sizeof(unsigned int) + sizeof(unsigned long long*) + sizeof(sptValue*) + sizeof(tensor_node_t*);
unsigned long long Y_upper_size = node_size * (Y->nnz + Y->nnz);
//printf("%lu\n", Y_upper_size);
if (Y_upper_size < tmp_dram_size) numa_set_preferred(dram_node);
else numa_set_preferred(numa_node);
//sptAssert(sptDumpSparseTensor(Y, 0, stdout) == 0);
sptIndex * mode_order_Y = (sptIndex *)malloc(nmodes_Y * sizeof(sptIndex));
ci = 0;
fi = num_cmodes;
for(sptIndex m = 0; m < nmodes_Y; ++m) {
if(sptInArray(cmodes_Y, num_cmodes, m) == -1) {
mode_order_Y[fi] = m;
++ fi;
}
}
sptAssert(fi == nmodes_Y);
for(sptIndex m = 0; m < num_cmodes; ++m) {
mode_order_Y[ci] = cmodes_Y[m];
++ ci;
}
sptAssert(ci == num_cmodes);
//for(sptIndex m = 0; m < nmodes_Y; ++m)
// printf ("mode_order_Y[m]: %d\n", mode_order_Y[m]);
table_t *Y_ht;
unsigned int Y_ht_size = Y->nnz;
Y_ht = tensor_htCreate(Y_ht_size);
omp_lock_t *locks = (omp_lock_t *)malloc(Y_ht_size*sizeof(omp_lock_t));
for(size_t i = 0; i < Y_ht_size; i++) {
omp_init_lock(&locks[i]);
}
sptIndex* Y_cmode_inds = (sptIndex*)malloc((num_cmodes + 1) * sizeof(sptIndex));
for(sptIndex i = 0; i < num_cmodes + 1; i++) Y_cmode_inds[i] = 1;
for(sptIndex i = 0; i < num_cmodes;i++){
for(sptIndex j = i; j < num_cmodes;j++)
Y_cmode_inds[i] = Y_cmode_inds[i] * Y->ndims[mode_order_Y[j]];
}
//for(sptIndex i = 0; i <= num_cmodes;i++)
// printf("%d ", Y_cmode_inds[i]);
//printf("\n");
sptIndex Y_num_fmodes = nmodes_Y - num_cmodes;
sptIndex* Y_fmode_inds = (sptIndex*)malloc((Y_num_fmodes + 1) * sizeof(sptIndex));
//sptIndex* Y_fmode_inds = (sptIndex*) numa_alloc_onnode((Y_num_fmodes + 1) * sizeof(sptIndex), numa_node);
for(sptIndex i = 0; i < Y_num_fmodes + 1; i++) Y_fmode_inds[i] = 1;
for(sptIndex i = 0; i < Y_num_fmodes;i++){
for(sptIndex j = i; j < Y_num_fmodes;j++)
Y_fmode_inds[i] = Y_fmode_inds[i] * Y->ndims[mode_order_Y[j + num_cmodes]];
}
//for(sptIndex i = 0; i <= Y_num_fmodes;i++)
// printf("%d ", Y_fmode_inds[i]);
//printf("\n");
sptNnzIndex Y_nnz = Y->nnz;
unsigned int Y_free_upper = 0;
#pragma omp parallel for schedule(static) num_threads(tk) shared(Y_ht, Y_num_fmodes, mode_order_Y, num_cmodes, Y_cmode_inds, Y_fmode_inds)
for(sptNnzIndex i = 0; i < Y_nnz; i++){
if(placement == 3) numa_set_preferred(optane_node);
unsigned long long key_cmodes = 0;
for(sptIndex m = 0; m < num_cmodes; ++m)
key_cmodes += Y->inds[mode_order_Y[m]].data[i] * Y_cmode_inds[m + 1];
unsigned long long key_fmodes = 0;
for(sptIndex m = 0; m < Y_num_fmodes; ++m)
key_fmodes += Y->inds[mode_order_Y[m+num_cmodes]].data[i] * Y_fmode_inds[m + 1];
unsigned pos = tensor_htHashCode(key_cmodes);
omp_set_lock(&locks[pos]);
tensor_value Y_val = tensor_htGet(Y_ht, key_cmodes);
//printf("Y_val.len: %d\n", Y_val.len);
unsigned int Y_len = Y_val.len;
if(Y_len == 0) {
tensor_htInsert(Y_ht, key_cmodes, key_fmodes, Y->values.data[i]);
}
else {
tensor_htUpdate(Y_ht, key_cmodes, key_fmodes, Y->values.data[i]);
if (Y_len >= Y_free_upper) Y_free_upper = Y_len + 1;
//for(int i = 0; i < Y_val.len; i++)
// printf("key_FM: %lu, Y_val: %f\n", Y_val.key_FM[i], Y_val.val[i]);
}
omp_unset_lock(&locks[pos]);
//sprintf("i: %d, key_cmodes: %lu, key_fmodes: %lu\n", i, key_cmodes, key_fmodes);
}
for(size_t i = 0; i < Y_ht_size; i++) {
omp_destroy_lock(&locks[i]);
}
sptStopTimer(timer);
total_time += sptElapsedTime(timer);
printf("[Input Processing]: %.2f s\n", sptElapsedTime(timer) + X_time );
sptStartTimer(timer);
//printf("Sorted X:\n");
//sptSparseTensorStatus(X, stdout);
//sptAssert(sptDumpSparseTensor(X, 0, stdout) == 0);
//printf("Sorted Y:\n");
//sptSparseTensorStatus(Y, stdout);
//sptAssert(sptDumpSparseTensor(Y, 0, stdout) == 0);
/// Set fidx_X: indexing the combined free indices;
sptNnzIndexVector fidx_X;
//sptStartTimer(timer);
/// Set indices for free modes, use X
sptSparseTensorSetIndices(X, mode_order_X, nmodes_X - num_cmodes, &fidx_X);
sptIndex nmodes_Z = nmodes_X + nmodes_Y - 2 * num_cmodes;
sptIndex *ndims_buf = malloc(nmodes_Z * sizeof *ndims_buf);
spt_CheckOSError(!ndims_buf, "CPU SpTns * SpTns");
for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
ndims_buf[m] = X->ndims[m];
}
/// For sorted Y
//for(sptIndex m = num_cmodes; m < nmodes_Y; ++m) {
// ndims_buf[(m - num_cmodes) + nmodes_X - num_cmodes] = Y->ndims[m];
//}
/// For non-sorted Y
for(sptIndex m = num_cmodes; m < nmodes_Y; ++m) {
ndims_buf[(m - num_cmodes) + nmodes_X - num_cmodes] = Y->ndims[mode_order_Y[m]];
}
free(mode_order_X);
free(mode_order_Y);
// sptSparseTensor *Z_tmp = malloc(tk * sizeof (sptSparseTensor));
sptSparseTensor *Z_tmp_dram, *Z_tmp_optane;
if(placement == 5) {
Z_tmp_dram = numa_alloc_onnode(tk * sizeof (sptSparseTensor), optane_node);
Z_tmp_optane = numa_alloc_onnode(tk * sizeof (sptSparseTensor), optane_node);
}
else{
Z_tmp_dram = numa_alloc_onnode(tk * sizeof (sptSparseTensor), dram_node);
Z_tmp_optane = numa_alloc_onnode(tk * sizeof (sptSparseTensor), optane_node);
}
for (int i = 0; i < tk; i++){
//result = sptNewSparseTensor(&(Z_tmp[i]), nmodes_Z, ndims_buf);
result = sptNewSparseTensorNuma(&(Z_tmp_dram[i]), nmodes_Z, ndims_buf, dram_node);
result = sptNewSparseTensorNuma(&(Z_tmp_optane[i]), nmodes_Z, ndims_buf, optane_node);
}
//free(ndims_buf);
spt_CheckError(result, "CPU SpTns * SpTns", NULL);
unsigned long long dram_cur = 0;
unsigned long long dram_cap = 0;
unsigned long long Z_mem = 0;
fp = popen("numactl -H", "r"); // Open the command for reading
while (fgets(path, sizeof(path), fp) != NULL) { // Read the output a line at a time - output it.
s = strstr(path, "node 0 free:"); // Search for string "hassasin" in buff
if (s != NULL) // If successful then s now points at "hassasin"
if (2 == sscanf(s, "%*[^0123456789]%llu%*[^0123456789]%llu", &i1, &i2)){
//printf("System DRAM memory: %lu MB\n", i2);
dram_cap = i2 * 1024 * 1024 / 1.1; // Should be changed into: memory of the current system - X - Y_ht
//printf("test: %llu B\n", dram_cap);
break;
}
}
pclose(fp);
sptTimer timer_SPA;
double time_prep = 0;
double time_free_mode = 0;
double time_spa = 0;
double time_accumulate_z = 0;
sptNewTimer(&timer_SPA, 0);
// For the progress
int fx_counter = fidx_X.len;
#pragma omp parallel for schedule(static) num_threads(tk) shared(fidx_X, nmodes_X, nmodes_Y, num_cmodes, Z_tmp_dram, Z_tmp_optane, Y_fmode_inds, Y_ht, Y_cmode_inds, dram_cap, dram_cur, Z_mem, fx_counter)
for(sptNnzIndex fx_ptr = 0; fx_ptr < fidx_X.len - 1; ++fx_ptr) { // Loop fiber pointers of X
int tid = omp_get_thread_num();
if(placement == 4) numa_set_preferred(optane_node);
fx_counter--;
//if (fx_counter % 1000 == 0) printf("Progress: %d\/%d\n", fx_counter, fidx_X.len);
if (tid == 0){
sptStartTimer(timer_SPA);
}
sptNnzIndex fx_begin = fidx_X.data[fx_ptr];
sptNnzIndex fx_end = fidx_X.data[fx_ptr+1];
/// The total number and memory of SPA for one x fiber.
unsigned long long num_SPA_upper = 0;
unsigned long long mem_SPA_upper = 0;
unsigned long long mem_SPA_cur = 0;
bool SPA_in_dram = false;
/// The total memory of Z_tmp
unsigned long long Z_tmp_mem = 0;
/// hashtable size
const unsigned int ht_size = 10000;
sptIndex nmodes_spa = nmodes_Y - num_cmodes;
long int nnz_counter = 0;
sptIndex current_idx = 0;
/*for(sptNnzIndex zX = fx_begin; zX < fx_end; ++ zX) { // Loop nnzs inside a X fiber
sptValue valX = X->values.data[zX];
//printf("valX: %f\n", valX);
sptIndexVector cmode_index_X;
sptNewIndexVector(&cmode_index_X, num_cmodes, num_cmodes);
for(sptIndex i = 0; i < num_cmodes; ++i){
cmode_index_X.data[i] = X->inds[nmodes_X - num_cmodes + i].data[zX];
//printf("\ncmode_index_X[%lu]: %lu\n", i, cmode_index_X.data[i]);
}
unsigned long long key_cmodes = 0;
for(sptIndex m = 0; m < num_cmodes; ++m)
key_cmodes += cmode_index_X.data[m] * Y_cmode_inds[m + 1];
//printf("key_cmodes: %d\n", key_cmodes);
tensor_value Y_val = tensor_htGet(Y_ht, key_cmodes);
//printf("Y_val.len: %d\n", Y_val.len);
unsigned int my_len = Y_val.len;
if(my_len == 0) continue;
num_SPA_upper += my_len;
}*/
mem_SPA_upper = (Y_free_upper + fx_end - fx_begin) * sizeof(node_t) + sizeof(node_t*) * ht_size + sizeof(table_t);
if(mem_SPA_upper + dram_cur <= dram_cap) { // spa in dram
dram_cur += mem_SPA_upper;
SPA_in_dram = true;
}
table_t *ht;
ht = htCreate(ht_size);
mem_SPA_cur = sizeof( node_t*)*ht_size + sizeof( table_t);
if (tid == 0){
sptStopTimer(timer_SPA);
time_prep += sptElapsedTime(timer_SPA);
}
for(sptNnzIndex zX = fx_begin; zX < fx_end; ++ zX) { // Loop nnzs inside a X fiber
if (tid == 0){
sptStartTimer(timer_SPA);
}
sptValue valX = X->values.data[zX];
//printf("valX: %f\n", valX);
sptIndexVector cmode_index_X;
sptNewIndexVector(&cmode_index_X, num_cmodes, num_cmodes);
for(sptIndex i = 0; i < num_cmodes; ++i){
cmode_index_X.data[i] = X->inds[nmodes_X - num_cmodes + i].data[zX];
//printf("\ncmode_index_X[%lu]: %lu\n", i, cmode_index_X.data[i]);
}
unsigned long long key_cmodes = 0;
for(sptIndex m = 0; m < num_cmodes; ++m)
key_cmodes += cmode_index_X.data[m] * Y_cmode_inds[m + 1];
//printf("key_cmodes: %d\n", key_cmodes);
tensor_value Y_val = tensor_htGet(Y_ht, key_cmodes);
//printf("Y_val.len: %d\n", Y_val.len);
unsigned int my_len = Y_val.len;
if (tid == 0){
sptStopTimer(timer_SPA);
time_free_mode += sptElapsedTime(timer_SPA);
}
if(my_len == 0) continue;
if (tid == 0){
sptStartTimer(timer_SPA);
}
if(placement == 4) numa_set_preferred(optane_node);
for(int i = 0; i < my_len; i++){
unsigned long long fmode = Y_val.key_FM[i];
//printf("i: %d, Y_val.key_FM[i]: %lu, Y_val.val[i]: %f\n", i, Y_val.key_FM[i], Y_val.val[i]);
sptValue spa_val = htGet(ht, fmode);
float result = Y_val.val[i] * valX;
if(spa_val == LONG_MIN) {
htInsert(ht, fmode, result);
mem_SPA_cur += sizeof(node_t);
nnz_counter++;
}
else
htUpdate(ht, fmode, spa_val + result);
}
if (tid == 0){
sptStopTimer(timer_SPA);
time_spa += sptElapsedTime(timer_SPA);
}
}
if (tid == 0){
sptStartTimer(timer_SPA);
}
if(SPA_in_dram) dram_cur = dram_cur - mem_SPA_upper + mem_SPA_cur;
Z_tmp_mem = nnz_counter * (nmodes_Z * sizeof(sptIndex) + sizeof(sptValue));
Z_mem += Z_tmp_mem;
if(Z_tmp_mem + dram_cur <= dram_cap && (tid % 7 != 0)){
dram_cur += Z_tmp_mem;
for(int i = 0; i < ht->size; i++){
if (placement == 5 && fx_ptr%(ht_size/10) == 0) numa_set_preferred(optane_node);
node_t *temp = ht->list[i];
while(temp){
unsigned long long idx_tmp = temp->key;
//nnz_counter++;
for(sptIndex m = 0; m < nmodes_spa; ++m) {
//sptAppendIndexVector(&Z_tmp_dram[tid].inds[m + (nmodes_X - num_cmodes)], (idx_tmp%Y_fmode_inds[m])/Y_fmode_inds[m+1]);
sptAppendIndexVectorNuma(&Z_tmp_dram[tid].inds[m + (nmodes_X - num_cmodes)], (idx_tmp%Y_fmode_inds[m])/Y_fmode_inds[m+1]);
}
//printf("val: %f\n", temp->val);
//sptAppendValueVector(&Z_tmp_dram[tid].values, temp->val);
sptAppendValueVectorNuma(&Z_tmp_dram[tid].values, temp->val);
node_t* pre = temp;
temp = temp->next;
free(pre);
//numa_free(pre, sizeof(node_t));
}
}
Z_tmp_dram[tid].nnz += nnz_counter;
for(sptIndex i = 0; i < nnz_counter; ++i) {
for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
//sptAppendIndexVector(&Z_tmp_dram[tid].inds[m], X->inds[m].data[fx_begin]);
sptAppendIndexVectorNuma(&Z_tmp_dram[tid].inds[m], X->inds[m].data[fx_begin]);
}
}
}
else{
for(int i = 0; i < ht->size; i++){
if (placement == 5 && fx_ptr%(ht_size/10) == 0) numa_set_preferred(optane_node);
node_t *temp = ht->list[i];
while(temp){
unsigned long long idx_tmp = temp->key;
//nnz_counter++;
for(sptIndex m = 0; m < nmodes_spa; ++m) {
//sptAppendIndexVector(&Z_tmp_optane[tid].inds[m + (nmodes_X - num_cmodes)], (idx_tmp%Y_fmode_inds[m])/Y_fmode_inds[m+1]);
sptAppendIndexVectorNuma(&Z_tmp_optane[tid].inds[m + (nmodes_X - num_cmodes)], (idx_tmp%Y_fmode_inds[m])/Y_fmode_inds[m+1]);
}
//printf("val: %f\n", temp->val);
//sptAppendValueVector(&Z_tmp_optane[tid].values, temp->val);
sptAppendValueVectorNuma(&Z_tmp_optane[tid].values, temp->val);
node_t* pre = temp;
temp = temp->next;
free(pre);
//numa_free(pre, sizeof(node_t));
}
}
Z_tmp_optane[tid].nnz += nnz_counter;
for(sptIndex i = 0; i < nnz_counter; ++i) {
for(sptIndex m = 0; m < nmodes_X - num_cmodes; ++m) {
//sptAppendIndexVector(&Z_tmp_optane[tid].inds[m], X->inds[m].data[fx_begin]);
sptAppendIndexVectorNuma(&Z_tmp_optane[tid].inds[m], X->inds[m].data[fx_begin]);
}
}
}
htFree(ht);
if(SPA_in_dram) dram_cur -= mem_SPA_cur;
if (tid == 0){
sptStopTimer(timer_SPA);
time_accumulate_z += sptElapsedTime(timer_SPA);
}
//printf("Z:\n");
//sptDumpSparseTensor(Z, 0, stdout);
} // End Loop fiber pointers of X
//sptAssert(sptDumpSparseTensor(Z, 0, stdout) == 0);
sptStopTimer(timer);
double main_computation = sptElapsedTime(timer);
total_time += main_computation;
double spa_total = time_prep + time_free_mode + time_spa + time_accumulate_z;
printf("[Index Search]: %.2f s\n", (time_free_mode + time_prep)/spa_total * main_computation);
printf("[Accumulation]: %.2f s\n", (time_spa + time_accumulate_z)/spa_total * main_computation);
sptStartTimer(timer);
if(Z_mem + dram_cur < dram_cap) numa_node = dram_node;
unsigned long long* Z_tmp_start = (unsigned long long*) malloc( (tk + 1) * sizeof(unsigned long long));
unsigned long long Z_total_size = 0;
Z_tmp_start[0] = 0;
for(int i = 0; i < tk; i++){
Z_tmp_start[i + 1] = Z_tmp_dram[i].nnz + Z_tmp_optane[i].nnz + Z_tmp_start[i];
Z_total_size += Z_tmp_dram[i].nnz + Z_tmp_optane[i].nnz;
//printf("Z_tmp_start[i + 1]: %lu, i: %d\n", Z_tmp_start[i + 1], i);
}
if(placement == 6) {
result = sptNewSparseTensorWithSizeNuma(Z, nmodes_Z, ndims_buf, optane_node, Z_total_size);
}
else{
result = sptNewSparseTensorWithSizeNuma(Z, nmodes_Z, ndims_buf, numa_node, Z_total_size);
}
//result = sptNewSparseTensorWithSize(Z, nmodes_Z, ndims_buf, Z_total_size);
#pragma omp parallel for schedule(static) num_threads(tk) shared(Z_tmp_dram, Z_tmp_optane, Z, nmodes_Z, Z_tmp_start)
for(int i = 0; i < tk; i++){
int tid = omp_get_thread_num();
if(Z_tmp_dram[tid].nnz > 0){
for(sptIndex m = 0; m < nmodes_Z; ++m)
sptAppendIndexVectorWithVectorStartFromNuma(&Z->inds[m], &Z_tmp_dram[tid].inds[m], Z_tmp_start[tid]);
sptAppendValueVectorWithVectorStartFromNuma(&Z->values, &Z_tmp_dram[tid].values, Z_tmp_start[tid]);
}
if(Z_tmp_optane[tid].nnz > 0){
for(sptIndex m = 0; m < nmodes_Z; ++m)
sptAppendIndexVectorWithVectorStartFromNuma(&Z->inds[m], &Z_tmp_optane[tid].inds[m], Z_tmp_start[tid] + Z_tmp_dram[tid].nnz);
sptAppendValueVectorWithVectorStartFromNuma(&Z->values, &Z_tmp_optane[tid].values, Z_tmp_start[tid] + Z_tmp_dram[tid].nnz);
}
}
sptStopTimer(timer);
total_time += sptPrintElapsedTime(timer, "Writeback");
sptStartTimer(timer);
sptSparseTensorSortIndex(Z, 1, tk);
sptStopTimer(timer);
total_time += sptPrintElapsedTime(timer, "Output Sorting");
printf("[Total time]: %.2f s\n", total_time);
//system("numactl -H");
printf("\n");
}
return 0;
} |
meta_default.c | int main()
{
    /* Trip count for the demonstration loop. */
    int limit = 10;
    /* OpenMP metadirective: with no context selectors given, the
       default(parallel for) variant is always chosen. */
    #pragma omp metadirective default(parallel for)
    for (int idx = 0; idx < limit; idx++)
        ; /* empty body -- the program only demonstrates the directive */
    return 0;
}
|
master_slave.c | #include<stdio.h>
#include<omp.h>
int main() {
    #pragma omp parallel
    {
        /* Thread 0 reports as the master; every other thread as a slave. */
        int tid = omp_get_thread_num();
        if (tid == 0) {
            printf("Master\n");
        } else {
            printf("Slave\n");
        }
    }
} |
network.h | /*
* Copyright (c) 2020 Georgios Damaskinos
* All rights reserved.
* @author Georgios Damaskinos <georgios.damaskinos@gmail.com>
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
// == mojo ====================================================================
//
// Copyright (c) gnawice@gnawice.com. All rights reserved.
// See LICENSE in root folder
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files(the "Software"),
// to deal in the Software without restriction, including without
// limitation the rights to use, copy, modify, merge, publish, distribute,
// sublicense, and/or sell copies of the Software, and to permit persons to
// whom the Software is furnished to do so, subject to the following
// conditions :
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
// OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
// THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//
// ============================================================================
// network.h: The main artificial neural network graph for mojo
// ==================================================================== mojo ==
#pragma once
#include <string>
#include <iostream> // cout
#include <fstream>
#include <sstream>
#include <map>
#include <set>
#include <vector>
#include <atomic>
#include "layer.h"
#include "solver.h"
#include "activation.h"
#include "cost.h"
#define TEMPERATURE 2
#ifndef DISTILLATION_MODE
#define DISTILLATION_MODE 0
#endif
// hack for VS2010 to handle c++11 for(:)
#if (_MSC_VER == 1600)
#ifndef __for__
#define __for__ for each
#define __in__ in
#endif
#else
#ifndef __for__
#define __for__ for
#define __in__ :
#endif
#endif
namespace mojo {
#if defined(MOJO_CV2) || defined(MOJO_CV3)
// forward declare these for data augmentation
cv::Mat matrix2cv(const mojo::matrix &m, bool uc8 = false);
mojo::matrix cv2matrix(cv::Mat &m);
mojo::matrix transform(const mojo::matrix in, const int x_center, const int y_center, int out_dim, float theta = 0, float scale = 1.f);
#endif
// sleep needed for threading
// Portable millisecond sleep used by the batch-training worker loops.
#ifdef _WIN32
#include <windows.h>
// Windows: Sleep() takes milliseconds directly.
void mojo_sleep(unsigned milliseconds) { Sleep(milliseconds); }
#else
#include <unistd.h>
// POSIX: usleep() takes microseconds, hence the *1000.
void mojo_sleep(unsigned milliseconds) { usleep(milliseconds * 1000); }
#endif
#ifdef MOJO_PROFILE_LAYERS
#ifdef _WIN32
//* used for profiling layers
// QueryPerformanceCounter-based millisecond stopwatch (Windows only).
double PCFreq = 0.0;
__int64 CounterStart = 0;
// Capture the current tick count and the tick-to-millisecond scale factor.
void StartCounter()
{
LARGE_INTEGER li;
if (!QueryPerformanceFrequency(&li)) return; // QPC unsupported: timer stays disabled
PCFreq = double(li.QuadPart) / 1000.0; // ticks per millisecond
QueryPerformanceCounter(&li);
CounterStart = li.QuadPart;
}
// Milliseconds elapsed since the last StartCounter() call.
double GetCounter()
{
LARGE_INTEGER li;
QueryPerformanceCounter(&li);
return double(li.QuadPart - CounterStart) / PCFreq;
}
#else
// Non-Windows builds: profiling stubs that always report 0 ms.
void StartCounter(){}
double GetCounter(){return 0;}
#endif
#endif
//*/
// Replace every occurrence of `from` in `str` with `to`, in place.
// Scanning resumes after each inserted `to`, so a replacement whose result
// contains `from` (e.g. 'x' -> 'yx') cannot loop forever.
void replace_str(std::string& str, const std::string& from, const std::string& to) {
	if (from.empty())
		return; // nothing sensible to replace
	size_t pos = str.find(from);
	while (pos != std::string::npos) {
		str.replace(pos, from.length(), to);
		pos = str.find(from, pos + to.length());
	}
}
// returns Energy (euclidian distance / 2) and max index
// Accumulates the squared error between `out` and `target` while tracking the
// argmax of `out`; ties keep the earliest index. `best_index` may be NULL.
float match_labels(const float *out, const float *target, const int size, int *best_index = NULL)
{
	float energy = 0;
	int arg_best = 0;
	for (int j = 0; j < size; j++)
	{
		const float diff = out[j] - target[j];
		energy += diff * diff;
		if (out[j] > out[arg_best]) arg_best = j;
	}
	if (best_index) *best_index = arg_best;
	energy *= 0.5;
	return energy;
}
// returns index of highest value (argmax)
// Ties keep the earliest index; size 0 yields index 0.
int arg_max(const float *out, const int size)
{
	int best = 0;
	for (int j = 1; j < size; j++)
		if (out[j] > out[best])
			best = j;
	return best;
}
//----------------------------------------------------------------------
// network
// - class that holds all the layers and connection information
// - runs forward prediction
class network
{
public:
int _size; // output size
int _thread_count; // determines number of layer sets (copys of layers)
int _internal_thread_count; // used for speeding up convolutions, etc..
static const int MAIN_LAYER_SET = 0;
std::atomic<int> batch_index = {0};
// training related stuff
int _batch_size; // determines number of dW sets
float _skip_energy_level;
bool _smart_train;
std::vector <float> _running_E;
double _running_sum_E;
cost_function *_cost_function;
solver *_solver;
static const unsigned char BATCH_RESERVED = 1, BATCH_FREE = 0, BATCH_COMPLETE = 2;
static const int BATCH_FILLED_COMPLETE = -2, BATCH_FILLED_IN_PROCESS = -1;
#ifdef MOJO_OMP
// Guards concurrent access to the mini-batch bookkeeping (batch_open etc.).
omp_lock_t _lock_batch;
void lock_batch() {omp_set_lock(&_lock_batch);}
void unlock_batch() {omp_unset_lock(&_lock_batch);}
void init_lock() {omp_init_lock(&_lock_batch);}
void destroy_lock() {omp_destroy_lock(&_lock_batch);}
int get_thread_num() {return omp_get_thread_num();}
#else
// Single-threaded build: locking is unnecessary, all stubs are no-ops.
void lock_batch() {}
void unlock_batch() {}
void init_lock(){}
void destroy_lock() {}
int get_thread_num() {return 0;}
#endif
// training progress stuff
int train_correct;
int train_skipped;
int stuck_counter;
int train_updates;
int train_samples;
int epoch_count;
int max_epochs;
float best_estimated_accuracy;
int best_accuracy_count;
float old_estimated_accuracy;
float estimated_accuracy;
// data augmentation stuff
int use_augmentation; // 0=off, 1=mojo, 2=opencv
int augment_x, augment_y;
int augment_h_flip, augment_v_flip;
mojo::pad_type augment_pad;
float augment_theta;
float augment_scale;
// here we have multiple sets of the layers to allow threading and batch processing
// a separate layer set is needed for each independent thread
std::vector< std::vector<base_layer *>> layer_sets;
std::map<std::string, int> layer_map; // name-to-index of layer for layer management
std::vector<std::pair<std::string, std::string>> layer_graph; // pairs of names of layers that are connected
std::vector<matrix *> W; // these are the weights between/connecting layers
// these sets are needed because we need copies for each item in mini-batch
std::vector< std::vector<matrix>> dW_sets; // only for training, will have _batch_size of these
std::vector< std::vector<matrix>> dbias_sets; // only for training, will have _batch_size of these
std::vector< unsigned char > batch_open; // only for training, will have _batch_size of these
// Construct an empty network; opt_name selects the solver passed to
// new_solver() (NULL means that factory's default).
network(const char* opt_name=NULL): _thread_count(1), _skip_energy_level(0.f), _batch_size(1)
{
_internal_thread_count=1;
_size=0; // no layers yet, so no output size
_solver = new_solver(opt_name);
_cost_function = NULL;
//std::vector<base_layer *> layer_set;
//layer_sets.push_back(layer_set);
layer_sets.resize(1); // a single layer set until threading is enabled
// one dW/dbias/batch-state slot per mini-batch element (_batch_size == 1 here)
dW_sets.resize(_batch_size);
dbias_sets.resize(_batch_size);
batch_open.resize(_batch_size);
// reset all training-progress counters
_running_sum_E = 0.;
train_correct = 0;
train_samples = 0;
train_skipped = 0;
epoch_count = 0;
max_epochs = 1000;
train_updates = 0;
estimated_accuracy = 0;
old_estimated_accuracy = 0;
stuck_counter = 0;
best_estimated_accuracy=0;
best_accuracy_count=0;
// data augmentation off by default
use_augmentation=0;
augment_x = 0; augment_y = 0; augment_h_flip = 0; augment_v_flip = 0;
augment_pad =mojo::edge;
augment_theta=0; augment_scale=0;
init_lock();
#ifdef USE_AF
af::setDevice(0); // ArrayFire backend: select device 0 and print its info
af::info();
#endif
}
// Tear down all layers, weights, solver/cost state and the batch lock.
~network()
{
	clear();               // frees layers and weight matrices
	delete _cost_function; // delete on NULL is a no-op
	delete _solver;
	destroy_lock();
}
// call clear if you want to load a different configuration/model
// Frees every layer in every layer set, every weight matrix, and resets the
// name map and connection graph.
void clear()
{
	// delete all layers of each layer set (one set per thread)
	for(int i=0; i<(int)layer_sets.size(); i++)
	{
		__for__(auto l __in__ layer_sets[i]) delete l;
		// BUG FIX: the original called layer_sets.clear() here, inside the
		// loop, which emptied the outer vector after the first iteration and
		// leaked the layers of every remaining layer set.
	}
	layer_sets.clear();
	__for__(auto w __in__ W) if(w) delete w;
	W.clear();
	layer_map.clear();
	layer_graph.clear();
}
// output size of final layer;
// (_size is updated by push_back() to the fan_size() of the newest layer)
int out_size() {return _size;}
// get input size
bool get_input_size(int *w, int *h, int *c)
{
if(layer_sets[MAIN_LAYER_SET].size()<1) return false;
*w=layer_sets[MAIN_LAYER_SET][0]->node.cols;*h=layer_sets[MAIN_LAYER_SET][0]->node.rows;*c=layer_sets[MAIN_LAYER_SET][0]->node.chans;
return true;
}
// sets up number of layer copies to run over multiple threads
void build_layer_sets()
{
int layer_cnt = (int)layer_sets.size();
if (layer_cnt<_thread_count) layer_sets.resize(_thread_count);
// ToDo: add shrink back / else if(layer_cnt>_thread_count)
sync_layer_sets();
}
// number of layer-set copies available for external threading
inline int get_thread_count() {return _thread_count;}
// must call this with max thread count before constructing layers
// value <1 will result in thread count = # cores (including hyperthreaded)
void enable_external_threads(int threads = -1)
{
	// NOTE(review): the OMP-based fallback (threads<1 => omp_get_num_procs())
	// is commented out upstream; the value is stored exactly as given —
	// confirm callers always pass a positive count.
	_thread_count = threads;
	build_layer_sets();
}
// Configure the thread count used to parallelize work inside layers
// (convolutions etc.). Defaults to one fewer than the core count, minimum 1.
void enable_internal_threads(int threads = -1)
{
#ifdef MOJO_OMP
	if (threads < 1) threads = omp_get_num_procs() - 1; // one less than core count
	_internal_thread_count = (threads < 1) ? 1 : threads;
#else
	_internal_thread_count = 1; // no OMP: internal work stays single threaded
#endif
}
// when using threads, need to get bias data synched between all layer sets,
// call this after bias update in main layer set to copy the bias to the other sets
void sync_layer_sets()
{
	const int set_count = (int)layer_sets.size();
	for (int s = 1; s < set_count; s++)
	{
		const int layer_count = (int)layer_sets[MAIN_LAYER_SET].size();
		for (int l = 0; l < layer_count; l++)
			for (int b = 0; b < layer_sets[MAIN_LAYER_SET][l]->bias.size(); b++)
				(layer_sets[s])[l]->bias.x[b] = (layer_sets[MAIN_LAYER_SET])[l]->bias.x[b];
	}
}
// used to add some noise to weights
// Adds small gaussian noise (std ~ 1/size) to every weight matrix.
void heat_weights()
{
	__for__(auto w __in__ W)
	{
		if (w == NULL) continue; // unconnected slots hold no weights
		matrix noise(w->cols, w->rows, w->chans);
		noise.fill_random_normal(1.f / noise.size());
		*w += noise;
	}
}
// subtract the mean from each weight matrix (re-centers the weights)
void remove_means()
{
	__for__(auto w __in__ W)
	{
		if (w) w->remove_mean();
	}
}
// used to push a layer back in the ORDERED list of layers
// if connect_all() is used, then the order of the push_back is used to connect the layers
// when forward or backward propogation, this order is used for the serialized order of calculations
// Layer_name must be unique. Returns false if the name already exists.
bool push_back(const char *layer_name, const char *layer_config)
{
	// BUG FIX: the original test `if(layer_map[layer_name])` could not reject a
	// duplicate of the FIRST layer (its stored index 0 is falsy) and the failed
	// operator[] lookup itself inserted a stray zero entry into the map.
	if (layer_map.find(layer_name) != layer_map.end()) return false; // already exists
	base_layer *l = new_layer(layer_name, layer_config);
	// make sure there is a 'set' to add layers to
	if (layer_sets.size() < 1)
	{
		std::vector<base_layer *> layer_set;
		layer_sets.push_back(layer_set);
	}
	// make sure layer_sets are created (one per enabled thread)
	build_layer_sets();
	// record the layer's index by name, then append it to the main set
	layer_map[layer_name] = (int)layer_sets[MAIN_LAYER_SET].size();
	layer_sets[MAIN_LAYER_SET].push_back(l);
	// update as potential last layer - so it sets the out size
	_size = l->fan_size();
	// add other copies needed for threading
	for (int i = 1; i < (int)layer_sets.size(); i++) layer_sets[i].push_back(new_layer(layer_name, layer_config));
	return true;
}
// connect 2 layers together and initialize weights
// top and bottom concepts are reversed from literature
// my 'top' is the input of a forward() pass and the 'bottom' is the output
// perhaps 'top' traditionally comes from the brain model, but my 'top' comes
// from reading order (information flows top to bottom)
void connect(const char *layer_name_top, const char *layer_name_bottom)
{
	size_t i_top = layer_map[layer_name_top];
	size_t i_bottom = layer_map[layer_name_bottom];
	base_layer *l_top = layer_sets[MAIN_LAYER_SET][i_top];
	base_layer *l_bottom = layer_sets[MAIN_LAYER_SET][i_bottom];
	int w_i = (int)W.size();
	matrix *w = l_bottom->new_connection(*l_top, w_i);
	W.push_back(w);
	layer_graph.push_back(std::make_pair(layer_name_top, layer_name_bottom));
	// need to build connections for other batches/threads
	for (int i = 1; i < (int)layer_sets.size(); i++)
	{
		l_top = layer_sets[i][i_top];
		l_bottom = layer_sets[i][i_bottom];
		// copies share the main set's weight matrix, so free the duplicate
		delete l_bottom->new_connection(*l_top, w_i);
	}
	// we need to let solver prepare space for stateful information
	if (_solver)
	{
		if (w) _solver->push_back(w->cols, w->rows, w->chans);
		else _solver->push_back(1, 1, 1);
	}
	int fan_in = l_bottom->fan_size();
	int fan_out = l_top->fan_size();
	// ToDo: this may be broke when 2 layers connect to one. need to fix (i.e. resnet)
	// after all connections, run through and do weights with correct fan count
	// initialize weights - ToDo: separate and allow users to configure(?)
	if (w && l_bottom->has_weights())
	{
		if (strcmp(l_bottom->p_act->name, "tanh") == 0)
		{
			// xavier : for tanh
			float weight_base = (float)(std::sqrt(6. / ((double)fan_in + (double)fan_out)));
			// float weight_base = (float)(std::sqrt(.25/( (double)fan_in)));
			w->fill_random_uniform(weight_base);
		}
		// BUG FIX: the original condition compared against "sigmoid" twice;
		// the redundant duplicate comparison was removed (behavior unchanged).
		else if (strcmp(l_bottom->p_act->name, "sigmoid") == 0)
		{
			// xavier : for sigmoid
			float weight_base = 4.f * (float)(std::sqrt(6. / ((double)fan_in + (double)fan_out)));
			w->fill_random_uniform(weight_base);
		}
		else if ((strcmp(l_bottom->p_act->name, "lrelu") == 0) || (strcmp(l_bottom->p_act->name, "relu") == 0)
			|| (strcmp(l_bottom->p_act->name, "vlrelu") == 0) || (strcmp(l_bottom->p_act->name, "elu") == 0))
		{
			// he : for relu
			float weight_base = (float)(std::sqrt(2. / (double)fan_in));
			w->fill_random_normal(weight_base);
		}
		else
		{
			// lecun : orig
			float weight_base = (float)(std::sqrt(1. / (double)fan_in));
			w->fill_random_uniform(weight_base);
		}
	}
	else if (w) w->fill(0);
}
// automatically connect all layers in the order they were provided
// easy way to go, but can't deal with branch/highway/resnet/inception types of architectures
void connect_all()
{
	const int last = (int)layer_sets[MAIN_LAYER_SET].size() - 1;
	for (int j = 0; j < last; j++)
		connect(layer_sets[MAIN_LAYER_SET][j]->name.c_str(), layer_sets[MAIN_LAYER_SET][j + 1]->name.c_str());
}
// returns the index of the named layer in the main layer set, or -1 if not found
int get_layer_index(const char *name)
{
	const int count = (int)layer_sets[MAIN_LAYER_SET].size();
	for (int idx = 0; idx < count; idx++)
	{
		if (layer_sets[MAIN_LAYER_SET][idx]->name.compare(name) == 0) return idx;
	}
	return -1;
}
// get the list of layers used (but not connection information)
// returns a human-readable summary: one "index : name : config" line per layer,
// then the layer graph printed as "top-bottom" pairs, three per row
std::string get_configuration()
{
std::string str;
// print all layer configs
for (int j = 0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++) str+= "  "+ std::to_string((long long)j) +" : " +layer_sets[MAIN_LAYER_SET][j]->name +" : " + layer_sets[MAIN_LAYER_SET][j]->get_config_string();
str += "\n";
// print layer links
if (layer_graph.size() <= 0) return str;
for (int j = 0; j < (int)layer_graph.size(); j++)
{
// j%3 drives the "three pairs per line" formatting
if (j % 3 == 0) str += "  ";
if((j % 3 == 1)|| (j % 3 == 2)) str += ", ";
str +=layer_graph[j].first + "-" + layer_graph[j].second;
if (j % 3 == 2) str += "\n";
}
return str;
}
// performs forward pass and returns <class index, probability>
// the forward() result is a live pointer into the last layer - do not delete or modify it
// if calling over multiple threads, provide the thread index since the interal data is not otherwise thread safe
std::tuple<int, double> predict_class(const float *in, int _thread_number = -1)
{
	const float *probs = forward(in, 1.f, _thread_number);
	const int best = arg_max(probs, out_size());
	return std::make_tuple(best, (double)probs[best]);
}
//----------------------------------------------------------------------------------------------------------
// F O R W A R D
//
// the main forward pass
// if calling over multiple threads, provide the thread index since the interal data is not otherwise thread safe
// train parameter is used to designate the forward pass is used in training (it turns on dropout layers, etc..)
// returns a live pointer into the last layer's node memory; caller must not free or modify it
float* forward(const float *in, float temperature = 1.f, int _thread_number = -1, int _train = 0)
{
// resolve and validate which per-thread layer set to use
if(_thread_number<0) _thread_number=get_thread_num();
if (_thread_number > _thread_count && _thread_count>0) bail("need to enable threading\n");
if (_thread_number >= (int)layer_sets.size()) bail("need to enable threading\n");
//std::cout << get_thread_num() << ",";
// clear nodes to zero & find input layers
std::vector<base_layer *> inputs;
__for__(auto layer __in__ layer_sets[_thread_number])
{
if (dynamic_cast<input_layer*> (layer) != NULL) inputs.push_back(layer);
layer->set_threading(_internal_thread_count);
layer->node.fill(0.f);
}
// first layer assumed input. copy input to it
// multiple input layers are fed consecutive slices of 'in'
const float *in_ptr = in;
//base_layer * layer = layer_sets[_thread_number][0];
//memcpy(layer->node.x, in, sizeof(float)*layer->node.size());
__for__(auto layer __in__ inputs)
{
memcpy(layer->node.x, in_ptr, sizeof(float)*layer->node.size());
in_ptr += layer->node.size();
}
//for (int i = 0; i < layer->node.size(); i++)
// layer_sets[_thread_number][0]->node.x[i] = in[i];
// for all layers
__for__(auto layer __in__ layer_sets[_thread_number])
{
// add bias and activate these outputs (they should all be summed up from other branches at this point)
//for(int j=0; j<layer->node.chans; j+=10) for (int i=0; i<layer->node.cols*layer->node.rows; i+=10) std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|";
layer->activate_nodes(temperature);
//for(int j=0; j<layer->node.chans; j++) for (int i=0; i<layer->node.cols*layer->node.rows; i+=10) std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|";
// send output signal downstream (note in this code 'top' is input layer, 'bottom' is output - bucking tradition
__for__ (auto &link __in__ layer->forward_linked_layers)
{
// instead of having a list of paired connections, just use the shape of W to determine connections
// this is harder to read, but requires less look-ups
// the 'link' variable is a std::pair created during the connect() call for the layers
int connection_index = link.first;
base_layer *p_bottom = link.second;
// weight distribution of the signal to layers under it
#ifdef MOJO_PROFILE_LAYERS
StartCounter();
#endif
p_bottom->accumulate_signal(*layer, *W[connection_index], _train);
//if (p_bottom->has_weights())
//for(int j=0; j<layer->node.chans; j++)
//int j=0; for (int i=0; i<layer->node.cols*layer->node.rows; i+=10) std::cout<< layer->node.x[i+j*layer->node.chan_stride] <<"|";
#ifdef MOJO_PROFILE_LAYERS
std::cout << p_bottom->name << "\t" << GetCounter() << "ms\n";
#endif
}
}
// return pointer to float * result from last layer
/* std::cout << "out:";
for (int i = 0; i < 10; i++)
{
std::cout << layer_sets[_thread_number][layer_sets[_thread_number].size() - 1]->node.x[i] <<",";
}
std::cout << "\n";
*/
return layer_sets[_thread_number][layer_sets[_thread_number].size()-1]->node.x;
}
// returns the index of 'element' in 'indexed_unique_values' (matched within a
// 1e-8 absolute tolerance); if absent, appends it and returns -1.
// (cleanup: removed the dead 'find_element' flag and the signed/unsigned
// loop-counter comparison of the original)
int float_vector_find(std::vector<float>& indexed_unique_values, float element){
	for (size_t k = 0; k < indexed_unique_values.size(); k++) {
		if (fabs(element - indexed_unique_values[k]) < 0.00000001f)
			return (int)k;
	}
	// not found: remember it so a later call can return its index
	indexed_unique_values.push_back(element);
	return -1;
}
//----------------------------------------------------------------------------------------------------------
// W R I T E
// serialize the network (layer configs, graph, biases, weights) to a string in
// the text format also produced by write(..., binary=false); in DISTILLATION_MODE
// the weights are written as indices into a table of unique float values
std::string getParams() {
std::ostringstream stream;
// save layers
int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();
// int ignore_cnt = 0;
// for (int j = 0; j<(int)layer_sets[0].size(); j++)
// if (dynamic_cast<dropout_layer*> (layer_sets[0][j]) != NULL) ignore_cnt++;
stream<<"mojo01" << std::endl;
stream<<(int)(layer_cnt)<<std::endl;
for(int j=0; j<(int)layer_sets[0].size(); j++)
stream << layer_sets[MAIN_LAYER_SET][j]->name << std::endl << layer_sets[MAIN_LAYER_SET][j]->get_config_string();
// if (dynamic_cast<dropout_layer*> (layer_sets[0][j]) != NULL)
// save graph
stream<<(int)layer_graph.size()<<std::endl;
for(int j=0; j<(int)layer_graph.size(); j++)
stream<<layer_graph[j].first << std::endl << layer_graph[j].second << std::endl;
// '0' flags text (non-binary) parameter data for read()
stream<<(int)0<<std::endl;
// save bias info
for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
{
if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
{
for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++) stream << layer_sets[MAIN_LAYER_SET][j]->bias.x[k] << " ";
stream << std::endl;
}
}
// save weights
if(DISTILLATION_MODE){
//Store the different weight values in a set so keep the unique
//std::set<float> unique_values;
std::vector<float> indexed_unique_values;
int loop_counter =0;
// first pass: build the unique-value table and count total weight entries
for(int j=0; j<(int)W.size(); j++)
{
if (W[j])
{
for (int i = 0; i < W[j]->size(); i++){
float_vector_find(indexed_unique_values, W[j]->x[i]);
//if (std::find(indexed_unique_values.begin(), indexed_unique_values.end(), W[j]->x[i]) == indexed_unique_values.end())
// indexed_unique_values.push_back(W[j]->x[i]);
//unique_values.insert(W[j]->x[i]);
loop_counter++;
}
}
}
//std::cout<<"Loop Counter" << loop_counter <<std::endl;
//Create a vector from the set in order to have index values
stream<<loop_counter<<std::endl;
//std::cout<<"Unique_values size:"<<indexed_unique_values.size()<<std::endl;
//std::vector<float> indexed_unique_values(unique_values.begin(), unique_values.end());
//for(int index_counter=0; index_counter < indexed_unique_values.size(); index_counter++){
// std::cout<< index_counter << " " << indexed_unique_values[index_counter] <<std::endl;
//}
//std::cout<<"INDEXED VALUE FROM GETPARAMS:"<<indexed_unique_values.size()<<std::endl;
//Send the size of the mapping
stream<<indexed_unique_values.size()<<std::endl;
//std::cout<<"INDEXED VALUE FROM GETPARAMS:"<<indexed_unique_values.size()<<std::endl;
// emit the index -> value table
for(int index_counter=0; index_counter < indexed_unique_values.size(); index_counter++){
stream << index_counter << std::endl;
stream << indexed_unique_values[index_counter] << std::endl;
}
// second pass: emit each weight as its index into the table
// (every value is already in the table, so float_vector_find never returns -1 here)
for(int j=0; j<(int)W.size(); j++)
{
if (W[j])
{
for (int i = 0; i < W[j]->size(); i++){
stream << float_vector_find(indexed_unique_values,W[j]->x[i]) << " ";//(int)std::distance(indexed_unique_values.begin(),std::find(indexed_unique_values.begin(),indexed_unique_values.end(),W[j]->x[i])) << " ";
//std::cout<< (int)std::distance(indexed_unique_values.begin(),std::find(indexed_unique_values.begin(),indexed_unique_values.end(),W[j]->x[i]))<< W[j]->x[i] << std::endl;
//std::cout<< float_vector_find(indexed_unique_values,W[j]->x[i])<<" "<< W[j]->x[i]<<std::endl;
}
stream << std::endl;
}
}
}
else{
//stream << (int)0 <<std::endl;
// plain text weights, one matrix per line
for(int j=0; j<(int)W.size(); j++)
{
if (W[j])
{
for (int i = 0; i < W[j]->size(); i++) stream << W[j]->x[i] << " ";
stream << std::endl;
}
}
}
std::string res = stream.str();
return res;
}
// returns a flat vector of all model parameters: every bias value of every
// bias-using layer followed by every weight value.
// BUG FIX: a previous version had a body-less "save graph" for-loop here whose
// only effect was to nest the bias loop inside it - biases were pushed
// layer_graph.size() times (and not at all when the graph was empty).
std::vector<float> getModelParams() {
	std::vector<float> ret;
	// save bias info (exactly once per layer)
	for (int j = 0; j < (int)layer_sets[MAIN_LAYER_SET].size(); j++)
		if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
			for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
				ret.push_back(layer_sets[MAIN_LAYER_SET][j]->bias.x[k]);
	// save weights
	for (int j = 0; j < (int)W.size(); j++)
		if (W[j])
			for (int i = 0; i < W[j]->size(); i++)
				ret.push_back(W[j]->x[i]);
	return ret;
}
//
// write parameters to stream/file
// note that this does not persist intermediate training information that could be needed to 'pickup where you left off'
// format: "mojo01" header, layer count, per-layer name/config, graph pair count
// and pairs, then a 1/0 binary flag followed by biases and weights
// (raw floats when binary, space-separated text otherwise)
bool write(std::ofstream& ofs, bool binary = false, bool final = false)
{
// save layers
int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();
// int ignore_cnt = 0;
// for (int j = 0; j<(int)layer_sets[0].size(); j++)
// if (dynamic_cast<dropout_layer*> (layer_sets[0][j]) != NULL) ignore_cnt++;
ofs<<"mojo01" << std::endl;
ofs<<(int)(layer_cnt)<<std::endl;
for(int j=0; j<(int)layer_sets[0].size(); j++)
ofs << layer_sets[MAIN_LAYER_SET][j]->name << std::endl << layer_sets[MAIN_LAYER_SET][j]->get_config_string();
// if (dynamic_cast<dropout_layer*> (layer_sets[0][j]) != NULL)
// save graph
ofs<<(int)layer_graph.size()<<std::endl;
for(int j=0; j<(int)layer_graph.size(); j++)
ofs<<layer_graph[j].first << std::endl << layer_graph[j].second << std::endl;
if(binary)
{
ofs<<(int)1<<std::endl; // flags that this is binary data
// binary version to save space if needed
// save bias info
for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
if(layer_sets[MAIN_LAYER_SET][j]->use_bias())
ofs.write((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x, layer_sets[MAIN_LAYER_SET][j]->bias.size()*sizeof(float));
// save weights
for (int j = 0; j < (int)W.size(); j++)
{
if (W[j])
ofs.write((char*)W[j]->x, W[j]->size()*sizeof(float));
}
}
else
{
// '0' flags text data
ofs<<(int)0<<std::endl;
// save bias info
for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
{
if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
{
for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++) ofs << layer_sets[MAIN_LAYER_SET][j]->bias.x[k] << " ";
ofs << std::endl;
}
}
// save weights
for(int j=0; j<(int)W.size(); j++)
{
if (W[j])
{
for (int i = 0; i < W[j]->size(); i++) ofs << W[j]->x[i] << " ";
ofs << std::endl;
}
}
}
ofs.flush();
return true;
}
// convenience overload: open 'filename' in binary mode and serialize into it
bool write(std::string &filename, bool binary = false, bool final = false) {
	std::ofstream out(filename.c_str(), std::ios::binary);
	return write(out, binary, final);
}
// convenience overload for C-string filenames; forwards to the std::string version
bool write(char *filename, bool binary = false, bool final = false)
{
	std::string name = filename;
	return write(name, binary, final);
}
// read network from a file/stream
// reads one line from 'ifs', treating '\n', '\r\n', and bare '\r' uniformly as
// line endings; sets eofbit only when EOF is reached with nothing read, so a
// final line without a terminator is still returned intact
std::string getcleanline(std::istream& ifs)
{
	std::string line;
	// Read straight from the streambuf: faster than per-char istream extraction.
	// Code that uses streambuf this way must be guarded by a sentry object,
	// which performs tasks such as thread synchronization and updating the
	// stream state.
	std::istream::sentry guard(ifs, true);
	std::streambuf* buf = ifs.rdbuf();
	while (true) {
		const int ch = buf->sbumpc();
		if (ch == '\n') {
			return line;
		} else if (ch == '\r') {
			// swallow the '\n' of a CRLF pair if present
			if (buf->sgetc() == '\n') buf->sbumpc();
			return line;
		} else if (ch == EOF) {
			// handle the case when the last line has no line ending
			if (line.empty()) ifs.setstate(std::ios::eofbit);
			return line;
		} else {
			line += (char)ch;
		}
	}
}
//----------------------------------------------------------------------------------------------------------
// R E A D
//
// parse a parameter dump produced by getParams() and load it into this network.
// BUG FIX: the parameter declaration had been corrupted by a bad encoding
// round-trip ('&params' rendered as a pilcrow), which does not compile.
void fetchParams(std::string &params) {
	std::istringstream ss(params);
	read(ss);
}
// deserialize a network previously produced by write()/getParams():
// header, layer definitions, graph, then a binary/text flag followed by
// biases and weights. returns false only when the stream is initially bad.
bool read(std::istream &ifs)
{
if(!ifs.good()) return false;
std::string s;
s = getcleanline(ifs);
int layer_count;
// version: 1 = "mojo01" header, -1 = legacy "mojo:" list, 0 = bare layer count
int version = 0;
if (s.compare("mojo01")==0)
{
s = getcleanline(ifs);
layer_count = atoi(s.c_str());
version = 1;
//std::cout<<"READ: Layer count:"<<layer_count<<std::endl;
}
else if (s.compare("mojo:") == 0)
{
// legacy format: remaining lines are layer configs, auto-named and auto-connected
version = -1;
int cnt = 1;
while (!ifs.eof())
{
s = getcleanline(ifs);
if (s.empty()) continue;
push_back(int2str(cnt).c_str(), s.c_str());
cnt++;
}
connect_all();
// copies batch=0 stuff to other batches
sync_layer_sets();
return true;
}
else
layer_count = atoi(s.c_str());
// read layer def
std::string layer_name;
std::string layer_def;
for (auto i=0; i<layer_count; i++)
{
layer_name = getcleanline(ifs);
layer_def = getcleanline(ifs);
push_back(layer_name.c_str(),layer_def.c_str());
}
//std::cout<<"READ: Layer Name Successful"<<std::endl;
// read graph
int graph_count;
ifs>>graph_count;
//std::cout<<"Read: Graph Count"<<graph_count<<std::endl;
getline(ifs,s); // get endline
if (graph_count <= 0)
{
// no explicit graph: fall back to sequential connection
connect_all();
}
else
{
std::string layer_name1;
std::string layer_name2;
for (auto i=0; i<graph_count; i++)
{
layer_name1= getcleanline(ifs);
layer_name2 = getcleanline(ifs);
connect(layer_name1.c_str(),layer_name2.c_str());
}
}
//std::cout<<"Read: Layer Connection Successful"<<std::endl;
int binary;
s=getcleanline(ifs); // get endline
binary = atoi(s.c_str());
//std::cout<<"Read: Binary value:"<<binary<<std::endl;
// NOTE(review): 'distillation_compression' is declared but never used here
int distillation_compression;
// binary version to save space if needed
if(binary==1)
{
// raw float dumps in layer order, then weight-matrix order
for(int j=0; j<(int)layer_sets[MAIN_LAYER_SET].size(); j++)
if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
{
//int c = layer_sets[MAIN_LAYER_SET][j]->bias.chans;
//int cs = layer_sets[MAIN_LAYER_SET][j]->bias.chan_stride;
//for (int i = 0; i < layer_sets[MAIN_LAYER_SET][j]->bias.size; i++)
ifs.read((char*)layer_sets[MAIN_LAYER_SET][j]->bias.x, layer_sets[MAIN_LAYER_SET][j]->bias.size()*sizeof(float));
}
for (int j = 0; j < (int)W.size(); j++)
{
if (W[j])
{
ifs.read((char*)W[j]->x, W[j]->size()*sizeof(float));
}
}
}
else if(binary==0)// text version
{
// read bias
for(int j=0; j<layer_count; j++)
{
if (layer_sets[MAIN_LAYER_SET][j]->use_bias())
{
// int c = layer_sets[MAIN_LAYER_SET][j]->bias.chans;
// int cs = layer_sets[MAIN_LAYER_SET][j]->bias.chan_stride;
// for (int i = 0; i < c; i++)
for (int k = 0; k < layer_sets[MAIN_LAYER_SET][j]->bias.size(); k++)
{
ifs >> layer_sets[MAIN_LAYER_SET][j]->bias.x[k];
//std::cout << layer_sets[MAIN_LAYER_SET][j]->bias.x[k] << ",";
}
ifs.ignore();// getline(ifs, s); // get endline
}
}
// index -> unique weight value table used by the distilled format
std::map<int, float> unique_mapping;
//Empty line after the layers
if(DISTILLATION_MODE){
ifs.ignore();
int loop_counter;
s = getcleanline(ifs);
loop_counter = atoi(s.c_str());
//std::cout<<"Read Loop Counter is:"<<loop_counter<<std::endl;
int unique_values_size;
s = getcleanline(ifs);
unique_values_size = atoi(s.c_str());
//std::cout<<"INDEXED VALUE FROM READ:"<<unique_values_size<<std::endl;
int vector_idx;
float vector_val;
// rebuild the unique-value table written by getParams()
for(int idx_counter=0; idx_counter < unique_values_size; idx_counter++){
ifs >> vector_idx;
ifs.ignore();
ifs >> vector_val;
ifs.ignore();
//s = getcleanline(ifs);
//vector_idx = atoi(s.c_str());
//s = getcleanline(ifs);
//vector_val = atof(s.c_str());
//std::cout<<vector_idx<< " "<<vector_val<<std::endl;
unique_mapping.insert({vector_idx,vector_val});
}
//std::cout<<"SET OF PAIRS SIZE:"<<unique_mapping.size()<<std::endl;
int translator;
// each stored weight is an index; translate it back to its float value
for (auto j=0; j<(int)W.size(); j++)
{
if (W[j])
{
for (int i = 0; i < W[j]->size(); i++){
ifs >> translator;
//std::cout<<"Translator:"<<translator<<" "<<unique_mapping[translator]<<std::endl;
W[j]->x[i] = unique_mapping[translator];
}
ifs.ignore(); //getline(ifs, s); // get endline
}
}
}
else{
// plain text weights
for (auto j=0; j<(int)W.size(); j++)
{
if (W[j])
{
for (int i = 0; i < W[j]->size(); i++) ifs >> W[j]->x[i];
ifs.ignore(); //getline(ifs, s); // get endline
}
}
}
}
// copies batch=0 stuff to other batches
sync_layer_sets();
return true;
}
bool read(std::string filename)
{
std::ifstream fs(filename.c_str(),std::ios::binary);
if (fs.is_open())
{
bool ret = read(fs);
fs.close();
return ret;
}
else return false;
}
// C-string convenience overload; forwards to the std::string version
bool read(const char *filename)
{
	return read(std::string(filename));
}
// returns a vector representation of dw_sets[0] and dbias_sets[0]
// !must be invoked right after sync_mini_batch() invoke
// (i.e., at the end of mini-batch processing so that all gradients are
// summed to dw_sets[0] and dbias_sets[0])
// Returns: ret[0]: size of dw_sets[0]
// ret[1]: size of dw_sets[0][0]
// ret[2]: dw_sets[0][0][0]
// ret[3]: dw_sets[0][0][1]
// ...
// ret[n]: size of dbias[0]
// ...
std::vector<float> gradients() {
std::vector<float> ret;
// weight gradients
sync_mini_batch();
ret.push_back(dW_sets[0].size());
for (int i=0; i<dW_sets[0].size(); i++) {
ret.push_back(dW_sets[0][i].size());
for (int j=0; j<dW_sets[0][i].size(); j++)
ret.push_back(dW_sets[0][i].x[j]);
}
// bias gradients
ret.push_back(dbias_sets[0].size());
for (int i=0; i<dbias_sets[0].size(); i++) {
ret.push_back(dbias_sets[0][i].size());
for (int j=0; j<dbias_sets[0][i].size(); j++)
ret.push_back(dbias_sets[0][i].x[j]);
}
return ret;
}
int sizeOfGradients() {
int params = 0;
// weight gradients
for (int i=0; i<dW_sets[0].size(); i++)
params += dW_sets[0][i].size();
// bias gradients
for (int i=0; i<dbias_sets[0].size(); i++)
params += dbias_sets[0][i].size();
return params;
}
// adds g2 to g1 (both in the vector layout produced by gradients():
// count and size slots are skipped, only value slots are accumulated).
// IMPROVEMENT: g2 is now taken by const reference to avoid copying a
// potentially large gradient vector on every call (call sites unchanged).
void addGradients(std::vector<float> &g1, const std::vector<float> &g2) {
	int idx=0;
	// weight gradients: skip the matrix-count slot, then per matrix skip its size slot
	idx++;
	for (int i=0; i<dW_sets[0].size(); i++) {
		idx++;
		for (int j=0; j<dW_sets[0][i].size(); j++) {
			g1[idx] += g2[idx];
			idx++;
		}
	}
	// bias gradients: same walk over the bias section
	idx++;
	for (int i=0; i<dbias_sets[0].size(); i++) {
		idx++;
		for (int j=0; j<dbias_sets[0][i].size(); j++) {
			g1[idx] += g2[idx];
			idx++;
		}
	}
}
// clips each batch's gradient to L2 norm at most C (used for differential privacy):
// computes the norm over all weight and bias deltas of batch b, then rescales them.
void scale_gradients(float C) {
	int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();
	base_layer *layer;
	for (int b = 1; b< _batch_size; b++) {
		// pass 1: accumulate the squared L2 norm of batch b's gradients
		float sqsum = 0;
		for (int k = layer_cnt - 1; k >= 0; k--) {
			layer = layer_sets[MAIN_LAYER_SET][k];
			__for__ (auto &link __in__ layer->backward_linked_layers) {
				int w_index = (int) link.first;
				for (int j=0; j<dW_sets[b][w_index].size(); j++)
					sqsum += dW_sets[b][w_index].x[j] * dW_sets[b][w_index].x[j];
			}
			for (int j=0; j<dbias_sets[b][k].size(); j++)
				sqsum += dbias_sets[b][k].x[j] * dbias_sets[b][k].x[j];
		}
		float norm = sqrt(sqsum);
		//__android_log_print(ANDROID_LOG_DEBUG, "INFO", "Norm: %6.4lf", norm);
		float scale = 1 / std::max<float>(1.0, norm / C);
		// pass 2: apply the clipping scale
		for (int k = layer_cnt - 1; k >= 0; k--) {
			// BUG FIX: 'layer' was left stale from the norm loop here, so every
			// iteration scaled layer 0's weight links; re-fetch it per k.
			layer = layer_sets[MAIN_LAYER_SET][k];
			__for__ (auto &link __in__ layer->backward_linked_layers) {
				int w_index = (int) link.first;
				for (int j=0; j<dW_sets[b][w_index].size(); j++)
					dW_sets[b][w_index].x[j] *= scale;
			}
			for (int j=0; j<dbias_sets[b][k].size(); j++)
				dbias_sets[b][k].x[j] *= scale;
		}
	}
}
// adds zero-mean gaussian noise with standard deviation 'sigma' to every
// element of v (used to add differential-privacy noise to gradients).
// BUG FIX: the engine was previously a default-seeded local, so every call
// produced the exact same "noise" sequence; a single engine seeded from
// std::random_device gives fresh noise per call.
void addGaussian(std::vector<float> &v, float sigma) {
	static std::default_random_engine generator(std::random_device{}());
	std::normal_distribution<double> dist(0.0, sigma);
	for (size_t i = 0; i < v.size(); i++)
		v[i] += (float)dist(generator);
}
// same as gradients() but adds noise for differential privacy
// gradients are clipped to norm C, serialized with their count/size slots set
// to 0 (so the noise step does not perturb structural fields), noised with
// stddev C*sigma, then the real sizes are written back into place
std::vector<float> DPgradients(float C, float sigma) {
std::vector<float> ret;
// 'sizes' holds the real structural values; 'indices' their positions in ret
std::vector<int> sizes;
std::vector<int> indices;
int idx = 0;
scale_gradients(C);
sync_mini_batch();
// weight gradients
sizes.push_back(dW_sets[0].size());
indices.push_back(idx);
ret.push_back(0); // setting 0 instead of the size will not affect the norm computation
idx++;
for (int i=0; i<dW_sets[0].size(); i++) {
sizes.push_back(dW_sets[0][i].size());
indices.push_back(idx);
ret.push_back(0);
idx++;
for (int j=0; j<dW_sets[0][i].size(); j++) {
ret.push_back(dW_sets[0][i].x[j]);
idx++;
}
}
// bias gradients
sizes.push_back(dbias_sets[0].size());
indices.push_back(idx);
ret.push_back(0);
idx++;
for (int i=0; i<dbias_sets[0].size(); i++) {
sizes.push_back(dbias_sets[0][i].size());
indices.push_back(idx);
ret.push_back(0);
idx++;
for (int j=0; j<dbias_sets[0][i].size(); j++) {
ret.push_back(dbias_sets[0][i].x[j]);
idx++;
}
}
// noise every entry (structural slots are 0 and get overwritten below)
addGaussian(ret, C * sigma);
// copy back sizes to ret
for (int i=0; i<sizes.size(); i++)
ret[indices[i]] = sizes[i];
return ret;
}
// performs model update (descent) with the given gradients in a vector
// representation (see gradients() method)
void descent(std::vector<float> g) {
int idx = 0;
int dWSize = g[idx++];
for (int i=0; i<dWSize; i++) {
int size = g[idx++];
for (int j=0; j<size; j++)
dW_sets[0][i].x[j] = g[idx++];
}
int dbiasSize= g[idx++];
for (int i=0; i<dbiasSize; i++) {
int size = g[idx++];
for (int j=0; j<size; j++)
dbias_sets[0][i].x[j] = g[idx++];
}
descent();
}
// returns a flat representation of the gradient without additional info
// (e.g., sizes). input layout (see gradients()):
// [#weight mats][size][values...]... [#bias vecs][size][values...]...
// IMPROVEMENT: g is taken by const reference (the original copied it), and
// float->int narrowing is made explicit.
std::vector<float> flatGrad(const std::vector<float> &g) {
	int idx = 0;
	std::vector<float> ret;
	// copy out weight gradient values, skipping count/size slots
	int dWSize = (int)g[idx++];
	for (int i=0; i<dWSize; i++) {
		int size = (int)g[idx++];
		for (int j=0; j<size; j++)
			ret.push_back(g[idx++]);
	}
	// copy out bias gradient values
	int dbiasSize = (int)g[idx++];
	for (int i=0; i<dbiasSize; i++) {
		int size = (int)g[idx++];
		for (int j=0; j<size; j++)
			ret.push_back(g[idx++]);
	}
	return ret;
}
// merges flat gradient to given gradient
// walks g's [count][size][values...] layout (weights then biases) and
// overwrites only the value slots with consecutive entries of 'flat'
void mergeFlatGrad(std::vector<float> &g, std::vector<float> flat) {
	int pos = 0;
	int flat_pos = 0;
	const int n_w = g[pos++];
	for (int m = 0; m < n_w; m++) {
		const int cnt = g[pos++];
		for (int v = 0; v < cnt; v++, pos++, flat_pos++)
			g[pos] = flat[flat_pos];
	}
	const int n_b = g[pos++];
	for (int m = 0; m < n_b; m++) {
		const int cnt = g[pos++];
		for (int v = 0; v < cnt; v++, pos++, flat_pos++)
			g[pos] = flat[flat_pos];
	}
}
#ifndef MOJO_NO_TRAINING // this is surely broke by now and will need to be fixed
// ===========================================================================
// training part
// ===========================================================================
// resets the state of all batches to 'free' state
// NOTE(review): memset writes batch_open.size() *bytes*; this assumes the
// element type of batch_open is byte-sized and BATCH_FREE fits in one byte - confirm.
void reset_mini_batch() { memset(batch_open.data(), BATCH_FREE, batch_open.size()); }
// sets up number of mini batches (storage for sets of weight deltas)
void set_mini_batch_size(int batch_cnt)
{
	// clamp to at least one batch slot
	_batch_size = (batch_cnt < 1) ? 1 : batch_cnt;
	dW_sets.resize(_batch_size);
	dbias_sets.resize(_batch_size);
	batch_open.resize(_batch_size);
	reset_mini_batch();
}
// returns the configured number of mini-batch slots
int get_mini_batch_size() { return _batch_size; }
// return index of next free batch
// or returns -2 (BATCH_FILLED_COMPLETE) if no free batches - all complete (need a sync call)
// or returns -1 (BATCH_FILLED_IN_PROCESS) if no free batches - some still in progress (must wait to see if one frees)
int get_next_open_batch()
{
	int reserved = 0;
	int filled = 0;
	for (int i = 0; i < (int)batch_open.size(); i++)
	{
		if (batch_open[i] == BATCH_FREE) return i;
		if (batch_open[i] == BATCH_RESERVED) reserved++;
		if (batch_open[i] == BATCH_COMPLETE) filled++;
	}
	if (reserved > 0) return BATCH_FILLED_IN_PROCESS; // all filled but waiting for reserves
	if (filled == (int)batch_open.size()) return BATCH_FILLED_COMPLETE; // all filled and complete
	bail("threading error"); // should not get here unless threading problem
	// BUG FIX: if bail() is not marked noreturn, falling off the end of a
	// non-void function is undefined behavior - return a safe "wait" sentinel.
	return BATCH_FILLED_IN_PROCESS;
}
//----------------------------------------------------------------------------------------------------------
// s y n c   m i n i   b a t c h
//
// apply all weights to first set of dW, then apply to model weights
// sums the per-batch weight and bias deltas (sets 1.._batch_size-1) into set 0
// and resets batch_index; the actual weight update happens in descent()
void sync_mini_batch()
{
// need to ensure no batches in progress (reserved)
// int next = get_next_open_batch();
// if (next == BATCH_FILLED_IN_PROCESS) bail("thread lock");
int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();
base_layer *layer;
// sum contributions
for (int k = layer_cnt - 1; k >= 0; k--)
{
layer = layer_sets[MAIN_LAYER_SET][k];
__for__(auto &link __in__ layer->backward_linked_layers)
{
int w_index = (int)link.first;
// if batch free, then make sure it is zero'd out because we will increment dW set [0]
//if (batch_open[0] == BATCH_FREE) dW_sets[0][w_index].fill(0);
for (int b = 1; b< _batch_size; b++)
{
/*if (batch_open[b] == BATCH_COMPLETE)*/ dW_sets[0][w_index] += dW_sets[b][w_index];
}
}
// convolution layers keep no accumulated bias deltas here, so skip them
if (dynamic_cast<convolution_layer*> (layer) != NULL) continue;
// bias stuff... that needs to be fixed for conv layers perhaps
//if (batch_open[0] == BATCH_FREE) dbias_sets[0][k].fill(0);
for (int b = 1; b< _batch_size; b++)
{
/*if (batch_open[b] == BATCH_COMPLETE)*/ dbias_sets[0][k] += dbias_sets[b][k];
}
}
// descent();
// prepare to start mini batch over
// reset_mini_batch();
batch_index = 0;
// train_updates++; // could have no updates .. so this is not exact
// sync_layer_sets();
}
// performs the descent operation (i.e., updates weights and gradients)
// walks layers back-to-front, letting the solver apply each accumulated dW,
// then updates biases and mirrors the new weights to all per-thread layer sets.
// NOTE(review): dW_sets is indexed with MAIN_LAYER_SET (a layer-set index)
// rather than a batch index - presumably both are 0; confirm intent.
void descent() {
int layer_cnt = (int)layer_sets[MAIN_LAYER_SET].size();
base_layer *layer;
// update weights
for (int k = layer_cnt - 1; k >= 0; k--)
{
layer = layer_sets[MAIN_LAYER_SET][k];
__for__(auto &link __in__ layer->backward_linked_layers)
{
int w_index = (int)link.first;
if (dW_sets[MAIN_LAYER_SET][w_index].size() > 0)
if(W[w_index]) _solver->increment_w(W[w_index], w_index, dW_sets[MAIN_LAYER_SET][w_index]); // -- 10%
}
layer->update_bias(dbias_sets[0][k], _solver->learning_rate);
}
train_updates++;
// copies batch=0 weights/state to the other per-thread layer sets
sync_layer_sets();
}
// reserve_next.. is used to reserve a space in the minibatch for the existing training sample
// spins (with a 1ms sleep) under the batch lock until a slot can be marked
// BATCH_RESERVED; returns the reserved slot index
int reserve_next_batch()
{
lock_batch();
int my_batch_index = -3;
while (my_batch_index < 0)
{
my_batch_index = get_next_open_batch();
if (my_batch_index >= 0) // valid index
{
batch_open[my_batch_index] = BATCH_RESERVED;
unlock_batch();
return my_batch_index;
}
else if (my_batch_index == BATCH_FILLED_COMPLETE) // all index are complete
{
// sync_mini_batch(); // MY edit commented out/ resets _batch_index to 0
my_batch_index = get_next_open_batch();
batch_open[my_batch_index] = BATCH_RESERVED;
unlock_batch();
return my_batch_index;
}
// need to wait for ones in progress to finish
unlock_batch();
mojo_sleep(1);
lock_batch();
}
// unreachable: the loop only exits via the returns above
return -3;
}
// --- training configuration accessors ---
// solver-backed accessors bail() if no solver has been set
float get_learning_rate() {if(!_solver) bail("set solver"); return _solver->learning_rate;}
void set_learning_rate(float alpha) {if(!_solver) bail("set solver"); _solver->learning_rate=alpha;}
void reset_solver() {if(!_solver) bail("set solver"); _solver->reset();}
// smart-train toggle and the energy threshold below which samples are skipped
bool get_smart_training() {return _smart_train;}
void set_smart_training(bool _use_train) { _smart_train = _use_train;}
float get_smart_train_level() { return _skip_energy_level; }
void set_smart_train_level(float _level) { _skip_energy_level = _level; }
// clamps to at least one epoch
void set_max_epochs(int max_e) { if (max_e <= 0) max_e = 1; max_epochs = max_e; }
int get_epoch() { return epoch_count; }
// goal here is to update the weights W.
// use w_new = w_old - alpha dE/dw
// E = sum: 1/2*||y-target||^2
// note y = f(x*w)
// dE = (target-y)*dy/dw = (target-y)*df/dw = (target-y)*df/dx* dx/dw = (target-y) * df * y_prev
// similarly for cross entropy
// ===========================================================================
// training part
// ===========================================================================
// enable simple train-time augmentation: random translation and flips only
// (rotation and scale are disabled in this mode)
void set_random_augmentation(int translate_x, int translate_y,
	int flip_h, int flip_v, mojo::pad_type padding = mojo::edge)
{
	use_augmentation = 1;
	augment_theta = 0;
	augment_scale = 0;
	augment_pad = padding;
	augment_x = translate_x;
	augment_y = translate_y;
	augment_h_flip = flip_h;
	augment_v_flip = flip_v;
}
// enable full train-time augmentation: translation, flips, rotation and scale
void set_random_augmentation(int translate_x, int translate_y,
	int flip_h, int flip_v, float rotation_deg, float scale, mojo::pad_type padding = mojo::edge)
{
	use_augmentation = 2;
	augment_theta = rotation_deg;
	augment_scale = scale;
	augment_pad = padding;
	augment_x = translate_x;
	augment_y = translate_y;
	augment_h_flip = flip_h;
	augment_v_flip = flip_v;
}
// call before starting training for current epoch
// resets per-epoch counters, builds the cost function, and (when smart
// training is stuck) halves the learning rate down to a floor of 1e-6
// NOTE(review): a new cost function is allocated every epoch and the previous
// one does not appear to be released here - possible leak, confirm ownership.
void start_epoch(std::string loss_function="mse")
{
_cost_function=new_cost_function(loss_function);
train_correct = 0;
train_skipped = 0;
train_updates = 0;
train_samples = 0;
if (epoch_count == 0) reset_solver();
// accuracy not improving .. slow learning
if(_smart_train && (best_accuracy_count > 4))
{
stuck_counter++;
set_learning_rate((0.5f)*get_learning_rate());
if (get_learning_rate() < 0.000001f)
{
// heat_weights();
set_learning_rate(0.000001f);
stuck_counter++;// end of the line.. so speed up end
}
best_accuracy_count = 0;
}
old_estimated_accuracy = estimated_accuracy;
estimated_accuracy = 0;
//_skip_energy_level = 0.05;
_running_sum_E = 0;
}
// time to stop?
// 2 stuck x 4 non best accuracy to quit = 8 times no improvement
bool elvis_left_the_building()
{
	return (epoch_count > max_epochs) || (stuck_counter > 3);
}
// call after putting all training samples through this epoch
// updates the accuracy estimate and plateau counters; returns true when
// training should stop (see elvis_left_the_building()).
bool end_epoch()
{
	// run leftovers through mini-batch
	// sync_mini_batch(); // MY edit
	epoch_count++;
	// estimate accuracy of validation run
	// BUG FIX: guard against division by zero when no samples were presented
	if (train_samples > 0)
		estimated_accuracy = 100.f*train_correct / train_samples;
	else
		estimated_accuracy = 0;
	if (train_correct > best_estimated_accuracy)
	{
		best_estimated_accuracy = (float)train_correct;
		best_accuracy_count = 0;
		stuck_counter = 0;
	}
	else best_accuracy_count++;
	return elvis_left_the_building();
}
// if smart training was thinking about exiting, calling reset will make it think everything is OK
void reset_smart_training()
{
// clear the plateau/stuck counters so training continues
stuck_counter=0;
best_accuracy_count = 0;
best_estimated_accuracy = 0;
}
//----------------------------------------------------------------------------------------------------------
// u p d a t e _ s m a r t _ t r a i n
//
// records per-sample training stats and, when smart training is enabled,
// adapts _skip_energy_level from a 1000-sample window of recent errors so
// that low-error samples can be skipped; the whole body is an OMP critical
// section since the counters and window are shared across threads
void update_smart_train(const float E, bool correct)
{
#ifdef MOJO_OMP
#pragma omp critical
#endif
{
train_samples++;
if (correct) train_correct++;
if (_smart_train)
{
_running_E.push_back(E);
_running_sum_E += E;
const int SMART_TRAIN_SAMPLE_SIZE = 1000;
int s = (int)_running_E.size();
if (s >= SMART_TRAIN_SAMPLE_SIZE)
{
// mean error over the window, scaled to choose a skip percentile
_running_sum_E /= (double)s;
std::sort(_running_E.begin(), _running_E.end());
float top_fraction = (float)_running_sum_E*10.f; //10.
const float max_fraction = 0.75f;
const float min_fraction = 0.075f;// 0.03f;
if (top_fraction > max_fraction) top_fraction = max_fraction;
if (top_fraction < min_fraction) top_fraction = min_fraction;
// pick the error value at the (1 - top_fraction) quantile as the new threshold
int index = s - 1 - (int)(top_fraction*(s - 1));
if (_running_E[index] > 0) _skip_energy_level = _running_E[index];
_running_sum_E = 0;
_running_E.clear();
}
}
if (E > 0 && E < _skip_energy_level)
{
//std::cout << "E=" << E;
train_skipped++;
}
} // omp critical
}
// finish back propogation through the hidden layers
// propagates deltas from the output layer back to the inputs, then accumulates
// per-connection weight deltas (and bias deltas) into the given mini-batch slot;
// the actual weight update is deferred to sync_mini_batch()/descent()
void backward_hidden(const int my_batch_index, const int thread_number, float temperature)
{
const int layer_cnt = (int)layer_sets[thread_number].size();
const int last_layer_index = layer_cnt - 1;
base_layer *layer;// = layer_sets[thread_number][last_layer_index];
// update hidden layers
// start at lower layer and push information up to previous layer
// handle dropout first
for (int k = last_layer_index; k >= 0; k--)
{
layer = layer_sets[thread_number][k];
// all the signals should be summed up to this layer by now, so we go through and take the grad of activiation
int nodes = layer->node.size();
// already did last layer, so skip it
if (k< last_layer_index)
for (int i = 0; i< nodes; i++)
layer->delta.x[i] *= layer->df(layer->node.x, i, nodes, temperature);
// now pass that signal upstream
__for__(auto &link __in__ layer->backward_linked_layers) // --- 50% of time this loop
{
base_layer *p_top = link.second;
// note all the delta[connections[i].second] should have been calculated by time we get here
layer->distribute_delta(*p_top, *W[link.first]);
}
}
// update weights - shouldn't matter the direction we update these
// we can stay in backwards direction...
// it was not faster to combine distribute_delta and increment_w into the same loop
int size_W = (int)W.size();
dW_sets[my_batch_index].resize(size_W);
dbias_sets[my_batch_index].resize(layer_cnt);
for (int k = last_layer_index; k >= 0; k--)
{
layer = layer_sets[thread_number][k];
__for__(auto &link __in__ layer->backward_linked_layers)
{
base_layer *p_top = link.second;
int w_index = (int)link.first;
//if (dynamic_cast<max_pooling_layer*> (layer) != NULL) continue;
layer->calculate_dw(*p_top, dW_sets[my_batch_index][w_index]);// --- 20%
// moved this out to sync_mini_batch();
//_solver->increment_w( W[w_index],w_index, dW_sets[_batch_index][w_index]); // -- 10%
}
// convolution layers keep no per-node bias deltas here, so skip them
if (dynamic_cast<convolution_layer*> (layer) != NULL) continue;
dbias_sets[my_batch_index][k] = layer->delta;
}
// if all batches finished, update weights
// lock_batch();
// batch_open[my_batch_index] = BATCH_COMPLETE;
// int next_index = get_next_open_batch();
// if (next_index == BATCH_FILLED_COMPLETE) // all complete
// sync_mini_batch(); // MY edit commented out/ resets _batch_index to 0
// unlock_batch();
}
// Build the (optionally augmented) network input from a raw float buffer.
// in             - contiguous source values covering every input_layer, in order
// _thread_number - index into layer_sets (one layer stack per thread)
// Returns a flat (in_size x 1 x 1) matrix; when use_augmentation is off this is
// a plain copy of `in`.
// Augmentation draws one scale/rotation/flip/shift per call and applies it to
// every multi-row/col input layer; 1-D inputs are copied through untouched.
mojo::matrix make_input(float *in, const int _thread_number)
{
	mojo::matrix augmented_input;
	// collect all input layers and their total node count
	std::vector<base_layer *> inputs;
	int in_size = 0;
	__for__(auto layer __in__ layer_sets[_thread_number])
	{
		if (dynamic_cast<input_layer*> (layer) != NULL)
		{
			inputs.push_back(layer);
			in_size += layer->node.size();
		}
	}
	if (use_augmentation > 0)
	{
		augmented_input.resize(in_size, 1, 1);
		// one random augmentation drawn per call, shared by all input layers
		float s = ((float)(rand() % 101) / 50.f - 1.f)*augment_scale;
		float t = ((float)(rand() % 101) / 50.f - 1.f)*augment_theta;
		bool flip_h = ((rand() % 2)*augment_h_flip) ? true: false;
		bool flip_v = ((rand() % 2)*augment_v_flip) ? true: false;
		int shift_x = (rand() % (augment_x * 2 + 1)) - augment_x;
		int shift_y = (rand() % (augment_y * 2 + 1)) - augment_y;
		int offset = 0;
		__for__(auto layer __in__ inputs)
		{
			// copy input to matrix type
			mojo::matrix m(layer->node.cols, layer->node.rows, layer->node.chans, in + offset);
			if (m.rows > 1 && m.cols > 1)
			{
#if defined(MOJO_CV2) || defined(MOJO_CV3)
				if ((augment_theta > 0 || augment_scale > 0))
					m = transform(m, m.cols / 2, m.rows / 2, m.cols, t, 1 + s);
#endif
				// BUGFIX: the flags were swapped here relative to train_class():
				// a horizontal flip mirrors columns, a vertical flip mirrors rows.
				if (flip_h) m = m.flip_cols();
				if (flip_v) m = m.flip_rows();
				mojo::matrix aug = m.shift(shift_x, shift_y, augment_pad);
				memcpy(augmented_input.x + offset, aug.x, sizeof(float)*aug.size());
				offset += aug.size();
			}
			else
			{
				// 1-D input: spatial augmentation does not apply, copy through
				memcpy(augmented_input.x + offset, m.x, sizeof(float)*m.size());
				offset += m.size();
			}
		}
	}
	else
	{
		augmented_input.resize(in_size, 1, 1);
		memcpy(augmented_input.x, in, sizeof(float)*in_size);
	}
	return augmented_input;
}
// QUANTIZATION
// Linear-scaling uniform quantization of the model weights, following the
// experimental analysis of https://arxiv.org/pdf/1802.05668.pdf :
//   1. only the linear scaling function sc(u)
//   2. only the uniform quantization function Q
//   3. deterministic rounding (not the stochastic variant)
// num_bits    - quantizer bit width; number of levels s = 2^num_bits - 1
// bucket_size - bucket length, used only when bucketing is enabled below
// Weights are quantized in place and immediately rescaled back to float.
void quantization_weight_model(int num_bits = 8, int bucket_size = 128) { //num_bits = 2 for testing
	// s = 2^num_bits - 1 quantization levels for the linear case
	int s = (1 << num_bits) - 1;
	// compile-time switch: bucketed scaling maintains variance and avoids
	// collapsing many values to 0, at the cost of per-bucket bookkeeping
	bool bucketing = false;
	if (bucketing) {
		for (int j = 0; j < (int)W.size(); j++) {
			if (W[j]) W[j]->bucket_scaling(bucket_size, s);
		}
	} else {
		float min, max;
		for (int j = 0; j < (int)W.size(); j++) {
			if (!W[j]) continue;
			// SCALING: map weights into [0,1] via w' = (w - min) / (max - min)
			W[j]->min_max(&min, &max);
			float alpha = max - min;  // scale
			float beta = min;         // offset
			// BUGFIX: a constant matrix (max == min) would divide by zero;
			// quantizing it is a no-op anyway, so skip it
			if (alpha == 0.f) continue;
			*W[j] = *W[j] + (-1.0) * beta;   // subtract beta
			*W[j] = *W[j] * (1.0 / alpha);   // divide by alpha
			// QUANTIZATION: deterministic rounding onto s uniform levels
			W[j]->round_matrix(s);
			// INVERSE SCALING: w = w' * alpha + beta
			*W[j] = *W[j] * alpha;
			*W[j] = *W[j] + beta;
		}
	}
}
// Append a deep copy of every weight matrix to old_weights, preserving the
// layout of W (absent entries are recorded as NULL placeholders).
// The caller owns the copies; pass them back via load_model_weights() or
// delete them explicitly.
void save_model_weights(std::vector<matrix*>* old_weights)
{
	for (std::vector<matrix*>::const_iterator it = W.begin(); it != W.end(); ++it)
		old_weights->push_back(*it ? new matrix(**it) : NULL);
}
// Replace the network's weights with the given set, taking ownership of the
// pointers in unquantized_weights; the matrices previously held in W are freed.
// Typically used to restore the weights saved by save_model_weights().
void load_model_weights(std::vector<matrix*>& unquantized_weights)
{
	for (size_t j = 0; j < W.size(); ++j)
		delete W[j];
	W = unquantized_weights;
}
//----------------------------------------------------------------------------------------------------------
// T R A I N   C L A S S
//
// after starting epoch, call this to train against a class label
// label_index must be 0 to out_size()-1
// for thread safety, you must pass in the thread_index if calling from different threads
// teacher_prob - optional teacher distribution for distillation; NULL means
//                plain (non-distilled) training
// returns false when the sample was skipped (smart-train) or no batch slot exists
bool train_class(float *in, int label_index, std::vector<float>* teacher_prob = NULL, int _thread_number = -1, bool debug=false)
{
	if (_solver == NULL) bail("set solver");
	if (_thread_number < 0) _thread_number = get_thread_num();
	if (_thread_number > _thread_count) bail("call allow_threads()");  // NOTE(review): looks like this should be >= — confirm
	const int thread_number = _thread_number;
	float *input = in;
	mojo::matrix augmented_input;
	if (use_augmentation > 0)
	{
		// copy input to matrix type so the spatial transforms can be applied
		mojo::matrix m(layer_sets[thread_number][0]->node.cols, layer_sets[thread_number][0]->node.rows, layer_sets[thread_number][0]->node.chans, in);
#if defined(MOJO_CV2) || defined(MOJO_CV3)
		if (augment_theta > 0 || augment_scale > 0)
		{
			float s = ((float)(rand() % 101) / 50.f - 1.f)*augment_scale;
			float t = ((float)(rand() % 101) / 50.f - 1.f)*augment_theta;
			m = transform(m, m.cols / 2, m.rows / 2, m.cols, t, 1 + s);
		}
#endif
		if (augment_h_flip)
			if ((rand() % 2) == 0)
				m = m.flip_cols();
		if (augment_v_flip)
			if ((rand() % 2) == 0)
				m = m.flip_rows();
		augmented_input = m.shift((rand() % (augment_x * 2 + 1)) - augment_x, (rand() % (augment_y * 2 + 1)) - augment_y, augment_pad);
		input = augmented_input.x;
	}
	// get next free mini_batch slot; this is tied to the current state of the model
	int my_batch_index = batch_index++;
	// out of data or an error if index is negative
	if (my_batch_index < 0) return false;
	// forward pass; distillation softens the output with TEMPERATURE
	if (teacher_prob == NULL) {
		forward(input, 1, thread_number, 1);
	} else {
		forward(input, TEMPERATURE, thread_number, 1);
	}
	// set all deltas to zero
	__for__(auto layer __in__ layer_sets[thread_number]) layer->delta.fill(0.f);
	int layer_cnt = (int)layer_sets[thread_number].size();
	// calc delta for last layer to prop back up through network:
	// d = (target - out) * grad_activation(out)
	const int last_layer_index = layer_cnt - 1;
	base_layer *layer = layer_sets[thread_number][last_layer_index];
	const int layer_node_size = layer->node.size();
	if (dynamic_cast<dropout_layer*> (layer) != NULL) bail("can't have dropout on last layer");
	float E = 0;
	int max_j_out = 0;
	int max_j_target = label_index;
	// one-hot target vector: 0/1 for sigmoid/softmax-family outputs, -1/1 otherwise
	std::vector<float> target;
	if ((std::string("sigmoid").compare(layer->p_act->name) == 0) ||
		(std::string("softmax").compare(layer->p_act->name) == 0) ||
		(std::string("logsoftmax").compare(layer->p_act->name) == 0)) {
		target = std::vector<float>(layer_node_size, 0);
	} else {
		target = std::vector<float>(layer_node_size, -1);
	}
	if (label_index >= 0 && label_index < layer_node_size) {
		target[label_index] = 1;
	}
	// because of numerator/denominator cancellations which prevent a divide by zero issue,
	// we need to handle some activation+cost combinations specially on the output layer
	float cost_activation_type = 0;
	if ((std::string("sigmoid").compare(layer->p_act->name) == 0) &&
		(std::string("cross_entropy").compare(_cost_function->name) == 0)) {
		cost_activation_type = 1;
	} else if ((std::string("softmax").compare(layer->p_act->name) == 0) &&
		(std::string("cross_entropy").compare(_cost_function->name) == 0)) {
		cost_activation_type = 1;
	} else if ((std::string("tanh").compare(layer->p_act->name) == 0) &&
		(std::string("cross_entropy").compare(_cost_function->name) == 0)) {
		cost_activation_type = 4;
	} else if ((std::string("logsoftmax").compare(layer->p_act->name) == 0) &&
		(std::string("distillation").compare(_cost_function->name) == 0)) {
		cost_activation_type = -1;
	}
	for (int j = 0; j < layer_node_size; j++) {
		// BUGFIX: the original dereferenced teacher_prob unconditionally in the
		// generic branches, crashing when train_class() ran without a teacher;
		// pass 0 in that case (matches train_target's usage of d_cost)
		const float teacher_val = (teacher_prob != NULL) ? (*teacher_prob)[j] : 0.f;
		if (cost_activation_type > 0) {
			// fused cost+activation gradient (cancellation-safe form)
			layer->delta.x[j] = cost_activation_type * (layer->node.x[j] - target[j]);
		} else if (cost_activation_type < 0) {
			// distillation loss
			layer->delta.x[j] = _cost_function->d_cost(layer->node.x[j], target[j], teacher_val, TEMPERATURE);
		} else {
			layer->delta.x[j] = _cost_function->d_cost(layer->node.x[j], target[j], teacher_val, TEMPERATURE) * layer->df(layer->node.x, j, layer_node_size, TEMPERATURE);
		}
		// pick best response
		if (layer->node.x[max_j_out] < layer->node.x[j]) {
			max_j_out = j;
		}
		// for better E maybe just look at 2 highest scores so zeros don't dominate
		E += mse::cost(layer->node.x[j], target[j], 0, 0);
	}
	E /= (float)layer_node_size;
	// check for NAN
	if (E != E) bail("network blew up - try lowering learning rate\n");
	bool match = false;
	if (max_j_target == max_j_out) match = true;
	update_smart_train(E, match);
	// smart-train: skip the backward pass for easy, already-correct samples
	// (batch slot bookkeeping is intentionally disabled here — see sync_mini_batch)
	if (E>0 && E<_skip_energy_level && _smart_train && match)
	{
		return false; // return without doing training
	}
	if (teacher_prob != NULL) {
		backward_hidden(my_batch_index, thread_number, TEMPERATURE);
	} else {
		backward_hidden(my_batch_index, thread_number, 1);
	}
	return true;
}
//----------------------------------------------------------------------------------------------------------
// T R A I N   T A R G E T
//
// after starting epoch, call this to train against a target vector
// for thread safety, you must pass in the thread_index if calling from different threads
// if positive=1, goal is to minimize the distance between in and target;
// otherwise the goal is to push the output away from the target
// returns false when the sample was skipped (smart-train) or no batch slot was free
bool train_target(float *in, float *target, int positive=1, int _thread_number = -1)
{
	if (_solver == NULL) bail("set solver");
	if (_thread_number < 0) _thread_number = get_thread_num();
	if (_thread_number > _thread_count) bail("need to enable OMP");
	const int thread_number = _thread_number;
	// build the (possibly augmented) input buffer
	mojo::matrix augmented_input = make_input(in, thread_number);
	float *input = augmented_input.x;
	// get next free mini_batch slot; this is tied to the current state of the model
	int my_batch_index = reserve_next_batch();
	// out of data or an error if index is negative
	if (my_batch_index < 0) return false;
	// BUGFIX: forward the augmented input; the original passed the raw `in`,
	// silently discarding the augmentation prepared above
	forward(input, 1.f, thread_number, 1);
	// set all deltas to zero
	__for__(auto layer __in__ layer_sets[thread_number]) layer->delta.fill(0.f);
	int layer_cnt = (int)layer_sets[thread_number].size();
	// calc delta for last layer to prop back up through network:
	// d = (target - out) * grad_activation(out)
	const int last_layer_index = layer_cnt - 1;
	base_layer *layer = layer_sets[thread_number][last_layer_index];
	const int layer_node_size = layer->node.size();
	if (dynamic_cast<dropout_layer*> (layer) != NULL) bail("can't have dropout on last layer");
	float E = 0;
	int max_j_out = 0;
	const float grad_fudge = 1.0f;
	// because of numerator/denominator cancellations which prevent a divide by zero issue,
	// we need to handle some activation+cost combinations specially on the output layer
	float cost_activation_type = 0;
	if ((std::string("sigmoid").compare(layer->p_act->name) == 0) &&
		(std::string("cross_entropy").compare(_cost_function->name) == 0))
		cost_activation_type = 1;
	else if ((std::string("softmax").compare(layer->p_act->name) == 0) &&
		(std::string("cross_entropy").compare(_cost_function->name) == 0))
		cost_activation_type = 1;
	else if ((std::string("tanh").compare(layer->p_act->name) == 0) &&
		(std::string("cross_entropy").compare(_cost_function->name) == 0))
		cost_activation_type = 4;
	for (int j = 0; j < layer_node_size; j++) {
		if (positive) { // want to minimize distance
			if (cost_activation_type > 0)
				layer->delta.x[j] = grad_fudge * cost_activation_type * (layer->node.x[j] - target[j]);
			else
				layer->delta.x[j] = grad_fudge * _cost_function->d_cost(layer->node.x[j], target[j], 0, 0) * layer->df(layer->node.x, j, layer_node_size, TEMPERATURE);
		} else { // want to maximize distance
			if (cost_activation_type > 0)
				layer->delta.x[j] = grad_fudge * cost_activation_type * (1.f - abs(layer->node.x[j] - target[j]));
			else
				layer->delta.x[j] = grad_fudge * (1.f - abs(_cost_function->d_cost(layer->node.x[j], target[j], 0, 0))) * layer->df(layer->node.x, j, layer_node_size, TEMPERATURE);
		}
		// pick best response
		if (layer->node.x[max_j_out] < layer->node.x[j]) max_j_out = j;
		// L2 distance x 2
		E += mse::cost(layer->node.x[j], target[j], 0, 0);
	}
	E /= (float)layer_node_size;
	// check for NAN
	if (E != E) bail("network blew up - try lowering learning rate\n");
	bool match = false;
	// FIxME if ((max_j_target == max_j_out)) match = true;
	// heuristic "match" since there is no class label here
	if (E < 0.01 && positive) match = true;
	else if (E > 0.1 && !positive) match = true;
	update_smart_train(E, match);
	if (E>0 && E<_skip_energy_level && _smart_train && match)
	{
		// release the reserved batch slot and skip this easy sample
		lock_batch();
		batch_open[my_batch_index] = BATCH_FREE;
		unlock_batch();
		return false; // return without doing training
	}
	// NOTE(review): backward pass is intentionally disabled in this build —
	// confirm before re-enabling
	//backward_hidden(my_batch_index, thread_number); !!!CAREFUL HERE
	return true;
}
#else
// No-op stubs for the public training API, compiled when training support
// (the MOJO_TRAIN path above) is disabled; inference-only builds link these.
float get_learning_rate() {return 0;}
void set_learning_rate(float alpha) {}
void train(float *in, float *target){}
void reset() {}
float get_smart_train_level() {return 0;}
void set_smart_train_level(float _level) {}
bool get_smart_train() { return false; }
void set_smart_train(bool _use) {}
#endif
};
}
|
2-4t.c | #include <stdio.h>
#include <omp.h>
/* Demo: spawn a fixed team of 4 OpenMP threads, each printing " Hello ",
 * then print a farewell from the initial thread after the team joins. */
int main(void)
{
    omp_set_num_threads(4);
#pragma omp parallel
    {
        printf(" Hello ");
    }
    printf("\n\n GoodBye – Team Destroyed – Exiting Program \n\n");
    return 0;
}
|
GB_binop__band_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__band_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__band_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__band_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__band_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__band_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__band_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__band_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_int16)
// C=scalar+B GB (_bind1st__band_int16)
// C=scalar+B' GB (_bind1st_tran__band_int16)
// C=A+scalar GB (_bind2nd__band_int16)
// C=A'+scalar GB (_bind2nd_tran__band_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) & (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_INT16 || GxB_NO_BAND_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (eWiseAdd) where C, A, and B are all dense; the loop lives in the
// included template, specialized by the GB_* macros above for z = x & y (int16).
void GB (_Cdense_ewise3_noaccum__band_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix with the BAND op.
// Returns GrB_NO_VALUE when this specialization is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__band_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix with the BAND op.
// p_bwork points to the scalar, reinterpreted as int16_t.
GrB_Info GB (_Cdense_accumb__band_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable (returned above); artifact of the code generator
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the BAND (bitwise-and) int16 op.
// alpha/beta scalars are read only for the eWiseUnion variant; all real work is
// in GB_add_template.c, parameterized by the GB_* macros above.
GrB_Info GB (_AaddB__band_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
// eWiseUnion substitutes alpha/beta where A or B has no entry
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hypersparse,
// specialized for the BAND int16 op via GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__band_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for BAND (it is commutative), so only the non-flipped
// template branch below is compiled in.
GrB_Info GB (_AemultB_02__band_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are bitmap/full,
// specialized for the BAND int16 op via GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__band_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap,
// specialized for the BAND int16 op via GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__band_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x & Bx [p] for every entry present in B (bind the scalar as the
// first operand). Bb is B's bitmap (may be NULL for full matrices; GBB then
// treats every position as present).
GrB_Info GB (_bind1st__band_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) & (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] & y for every entry present in A (bind the scalar as the
// second operand). Ab is A's bitmap (may be NULL for full matrices).
GrB_Info GB (_bind2nd__band_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) & (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) & (aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x & aij (via GB_CAST_OP above).
GrB_Info GB (_bind1st_tran__band_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) & (y) ; \
}
// C = op (A', y): transpose A and apply cij = aij & y (via GB_CAST_OP above).
GrB_Info GB (_bind2nd_tran__band_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
vortex.h | #ifndef vortex_h
#define vortex_h
#include "fft.h"
class Vortex : public FastFourierTransform {
private:
bool initial;
float dx;
float *r, *x;
// Solve the Gaussian RBF interpolation system for vorticity component d with a
// conjugate-gradient iteration; matrix-vector products are evaluated by the
// FMM (downward) and residual norms are reduced across MPI ranks.
// On entry B->TRG[d+1] holds the grid values; on exit B->SRC[d] holds the
// fitted RBF coefficients. Uses the member work arrays x (solution) and
// r (residual). CG vectors live in B->SRC[0] (search direction p) and
// B->TRG[0] (A*p).
void rbf(Bodies &bodies, Cells &cells, int d) {
const int itmax = 5;
const float tol = 1e-3;
setKernel("Gaussian");
// initial guess: x = rhs * cell volume; also seeds the first FMM source
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
int i = B-bodies.begin();
B->SRC[0] = x[i] = B->TRG[d+1] * dx * dx * dx;
B->TRG[0] = 0;
}
setCommBodies(cells);
Bodies jbodies;
Cells jcells = cells;
if( MPISIZE != 1 ) {
// overlap the local tree traversal with the halo-body exchange
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
{
downward(cells,jcells,1,false);
}
#pragma omp section
{
updateBodies();
}
}
jbodies = bodies;
jcells.clear();
bodies2cells(jbodies,jcells);
eraseLocalTree(jcells);
}
downward(cells,jcells,1);
// r = b - A*x ; p = r ; res0 = ||r||^2 (global)
float resRecv, resSend = 0;
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
int i = B-bodies.begin();
r[i] = B->TRG[d+1] - B->TRG[0];
B->SRC[0] = r[i];
B->TRG[0] = 0;
resSend += r[i] * r[i];
}
MPI_Allreduce(&resSend,&resRecv,1,MPI_FLOAT,MPI_SUM,MPI_COMM_WORLD);
float res0 = resRecv;
int it = 0;
// CG loop: stop on absolute/relative residual tolerance or itmax
while( sqrt(res0) > tol && sqrt(resRecv / res0) > tol && it < itmax ) {
print("iteration : ",0);
print(it,0);
print(", residual : ",0);
print(sqrt(resRecv / res0),0);
print("\n",0);
jcells = cells;
if( MPISIZE != 1 ) {
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
{
downward(cells,jcells,1,false);
}
#pragma omp section
{
updateBodies();
}
}
jbodies = bodies;
jcells.clear();
bodies2cells(jbodies,jcells);
eraseLocalTree(jcells);
}
// A*p via FMM; then alpha = (r,r) / (p, A*p)
downward(cells,jcells,1);
float pApRecv, pApSend = 0;
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
pApSend += B->SRC[0] * B->TRG[0];
}
MPI_Allreduce(&pApSend,&pApRecv,1,MPI_FLOAT,MPI_SUM,MPI_COMM_WORLD);
float alpha = resRecv / pApRecv;
float resOld = resRecv;
resSend = 0;
// x += alpha*p ; r -= alpha*A*p ; accumulate new ||r||^2
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
int i = B-bodies.begin();
x[i] += alpha * B->SRC[0];
r[i] -= alpha * B->TRG[0];
resSend += r[i] * r[i];
}
MPI_Allreduce(&resSend,&resRecv,1,MPI_FLOAT,MPI_SUM,MPI_COMM_WORLD);
float beta = resRecv / resOld;
// p = r + beta*p ; clear targets for the next FMM pass
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
int i = B-bodies.begin();
B->SRC[0] = r[i] + beta * B->SRC[0];
B->TRG[0] = 0;
}
it++;
}
// store the converged coefficients for component d
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
int i = B-bodies.begin();
B->SRC[d] = x[i];
}
}
public:
Vortex(int numGrid1D) : FastFourierTransform(numGrid1D) {
initial = true;
dx = 2 * M_PI / numGrid1D;
r = new float [numBodies];
x = new float [numBodies];
}
~Vortex() {
delete[] r;
delete[] x;
}
void readData(Bodies &bodies, Bodies &bodies2, Cells &cells) {// Initialize source values
char fname[256];
sprintf(fname,"initial%4.4d",nx);
std::ifstream fid(fname,std::ios::in|std::ios::binary);
int byte;
float dummy[3];
for( int rank=0; rank!=MPISIZE; ++rank ) {
if( rank == MPIRANK ) {
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
fid.read((char*)&byte,sizeof(int));
fid.read((char*)&bodies[B-bodies.begin()].SRC[0],byte);
fid.read((char*)&byte,sizeof(int));
}
} else {
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
fid.read((char*)&byte,sizeof(int));
fid.read((char*)dummy,byte);
fid.read((char*)&byte,sizeof(int));
}
}
}
fid.close();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
int i = B-bodies.begin();
int iGlob = i + numBodies * MPIRANK;
int ix = iGlob % nx;
int iy = iGlob / nx % nx;
int iz = iGlob / nx / nx;
B->IBODY = iGlob;
B->IPROC = MPIRANK;
B->X[0] = (ix + .5) * dx - M_PI;
B->X[1] = (iy + .5) * dx - M_PI;
B->X[2] = (iz + .5) * dx - M_PI;
B->SRC[3] = dx;
realRecv[i] = B->SRC[1];
}
for( B_iter B=bodies2.begin(); B!=bodies2.end(); ++B ) {
int i = B-bodies2.begin();
int iGlob = i + numBodies * MPIRANK;
int ix = iGlob % nx;
int iy = iGlob / nx % nx;
int iz = iGlob / nx / nx;
B->IBODY = iGlob;
B->IPROC = MPIRANK;
B->X[0] = ix * dx - M_PI + 1e-5;
B->X[1] = iy * dx - M_PI + 1e-5;
B->X[2] = iz * dx - M_PI + 1e-5;
}
zDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
B->TRG[1] = realSend[B-bodies.begin()];
realRecv[B-bodies.begin()] = B->SRC[2];
}
yDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
B->TRG[1] -= realSend[B-bodies.begin()];
realRecv[B-bodies.begin()] = B->SRC[2];
}
xDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
B->TRG[2] = realSend[B-bodies.begin()];
realRecv[B-bodies.begin()] = B->SRC[0];
}
zDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
B->TRG[2] -= realSend[B-bodies.begin()];
realRecv[B-bodies.begin()] = B->SRC[0];
}
yDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
B->TRG[3] = realSend[B-bodies.begin()];
realRecv[B-bodies.begin()] = B->SRC[1];
}
xDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
B->TRG[3] -= realSend[B-bodies.begin()];
}
setGlobDomain(bodies);
octsection(bodies);
octsection(bodies2);
bottomup(bodies,cells);
rbf(bodies,cells,2);
rbf(bodies,cells,1);
rbf(bodies,cells,0);
}
void gridVelocity(Bodies &bodies, Bodies &bodies2, Cells &cells) {
Bodies jbodies = bodies;
for( B_iter B=bodies2.begin(); B!=bodies2.end(); ++B ) B->TRG = 0;
setKernel("BiotSavart");
cells.clear();
Cells jcells;
bottomup(bodies2,cells);
bottomup(jbodies,jcells);
commBodies(jcells);
commCells(jbodies,jcells);
downward(cells,jcells,1);
}
void initialError(Bodies bodies) {
char fname[256];
int byte;
float dummy[3];
float u, v, w;
float diffSend = 0, normSend = 0, diffRecv, normRecv;
unpartition(bodies);
std::sort(bodies.begin(),bodies.end());
sprintf(fname,"initial%4.4d",nx);
std::ifstream fid(fname,std::ios::in|std::ios::binary);
for( int rank=0; rank!=MPISIZE; ++rank ) {
if( rank == MPIRANK ) {
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
int i = B-bodies.begin();
fid.read((char*)&byte,sizeof(int));
fid.read((char*)&u,sizeof(float));
fid.read((char*)&v,sizeof(float));
fid.read((char*)&w,sizeof(float));
fid.read((char*)&byte,sizeof(int));
diffSend += (bodies[i].TRG[0] - u) * (bodies[i].TRG[0] - u)
+ (bodies[i].TRG[1] - v) * (bodies[i].TRG[1] - v)
+ (bodies[i].TRG[2] - w) * (bodies[i].TRG[2] - w);
normSend += u * u + v * v + w * w;
}
} else {
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
fid.read((char*)&byte,sizeof(int));
fid.read((char*)dummy,byte);
fid.read((char*)&byte,sizeof(int));
}
}
}
fid.close();
MPI_Reduce(&diffSend,&diffRecv,1,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce(&normSend,&normRecv,1,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
print("Error : ",0);
print(sqrt(diffRecv/normRecv),0);
print("\n",0);
}
void statistics(Bodies bodies, float nu, float dt) {
unpartition(bodies);
std::sort(bodies.begin(),bodies.end());
initSpectrum();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) realRecv[B-bodies.begin()] = B->TRG[0];
forwardFFT();
addSpectrum();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) realRecv[B-bodies.begin()] = B->TRG[1];
forwardFFT();
addSpectrum();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) realRecv[B-bodies.begin()] = B->TRG[2];
forwardFFT();
addSpectrum();
writeSpectrum();
float umax = 0;
float uu = 0, vv = 0, ww = 0, uw = 0, ds = 0;
float ux2 = 0, ux3 = 0, ux4 = 0;
float statSend[9], statRecv[9];
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
umax = std::max(umax,std::abs(B->TRG[0]));
umax = std::max(umax,std::abs(B->TRG[1]));
umax = std::max(umax,std::abs(B->TRG[2]));
uu += B->TRG[0] * B->TRG[0];
vv += B->TRG[1] * B->TRG[1];
ww += B->TRG[2] * B->TRG[2];
uw += B->TRG[0] * B->TRG[2];
realRecv[B-bodies.begin()] = B->TRG[0];
}
xDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
float ux = realSend[B-bodies.begin()];
ds += ux * ux;
ux2 += ux * ux;
ux3 += ux * ux * ux;
ux4 += ux * ux * ux * ux;
realRecv[B-bodies.begin()] = B->TRG[0];
}
yDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
float uy = realSend[B-bodies.begin()];
ds += uy * uy;
realRecv[B-bodies.begin()] = B->TRG[0];
}
zDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
float uz = realSend[B-bodies.begin()];
ds += uz * uz;
realRecv[B-bodies.begin()] = B->TRG[1];
}
xDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
float vx = realSend[B-bodies.begin()];
ds += vx * vx;
realRecv[B-bodies.begin()] = B->TRG[1];
}
yDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
float vy = realSend[B-bodies.begin()];
ds += vy * vy;
realRecv[B-bodies.begin()] = B->TRG[1];
}
zDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
float vz = realSend[B-bodies.begin()];
ds += vz * vz;
realRecv[B-bodies.begin()] = B->TRG[2];
}
xDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
float wx = realSend[B-bodies.begin()];
ds += wx * wx;
realRecv[B-bodies.begin()] = B->TRG[2];
}
yDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
float wy = realSend[B-bodies.begin()];
ds += wy * wy;
realRecv[B-bodies.begin()] = B->TRG[2];
}
zDerivative();
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
float wz = realSend[B-bodies.begin()];
ds += wz * wz;
}
statSend[0] = umax;
statSend[1] = uu;
statSend[2] = vv;
statSend[3] = ww;
statSend[4] = uw;
statSend[5] = ds;
statSend[6] = ux2;
statSend[7] = ux3;
statSend[8] = ux4;
MPI_Reduce(&statSend,&statRecv,9,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
if( MPIRANK == 0 ) {
umax = statRecv[0] / MPISIZE;
uu = statRecv[1];
vv = statRecv[2];
ww = statRecv[3];
uw = statRecv[4];
ds = statRecv[5];
ux2 = statRecv[6];
ux3 = statRecv[7];
ux4 = statRecv[8];
float ek = uu + vv + ww;
uu /= ek;
vv /= ek;
ww /= ek;
uw /= ek;
ek /= 2 * numGlobal;
ds *= nu / numGlobal / 4;
ux2 /= numGlobal;
ux3 /= numGlobal;
ux4 /= numGlobal;
float sk = ux3 / std::pow(ux2,1.5);
float fl = ux4 / ux2 / ux2;
float ret = ek * ek / nu / ds;
float rel = sqrt(20 * ret / 3);
float cfl = umax * dt / dx;
std::ofstream fid("statistics.dat",std::ios::out | std::ios::app);
fid << ek << std::endl;
fid << ds << std::endl;
fid << sk << std::endl;
fid << fl << std::endl;
fid << cfl << std::endl;
fid << ret << std::endl;
fid << rel << std::endl;
fid << uu << std::endl;
fid << vv << std::endl;
fid << ww << std::endl;
fid << uw << std::endl;
fid.close();
std::cout << "-------------------------------\n";
std::cout << "| energy : " << ek << std::endl;
std::cout << "| dissipation : " << ds << std::endl;
std::cout << "| skewness : " << sk << std::endl;
std::cout << "| flatness : " << fl << std::endl;
std::cout << "| CFL : " << cfl << std::endl;
std::cout << "| Re T : " << ret << std::endl;
std::cout << "| Re lambda : " << rel << std::endl;
std::cout << "| uu : " << uu << std::endl;
std::cout << "| vv : " << vv << std::endl;
std::cout << "| ww : " << ww << std::endl;
std::cout << "| uw : " << uw << std::endl;
std::cout << "-------------------------------\n";
}
}
void BiotSavart(Bodies &bodies, Cells &cells) {
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) B->TRG = 0;
setKernel("BiotSavart");
cells.clear();
octsection(bodies);
bottomup(bodies,cells);
setCommBodies(cells);
Bodies jbodies;
Cells jcells = cells;
if( MPISIZE != 1 ) {
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
{
downward(cells,jcells,1,false);
}
#pragma omp section
{
updateBodies();
}
}
jbodies = bodies;
jcells = cells;
commCells(jbodies,jcells);
eraseLocalTree(jcells);
}
downward(cells,jcells,1);
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
B->dxdt[0] = B->TRG[0];
B->dxdt[1] = B->TRG[1];
B->dxdt[2] = B->TRG[2];
}
}
void Stretching(Bodies &bodies, Cells &cells) {
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) B->TRG = 0;
setKernel("Stretching");
evalP2M(cells);
evalM2M(cells);
Bodies jbodies;
Cells jcells = cells;
if( MPISIZE != 1 ) {
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
{
downward(cells,jcells,1,false);
}
#pragma omp section
{
updateBodies();
}
}
jbodies = bodies;
jcells = cells;
commCells(jbodies,jcells);
eraseLocalTree(jcells);
}
downward(cells,jcells,1);
}
void update(Bodies &bodies, float nu, float dt) {
if( initial ) {
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
B->dxdt2 = B->dxdt;
B->dgdt[0] = B->TRG[0];
B->dgdt[1] = B->TRG[1];
B->dgdt[2] = B->TRG[2];
}
}
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
B->X[0] += (1.5 * B->dxdt[0] - 0.5 * B->dxdt2[0]) * dt;
B->X[1] += (1.5 * B->dxdt[1] - 0.5 * B->dxdt2[1]) * dt;
B->X[2] += (1.5 * B->dxdt[2] - 0.5 * B->dxdt2[2]) * dt;
for( int d=0; d!=3; ++d ) {
if( B->X[d] < -M_PI ) {
B->X[d] += 2 * M_PI;
} else if( M_PI < B->X[d] ) {
B->X[d] -= 2 * M_PI;
}
}
B->SRC[0] += (1.5 * B->TRG[0] - 0.5 * B->dgdt[0]) * dt;
B->SRC[1] += (1.5 * B->TRG[1] - 0.5 * B->dgdt[1]) * dt;
B->SRC[2] += (1.5 * B->TRG[2] - 0.5 * B->dgdt[2]) * dt;
B->SRC[3] += nu / B->SRC[3] * dt;
}
initial = false;
}
void reinitialize(Bodies &bodies, Cells &cells) {
Bodies jbodies = bodies;
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
int ix = B->IBODY % nx;
int iy = B->IBODY / nx % nx;
int iz = B->IBODY / nx / nx;
B->X[0] = (ix + .5) * dx - M_PI;
B->X[1] = (iy + .5) * dx - M_PI;
B->X[2] = (iz + .5) * dx - M_PI;
B->TRG = 0;
}
setKernel("Gaussian");
cells.clear();
Cells jcells;
octsection(bodies);
bottomup(bodies,cells);
bottomup(jbodies,jcells);
commBodies(jcells);
commCells(jbodies,jcells);
int numCells = jcells.size();
downward(cells,jcells,1);
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
B->TRG[1] = B->TRG[0];
B->TRG[0] = 0;
}
jcells.resize(numCells);
for( B_iter B=jbodies.begin(); B!=jbodies.end(); ++B ) {
B->SRC[0] = B->SRC[1];
}
downward(cells,jcells,1);
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
B->TRG[2] = B->TRG[0];
B->TRG[0] = 0;
}
jcells.resize(numCells);
for( B_iter B=jbodies.begin(); B!=jbodies.end(); ++B ) {
B->SRC[0] = B->SRC[2];
}
downward(cells,jcells,1);
for( B_iter B=bodies.begin(); B!=bodies.end(); ++B ) {
B->TRG[3] = B->TRG[0];
B->SRC[3] = dx;
}
rbf(bodies,cells,2);
rbf(bodies,cells,1);
rbf(bodies,cells,0);
initial = true;
}
};
#endif
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for two `struct timeval` values.
 *
 * The subtrahend *y is normalized in place (its fields may be modified) so
 * that the resulting tv_usec is non-negative.  Returns 1 when the difference
 * is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microseconds when x has fewer of them. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_sec  += borrow;
    y->tv_usec -= 1000000 * borrow;
  }

  /* Carry excess microseconds from x's side into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_sec  -= carry;
    y->tv_usec += 1000000 * carry;
  }

  /* After normalization the microsecond difference is non-negative. */
  result->tv_sec  = x->tv_sec  - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: order-1, 3D 7-point stencil with variable coefficients.
 * Usage: prog [Nx Ny Nz [Nt]] -- interior sizes; a one-cell halo is added.
 *
 * Fixes vs. original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when arguments were missing (UB);
 *    they now have defaults.
 *  - `min(...)` was undefined (only the MIN macro exists) -- use MIN.
 *  - Initialization started at index 1 and only filled A[0], so the stencil
 *    read uninitialized halo planes (index 0) and the uninitialized second
 *    buffer A[1]; both buffers are now filled over the full range.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* Defaults: 32^3 interior (+2 halo per dimension), 10 time steps. */
  Nx = Ny = Nz = 34;
  Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Allocate the two time buffers A[0..1][Nz][Ny][Nx]. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* One coefficient field per stencil point. */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* Tile size information, including extra element to decide the list length.
     The list is modified here before source-to-source transformations. */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 8;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  /* For timekeeping. */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  /* Initialize both buffers over the FULL index range (including halos):
     the stencil reads plane 0 and the halo of A[(t+1)%2]. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* was: min(...) -- undefined; the macro defined above is MIN. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays. */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
OctTree.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#include "ompfuncs.h"
#endif
#include "OctTree.h"
#include "libgad.h"
static int num_threads=1;
static int periodic = 0;
static float boxsize = 0;
/*
 * Allocate a single empty octree node with zeroed fields and NULL children.
 * Caller owns the returned node (release via delOctNode).
 * Fix: the malloc result was dereferenced without a NULL check; on failure we
 * now abort with a diagnostic, consistent with buildTree's error handling.
 */
OctNode * initNode ()
{
  OctNode *onode;
  int i, j;
  onode = (OctNode*) malloc(sizeof(OctNode));
  if (onode == NULL)
  {
    fprintf(stderr, "OctTree: fail to allocate memory in initNode!\n");
    exit(1);
  }
  onode->numpart = 0;
  for (j = 0; j < 3; j++)
    onode->center[j] = 0;
  onode->radius = 0;
  onode->part = NULL;
  for ( i=0; i<8; i++)
  {
    onode->child[i] = NULL;
  }
  return onode;
}
/*
 * Recursively release an octree rooted at onode: children first, then the
 * node's particle-pointer array (owned only by populated leaves), then the
 * node itself.
 */
void delOctNode (OctNode *onode)
{
  int child;
  for (child = 0; child < 8; child++)
  {
    if (onode->child[child] != NULL)
      delOctNode(onode->child[child]);
  }
  /* Only nodes holding particles own a part[] array. */
  if (onode->numpart)
    free(onode->part);
  free(onode);
}
#ifdef _OPENMP
/* Query the OpenMP runtime and cache the team size in the file-scope
 * num_threads (used by buildTree to size per-thread counters).  The value
 * must be read from inside a parallel region, hence the throwaway region. */
void set_num_threads()
{
#pragma omp parallel //private(num_threads)
{
int ithread= omp_get_thread_num();
/* only one thread writes the shared global, so no race */
if (ithread == 0)
{
num_threads = omp_get_num_threads();
// printf("Number of Threads %d\n", num_threads);
}
}
}
#endif
/* Enable periodic (minimum-image) distance computations in distOctNode /
 * distPart, using the box size from the snapshot header. */
void set_periodic_boundaries(struct header head)
{
periodic =1;
boxsize = head.boxsize;
}
/* Build an octree over a whole simulation box: allocates the root in *onode,
 * centers it at half the box size, and hands an array of particle POINTERS to
 * buildTree.  maxparticles is the leaf capacity, maxdepth the depth limit.
 * Note: pnt_part is intentionally not freed here -- buildTree takes ownership
 * of the pointer array and frees it. */
int buildTreeBox(OctNode **onode, gadpart *part, struct header head, int maxparticles, int maxdepth)
{
*onode = initNode();
float radius = head.boxsize/2.;
fltarr center={radius, radius, radius};
unsigned int numpart_all = 0;
int i;
/* sum particle counts over the 6 GADGET particle types */
for ( i=0; i<6; i++)
{
numpart_all += head.npart[i];
}
gadpart **pnt_part = (gadpart**) malloc (sizeof(gadpart*) * numpart_all);
#pragma omp parallel for
for ( i = 0; i < numpart_all; i++)
{
pnt_part[i] = &part[i];
}
set_num_threads();
int ret_val;
ret_val = buildTree(*onode, pnt_part, numpart_all, center, radius, maxparticles, maxdepth, 0);
// free(pnt_part);
return ret_val;
}
/*
 * Recursively build the octree below onode from `part`, an array of `count`
 * particle pointers.  Takes ownership of `part` and frees it (at a leaf after
 * copying, otherwise after redistributing the pointers to the children).
 * Recursion stops when count <= leafsize or currentDepth >= maxDepth.
 *
 * Fixes vs. original:
 *  - The allocation check tested `newlist == NULL` -- the address of a local
 *    array, which is never NULL -- instead of the freshly allocated
 *    newlist[i]; a failed malloc went undetected.
 *  - code/childPointCounts are `unsigned int` buffers but were allocated and
 *    cast as `int*`; the casts now match the declared types.
 *  - The leaf-level malloc is now checked like the others.
 */
int buildTree(OctNode *onode, gadpart **part, unsigned int count, fltarr center, float radius, unsigned int leafsize, unsigned int maxDepth, unsigned int currentDepth )
{
#ifdef DEBUG
  printf("Octree.buildtree: count %d\t| center %f %f %f | radius %f | currentDepth %d \n", count, center[0], center[1], center[2], radius, currentDepth);
  fflush(stdout);
#endif
  int j;
  for (j=0; j<3; j++)
    onode -> center[j] = center[j];
  onode -> radius = radius;

  /* Leaf: copy the particle pointers into the node and stop recursing. */
  if (count <= leafsize || currentDepth >= maxDepth)
  {
    int i;
    onode -> numpart = count;
    onode -> part = (gadpart **) malloc (sizeof(gadpart*) * count);
    if (onode -> part == NULL)
    {
      fprintf(stderr, "OctTree: fail to allocate memory in buildtree!\n");
      exit(1);
    }
    for ( i = 0; i < count; i++ )
    {
      onode -> part[i] = part[i];
    }
    free(part);
    return 1;
  }

  int i;
  unsigned int *childPointCounts[8];
  /* Octant code (0..7) per particle, plus per-thread octant counters so the
     classification loop needs no atomics. */
  unsigned int *code = (unsigned int *) calloc (count, sizeof(unsigned int));
  #pragma omp parallel for private(i)
  for ( i = 0; i < 8; i++ )
  {
    childPointCounts[i] = (unsigned int*) calloc (num_threads, sizeof(unsigned int));
  }
  #pragma omp parallel for private(i)
  for ( i = 0; i < count; i++ )
  {
    int ithread = 0;
#ifdef _OPENMP
    ithread = omp_get_thread_num();
#endif
    code[i] = 0;
    if (part[i]->pos[0] > center[0]) code[i] |= 1;
    if (part[i]->pos[1] > center[1]) code[i] |= 2;
    if (part[i]->pos[2] > center[2]) code[i] |= 4;
    childPointCounts[code[i]][ithread]++;
  }
  /* Reduce the per-thread counters into slot 0 of each octant. */
  for ( i = 0; i < 8; i++ )
  {
    for ( j = 1; j<num_threads; j++)
    {
      childPointCounts[i][0] += childPointCounts[i][j];
    }
  }

  /* Distribute the particle pointers into one list per non-empty octant. */
  gadpart **newlist[8];
  unsigned int newcount[8];
  #pragma omp parallel for
  for ( i = 0; i < 8; i++ )
  {
    newcount[i] = 0;
    if (!childPointCounts[i][0]) continue;
    onode->child[i] = initNode();
    newlist[i] = (gadpart**) malloc (sizeof(gadpart*) * childPointCounts[i][0]);
    /* BUG FIX: was `newlist == NULL`, which compared the local array's
       address and could never trigger. */
    if (newlist[i]==NULL)
    {
      fprintf(stderr, "OctTree: fail to allocate memory in buildtree!\n");
      exit(1);
    }
    int n;
    for ( n = 0; n < count; n++)
    {
      if (code[n]==i)
      {
        newlist[i][newcount[i]++] = part[n];
      }
    }
  }
  free(part);

  /* Recurse into each non-empty octant; the child takes ownership of its
     newlist[i] and frees it. */
  #pragma omp parallel for
  for ( i = 0; i < 8; i++ )
  {
    if (childPointCounts[i][0])
    {
      int n;
      fltarr newcenter = {-0.5,-0.5,-0.5};
      if (i & 1) newcenter[0] = 0.5;
      if (i & 2) newcenter[1] = 0.5;
      if (i & 4) newcenter[2] = 0.5;
      for ( n = 0; n < 3; n++ )
        newcenter[n] = center[n] + newcenter[n] * radius;
      float newradius = radius * 0.5;
      buildTree(onode->child[i], newlist[i], newcount[i], newcenter, newradius, leafsize, maxDepth, currentDepth+1);
    }
    free(childPointCounts[i]);
  }
  free(code);
  return 1;
}
/*
 * Sanity check: return the total number of particles stored in the subtree
 * rooted at onode (a populated leaf reports its own count; interior nodes
 * sum over their existing children).
 */
int checkOctTree(OctNode *onode)
{
  int child, total;

  /* Populated leaf: count is stored directly on the node. */
  if (onode->numpart)
    return onode->numpart;

  total = 0;
  for (child = 0; child < 8; child++)
  {
    if (onode->child[child] != NULL)
      total += checkOctTree(onode->child[child]);
  }
  return total;
}
/* Minimum distance from point `center` to the cube of node onode: per axis,
 * the gap between the point and the cube face (zero if the point projects
 * inside the cube's extent on that axis), accumulated in quadrature.  With
 * periodic boundaries the minimum-image convention is applied per axis. */
float distOctNode(OctNode *onode, fltarr center)
{
float dist = 0;
int dim;
for ( dim=0; dim<3; dim++)
{
float dum=ABS(center[dim] - onode->center[dim]);
if (periodic)
dum = MIN(dum, boxsize-dum);
/* inside the node's extent on this axis: no contribution */
if (dum < onode->radius)
continue;
// if ((periodic) && (dum > (boxsize - onode->radius)) )
// continue;
dist += SQR(dum - onode->radius);
}
return sqrt(dist);
}
/*
 * Euclidean distance between a particle and the point `pos`, using the
 * minimum-image convention per axis when periodic boundaries are enabled.
 */
float distPart(gadpart *part, fltarr pos)
{
  float sum = 0;
  int d;
  for (d = 0; d < 3; d++)
  {
    float delta = ABS(pos[d] - part->pos[d]);
    if (periodic)
      delta = MIN(delta, boxsize - delta);
    sum += SQR(delta);
  }
  return sqrt(sum);
}
/*
 * Range query: append pointers to all particles within `dist` of `pos` into
 * *result (grown on demand; *numpart is the fill count, *size the capacity,
 * *size==0 means "not yet allocated").  Subtrees whose bounding cube lies
 * farther than `dist` are pruned via distOctNode.  Returns the number of
 * particles found in this subtree.
 *
 * Fix: `*result = realloc(*result, ...)` overwrote the only pointer to the
 * buffer, leaking it and dereferencing NULL on allocation failure; the result
 * is now checked through a temporary (aborting on failure, consistent with
 * buildTree).  The initial calloc is checked too.
 */
unsigned long findParticles(OctNode *onode, fltarr pos, float dist, gadpart ***result, unsigned int *numpart, unsigned int *size)
{
  /* Prune: node cube entirely outside the search radius. */
  if ( distOctNode(onode, pos) > dist )
    return 0;
  int i;
  int sum = 0;
  for ( i = 0; i < 8; i++ )
  {
    if (onode->child[i] == NULL)
      continue;
    sum += findParticles(onode->child[i], pos, dist, result, numpart, size);
  }
  int npart = 0;
  if (onode->numpart)
  {
    if (*size==0)
    {
      *size = PBUFF;
      *result = (gadpart**) calloc (*size, sizeof(gadpart*));
      if (*result == NULL)
      {
        fprintf(stderr, "OctTree: fail to allocate memory in findParticles!\n");
        exit(1);
      }
    }
    if ((*numpart+onode->numpart) > *size)
    {
      /* Double the capacity until this leaf's particles fit. */
      while ( (*numpart+onode->numpart) > *size)
        (*size)*=2;
      gadpart **grown = (gadpart**) realloc(*result, (*size)*sizeof(gadpart*));
      if (grown == NULL)
      {
        fprintf(stderr, "OctTree: fail to allocate memory in findParticles!\n");
        exit(1);
      }
      *result = grown;
    }
    for ( i = 0; i < onode->numpart; i++ )
    {
      if ( distPart(onode->part[i], pos) < dist )
      {
        (*result)[(*numpart)++] = (onode->part[i]);
        npart++;
      }
    }
  }
  return sum+npart;
}
/*
 * Descend from onode toward the populated leaf containing `pos`.  If the
 * descent reaches an interior node whose matching octant is empty, that
 * interior node is returned instead.
 */
OctNode * findLeaf(OctNode *onode, fltarr pos)
{
  OctNode *cur = onode;
  while (cur->numpart == 0)
  {
    /* Select the octant of pos relative to this node's center. */
    int octant = 0;
    if (pos[0] > cur->center[0]) octant |= 1;
    if (pos[1] > cur->center[1]) octant |= 2;
    if (pos[2] > cur->center[2]) octant |= 4;
    /* Dead end: no child covers that octant. */
    if (cur->child[octant] == NULL)
      return cur;
    cur = cur->child[octant];
  }
  return cur;
}
/* Prune leaves dominated by "reject" particle types: in each populated leaf,
 * count particles whose type bit is in atype (accept) or rtype (reject); if
 * the reject fraction exceeds reject_ratio the leaf's particle list is
 * dropped.  Interior nodes recurse into their children (in parallel).
 * NOTE(review): `j` is declared but unused. */
void rejectNodes(OctNode *onode, int atype ,int rtype, float reject_ratio)
{
int i,j;
if (onode->numpart)
{
float rejects = 0;
float accepts = 0;
for ( i = 0; i < onode->numpart; i++)
{
/* types are compared as bit masks: 1<<type against the atype/rtype sets */
if ((1<<(onode->part[i]->type)) & atype )
accepts++;
if ((1<<(onode->part[i]->type)) & rtype )
rejects++;
}
/* leaf contains neither accepted nor rejected types: leave untouched */
if ((accepts == 0) && (rejects == 0))
{
return;
}
if ( rejects/(accepts+rejects) > reject_ratio )
{
onode->numpart = 0;
free(onode->part);
}
return;
}
#pragma omp parallel for // firstprivate(radius)
for ( i = 0; i < 8; i++ )
{
if (onode->child[i] == NULL)
continue;
rejectNodes(onode->child[i], atype, rtype, reject_ratio);
}
return;
}
|
BatchNormalization.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/BatchNormalization.c"
#else
/* Forward pass of batch normalization over dimension 1 (features).
 * In training mode the batch mean/inverse-std are computed per feature,
 * saved into save_mean/save_std (save_std holds 1/std, not std), and folded
 * into the running averages with `momentum`; in eval mode the running
 * statistics are used.  Output: (x - mean) * invstd * weight + bias, with
 * weight/bias optional (NULL -> 1 and 0).  Feature planes are independent,
 * hence the parallel loop. */
void THNN_(BatchNormalization_updateOutput)(
THNNState *state, THTensor *input, THTensor *output,
THTensor *weight, THTensor *bias,
THTensor *running_mean, THTensor *running_var,
THTensor *save_mean, THTensor *save_std,
bool train, double momentum, double eps)
{
THTensor_(resizeAs)(output, input);
int64_t nInput = THTensor_(size)(input, 1);
int64_t f;
/* elements per feature plane */
ptrdiff_t n = THTensor_(nElement)(input) / nInput;
#pragma omp parallel for
for (f = 0; f < nInput; ++f) {
THTensor *in = THTensor_(newSelect)(input, 1, f);
THTensor *out = THTensor_(newSelect)(output, 1, f);
real mean, invstd;
if (train) {
// compute mean per input
accreal sum = 0;
TH_TENSOR_APPLY(real, in, sum += *in_data;);
mean = (real) sum / n;
THTensor_(set1d)(save_mean, f, (real) mean);
// compute variance per input
sum = 0;
TH_TENSOR_APPLY(real, in,
sum += (*in_data - mean) * (*in_data - mean););
/* guard the 1/sqrt(0) case when the plane is constant and eps == 0 */
if (sum == 0 && eps == 0.0) {
invstd = 0;
} else {
invstd = (real) (1 / sqrt(sum/n + eps));
}
THTensor_(set1d)(save_std, f, (real) invstd);
// update running averages (running_var uses the unbiased estimator, n-1)
THTensor_(set1d)(running_mean, f,
(real) (momentum * mean + (1 - momentum) * THTensor_(get1d)(running_mean, f)));
accreal unbiased_var = sum / (n - 1);
THTensor_(set1d)(running_var, f,
(real) (momentum * unbiased_var + (1 - momentum) * THTensor_(get1d)(running_var, f)));
} else {
mean = THTensor_(get1d)(running_mean, f);
invstd = 1 / sqrt(THTensor_(get1d)(running_var, f) + eps);
}
// compute output
real w = weight ? THTensor_(get1d)(weight, f) : 1;
real b = bias ? THTensor_(get1d)(bias, f) : 0;
TH_TENSOR_APPLY2(real, in, real, out,
*out_data = (real) (((*in_data - mean) * invstd) * w + b););
THTensor_(free)(out);
THTensor_(free)(in);
}
}
/* Backward pass of batch normalization.  Any of gradInput/gradWeight/gradBias
 * may be NULL to skip that gradient.  In training mode the saved batch
 * statistics are used (save_std holds 1/std); in eval mode the running
 * statistics.  gradWeight/gradBias are ACCUMULATED (scaled by `scale`), not
 * overwritten.  Feature planes are independent, hence the parallel loop. */
void THNN_(BatchNormalization_backward)(
THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput,
THTensor *gradWeight, THTensor *gradBias, THTensor *weight,
THTensor *running_mean, THTensor *running_var,
THTensor *save_mean, THTensor *save_std,
bool train, double scale, double eps)
{
THNN_CHECK_SHAPE(input, gradOutput);
int64_t nInput = THTensor_(size)(input, 1);
int64_t f;
/* elements per feature plane */
ptrdiff_t n = THTensor_(nElement)(input) / nInput;
if (gradInput) {
THTensor_(resizeAs)(gradInput, input);
}
#pragma omp parallel for
for (f = 0; f < nInput; ++f) {
THTensor *in = THTensor_(newSelect)(input, 1, f);
THTensor *gradOut = THTensor_(newSelect)(gradOutput, 1, f);
real w = weight ? THTensor_(get1d)(weight, f) : 1;
real mean, invstd;
if (train) {
mean = THTensor_(get1d)(save_mean, f);
invstd = THTensor_(get1d)(save_std, f);
} else {
mean = THTensor_(get1d)(running_mean, f);
invstd = 1 / sqrt(THTensor_(get1d)(running_var, f) + eps);
}
// sum over all gradOutput in feature plane
accreal sum = 0;
TH_TENSOR_APPLY(real, gradOut, sum += *gradOut_data;);
// dot product of the Q(X) and gradOuput
accreal dotp = 0;
TH_TENSOR_APPLY2(real, in, real, gradOut,
dotp += (*in_data - mean) * (*gradOut_data););
if (gradInput) {
THTensor *gradIn = THTensor_(newSelect)(gradInput, 1, f);
if (train) {
// when in training mode
// Q(X) = X - E[x] ; i.e. input centered to zero mean
// Y = Q(X) / σ ; i.e. BN output before weight and bias
// dL/dX = (Q(dL/dY) - dot(Y, dL/dY) * Y) / σ * w
// projection of gradOutput on to output scaled by std
real k = (real) dotp * invstd * invstd / n;
TH_TENSOR_APPLY2(real, gradIn, real, in,
*gradIn_data = (*in_data - mean) * k;);
accreal gradMean = sum / n;
/* second pass combines the projection (already in gradIn) with the
 * centered gradOutput to complete dL/dX */
TH_TENSOR_APPLY2(real, gradIn, real, gradOut,
*gradIn_data = (*gradOut_data - gradMean - *gradIn_data) * invstd * w;);
} else {
// when in evaluation mode
// Q(X) = X - running_mean ; i.e. input centered to zero mean
// Y = Q(X) / running_std ; i.e. BN output before weight and bias
// dL/dX = w / running_std
TH_TENSOR_APPLY2(real, gradIn, real, gradOut,
*gradIn_data = *gradOut_data * invstd * w;);
}
THTensor_(free)(gradIn);
}
if (gradWeight) {
real val = THTensor_(get1d)(gradWeight, f);
THTensor_(set1d)(gradWeight, f, val + scale * dotp * invstd);
}
if (gradBias) {
real val = THTensor_(get1d)(gradBias, f);
THTensor_(set1d)(gradBias, f, val + scale * sum);
}
THTensor_(free)(gradOut);
THTensor_(free)(in);
}
}
#endif
|
GB_binop__band_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__band_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__band_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__band_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__band_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__band_uint16)
// A*D function (colscale): GB (_AxD__band_uint16)
// D*A function (rowscale): GB (_DxB__band_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__band_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__band_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_uint16)
// C=scalar+B GB (_bind1st__band_uint16)
// C=scalar+B' GB (_bind1st_tran__band_uint16)
// C=A+scalar GB (_bind2nd__band_uint16)
// C=A'+scalar GB (_bind2nd_tran__band_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) & (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_UINT16 || GxB_NO_BAND_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the BAND (bitwise-and) operator, all three matrices dense.
// Generated wrapper around the shared dense ewise3 template.
GrB_Info GB (_Cdense_ewise3_noaccum__band_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* controls
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, with the
// BAND operator as the accumulator.  B_ek_slicing describes how B's entries
// are partitioned across B_ntasks tasks on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__band_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C, with the BAND
// operator as the accumulator.  p_bwork points at the scalar, already
// typecast to uint16_t by the caller.
// Fix: the generated code had a second, unreachable `return (GrB_SUCCESS);`
// after the inner block (which already returns); the dead statement is
// removed.  Behavior is unchanged.
GrB_Info GB (_Cdense_accumb__band_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// BAND operator entrywise.  Cx is the raw value array of C.
GrB_Info GB (_AxD__band_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// BAND operator entrywise.
GrB_Info GB (_DxB__band_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where "+" is the BAND operator.  The
// mask M is optional (structural and/or complemented).  C_to_M/A/B map
// vectors of C to vectors of the inputs; TaskList partitions the work.
// The GB_WERK workspaces are freed by GB_FREE_WORK after the template runs.
GrB_Info GB (_AaddB__band_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult method 01: C = A.*B or C<M> = A.*B, with ".*" being the BAND
// operator, dispatched through the GB_emult_01 meta-template.
GrB_Info GB (_AemultB_01__band_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  flipxy requests f(y,x) instead of f(x,y); since
// GB_BINOP_FLIP is 0 for BAND (commutative), the flip branch compiles out.
GrB_Info GB (_AemultB_02__band_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B when M is sparse/hyper and both A and B
// are bitmap/full; the mask is iterated, not the inputs.
GrB_Info GB (_AemultB_03__band_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is held
// in bitmap form.
GrB_Info GB (_AemultB_bitmap__band_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the BAND operator with the scalar x bound to the
// first argument, over all bnz entries of B.  Bb is B's bitmap (may be
// NULL for a full matrix); absent entries are skipped.  Cx may alias Bx.
GrB_Info GB (_bind1st__band_uint16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in B's bitmap contribute to C
        if (GBB (Bb, p))
        {
            uint16_t bval = GBX (Bx, p, false) ;
            Cx [p] = (x) & (bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the BAND operator with the scalar y bound to the
// second argument, over all anz entries of A.  Ab is A's bitmap (may be
// NULL for a full matrix); absent entries are skipped.  Cx may alias Ax.
GrB_Info GB (_bind2nd__band_uint16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in A's bitmap contribute to C
        if (GBB (Ab, p))
        {
            uint16_t aval = GBX (Ax, p, false) ;
            Cx [p] = (aval) & (y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x) & (aij) ; \
}

// C = op (x, A'): transpose A and apply the BAND operator with the scalar
// x bound to the first argument.  GB_ATYPE is temporarily redefined because
// GB_unop_transpose.c reads the input type from it, and here A supplies the
// operator's *second* argument.  The trailing redefinition after #endif is
// unreachable cleanup kept from the generator.
GrB_Info GB (_bind1st_tran__band_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij) & (y) ; \
}

// C = op (A', y): transpose A and apply the BAND operator with the scalar
// y bound to the second argument.
GrB_Info GB (_bind2nd_tran__band_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
SpatialMaxUnpooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialMaxUnpooling.c"
#else
/* Scatter one frame of inputs into the (already zeroed) output using the
 * stored argmax indices: output[k][ind[k][i][j]] = input[k][i][j] for each
 * of the nslices feature maps, processed in parallel over k.
 * Out-of-range indices are only recorded inside the loop and reported with
 * THError afterwards, because raising inside an omp parallel region is
 * unsafe.  NOTE(review): if several threads hit bad indices, error_index
 * keeps whichever thread wrote last; only one offending index is reported. */
static void THNN_(SpatialMaxUnpooling_updateOutput_frame)(real *input_p, real *output_p,
                                                          THIndex_t *ind_p,
                                                          int nslices,
                                                          int iwidth, int iheight,
                                                          int owidth, int oheight)
{
  int k;
  int has_error = 0;
  THIndex_t error_index = 0;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    real *output_p_k = output_p + k*owidth*oheight;
    real *input_p_k = input_p + k*iwidth*iheight;
    THIndex_t *ind_p_k = ind_p + k*iwidth*iheight;

    int i, j;
    THIndex_t maxp;
    for(i = 0; i < iheight; i++)
    {
      for(j = 0; j < iwidth; j++)
      {
        maxp = ind_p_k[i*iwidth + j] - TH_INDEX_BASE; /* retrieve position of max */
        if(maxp<0 || maxp>=owidth*oheight){
#pragma omp critical
          {
            has_error = 1;
            error_index = maxp;
          }
        } else {
          output_p_k[maxp] = input_p_k[i*iwidth + j]; /* update output */
        }
      }
    }
  }
  if (has_error) {
    THError("found an invalid max index %ld (output volumes are of size %dx%d)",
        error_index, oheight, owidth);
  }
}
/* Forward pass of spatial max-unpooling: places each input value at the
 * output position recorded in `indices`, everything else stays zero.
 * Accepts a 3D (C,H,W) or 4D batched (N,C,H,W) input; output is resized to
 * (.., oheight, owidth) and zero-filled before the scatter. */
void THNN_(SpatialMaxUnpooling_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *output,
    THIndexTensor *indices,
    int owidth, int oheight)
{
  /* dimension layout for the 3D case; bumped by one in batch mode */
  int dimw = 2;
  int dimh = 1;
  int nbatch = 1;
  int nslices;
  int iheight;
  int iwidth;
  real *input_data;
  real *output_data;
  THIndex_t *indices_data;

  AT_CHECK(!input->is_empty() && (input->dim() == 3 || input->dim() == 4),
           "non-empty 3D or 4D (batch mode) tensor expected for input, but got sizes: ", input->sizes());
  THNN_CHECK_SHAPE_INDICES(input, indices);

  if (input->dim() == 4)
  {
    nbatch = input->size(0);
    dimw++;
    dimh++;
  }

  /* sizes */
  nslices = input->size(dimh-1);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);

  /* get contiguous input and indices */
  input = THTensor_(newContiguous)(input);
  indices = THIndexTensor_(newContiguous)(indices);

  /* resize output */
  if (input->dim() == 3)
  {
    THTensor_(resize3d)(output, nslices, oheight, owidth);
    THTensor_(zero)(output);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    THNN_(SpatialMaxUnpooling_updateOutput_frame)(input_data, output_data,
                                                  indices_data,
                                                  nslices,
                                                  iwidth, iheight,
                                                  owidth, oheight);
  }
  else
  {
    int p;

    THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth);
    THTensor_(zero)(output);

    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    indices_data = THIndexTensor_(data)(indices);

    /* one frame call per batch element */
    for (p = 0; p < nbatch; p++)
    {
      THNN_(SpatialMaxUnpooling_updateOutput_frame)(
          input_data+p*nslices*iwidth*iheight,
          output_data+p*nslices*owidth*oheight,
          indices_data+p*nslices*iwidth*iheight,
          nslices,
          iwidth, iheight,
          owidth, oheight);
    }
  }

  /* cleanup: release the contiguous copies made above */
  THTensor_(free)(input);
  THIndexTensor_(free)(indices);
}
/* Backward pass for one frame: gather gradients back from the unpooled
 * positions, gradInput[k][i][j] = gradOutput[k][ind[k][i][j]].
 * NOTE(review): unlike the forward frame, THError is raised directly inside
 * the omp parallel region here -- presumably tolerated because it aborts,
 * but inconsistent with the forward path; verify against upstream intent. */
static void THNN_(SpatialMaxUnpooling_updateGradInput_frame)(real *gradInput_p, real *gradOutput_p,
                                                             THIndex_t *ind_p,
                                                             int nslices,
                                                             int iwidth, int iheight,
                                                             int owidth, int oheight)
{
  int k;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    real *gradInput_p_k = gradInput_p + k*iwidth*iheight;
    real *gradOutput_p_k = gradOutput_p + k*owidth*oheight;
    THIndex_t *ind_p_k = ind_p + k*iwidth*iheight;

    int i, j;
    THIndex_t maxp;
    for(i = 0; i < iheight; i++)
    {
      for(j = 0; j < iwidth; j++)
      {
        maxp = ind_p_k[i*iwidth + j] - TH_INDEX_BASE; /* retrieve position of max */
        if(maxp < 0 || maxp >= owidth * oheight) {
            THError("invalid max index %ld, owidth= %d, oheight= %d", maxp, owidth, oheight);
        }
        gradInput_p_k[i*iwidth + j] = gradOutput_p_k[maxp]; /* update gradient */
      }
    }
  }
}
/* Backward pass of spatial max-unpooling: routes each gradOutput value back
 * to the input position it came from, using the same `indices` as forward.
 * gradInput is resized to match input and filled by the frame helper.
 * Raises if gradOutput's spatial size disagrees with (oheight, owidth). */
void THNN_(SpatialMaxUnpooling_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradInput,
    THIndexTensor *indices,
    int owidth, int oheight)
{
  /* dimension layout for the 3D case; bumped by one in batch mode */
  int dimw = 2;
  int dimh = 1;
  int nbatch = 1;
  int nslices;
  int iheight;
  int iwidth;
  real *gradInput_data;
  real *gradOutput_data;
  THIndex_t *indices_data;

  THNN_CHECK_SHAPE_INDICES(input, indices);

  /* get contiguous gradOutput and indices */
  gradOutput = THTensor_(newContiguous)(gradOutput);
  indices = THIndexTensor_(newContiguous)(indices);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->dim() == 4) {
    nbatch = input->size(0);
    dimw++;
    dimh++;
  }

  /* sizes */
  nslices = input->size(dimh-1);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);

  if(owidth!=gradOutput->size(dimw) || oheight!=gradOutput->size(dimh)){
    THError("Inconsistent gradOutput size. oheight= %d, owidth= %d, gradOutput: %dx%d",
            oheight, owidth, gradOutput->size(dimh), gradOutput->size(dimw));
  }

  /* get raw pointers */
  gradInput_data = THTensor_(data)(gradInput);
  gradOutput_data = THTensor_(data)(gradOutput);
  indices_data = THIndexTensor_(data)(indices);

  /* backprop */
  if (input->dim() == 3)
  {
    THNN_(SpatialMaxUnpooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                     indices_data,
                                                     nslices,
                                                     iwidth, iheight,
                                                     owidth, oheight);
  }
  else
  {
    int p;
    /* one frame call per batch element */
    for (p = 0; p < nbatch; p++)
    {
      THNN_(SpatialMaxUnpooling_updateGradInput_frame)(gradInput_data+p*nslices*iwidth*iheight, gradOutput_data+p*nslices*owidth*oheight,
                                                       indices_data+p*nslices*iwidth*iheight,
                                                       nslices,
                                                       iwidth, iheight,
                                                       owidth, oheight);
    }
  }

  /* cleanup: release the contiguous copies made above */
  THTensor_(free)(gradOutput);
  THIndexTensor_(free)(indices);
}
#endif
|
pmghosts.c | #include <string.h>
#include <math.h>
#include <mpi.h>
#include <fastpm/libfastpm.h>
#include <fastpm/logging.h>
#include "pmpfft.h"
#include "pmghosts.h"
#ifdef ENABLE_VALGRIND
#include </usr/include/valgrind/memcheck.h>
#endif
typedef void (*pm_iter_ghosts_func)(PM * pm, PMGhostData * ppd, void * userdata);
/* Tear down a PMGhostData created by pm_ghosts_create(_full):
 * the ghost particle store (if one was built), the ghost-to-parent index
 * map, the four per-rank count/offset tables, and the struct itself. */
void pm_ghosts_free(PMGhostData * pgd) {
    FastPMStore * ghosts = pgd->p;
    if(ghosts) {
        fastpm_store_destroy(ghosts);
        free(ghosts);
    }
    fastpm_memory_free(pgd->pm->mem, pgd->ighost_to_ipar);
    free(pgd->Osend);
    free(pgd->Nsend);
    free(pgd->Orecv);
    free(pgd->Nrecv);
    free(pgd);
}
/* For every particle in pgd->source, determine which remote ranks its
 * painting window touches and invoke iter_func once per (particle, rank)
 * pair.  The window is [pos + Below, pos + Above] in grid units; a rank is
 * visited at most once per particle (deduplicated via the ranks[] list).
 * NOTE(review): ranks[] is a fixed 1000-entry buffer with no bounds check;
 * presumably the window can never touch that many ranks -- verify. */
static void
pm_iter_ghosts(PM * pm, PMGhostData * pgd,
    pm_iter_ghosts_func iter_func, void * userdata)
{
    ptrdiff_t i;
    for (i = 0; i < pgd->source->np; i ++) {
        PMGhostData localppd = *pgd;
        double pos[3];
        int rank;
        fastpm_store_get_position(pgd->source, i, pos);

        int d;
        /* how far the window expands. */
        int left[3];
        int right[3];
        for(d = 0; d < 3; d ++) {
            /* this condition is not tightest for CIC painting, because
             * a particle touches a cell doesn't mean cic touches the left edge
             * of the cell.
             * */
            left[d] = floor(pos[d] * pm->InvCellSize[d] + pgd->Below[d]);
            right[d] = floor(pos[d] * pm->InvCellSize[d] + pgd->Above[d]);
        }

        /* probe neighbours */
        int j[3];
        int ranks[1000];
        int used = 0;
        localppd.ipar = i;
        /* no need to run the z loop because the decomposition is in xy */
        for(j[2] = left[2]; j[2] <= right[2]; j[2] ++)
        for(j[0] = left[0]; j[0] <= right[0]; j[0] ++)
        for(j[1] = left[1]; j[1] <= right[1]; j[1] ++) {
            rank = pm_ipos_to_rank(pm, j);
            /* local cells need no ghosts */
            if(LIKELY(rank == pm->ThisTask)) continue;
            int ptr;
            /* skip ranks we already emitted for this particle */
            for(ptr = 0; ptr < used; ptr++) {
                if(rank == ranks[ptr]) break;
            }
            if(UNLIKELY(ptr == used)) {
                ranks[used++] = rank;
                localppd.rank = rank;
                localppd.reason = j;
                iter_func(pm, &localppd, userdata);
            }
        }
    }
}
/* pm_iter_ghosts callback: tally one ghost destined for ppd->rank.
 * The atomic guards against a parallelized iteration; the current
 * pm_iter_ghosts loop is serial, so it is defensive here. */
static void
count_ghosts(PM * pm, PMGhostData * pgd, void * userdata)
{
#pragma omp atomic
    pgd->Nsend[pgd->rank] ++;
}
/* pm_iter_ghosts callback: pack the current particle into the send buffer
 * slot for its destination rank, and record the ghost -> parent-particle
 * mapping.  Nsend[] is reused as a per-rank write cursor (it was reset to
 * zero before this pass); atomic capture makes the slot claim safe. */
static void
build_ghost_buffer(PM * pm, PMGhostData * pgd, void * userdata)
{
    FastPMPackingPlan * plan = userdata;

    int ighost;
    int offset;

#pragma omp atomic capture
    offset = pgd->Nsend[pgd->rank] ++;

    ighost = pgd->Osend[pgd->rank] + offset;

    fastpm_packing_plan_pack(plan, pgd->source, pgd->ipar,
            (char*) pgd->send_buffer + ighost * plan->elsize);

    pgd->ighost_to_ipar[ighost] = pgd->ipar;
}
/* create ghosts that can hold 'attributes';
* use pm_ghosts_send to send subsets of `attributes`;
* */
/* Convenience constructor: derive the ghost window from a painting kernel
 * of the given support (support == 2 for CIC) and delegate to
 * pm_ghosts_create_full.  The lower extent is (1 - support/2) rather than
 * -support/2 because the kernel never paints onto its lower edge cell. */
PMGhostData *
pm_ghosts_create(PM * pm, FastPMStore * p,
        FastPMColumnTags attributes, int support)
{
    double Below[3]; /* in grid integer units */
    double Above[3];

    double half = support * 0.5;
    int d;
    for(d = 0; d < 3; d ++) {
        Below[d] = 1 - half;
        Above[d] = half;
    }
    return pm_ghosts_create_full(pm, p, attributes, Below, Above);
}
/* Create the ghost-exchange bookkeeping for store p on mesh pm.  below[]
 * and above[] give the window extents in grid units.  This counts ghosts
 * per destination rank, exchanges the counts with MPI_Alltoall, then
 * allocates the ghost->parent map (one entry per *sent* ghost) and the
 * ghost store pgd->p (one slot per *received* ghost).  The actual data
 * exchange happens later in pm_ghosts_send.  Free with pm_ghosts_free. */
PMGhostData *
pm_ghosts_create_full(PM * pm, FastPMStore * p,
        FastPMColumnTags attributes,
        double below[],
        double above[]
    )
{
    PMGhostData * pgd = malloc(sizeof(pgd[0]));
    pgd->pm = pm;
    pgd->source = p;
    int d;
    for(d = 0; d < 3; d++) {
        pgd->Below[d] = below[d];
        pgd->Above[d] = above[d];
    }
    pgd->ighost_to_ipar = NULL;

    /* per-rank send/recv counts and offsets */
    pgd->Nsend = calloc(pm->NTask, sizeof(int));
    pgd->Osend = calloc(pm->NTask, sizeof(int));
    pgd->Nrecv = calloc(pm->NTask, sizeof(int));
    pgd->Orecv = calloc(pm->NTask, sizeof(int));

    size_t Nsend;
    size_t Nrecv;

    /* first pass: only count how many ghosts go to each rank */
    memset(pgd->Nsend, 0, sizeof(pgd->Nsend[0]) * pm->NTask);
    pm_iter_ghosts(pm, pgd, count_ghosts, NULL);

    Nsend = cumsum(pgd->Osend, pgd->Nsend, pm->NTask);

    MPI_Alltoall(pgd->Nsend, 1, MPI_INT, pgd->Nrecv, 1, MPI_INT, pm->Comm2D);
    Nrecv = cumsum(pgd->Orecv, pgd->Nrecv, pm->NTask);

    /* diagnostics across the communicator */
    double nmin, nmax, nmean, nstd;
    MPIU_stats(pm->Comm2D, Nsend, "<->s", &nmin, &nmean, &nmax, &nstd);

    fastpm_info("Sending ghosts: min = %g max = %g mean = %g std = %g\n",
            nmin, nmax, nmean, nstd);

    MPIU_stats(pm->Comm2D, Nrecv, "<->s", &nmin, &nmean, &nmax, &nstd);

    fastpm_info("Receiving ghosts: min = %g max = %g mean = %g std = %g\n",
            nmin, nmax, nmean, nstd);

    pgd->ighost_to_ipar = fastpm_memory_alloc(pm->mem, "Ghost2Par", Nsend * sizeof(int), FASTPM_MEMORY_HEAP);

    /* the ghost store inherits the source's metadata */
    pgd->p = malloc(sizeof(pgd->p[0]));
    fastpm_store_init(pgd->p, pgd->source->name, Nrecv, attributes, FASTPM_MEMORY_HEAP);
    memcpy(&pgd->p->meta, &pgd->source->meta, sizeof(pgd->source->meta));

    return pgd;
}
/* Fill has_ghosts[i] with 1 for every source particle i that produced at
 * least one ghost, 0 otherwise.  has_ghosts must hold source->np entries. */
void
pm_ghosts_has_ghosts(PMGhostData * pgd, uint8_t * has_ghosts)
{
    /* total ghosts sent from this rank */
    size_t total = cumsum(NULL, pgd->Nsend, pgd->pm->NTask);
    ptrdiff_t i;

    memset(has_ghosts, 0, pgd->source->np * sizeof(has_ghosts[0]));

    for(i = 0; i < total; i ++) {
        has_ghosts[pgd->ighost_to_ipar[i]] = 1;
    }
}
/* Pack the requested columns of the source particles into per-rank send
 * buffers, exchange them with MPI_Alltoallv, and unpack into the ghost
 * store pgd->p.  `attributes` must be a subset of what the ghost store was
 * created with.  Nsend[] is zeroed and reused as write cursors by
 * build_ghost_buffer; cumsum restores its totals via the offsets. */
void
pm_ghosts_send(PMGhostData * pgd, FastPMColumnTags attributes)
{
    PM * pm = pgd->pm;
    ptrdiff_t i;
    size_t Nsend;
    size_t Nrecv;

    Nsend = cumsum(pgd->Osend, pgd->Nsend, pm->NTask);
    Nrecv = cumsum(pgd->Orecv, pgd->Nrecv, pm->NTask);

    FastPMPackingPlan plan[1];

    fastpm_packing_plan_init(plan, pgd->p, attributes);

    pgd->send_buffer = fastpm_memory_alloc(pm->mem, "SendBuf", Nsend * plan->elsize, FASTPM_MEMORY_STACK);
    pgd->recv_buffer = fastpm_memory_alloc(pm->mem, "RecvBuf", Nrecv * plan->elsize, FASTPM_MEMORY_STACK);

    /* build buffer: second iteration packs the particles counted earlier */
    memset(pgd->Nsend, 0, sizeof(pgd->Nsend[0]) * pm->NTask);

    pm_iter_ghosts(pm, pgd, build_ghost_buffer, plan);

    /* exchange */
    pgd->p->np = Nrecv;

    /* one MPI element = one packed ghost record */
    MPI_Datatype GHOST_TYPE;
    MPI_Type_contiguous(plan->elsize, MPI_BYTE, &GHOST_TYPE);
    MPI_Type_commit(&GHOST_TYPE);
    MPI_Alltoallv_sparse(pgd->send_buffer, pgd->Nsend, pgd->Osend, GHOST_TYPE,
        pgd->recv_buffer, pgd->Nrecv, pgd->Orecv, GHOST_TYPE,
        pm->Comm2D);
    MPI_Type_free(&GHOST_TYPE);

#pragma omp parallel for
    for(i = 0; i < Nrecv; i ++) {
        fastpm_packing_plan_unpack(plan,
            pgd->p, i,
            (char*) pgd->recv_buffer + i * plan->elsize);
    }

    fastpm_memory_free(pm->mem, pgd->recv_buffer);
    fastpm_memory_free(pm->mem, pgd->send_buffer);
}
/* Reverse path of pm_ghosts_send: ship one column of the ghost store back
 * to the ranks that own the parent particles, and fold each returned ghost
 * value into its parent via `reduce`.  Note the buffer naming is mirrored
 * on purpose: recv_buffer is packed and *sent* here (ghosts travel back),
 * so the Alltoallv uses Nrecv/Orecv on the send side and Nsend/Osend on
 * the receive side. */
void
pm_ghosts_reduce(PMGhostData * pgd, FastPMColumnTags attribute,
        reduce_func reduce,
        void * userdata
)
{
    int ci = fastpm_store_find_column_id(pgd->p, attribute);

    PM * pm = pgd->pm;
    size_t Nsend = cumsum(NULL, pgd->Nsend, pm->NTask);
    size_t Nrecv = cumsum(NULL, pgd->Nrecv, pm->NTask);
    ptrdiff_t i;

    size_t elsize;
    elsize = pgd->p->_column_info[ci].elsize;

    pgd->recv_buffer = fastpm_memory_alloc(pm->mem, "RecvBuf", Nrecv * elsize, FASTPM_MEMORY_STACK);
    pgd->send_buffer = fastpm_memory_alloc(pm->mem, "SendBuf", Nsend * elsize, FASTPM_MEMORY_STACK);

    /* pack the ghost column for the return trip */
#pragma omp parallel for
    for(i = 0; i < pgd->p->np; i ++) {
        pgd->p->_column_info[ci].pack(pgd->p, i, ci,
                (char*) pgd->recv_buffer + i * elsize);
    }

    MPI_Datatype GHOST_TYPE;
    MPI_Type_contiguous(elsize, MPI_BYTE, &GHOST_TYPE);
    MPI_Type_commit(&GHOST_TYPE);
    MPI_Alltoallv_sparse(pgd->recv_buffer, pgd->Nrecv, pgd->Orecv, GHOST_TYPE,
        pgd->send_buffer, pgd->Nsend, pgd->Osend, GHOST_TYPE,
        pm->Comm2D);
    MPI_Type_free(&GHOST_TYPE);

    /* temporary store to unpack the returned ghost values into */
    FastPMStore q[1];
    fastpm_store_init(q, pgd->p->name, Nsend, attribute, FASTPM_MEMORY_HEAP);

    /* now reduce the attributes. */
    int ighost;

    /* this loop is not parallel because multiple ghosts can be for the same ipar,
     * in which case we have a race condition.
     * we can fix this by carefully working with ipar (it should / could be made sorted)
     * but unlikly worth the effort.
     * */
    for(ighost = 0; ighost < Nsend; ighost ++) {
        pgd->p->_column_info[ci].unpack(q, ighost, ci,
                    (char*) pgd->send_buffer + ighost * elsize);
    }
    for(ighost = 0; ighost < Nsend; ighost ++) {
        reduce(q, ighost, pgd->source, pgd->ighost_to_ipar[ighost],
                    ci, userdata);
    }
    fastpm_store_destroy(q);
    fastpm_memory_free(pm->mem, pgd->send_buffer);
    fastpm_memory_free(pm->mem, pgd->recv_buffer);
}
|
CLHelper.h | //------------------------------------------
//--cambine:helper function for OpenCL
//--programmer: Jianbin Fang
//--date: 27/12/2010
//------------------------------------------
#ifndef _CL_HELPER_
#define _CL_HELPER_
#include <CL/cl.h>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
using std::string;
using std::ifstream;
using std::cerr;
using std::endl;
using std::cout;
//#pragma OPENCL EXTENSION cl_nv_compiler_options:enable
#define WORK_DIM 2 // work-items dimensions
/* Bundle of the OpenCL objects shared by the helper functions below. */
struct oclHandleStruct {
  cl_context context;       // OpenCL context created in _clInit
  cl_device_id *devices;    // malloc'ed device list
  cl_command_queue queue;   // command queue on the selected device
  cl_program program;       // program built from the kernel binary
  cl_int cl_status;         // last CL status code for error reporting
  std::string error_str;    // last error message
  std::vector<cl_kernel> kernel;  // kernels in kernel_names order
};

// NOTE(review): these objects are *defined* (not just declared) in a header;
// including this file from more than one translation unit will cause
// multiple-definition link errors -- confirm it is only included once.
struct oclHandleStruct oclHandles;

char kernel_file[100] = "Kernels.cl";
int total_kernels = 2;
string kernel_names[2] = {"BFS_1", "BFS_2"};
int work_group_size = 512;
int device_id_inused = 0; // deviced id used (default : 0)
// Read an entire file into a freshly malloc'ed buffer.
// On success returns 0, stores the buffer in *data (caller frees with
// free()) and the number of bytes read in *size.  Returns -1 on any
// failure (bad arguments, open/seek failure, allocation failure).
// Fixes: open in binary mode ("rb") -- the kernel image is raw data and
// text mode would corrupt it on platforms that translate line endings;
// check ftell and malloc results before using them.
int read_kernel_file(const char* filename, uint8_t** data, size_t* size) {
  if (nullptr == filename || nullptr == data || nullptr == size)
    return -1;

  FILE* fp = fopen(filename, "rb");
  if (nullptr == fp) {
    fprintf(stderr, "Failed to load kernel.");
    return -1;
  }

  fseek(fp, 0, SEEK_END);
  long fsize = ftell(fp);
  if (fsize < 0) {  // ftell failed
    fclose(fp);
    return -1;
  }
  rewind(fp);

  uint8_t* buf = (uint8_t*)malloc(fsize);
  if (nullptr == buf && fsize > 0) {  // malloc(0) may legitimately be NULL
    fclose(fp);
    return -1;
  }
  *size = fread(buf, 1, fsize, fp);
  *data = buf;
  fclose(fp);
  return 0;
}
/*
* Converts the contents of a file into a string
*/
/*
 * Converts the contents of a file into a string.
 * Throws a string error message if the file cannot be opened.
 * Note: the result is built from a NUL-terminated C buffer, so the
 * returned string stops at the first embedded '\0' in the file.
 */
string FileToString(const string fileName) {
  ifstream f(fileName.c_str(), ifstream::in | ifstream::binary);

  try {
    if (f.is_open()) {
      // determine the file length by seeking to the end
      f.seekg(0, ifstream::end);
      size_t length = f.tellg();
      f.seekg(0, ifstream::beg);

      char *buffer = new char[length + 1];
      if (!buffer)
        throw(string("Could not allocate memory"));

      f.read(buffer, length);
      f.close();
      buffer[length] = '\0';

      string contents = buffer;
      delete[] buffer;
      return contents;
    }
  } catch (std::string msg) {
    cerr << "Exception caught in FileToString(): " << msg << endl;
    if (f.is_open())
      f.close();
  } catch (...) {
    cerr << "Exception caught in FileToString()" << endl;
    if (f.is_open())
      f.close();
  }

  string errorMsg = "FileToString()::Error: Unable to open file " + fileName;
  throw(errorMsg);
}
//---------------------------------------
// Read command line parameters
//
//---------------------------------------
// Read command line parameters:
//   -g <n>  work group size
//   -d <n>  device id to use
// Fixes: start at argv[1] (argv[0] is the program name, and the old loop
// inspected its second character as if it were an option); only treat
// arguments that begin with '-' and have an option letter as options;
// scan with %d to match the int targets (%u with an int* is a format
// mismatch).
void _clCmdParams(int argc, char *argv[]) {
  for (int i = 1; i < argc; ++i) {
    if (argv[i][0] != '-' || argv[i][1] == '\0')
      continue;  // not an option argument
    switch (argv[i][1]) {
    case 'g': //--g stands for size of work group
      if (++i < argc) {
        sscanf(argv[i], "%d", &work_group_size);
      } else {
        std::cerr << "Could not read argument after option " << argv[i - 1]
                  << std::endl;
        // NOTE(review): `throw;` with no active exception calls
        // std::terminate -- presumably intentional abort; confirm.
        throw;
      }
      break;
    case 'd': //--d stands for device id used in computation
      if (++i < argc) {
        sscanf(argv[i], "%d", &device_id_inused);
      } else {
        std::cerr << "Could not read argument after option " << argv[i - 1]
                  << std::endl;
        throw;
      }
      break;
    default:;
    }
  }
}
//---------------------------------------
// Initlize CL objects
//--description: there are 5 steps to initialize all the OpenCL objects needed
//--revised on 04/01/2011: get the number of devices and
// devices have no relationship with context
//---------------------------------------
// Initialize the global OpenCL state in `oclHandles`:
//   1. pick the first platform,
//   2/3. get one device (CL_DEVICE_TYPE_DEFAULT) and create a context,
//   4. create a command queue on devices[device_id_inused],
//   5. load a prebuilt kernel binary ("kernel.pocl"), build the program,
//      and create the kernels listed in kernel_names.
// Throws a std::string message on any failure.
// NOTE(review): allPlatforms is leaked -- its free() lives inside the
// commented-out block below.
// NOTE(review): only ONE device is allocated (deviceListSize = 1), yet
// DEVICE_ID_INUSED indexes the array; any value other than 0 reads past
// the allocation -- confirm callers never set -d > 0.
void _clInit() {
  printf("_clInit()\n");

  int DEVICE_ID_INUSED = device_id_inused;
  cl_int resultCL;

  oclHandles.context = NULL;
  oclHandles.devices = NULL;
  oclHandles.queue = NULL;
  oclHandles.program = NULL;

  cl_uint deviceListSize;

  //-----------------------------------------------
  //--cambine-1: find the available platforms and select one
  cl_uint numPlatforms = 1;
  cl_platform_id targetPlatform = NULL;

  cl_platform_id *allPlatforms =
      (cl_platform_id *)malloc(numPlatforms * sizeof(cl_platform_id));

  resultCL = clGetPlatformIDs(numPlatforms, allPlatforms, NULL);
  if (resultCL != CL_SUCCESS)
    throw(string("InitCL()::Error: Getting platform ids (clGetPlatformIDs)"));

  // Select the target platform. Default: first platform
  targetPlatform = allPlatforms[0];
  /*for (int i = 0; i < numPlatforms; i++)
  {
      char pbuff[128];
      resultCL = clGetPlatformInfo( allPlatforms[i],
                                      CL_PLATFORM_VENDOR,
                                      sizeof(pbuff),
                                      pbuff,
                                      NULL);
      if (resultCL != CL_SUCCESS)
          throw (string("InitCL()::Error: Getting platform info (clGetPlatformInfo)"));
          //printf("vedor is %s\n",pbuff);
  }
  free(allPlatforms);*/

  //-----------------------------------------------
  //--cambine-2: create an OpenCL context
  /*cl_context_properties cprops[3] = { CL_CONTEXT_PLATFORM,
  (cl_context_properties)targetPlatform, 0 };
  oclHandles.context = clCreateContextFromType(cprops,
                                              CL_DEVICE_TYPE_GPU,
                                              NULL,
                                              NULL,
                                              &resultCL);

  if ((resultCL != CL_SUCCESS) || (oclHandles.context == NULL))
      throw (string("InitCL()::Error: Creating Context
  (clCreateContextFromType)"));
  //-----------------------------------------------
  //--cambine-3: detect OpenCL devices
  // First, get the size of device list
  oclHandles.cl_status = clGetDeviceIDs(targetPlatform, CL_DEVICE_TYPE_GPU, 0,
  NULL, &deviceListSize);
  if(oclHandles.cl_status!=CL_SUCCESS){
      throw(string("exception in _clInit -> clGetDeviceIDs"));
  }
  if (deviceListSize == 0)
      throw(string("InitCL()::Error: No devices found."));

  printf("OK1()\n");

  //std::cout<<"device number:"<<deviceListSize<<std::endl;*/

  // Now, allocate the device list
  deviceListSize = 1;
  oclHandles.devices =
      (cl_device_id *)malloc(deviceListSize * sizeof(cl_device_id));

  if (oclHandles.devices == 0)
    throw(string("InitCL()::Error: Could not allocate memory."));

  //* Next, get the device list data
  oclHandles.cl_status =
      clGetDeviceIDs(targetPlatform, CL_DEVICE_TYPE_DEFAULT, deviceListSize,
                     oclHandles.devices, NULL);
  if (oclHandles.cl_status != CL_SUCCESS) {
    throw(string("exception in _clInit -> clGetDeviceIDs-2"));
  }

  oclHandles.context = clCreateContext(NULL, deviceListSize, oclHandles.devices,
                                       NULL, NULL, &resultCL);

  if ((resultCL != CL_SUCCESS) || (oclHandles.context == NULL))
    throw(string("InitCL()::Error: Creating Context (clCreateContext)"));

  //-----------------------------------------------
  //--cambine-4: Create an OpenCL command queue
  oclHandles.queue = clCreateCommandQueue(
      oclHandles.context, oclHandles.devices[DEVICE_ID_INUSED], 0, &resultCL);
  //printf("resultCL=%d, queue=0x%x\n", resultCL, oclHandles.queue);

  if ((resultCL != CL_SUCCESS) || (oclHandles.queue == NULL))
    throw(string("InitCL()::Creating Command Queue. (clCreateCommandQueue)"));

  //-----------------------------------------------
  //--cambine-5: Load CL file, build CL program object, create CL kernel object
  /*std::string source_str = FileToString(kernel_file);
  const char * source    = source_str.c_str();
  size_t sourceSize[]    = { source_str.length() };*/

  //oclHandles.program = clCreateProgramWithBuiltInKernels(
  //    oclHandles.context, 1, &oclHandles.devices[DEVICE_ID_INUSED],
  //    "BFS_1;BFS_2", &resultCL);

  /*oclHandles.program = clCreateProgramWithSource(oclHandles.context,
                                                  1,
                                                  &source,
                                                  sourceSize,
                                                  &resultCL);*/

  // read kernel binary from file
  uint8_t *kernel_bin = NULL;
  size_t kernel_size;
  cl_int binary_status = 0;
  if (0 != read_kernel_file("kernel.pocl", &kernel_bin, &kernel_size))
    std::abort();

  oclHandles.program = clCreateProgramWithBinary(
      oclHandles.context, 1, &oclHandles.devices[DEVICE_ID_INUSED], &kernel_size, (const uint8_t**)&kernel_bin, &binary_status, &resultCL);
  free(kernel_bin);

  if ((resultCL != CL_SUCCESS) || (oclHandles.program == NULL))
    throw(string("InitCL()::Error: Loading Binary into cl_program. "
                 "(clCreateProgramWithBinary)"));

  // insert debug information
  // std::string options= "-cl-nv-verbose"; //Doesn't work on AMD machines
  // options += " -cl-nv-opt-level=3";
  resultCL = clBuildProgram(oclHandles.program, deviceListSize,
                            oclHandles.devices, NULL, NULL, NULL);

  if ((resultCL != CL_SUCCESS) || (oclHandles.program == NULL)) {
    // on build failure, fetch and print the build log before throwing
    cerr << "InitCL()::Error: In clBuildProgram" << endl;

    size_t length;
    resultCL = clGetProgramBuildInfo(oclHandles.program,
                                     oclHandles.devices[DEVICE_ID_INUSED],
                                     CL_PROGRAM_BUILD_LOG, 0, NULL, &length);
    if (resultCL != CL_SUCCESS)
      throw(string("InitCL()::Error: Getting Program build "
                   "info(clGetProgramBuildInfo)"));

    char *buffer = (char *)malloc(length);
    resultCL = clGetProgramBuildInfo(
        oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED],
        CL_PROGRAM_BUILD_LOG, length, buffer, NULL);
    if (resultCL != CL_SUCCESS)
      throw(string("InitCL()::Error: Getting Program build "
                   "info(clGetProgramBuildInfo)"));

    cerr << buffer << endl;
    free(buffer);

    throw(string("InitCL()::Error: Building Program (clBuildProgram)"));
  }

  // get program information in intermediate representation
#ifdef PTX_MSG
  size_t binary_sizes[deviceListSize];
  char *binaries[deviceListSize];

  // figure out number of devices and the sizes of the binary for each device.
  oclHandles.cl_status =
      clGetProgramInfo(oclHandles.program, CL_PROGRAM_BINARY_SIZES,
                       sizeof(size_t) * deviceListSize, &binary_sizes, NULL);
  if (oclHandles.cl_status != CL_SUCCESS) {
    throw(string("--cambine:exception in _InitCL -> clGetProgramInfo-2"));
  }

  std::cout << "--cambine:" << binary_sizes << std::endl;

  // copy over all of the generated binaries.
  for (int i = 0; i < deviceListSize; i++)
    binaries[i] = (char *)malloc(sizeof(char) * (binary_sizes[i] + 1));

  oclHandles.cl_status =
      clGetProgramInfo(oclHandles.program, CL_PROGRAM_BINARIES,
                       sizeof(char *) * deviceListSize, binaries, NULL);
  if (oclHandles.cl_status != CL_SUCCESS) {
    throw(string("--cambine:exception in _InitCL -> clGetProgramInfo-3"));
  }

  for (int i = 0; i < deviceListSize; i++)
    binaries[i][binary_sizes[i]] = '\0';

  std::cout << "--cambine:writing ptd information..." << std::endl;
  FILE *ptx_file = fopen("cl.ptx", "w");
  if (ptx_file == NULL) {
    throw(string("exceptions in allocate ptx file."));
  }

  fprintf(ptx_file, "%s", binaries[DEVICE_ID_INUSED]);
  fclose(ptx_file);
  std::cout << "--cambine:writing ptd information done." << std::endl;

  for (int i = 0; i < deviceListSize; i++)
    free(binaries[i]);
#endif

  // create a kernel object for each entry in kernel_names
  for (int nKernel = 0; nKernel < total_kernels; nKernel++) {
    /* get a kernel object handle for a kernel with the given name */
    cl_kernel kernel = clCreateKernel(
        oclHandles.program, (kernel_names[nKernel]).c_str(), &resultCL);

    if ((resultCL != CL_SUCCESS) || (kernel == NULL)) {
      string errorMsg = "InitCL()::Error: Creating Kernel (clCreateKernel) \"" +
                        kernel_names[nKernel] + "\"";
      throw(errorMsg);
    }

    oclHandles.kernel.push_back(kernel);
  }

  // get resource alocation information
#ifdef RES_MSG
  char *build_log;
  size_t ret_val_size;
  oclHandles.cl_status = clGetProgramBuildInfo(
      oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED],
      CL_PROGRAM_BUILD_LOG, 0, NULL, &ret_val_size);
  if (oclHandles.cl_status != CL_SUCCESS) {
    throw(string("exceptions in _InitCL -> getting resource information"));
  }

  build_log = (char *)malloc(ret_val_size + 1);
  oclHandles.cl_status = clGetProgramBuildInfo(
      oclHandles.program, oclHandles.devices[DEVICE_ID_INUSED],
      CL_PROGRAM_BUILD_LOG, ret_val_size, build_log, NULL);
  if (oclHandles.cl_status != CL_SUCCESS) {
    throw(string(
        "exceptions in _InitCL -> getting resources allocation information-2"));
  }

  build_log[ret_val_size] = '\0';
  std::cout << "--cambine:" << build_log << std::endl;
  free(build_log);
#endif
}
//---------------------------------------
// release CL objects
void _clRelease() {
char errorFlag = false;
for (int nKernel = 0; nKernel < oclHandles.kernel.size(); nKernel++) {
if (oclHandles.kernel[nKernel] != NULL) {
cl_int resultCL = clReleaseKernel(oclHandles.kernel[nKernel]);
if (resultCL != CL_SUCCESS) {
cerr << "ReleaseCL()::Error: In clReleaseKernel" << endl;
errorFlag = true;
}
oclHandles.kernel[nKernel] = NULL;
printf("clReleaseKernel()\n");
}
}
if (oclHandles.program != NULL) {
cl_int resultCL = clReleaseProgram(oclHandles.program);
if (resultCL != CL_SUCCESS) {
cerr << "ReleaseCL()::Error: In clReleaseProgram" << endl;
errorFlag = true;
}
oclHandles.program = NULL;
printf("clReleaseProgram()\n");
}
if (oclHandles.queue != NULL) {
cl_int resultCL = clReleaseCommandQueue(oclHandles.queue);
if (resultCL != CL_SUCCESS) {
cerr << "ReleaseCL()::Error: In clReleaseCommandQueue" << endl;
errorFlag = true;
}
oclHandles.queue = NULL;
printf("clReleaseCommandQueue()\n");
}
if (oclHandles.context != NULL) {
cl_int resultCL = clReleaseContext(oclHandles.context);
if (resultCL != CL_SUCCESS) {
cerr << "ReleaseCL()::Error: In clReleaseContext" << endl;
errorFlag = true;
}
oclHandles.context = NULL;
printf("clReleaseContext()\n");
}
if (oclHandles.devices != NULL) {
cl_int resultCL = clReleaseDevice(oclHandles.devices[0]);
if (resultCL != CL_SUCCESS) {
cerr << "ReleaseCL()::Error: In clReleaseDevice" << endl;
errorFlag = true;
}
free(oclHandles.devices);
printf("clReleaseDevice()\n");
}
if (errorFlag)
throw(string("ReleaseCL()::Error encountered."));
}
//--------------------------------------------------------
//--cambine:create buffer and then copy data from host to device
cl_mem _clCreateAndCpyMem(int size, void *h_mem_source) throw(string) {
  // Allocate a read-only device buffer of `size` bytes, seeded with the
  // contents of h_mem_source via CL_MEM_COPY_HOST_PTR.  The caller owns the
  // returned buffer and must release it with _clFree().
  cl_mem device_buffer =
      clCreateBuffer(oclHandles.context,
                     CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, size,
                     h_mem_source, &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clCreateAndCpyMem()"));
#endif
  return device_buffer;
}
//-------------------------------------------------------
//--cambine: create read-write buffer for devices (CL_MEM_READ_WRITE)
//--date: 17/01/2011
cl_mem _clMallocRW(int size, void *h_mem_ptr) throw(string) {
  // Allocate a read-write device buffer of `size` bytes, initialized from
  // host memory h_mem_ptr (CL_MEM_COPY_HOST_PTR).  Caller releases it.
  cl_mem device_buffer =
      clCreateBuffer(oclHandles.context,
                     CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR, size, h_mem_ptr,
                     &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clMallocRW"));
#endif
  return device_buffer;
}
//-------------------------------------------------------
//--cambine: create write-only buffer for devices, initialized from host
//           memory (CL_MEM_WRITE_ONLY | CL_MEM_COPY_HOST_PTR)
//--date: 17/01/2011
cl_mem _clMalloc(int size, void *h_mem_ptr) throw(string) {
  // Allocate a device buffer of `size` bytes initialized from h_mem_ptr.
  // NOTE(review): despite the generic name, the access flag used here is
  // CL_MEM_WRITE_ONLY -- confirm that is the intended access mode.
  cl_mem device_buffer =
      clCreateBuffer(oclHandles.context,
                     CL_MEM_WRITE_ONLY | CL_MEM_COPY_HOST_PTR, size, h_mem_ptr,
                     &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clMalloc"));
#endif
  return device_buffer;
}
//-------------------------------------------------------
//--cambine: transfer data from host to device
//--date: 17/01/2011
void _clMemcpyH2D(cl_mem d_mem, int size, const void *h_mem_ptr) throw(string) {
  // Blocking host-to-device copy: write `size` bytes from h_mem_ptr into
  // buffer d_mem.  CL_TRUE makes the enqueue synchronous.
  oclHandles.cl_status =
      clEnqueueWriteBuffer(oclHandles.queue, d_mem, CL_TRUE, 0, size,
                           h_mem_ptr, 0, NULL, NULL);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clMemcpyH2D"));
#endif
}
//--------------------------------------------------------
//--cambine:create buffer and then copy data from host to device with pinned
// memory
cl_mem _clCreateAndCpyPinnedMem(int size, float *h_mem_source) throw(string) {
  /* Copy `size` bytes of host float data into a new read-only device buffer,
   * staging through a pinned (CL_MEM_ALLOC_HOST_PTR) buffer for a faster
   * transfer.  Returns the device buffer; the caller owns it.
   *
   * Fix: the pinned staging buffer was previously neither unmapped nor
   * released, leaking one pinned allocation per call.  It is now unmapped
   * and released before returning. */
  cl_mem d_mem, d_mem_pinned;
  float *h_mem_pinned = NULL;
  d_mem_pinned = clCreateBuffer(oclHandles.context,
                                CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, size,
                                NULL, &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clCreateAndCpyMem()->d_mem_pinned"));
#endif
  d_mem = clCreateBuffer(oclHandles.context, CL_MEM_READ_ONLY, size, NULL,
                         &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clCreateAndCpyMem() -> d_mem "));
#endif
  /* Map the pinned buffer into host address space and fill it. */
  h_mem_pinned = (cl_float *)clEnqueueMapBuffer(
      oclHandles.queue, d_mem_pinned, CL_TRUE, CL_MAP_WRITE, 0, size, 0, NULL,
      NULL, &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clCreateAndCpyMem() -> clEnqueueMapBuffer"));
#endif
  int element_number = size / sizeof(float);
#pragma omp parallel for
  for (int i = 0; i < element_number; i++) {
    h_mem_pinned[i] = h_mem_source[i];
  }
  /* Blocking copy from the pinned staging area into the device buffer. */
  oclHandles.cl_status = clEnqueueWriteBuffer(
      oclHandles.queue, d_mem, CL_TRUE, 0, size, h_mem_pinned, 0, NULL, NULL);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clCreateAndCpyMem() -> clEnqueueWriteBuffer"));
#endif
  /* Fix: unmap and release the staging buffer to avoid leaking pinned
   * memory on every call. */
  oclHandles.cl_status = clEnqueueUnmapMemObject(
      oclHandles.queue, d_mem_pinned, h_mem_pinned, 0, NULL, NULL);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(
        string("excpetion in _clCreateAndCpyMem() -> clEnqueueUnmapMemObject"));
#endif
  clReleaseMemObject(d_mem_pinned);
  return d_mem;
}
//--------------------------------------------------------
//--cambine:create write only buffer on device
cl_mem _clMallocWO(int size) throw(string) {
  // Allocate an uninitialized write-only device buffer of `size` bytes.
  cl_mem device_buffer = clCreateBuffer(oclHandles.context, CL_MEM_WRITE_ONLY,
                                        size, 0, &oclHandles.cl_status);
#ifdef ERRMSG
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(string("excpetion in _clCreateMem()"));
#endif
  return device_buffer;
}
//--------------------------------------------------------
// transfer data from device to host
void _clMemcpyD2H(cl_mem d_mem, int size, void *h_mem) throw(string) {
  /* Blocking device-to-host copy: read `size` bytes from buffer d_mem into
     host memory h_mem.  CL_TRUE makes the call synchronous, so the data is
     valid when the function returns. */
  oclHandles.cl_status = clEnqueueReadBuffer(oclHandles.queue, d_mem, CL_TRUE,
                                             0, size, h_mem, 0, 0, 0);
#ifdef ERRMSG
  /* Translate the status code into a readable message.  NOTE(review): the
     string is assembled even on CL_SUCCESS (the default branch then appends
     "Unknown reason"); only the final check decides whether to throw. */
  oclHandles.error_str = "excpetion in _clCpyMemD2H -> ";
  switch (oclHandles.cl_status) {
  case CL_INVALID_COMMAND_QUEUE:
    oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE";
    break;
  case CL_INVALID_CONTEXT:
    oclHandles.error_str += "CL_INVALID_CONTEXT";
    break;
  case CL_INVALID_MEM_OBJECT:
    oclHandles.error_str += "CL_INVALID_MEM_OBJECT";
    break;
  case CL_INVALID_VALUE:
    oclHandles.error_str += "CL_INVALID_VALUE";
    break;
  case CL_INVALID_EVENT_WAIT_LIST:
    oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST";
    break;
  case CL_MEM_OBJECT_ALLOCATION_FAILURE:
    oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE";
    break;
  case CL_OUT_OF_HOST_MEMORY:
    oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
    break;
  default:
    oclHandles.error_str += "Unknown reason";
    break;
  }
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(oclHandles.error_str);
#endif
}
//--------------------------------------------------------
// set kernel arguments
void _clSetArgs(int kernel_id, int arg_idx, void *d_mem,
                int size = 0) throw(string) {
  /* Bind argument `arg_idx` of kernel `kernel_id`.
   *   size == 0: d_mem is a cl_mem handle; clSetKernelArg requires the
   *              address of the handle with sizeof(cl_mem).
   *   size  > 0: d_mem points at `size` bytes of raw argument data
   *              (scalars, local-memory sizes, ...), passed through as-is.
   * The two branches previously duplicated the whole ERRMSG switch; the
   * diagnostics are now shared below with identical behavior. */
  if (!size)
    oclHandles.cl_status = clSetKernelArg(oclHandles.kernel[kernel_id],
                                          arg_idx, sizeof(d_mem), &d_mem);
  else
    oclHandles.cl_status =
        clSetKernelArg(oclHandles.kernel[kernel_id], arg_idx, size, d_mem);
#ifdef ERRMSG
  /* NOTE(review): error_str is built even on success; only the final check
     throws (same as the original code). */
  oclHandles.error_str = "excpetion in _clSetKernelArg() ";
  switch (oclHandles.cl_status) {
  case CL_INVALID_KERNEL:
    oclHandles.error_str += "CL_INVALID_KERNEL";
    break;
  case CL_INVALID_ARG_INDEX:
    oclHandles.error_str += "CL_INVALID_ARG_INDEX";
    break;
  case CL_INVALID_ARG_VALUE:
    oclHandles.error_str += "CL_INVALID_ARG_VALUE";
    break;
  case CL_INVALID_MEM_OBJECT:
    oclHandles.error_str += "CL_INVALID_MEM_OBJECT";
    break;
  case CL_INVALID_SAMPLER:
    oclHandles.error_str += "CL_INVALID_SAMPLER";
    break;
  case CL_INVALID_ARG_SIZE:
    oclHandles.error_str += "CL_INVALID_ARG_SIZE";
    break;
  case CL_OUT_OF_RESOURCES:
    oclHandles.error_str += "CL_OUT_OF_RESOURCES";
    break;
  case CL_OUT_OF_HOST_MEMORY:
    oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
    break;
  default:
    oclHandles.error_str += "Unknown reason";
    break;
  }
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(oclHandles.error_str);
#endif
}
void _clFinish() throw(string) {
  // Block until every command queued on oclHandles.queue has completed.
  oclHandles.cl_status = clFinish(oclHandles.queue);
#ifdef ERRMSG
  // Same diagnostics as before, expressed as an if/else chain; the message
  // is still built unconditionally and thrown only on a non-success status.
  oclHandles.error_str = "excpetion in _clFinish";
  if (oclHandles.cl_status == CL_INVALID_COMMAND_QUEUE)
    oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE";
  else if (oclHandles.cl_status == CL_OUT_OF_RESOURCES)
    oclHandles.error_str += "CL_OUT_OF_RESOURCES";
  else if (oclHandles.cl_status == CL_OUT_OF_HOST_MEMORY)
    oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
  else
    oclHandles.error_str += "Unknown reasons";
  if (oclHandles.cl_status != CL_SUCCESS) {
    throw(oclHandles.error_str);
  }
#endif
}
//--------------------------------------------------------
//--cambine:enqueue kernel
void _clInvokeKernel(int kernel_id, int work_items,
                     int work_group_size) throw(string) {
  /* Enqueue kernel `kernel_id` over `work_items` work items with the given
     work-group size.  work_items is rounded UP to the next multiple of
     work_group_size, so the kernel itself must bounds-check its global id. */
  cl_uint work_dim = WORK_DIM;
  //cl_event e[1];
  if (work_items % work_group_size != 0) // process situations that work_items
                                         // cannot be divided by work_group_size
    work_items =
        work_items + (work_group_size - (work_items % work_group_size));
  size_t local_work_size[] = {work_group_size, 1};
  size_t global_work_size[] = {work_items, 1};
  /* No global offset, no wait list, no completion event. */
  oclHandles.cl_status = clEnqueueNDRangeKernel(
      oclHandles.queue, oclHandles.kernel[kernel_id], work_dim, 0,
      global_work_size, local_work_size, 0, 0, NULL);
#ifdef ERRMSG
  /* Translate the status code into a readable message.  NOTE(review): the
     string is assembled even on CL_SUCCESS; only the final check throws. */
  oclHandles.error_str = "excpetion in _clInvokeKernel() -> ";
  switch (oclHandles.cl_status) {
  case CL_INVALID_PROGRAM_EXECUTABLE:
    oclHandles.error_str += "CL_INVALID_PROGRAM_EXECUTABLE";
    break;
  case CL_INVALID_COMMAND_QUEUE:
    oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE";
    break;
  case CL_INVALID_KERNEL:
    oclHandles.error_str += "CL_INVALID_KERNEL";
    break;
  case CL_INVALID_CONTEXT:
    oclHandles.error_str += "CL_INVALID_CONTEXT";
    break;
  case CL_INVALID_KERNEL_ARGS:
    oclHandles.error_str += "CL_INVALID_KERNEL_ARGS";
    break;
  case CL_INVALID_WORK_DIMENSION:
    oclHandles.error_str += "CL_INVALID_WORK_DIMENSION";
    break;
  case CL_INVALID_GLOBAL_WORK_SIZE:
    oclHandles.error_str += "CL_INVALID_GLOBAL_WORK_SIZE";
    break;
  case CL_INVALID_WORK_GROUP_SIZE:
    oclHandles.error_str += "CL_INVALID_WORK_GROUP_SIZE";
    break;
  case CL_INVALID_WORK_ITEM_SIZE:
    oclHandles.error_str += "CL_INVALID_WORK_ITEM_SIZE";
    break;
  case CL_INVALID_GLOBAL_OFFSET:
    oclHandles.error_str += "CL_INVALID_GLOBAL_OFFSET";
    break;
  case CL_OUT_OF_RESOURCES:
    oclHandles.error_str += "CL_OUT_OF_RESOURCES";
    break;
  case CL_MEM_OBJECT_ALLOCATION_FAILURE:
    oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE";
    break;
  case CL_INVALID_EVENT_WAIT_LIST:
    oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST";
    break;
  case CL_OUT_OF_HOST_MEMORY:
    oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
    break;
  default:
    oclHandles.error_str += "Unkown reseason";
    break;
  }
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(oclHandles.error_str);
#endif
  /* The call returns without waiting for the kernel; callers that need the
     result must synchronize (e.g. via _clFinish or a blocking read). */
  //_clFinish();
  // oclHandles.cl_status = clWaitForEvents(1, &e[0]);
  // #ifdef ERRMSG
  // if (oclHandles.cl_status!= CL_SUCCESS)
  //  throw(string("excpetion in _clEnqueueNDRange() -> clWaitForEvents"));
  // #endif
}
void _clInvokeKernel2D(int kernel_id, int range_x, int range_y, int group_x,
                       int group_y) throw(string) {
  /* Enqueue kernel `kernel_id` over a 2-D range (range_x x range_y) with
     work groups of (group_x x group_y).  Unlike _clInvokeKernel, the global
     range is NOT rounded up here; callers must pass multiples of the group
     sizes (see the retained commented-out rounding code below). */
  cl_uint work_dim = WORK_DIM;
  size_t local_work_size[] = {group_x, group_y};
  size_t global_work_size[] = {range_x, range_y};
  //cl_event e[1];
  /*if(work_items%work_group_size != 0)  //process situations that work_items
     cannot be divided by work_group_size
     work_items = work_items + (work_group_size-(work_items%work_group_size));*/
  oclHandles.cl_status = clEnqueueNDRangeKernel(
      oclHandles.queue, oclHandles.kernel[kernel_id], work_dim, 0,
      global_work_size, local_work_size, 0, 0, NULL);
#ifdef ERRMSG
  /* Translate the status code into a readable message.  NOTE(review): the
     string is assembled even on CL_SUCCESS; only the final check throws. */
  oclHandles.error_str = "excpetion in _clInvokeKernel() -> ";
  switch (oclHandles.cl_status) {
  case CL_INVALID_PROGRAM_EXECUTABLE:
    oclHandles.error_str += "CL_INVALID_PROGRAM_EXECUTABLE";
    break;
  case CL_INVALID_COMMAND_QUEUE:
    oclHandles.error_str += "CL_INVALID_COMMAND_QUEUE";
    break;
  case CL_INVALID_KERNEL:
    oclHandles.error_str += "CL_INVALID_KERNEL";
    break;
  case CL_INVALID_CONTEXT:
    oclHandles.error_str += "CL_INVALID_CONTEXT";
    break;
  case CL_INVALID_KERNEL_ARGS:
    oclHandles.error_str += "CL_INVALID_KERNEL_ARGS";
    break;
  case CL_INVALID_WORK_DIMENSION:
    oclHandles.error_str += "CL_INVALID_WORK_DIMENSION";
    break;
  case CL_INVALID_GLOBAL_WORK_SIZE:
    oclHandles.error_str += "CL_INVALID_GLOBAL_WORK_SIZE";
    break;
  case CL_INVALID_WORK_GROUP_SIZE:
    oclHandles.error_str += "CL_INVALID_WORK_GROUP_SIZE";
    break;
  case CL_INVALID_WORK_ITEM_SIZE:
    oclHandles.error_str += "CL_INVALID_WORK_ITEM_SIZE";
    break;
  case CL_INVALID_GLOBAL_OFFSET:
    oclHandles.error_str += "CL_INVALID_GLOBAL_OFFSET";
    break;
  case CL_OUT_OF_RESOURCES:
    oclHandles.error_str += "CL_OUT_OF_RESOURCES";
    break;
  case CL_MEM_OBJECT_ALLOCATION_FAILURE:
    oclHandles.error_str += "CL_MEM_OBJECT_ALLOCATION_FAILURE";
    break;
  case CL_INVALID_EVENT_WAIT_LIST:
    oclHandles.error_str += "CL_INVALID_EVENT_WAIT_LIST";
    break;
  case CL_OUT_OF_HOST_MEMORY:
    oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
    break;
  default:
    oclHandles.error_str += "Unkown reseason";
    break;
  }
  if (oclHandles.cl_status != CL_SUCCESS)
    throw(oclHandles.error_str);
#endif
  /* Asynchronous: callers must synchronize before reading results. */
  //_clFinish();
  /*oclHandles.cl_status = clWaitForEvents(1, &e[0]);
     #ifdef ERRMSG
     if (oclHandles.cl_status!= CL_SUCCESS)
     throw(string("excpetion in _clEnqueueNDRange() -> clWaitForEvents"));
     #endif*/
}
//--------------------------------------------------------
// release OpenCL objects
void _clFree(cl_mem ob) throw(string) {
  /* Release a device buffer.  A NULL handle is a no-op.
   *
   * Fix: the ERRMSG diagnostics previously ran even when ob was NULL, in
   * which case they inspected the *stale* cl_status left by some earlier
   * call and could throw a spurious exception.  The check is now scoped to
   * the branch that actually performs a release. */
  if (ob != NULL) {
    oclHandles.cl_status = clReleaseMemObject(ob);
#ifdef ERRMSG
    oclHandles.error_str = "excpetion in _clFree() ->";
    switch (oclHandles.cl_status) {
    case CL_INVALID_MEM_OBJECT:
      oclHandles.error_str += "CL_INVALID_MEM_OBJECT";
      break;
    case CL_OUT_OF_RESOURCES:
      oclHandles.error_str += "CL_OUT_OF_RESOURCES";
      break;
    case CL_OUT_OF_HOST_MEMORY:
      oclHandles.error_str += "CL_OUT_OF_HOST_MEMORY";
      break;
    default:
      oclHandles.error_str += "Unkown reseason";
      break;
    }
    if (oclHandles.cl_status != CL_SUCCESS)
      throw(oclHandles.error_str);
#endif
  }
}
#endif //_CL_HELPER_
|
bml_normalize_ellpack_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_normalize.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_add_ellpack.h"
#include "bml_allocate_ellpack.h"
#include "bml_normalize_ellpack.h"
#include "bml_scale_ellpack.h"
#include "bml_types_ellpack.h"
#include <complex.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Normalize ellpack matrix given Gershgorin bounds.
*
* \ingroup normalize_group
*
* \param A The matrix
* \param mineval Calculated min value
* \param maxeval Calculated max value
*/
void TYPED_FUNC(
    bml_normalize_ellpack) (
    bml_matrix_ellpack_t * A,
    double mineval,
    double maxeval)
{
    /* Normalize A in place given its Gershgorin bounds [mineval, maxeval]:
     * A' = (maxeval * I - A) / (maxeval - mineval). */
    double spread = maxeval - mineval;
    double shift = maxeval / spread;
    REAL_T alpha = (REAL_T) - 1.0 / spread;
    double threshold = 0.0;

    bml_scale_inplace_ellpack(&alpha, A);
    bml_add_identity_ellpack(A, shift, threshold);
}
/** Calculate Gershgorin bounds for an ellpack matrix.
*
* \ingroup normalize_group
*
* \param A The matrix
* \param nrows Number of rows to use
* returns mineval Calculated min value
* returns maxeval Calculated max value
*/
void *TYPED_FUNC(
    bml_gershgorin_ellpack) (
    bml_matrix_ellpack_t * A)
{
    /* Gershgorin eigenvalue bounds for A over this rank's local rows.
     * Returns a caller-owned array of two doubles: {emin, emax}. */
    REAL_T radius, absham, dvalue;

    /* Fix: emax must start at the most negative representable double.
     * DBL_MIN is the smallest *positive* normalized double and would yield
     * a wrong emax whenever every Gershgorin upper bound is negative. */
    double emin = DBL_MAX;
    double emax = -DBL_MAX;
    double *eval = bml_allocate_memory(sizeof(double) * 2);
    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_index = (int *) A->index;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;
    int myRank = bml_getMyRank();
    REAL_T rad[N];
    REAL_T dval[N];
    REAL_T *A_value = (REAL_T *) A->value;
#ifdef USE_OMP_OFFLOAD
#pragma omp target update from(A_nnz[:N], A_index[:N*M], A_value[:N*M])
#endif
#pragma omp parallel for \
    shared(N, M, A_nnz, A_index, A_value) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    shared(rad, dval) \
    private(absham, radius, dvalue) \
    reduction(max:emax) \
    reduction(min:emin)
    /* Per local row: diagonal entry (disc center) and off-diagonal absolute
     * row sum (disc radius). */
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        radius = 0.0;
        dvalue = 0.0;
        for (int j = 0; j < A_nnz[i]; j++)
        {
            if (i == A_index[ROWMAJOR(i, j, N, M)])
                dvalue = A_value[ROWMAJOR(i, j, N, M)];
            else
            {
                absham = ABS(A_value[ROWMAJOR(i, j, N, M)]);
                radius += (double) absham;
            }
        }
        dval[i] = dvalue;
        rad[i] = radius;
    }
    /* Reduce the per-row disc bounds to local min/max estimates. */
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        if (REAL_PART(dval[i] + rad[i]) > emax)
            emax = REAL_PART(dval[i] + rad[i]);
        if (REAL_PART(dval[i] - rad[i]) < emin)
            emin = REAL_PART(dval[i] - rad[i]);
    }
#ifdef DO_MPI
    /* Combine across ranks for distributed matrices. */
    if (bml_getNRanks() > 1 && A->distribution_mode == distributed)
    {
        bml_minRealReduce(&emin);
        bml_maxRealReduce(&emax);
    }
#endif
    eval[0] = emin;
    eval[1] = emax;
    return eval;
}
/** Calculate Gershgorin bounds for a partial ellpack matrix.
*
* \ingroup normalize_group
*
* \param A The matrix
* \param nrows Number of rows to use
* returns mineval Calculated min value
* returns maxeval Calculated max value
*/
void *TYPED_FUNC(
    bml_gershgorin_partial_ellpack) (
    bml_matrix_ellpack_t * A,
    int nrows)
{
    /* Gershgorin eigenvalue bounds using only the first `nrows` rows of A.
     * Returns a caller-owned array of two doubles: {emin, emax}. */
    REAL_T radius, absham, dvalue;

    /* Fix: emax must start at the most negative representable double.
     * DBL_MIN is the smallest *positive* normalized double and would yield
     * a wrong emax whenever every Gershgorin upper bound is negative. */
    double emin = DBL_MAX;
    double emax = -DBL_MAX;
    double *eval = bml_allocate_memory(sizeof(double) * 2);
    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_index = (int *) A->index;
    REAL_T rad[N];
    REAL_T dval[N];
    REAL_T *A_value = (REAL_T *) A->value;
#ifdef USE_OMP_OFFLOAD
#pragma omp target update from(A_nnz[:N], A_index[:N*M], A_value[:N*M])
#endif
#pragma omp parallel for \
    shared(N, M, A_nnz, A_index, A_value) \
    shared(rad, dval) \
    private(absham, radius, dvalue) \
    reduction(max:emax) \
    reduction(min:emin)
    /* Per row: diagonal entry (disc center) and off-diagonal absolute row
     * sum (disc radius). */
    for (int i = 0; i < nrows; i++)
    {
        radius = 0.0;
        dvalue = 0.0;
        for (int j = 0; j < A_nnz[i]; j++)
        {
            if (i == A_index[ROWMAJOR(i, j, N, M)])
                dvalue = A_value[ROWMAJOR(i, j, N, M)];
            else
            {
                absham = ABS(A_value[ROWMAJOR(i, j, N, M)]);
                radius += (double) absham;
            }
        }
        dval[i] = dvalue;
        rad[i] = radius;
    }
    /* Reduce the per-row disc bounds to min/max estimates. */
    for (int i = 0; i < nrows; i++)
    {
        if (REAL_PART(dval[i] + rad[i]) > emax)
            emax = REAL_PART(dval[i] + rad[i]);
        if (REAL_PART(dval[i] - rad[i]) < emin)
            emin = REAL_PART(dval[i] - rad[i]);
    }
    eval[0] = emin;
    eval[1] = emax;
    return eval;
}
|
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
struct _ResizeFilter
{
  double
    (*filter)(const double,const ResizeFilter *),  /* filter kernel f(x) */
    (*window)(const double,const ResizeFilter *),  /* windowing function */
    support, /* filter region of support - the filter support limit */
    window_support, /* window support, usally equal to support (expert only) */
    scale, /* dimension scaling to fit window support (usally 1.0) */
    blur, /* x-scale (blur-sharpen) */
    coefficient[7]; /* cubic coefficents for BC-cubic filters */

  ResizeWeightingFunctionType
    filterWeightingType,   /* identifies the filter function above */
    windowWeightingType;   /* identifies the window function above */

  /* NOTE(review): presumably the usual MagickCore structure sanity marker --
     confirm against AcquireResizeFilter(). */
  size_t
    signature;
};
/*
Forward declaractions.
*/
static double
I0(double x),
BesselOrderOne(double),
Sinc(const double, const ResizeFilter *),
SincFast(const double, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
% static MagickRealtype *FilterName(const double x,const double support)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
% support. The GetResizeFilterWeight() ensures this a positive value.
%
% o resize_filter: current filter information. This allows function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
static double Blackman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman window: 0.42 + 0.5*cos(pi*x) + 0.08*cos(2*pi*x), folded into a
    single cosine evaluation via cos(2t) = 2*cos(t)^2 - 1, giving
    0.34 + c*(0.5 + c*0.16) with c = cos(pi*x) (one trig call, five flops;
    refactoring by Chantal Racette and Nicolas Robidoux).
  */
  const double c=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.34+c*(0.5+c*0.16));
}
static double Bohman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman window: (1-x)*cos(pi*x) + sin(pi*x)/pi.  Since the support is 1.0,
    sin(pi*x) >= 0 and can be recovered from the cosine with a single sqrt,
    leaving one trig call and 7 flops (refactoring by Nicolas Robidoux).
  */
  const double c=cos((double) (MagickPI*x));
  const double s=sqrt(1.0-c*c);
  magick_unreferenced(resize_filter);
  return((1.0-x)*c+(1.0/MagickPI)*s);
}
static double Box(const double magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Box filter: constant weight everywhere.  Deliberately NOT limited by the
    support, so that resize point sampling (support 0.0) still receives a
    weight for points beyond it.
  */
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  return(1.0);
}
static double Cosine(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Cosine window: cos((pi/2)*x).
  */
  return((double) cos((double) (MagickPI2*x)));
}
static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
  /*
    Cubic Filters using B,C determined values:
       Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
       Catmull-Rom         B = 0   C = 1/2  Interpolatory and exact on linears
       Spline              B = 1   C = 0    B-Spline Gaussian approximation
       Hermite             B = 0   C = 0    B-Spline interpolator

    See paper by Mitchell and Netravali, Reconstruction Filters in Computer
    Graphics Computer Graphics, Volume 22, Number 4, August 1988
    http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
    Mitchell.pdf.

    Coefficients are determined from B,C values (precomputed into
    resize_filter->coefficient[] by the filter setup code):
       P0 = (  6 - 2*B       )/6 = coeff[0]
       P1 =  0
       P2 = (-18 +12*B + 6*C )/6 = coeff[1]
       P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
       Q0 = (      8*B +24*C )/6 = coeff[3]
       Q1 = (    -12*B -48*C )/6 = coeff[4]
       Q2 = (      6*B +30*C )/6 = coeff[5]
       Q3 = (    - 1*B - 6*C )/6 = coeff[6]

    which are used to define the filter:
       P0 + P1*x + P2*x^2 + P3*x^3      0 <= x < 1
       Q0 + Q1*x + Q2*x^2 + Q3*x^3      1 <= x < 2

    which ensures function is continuous in value and derivative (slope).
  */
  if (x < 1.0)  /* inner lobe: P0 + P2*x^2 + P3*x^3 (P1 is always 0) */
    return(resize_filter->coefficient[0]+x*(x*
      (resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
  if (x < 2.0)  /* outer lobe: Q0 + Q1*x + Q2*x^2 + Q3*x^3 (Horner form) */
    return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
      (resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
  return(0.0);  /* outside the 2-lobe support */
}
static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
  /*
    Piecewise cubic spline approximations of sinc, selected by the filter's
    configured support: 2, 3 or 4 lobes.
  */
  if (resize_filter->support <= 2.0)
    {
      /*
        2-lobe Spline filter.
      */
      if (x < 1.0)
        return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
      if (x < 2.0)
        return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
      return(0.0);
    }
  if (resize_filter->support <= 3.0)
    {
      /*
        3-lobe Spline filter.
      */
      if (x < 1.0)
        return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
      if (x < 2.0)
        return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
      if (x < 3.0)
        return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
      return(0.0);
    }
  /*
    4-lobe Spline filter (each piece is a cubic in (x - lobe_start)).
  */
  if (x < 1.0)
    return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
  if (x < 2.0)
    return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
  if (x < 3.0)
    return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
  if (x < 4.0)
    return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
  return(0.0);
}
static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
  /*
    Gaussian filter: exp(-x^2/(2*sigma^2)), sigma = 1/2 unless the user
    overrides it.  coefficient[0] holds sigma, coefficient[1] the precomputed
    1/(2*sigma^2) multiplier; the 1/(sqrt(2*PI)*sigma^2) normalization of a
    probability density is irrelevant for a resampling filter and omitted.
    Keeping sigma separate from the blur/support settings lets small-sigma
    Gaussians be used without the filter "missing" pixels because the
    support becomes too small.
  */
  const double exponent=(double) (-resize_filter->coefficient[1]*x*x);
  return(exp(exponent));
}
static double Hann(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hann window: 0.5 + 0.5*cos(pi*x).
  */
  const double c=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.5+0.5*c);
}
static double Hamming(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hamming window (offset cosine): 0.54 + 0.46*cos(pi*x).
  */
  const double c=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.54+0.46*c);
}
static double Jinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Jinc function: J1(pi*x)/x, the radial analogue of sinc (see Pratt,
    "Digital Image Processing", p.97, and
    http://mathworld.wolfram.com/JincFunction.html).  Paul Heckbert's
    original "zoom" program called this "Bessel", but "Jinc" is the
    accurate name.  The x == 0 limit is pi/2.
  */
  return(x == 0.0 ? 0.5*MagickPI : BesselOrderOne(MagickPI*x)/x);
}
static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
  /*
    Kaiser window (Bessel windowing): I0(beta*sqrt(1-x^2)) / I0(beta).
    coefficient[0] is beta, a free value typically 5..8 (default 6.5, often
    expressed as alpha*PI); coefficient[1] is the 1/I0(beta) normalization,
    not strictly required but keeping the window comparable with the other
    windowing functions at x = 0.
  */
  const double beta=resize_filter->coefficient[0];
  return(resize_filter->coefficient[1]*I0(beta*sqrt((double) (1.0-x*x))));
}
static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
  double
    weight;

  register ssize_t
    k;

  ssize_t
    piece,
    pieces;

  /*
    Piecewise Lagrange polynomial fit of sinc.  The number of pieces is
    twice the window support (support 2 => lagrange-4, a piecewise cubic);
    `piece` selects which polynomial piece x falls into.  See Survey:
    Interpolation Methods, IEEE Transactions on Medical Imaging, Vol 18,
    No 11, November 1999, p1049-1075 -- Equation 27 on p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  pieces=(ssize_t) (2.0*resize_filter->window_support);
  piece=(ssize_t) (resize_filter->window_support+x);
  weight=1.0f;
  for (k=0; k < pieces; k++)
    if (k != piece)
      weight*=(piece-k-x)/(piece-k);
  return(weight);
}
static double Quadratic(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    2nd order (quadratic) B-Spline approximation of a Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    {
      const double t=x-1.5;
      return(0.5*t*t);
    }
  return(0.0);
}
static double Sinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);

  /*
    Exact scaled sinc(x) using a trig call:

      sinc(x) == sin(pi x)/(pi x), with sinc(0) == 1 by continuity.
  */
  if (x == 0.0)
    return((double) 1.0);
  {
    const double
      pix = (double) (MagickPI*x);

    return(sin((double) pix)/pix);
  }
}
static double SincFast(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);

  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.

    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor polynomials /
    Pade approximants, the approximations are computed with a completely
    different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops). More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error over
    the given interval.

    The Remez algorithm, as implemented in the boost library's minimax package,
    is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig formula.
  */
  if (x > 4.0)
    {
      /* Outside the fitted interval: exact sin(pi x)/(pi x) fallback. */
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
      The degree of the approximation scales with the quantum depth, since
      deeper quanta need smaller relative error to be pixel-exact.
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    /* (xx-1)(xx-4)(xx-9)(xx-16) pins the zeros of sinc at x=1,2,3,4. */
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.  Uses a rational (polynomial
      quotient) form p/q rather than a plain polynomial.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}
static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);

  /*
    Linear (1st order) B-Spline: the tent kernel of bilinear interpolation,
    or a Bartlett 2D cone filter.  Also used as a Bartlett windowing
    function for Sinc().
  */
  return(x < 1.0 ? 1.0-x : 0.0);
}
static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);

  /*
    Welch parabolic windowing filter: 1-x^2 inside the unit support.
  */
  return(x < 1.0 ? 1.0-x*x : 0.0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Spline Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Bohman Lanczos
% Hann Hamming Cosine
% Kaiser Welch Parzen
% Bartlett
%
% Special Purpose Filters
% Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp
% Robidoux RobidouxSharp
%
% The users "-filter" selection is used to lookup the default 'expert'
% settings for that filter from an internal table. However any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as is, and are limited to that filters support window
% (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5 or approximately 3*sigma
% as recommended by many references)
%
% The special 'cylindrical' filter flag will promote the default 4-lobed
% Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
% suited to this style of image resampling. This typically happens when using
% such a filter for image distortions.
%
% SPECIFIC FILTERS:
%
% Directly requesting 'Sinc', 'Jinc' function as a filter will force the use
% of function without any windowing, or promotion for cylindrical usage. This
% is not recommended, except by image processing experts, especially as part
% of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically specifies the use of a Sinc filter. SincFast uses highly
% accurate (and fast) polynomial (low Q) and rational (high Q) approximations,
% and will be used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing 'no-op"
% with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of Mitchell and Lanczos2Sharp.
%
% RobidouxSharp is a slightly sharper version of Robidoux, some believe it
% is too sharp. It is designed to minimize the maximum possible change in
% a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
% conditions. Amazingly Mitchell falls roughly between Robidoux and
% RobidouxSharp, though this seems to have been pure coincidence.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter default, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filters support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, using a non-windowing
% function is not advisable. If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
% "filter:win-support" Scale windowing function to this size instead. This
% causes the windowing (or self-windowing Lagrange filter) to act as if
% the support window is much larger than what is actually supplied
% to the calling operator. The filter however is still clipped to the
% real support size given, by the support range supplied to the caller.
% If unset this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% of > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic filter.
% If only one of these is given it is assumed to be a 'Keys' type of
% filter such that B+2C=1, where Keys 'alpha' value = C.
%
% Examples:
%
% Set a true un-windowed Sinc filter with 8 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterType filter_type,const MagickBooleanType cylindrical,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o blur: blur the filter by this amount, use 1.0 if unknown. Image
% artifact "filter:blur" will override this API call usage, including any
% internal change (such as for cylindrical usage).
%
% o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
% filter (Jinc).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image,
  const FilterType filter,const MagickBooleanType cylindrical,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  FilterType
    filter_type,
    window_type;

  double
    B,
    C,
    value;

  register ResizeFilter
    *resize_filter;

  /*
    Table Mapping given Filter, into Weighting and Windowing functions.
    A 'Box' windowing function means its a simple non-windowed filter.
    An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
    "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
    specifically requested by the user.

    WARNING: The order of this table must match the order of the FilterType
    enumeration specified in "resample.h", or the filter names will not match
    the filter being setup.

    You can check filter setups with the "filter:verbose" expert setting.
  */
  static struct
  {
    FilterType
      filter,
      window;
  } const mapping[SentinelFilter] =
  {
    { UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */
    { PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */
    { BoxFilter, BoxFilter }, /* Box averaging filter */
    { TriangleFilter, BoxFilter }, /* Linear interpolation filter */
    { HermiteFilter, BoxFilter }, /* Hermite interpolation filter */
    { SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */
    { SincFastFilter, HammingFilter }, /* Hamming -- '' variation */
    { SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */
    { GaussianFilter, BoxFilter }, /* Gaussian blur filter */
    { QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */
    { CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */
    { CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */
    { MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */
    { JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */
    { SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */
    { SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */
    { SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */
    { LanczosFilter, WelchFilter }, /* Welch -- parabolic (3 lobe) */
    { SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */
    { SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */
    { SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */
    { LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */
    { LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */
    { LanczosSharpFilter, LanczosSharpFilter }, /* | these require */
    { Lanczos2Filter, Lanczos2Filter }, /* | special handling */
    { Lanczos2SharpFilter, Lanczos2SharpFilter },
    { RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */
    { RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */
    { LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */
    { SplineFilter, BoxFilter }, /* Spline Cubic Filter */
    { LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */
    { CubicSplineFilter, BoxFilter }, /* CubicSpline (2/3/4 lobes) */
  };
  /*
    Table mapping the filter/window from the above table to an actual function.
    The default support size for that filter as a weighting function, the range
    to scale with to use that function as a sinc windowing function, (typ 1.0).

    Note that the filter_type -> function is 1 to 1 except for Sinc(),
    SincFast(), and CubicBC() functions, which may have multiple filter to
    function associations.

    See "filter:verbose" handling below for the function -> filter mapping.
  */
  static struct
  {
    double
      (*function)(const double,const ResizeFilter*),
      support, /* Default lobes/support size of the weighting filter. */
      scale, /* Support when function used as a windowing function
                Typically equal to the location of the first zero crossing. */
      B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. */
    ResizeWeightingFunctionType weightingFunctionType;
  } const filters[SentinelFilter] =
  {
    /* .--- support window (if used as a Weighting Function)
       |    .--- first crossing (if used as a Windowing Function)
       |    |    .--- B value for Cubic Function
       |    |    |    .---- C value for Cubic Function
       |    |    |    |                                    */
    { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */
    { Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */
    { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */
    { Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */
    { CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */
    { Hann, 1.0, 1.0, 0.0, 0.0, HannWeightingFunction }, /* Hann, cosine window */
    { Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */
    { Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */
    { Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */
    { Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */
    { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */
    { CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */
    { CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */
    { Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */
    { Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */
    { SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */
    { Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */
    { Welch, 1.0, 1.0, 0.0, 0.0, WelchWeightingFunction }, /* Welch (parabolic window) */
    { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */
    { Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */
    { Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */
    { Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */
    { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */
    { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */
    { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */
    { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */
    /* Robidoux: Keys cubic close to Lanczos2D sharpened */
    { CubicBC, 2.0, 1.1685777620836932,
      0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction },
    /* RobidouxSharp: Sharper version of Robidoux */
    { CubicBC, 2.0, 1.105822933719019,
      0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction },
    { Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */
    { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */
    { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Integer Radius */
    { CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Spline Lobes 2-lobed */
  };
  /*
    The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
    function being used as a filter. It is used by the "filter:lobes" expert
    setting and for 'lobes' for Jinc functions in the previous table. This way
    users do not have to deal with the highly irrational lobe sizes of the Jinc
    filter.

    Values taken from
    http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
    using Jv-function with v=1, then dividing by PI.
  */
  static double
    jinc_zeros[16] =
    {
      1.2196698912665045,
      2.2331305943815286,
      3.2383154841662362,
      4.2410628637960699,
      5.2427643768701817,
      6.2439216898644877,
      7.2447598687199570,
      8.2453949139520427,
      9.2458926849494673,
      10.246293348754916,
      11.246622794877883,
      12.246898461138105,
      13.247132522181061,
      14.247333735806849,
      15.247508563037300,
      16.247661874700962
    };

  /*
    Allocate resize filter.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(UndefinedFilter < filter && filter < SentinelFilter);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) exception;
  resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter));
  (void) memset(resize_filter,0,sizeof(*resize_filter));
  /*
    Defaults for the requested filter.
  */
  filter_type=mapping[filter].filter;
  window_type=mapping[filter].window;
  resize_filter->blur=1.0;
  /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */
  if ((cylindrical != MagickFalse) && (filter_type == SincFastFilter) &&
      (filter != SincFastFilter))
    filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */
  /* Expert filter setting override */
  artifact=GetImageArtifact(image,"filter:filter");
  /* NOTE(review): IsStringTrue() accepts only boolean-like strings
     ("true"/"yes"/"on"/"1"); a filter name such as "Lanczos" fails this test
     and falls into the else branch -- confirm this is the intended gate. */
  if (IsStringTrue(artifact) != MagickFalse)
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
      if ((UndefinedFilter < option) && (option < SentinelFilter))
        { /* Raw filter request - no window function. */
          filter_type=(FilterType) option;
          window_type=BoxFilter;
        }
      /* Filter override with a specific window function. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            window_type=(FilterType) option;
        }
    }
  else
    {
      /* Window specified, but no filter function?  Assume Sinc/Jinc. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          ssize_t
            option;

          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            {
              filter_type= cylindrical != MagickFalse ? JincFilter
                : SincFastFilter;
              window_type=(FilterType) option;
            }
        }
    }
  /* Assign the real functions to use for the filters selected. */
  resize_filter->filter=filters[filter_type].function;
  resize_filter->support=filters[filter_type].support;
  resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
  resize_filter->window=filters[window_type].function;
  resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
  resize_filter->scale=filters[window_type].scale;
  resize_filter->signature=MagickCoreSignature;
  /* Filter Modifications for orthogonal/cylindrical usage */
  if (cylindrical != MagickFalse)
    switch (filter_type)
    {
      case BoxFilter:
        /* Support for Cylindrical Box should be sqrt(2)/2 */
        resize_filter->support=(double) MagickSQ1_2;
        break;
      case LanczosFilter:
      case LanczosSharpFilter:
      case Lanczos2Filter:
      case Lanczos2SharpFilter:
      case LanczosRadiusFilter:
        /* Lanczos variants become Jinc-windowed Jinc for cylindrical use. */
        resize_filter->filter=filters[JincFilter].function;
        resize_filter->window=filters[JincFilter].function;
        resize_filter->scale=filters[JincFilter].scale;
        /* number of lobes (support window size) remain unchanged */
        break;
      default:
        break;
    }
  /* Global Sharpening (regardless of orthogonal/cylindrical) */
  switch (filter_type)
  {
    case LanczosSharpFilter:
      resize_filter->blur *= 0.9812505644269356;
      break;
    case Lanczos2SharpFilter:
      resize_filter->blur *= 0.9549963639785485;
      break;
    /* case LanczosRadius: blur adjust is done after lobes */
    default:
      break;
  }
  /*
    Expert Option Modifications.
  */
  /* User Gaussian Sigma Override - no support change */
  if ((resize_filter->filter == Gaussian) ||
      (resize_filter->window == Gaussian) ) {
    value=0.5;    /* Gaussian sigma default, half pixel */
    artifact=GetImageArtifact(image,"filter:sigma");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    /* Define coefficients for Gaussian */
    resize_filter->coefficient[0]=value;                 /* note sigma too */
    resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
    resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
       /* normalization - not actually needed or used! */
    if ( value > 0.5 )
      resize_filter->support *= 2*value;  /* increase support linearly */
  }
  /* User Kaiser Alpha Override - no support change */
  if ((resize_filter->filter == Kaiser) ||
      (resize_filter->window == Kaiser) ) {
    value=6.5; /* default beta value for Kaiser bessel windowing function */
    artifact=GetImageArtifact(image,"filter:alpha");  /* FUTURE: deprecate */
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    artifact=GetImageArtifact(image,"filter:kaiser-beta");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    artifact=GetImageArtifact(image,"filter:kaiser-alpha");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL)*MagickPI;
    /* Define coefficients for Kaiser Windowing Function */
    resize_filter->coefficient[0]=value;         /* alpha */
    resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value));
       /* normalization */
  }
  /* Support Overrides */
  artifact=GetImageArtifact(image,"filter:lobes");
  if (artifact != (const char *) NULL)
    {
      ssize_t
        lobes;

      lobes=(ssize_t) StringToLong(artifact);
      if (lobes < 1)
        lobes=1;
      resize_filter->support=(double) lobes;
    }
  if (resize_filter->filter == Jinc)
    {
      /*
        Convert a Jinc function lobes value to a real support value.
      */
      if (resize_filter->support > 16)
        resize_filter->support=jinc_zeros[15];  /* largest entry in table */
      else
        resize_filter->support=jinc_zeros[((long) resize_filter->support)-1];
      /*
        Blur this filter so support is an integer value (lobes dependent).
      */
      if (filter_type == LanczosRadiusFilter)
        resize_filter->blur*=floor(resize_filter->support)/
          resize_filter->support;
    }
  /*
    Expert blur override.
  */
  artifact=GetImageArtifact(image,"filter:blur");
  if (artifact != (const char *) NULL)
    resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
  if (resize_filter->blur < MagickEpsilon)
    resize_filter->blur=(double) MagickEpsilon;
  /*
    Expert override of the support setting.
  */
  artifact=GetImageArtifact(image,"filter:support");
  if (artifact != (const char *) NULL)
    resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Scale windowing function separately to the support 'clipping' window
    that calling operator is planning to actually use. (Expert override)
  */
  resize_filter->window_support=resize_filter->support; /* default */
  artifact=GetImageArtifact(image,"filter:win-support");
  if (artifact != (const char *) NULL)
    resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Adjust window function scaling to match windowing support for weighting
    function.  This avoids a division on every filter call.
  */
  resize_filter->scale/=resize_filter->window_support;
  /*
   * Set Cubic Spline B,C values, calculate Cubic coefficients.
   */
  B=0.0;
  C=0.0;
  if ((resize_filter->filter == CubicBC) ||
      (resize_filter->window == CubicBC) )
    {
      B=filters[filter_type].B;
      C=filters[filter_type].C;
      if (filters[window_type].function == CubicBC)
        {
          B=filters[window_type].B;
          C=filters[window_type].C;
        }
      artifact=GetImageArtifact(image,"filter:b");
      if (artifact != (const char *) NULL)
        {
          B=StringToDouble(artifact,(char **) NULL);
          C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
          artifact=GetImageArtifact(image,"filter:c"); /* user C override */
          if (artifact != (const char *) NULL)
            C=StringToDouble(artifact,(char **) NULL);
        }
      else
        {
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            {
              C=StringToDouble(artifact,(char **) NULL);
              B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
            }
        }
      {
        const double
          twoB = B+B;

        /*
          Convert B,C values into Cubic Coefficients. See CubicBC().
        */
        resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
        resize_filter->coefficient[1]=-3.0+twoB+C;
        resize_filter->coefficient[2]=2.0-1.5*B-C;
        resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
        resize_filter->coefficient[4]=-8.0*C-twoB;
        resize_filter->coefficient[5]=B+5.0*C;
        resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
      }
    }
  /*
    Expert Option Request for verbose details of the resulting filter.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp master
  {
#endif
    if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse)
      {
        double
          support,
          x;

        /*
          Set the weighting function properly when the weighting function
          may not exactly match the filter of the same name.  EG: a Point
          filter really uses a Box weighting function with a different
          support than is typically used.
        */
        if (resize_filter->filter == Box)       filter_type=BoxFilter;
        if (resize_filter->filter == Sinc)      filter_type=SincFilter;
        if (resize_filter->filter == SincFast)  filter_type=SincFastFilter;
        if (resize_filter->filter == Jinc)      filter_type=JincFilter;
        if (resize_filter->filter == CubicBC)   filter_type=CubicFilter;
        if (resize_filter->window == Box)       window_type=BoxFilter;
        if (resize_filter->window == Sinc)      window_type=SincFilter;
        if (resize_filter->window == SincFast)  window_type=SincFastFilter;
        if (resize_filter->window == Jinc)      window_type=JincFilter;
        if (resize_filter->window == CubicBC)   window_type=CubicFilter;
        /*
          Report Filter Details.
        */
        support=GetResizeFilterSupport(resize_filter);  /* practical_support */
        (void) FormatLocaleFile(stdout,
          "# Resampling Filter (for graphing)\n#\n");
        (void) FormatLocaleFile(stdout,"# filter = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,filter_type));
        (void) FormatLocaleFile(stdout,"# window = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,window_type));
        (void) FormatLocaleFile(stdout,"# support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->support);
        (void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->window_support);
        (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->blur);
        if ((filter_type == GaussianFilter) || (window_type == GaussianFilter))
          (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
            GetMagickPrecision(),(double) resize_filter->coefficient[0]);
        if ( filter_type == KaiserFilter || window_type == KaiserFilter )
          (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
            GetMagickPrecision(),(double) resize_filter->coefficient[0]);
        (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
          GetMagickPrecision(), (double) support);
        if ((filter_type == CubicFilter) || (window_type == CubicFilter))
          (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
            GetMagickPrecision(),(double) B,GetMagickPrecision(),(double) C);
        (void) FormatLocaleFile(stdout,"\n");
        /*
          Output values of resulting filter graph -- for graphing filter result.
        */
        for (x=0.0; x <= support; x+=0.01f)
          (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
            GetMagickPrecision(),(double)
            GetResizeFilterWeight(resize_filter,x));
        /*
          A final value so gnuplot can graph the 'stop' properly.
        */
        (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
          GetMagickPrecision(),0.0);
      }
    /* Output the above once only for each image - remove setting */
    /* NOTE(review): casts away the const on 'image' to delete the artifact;
       presumably safe because artifacts are auxiliary state -- confirm. */
    (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
% This is shortcut function for a fast interpolative resize using mesh
% interpolation. It works well for small resizes of less than +/- 50%
% of the original image size. For larger resizing on images a full
% filtered and slower resize function should be used instead.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: perform a fast interpolative resize using mesh
    interpolation; intended for modest size changes (within about +/- 50%).
  */
  return(InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order 1.  This is used to create the Jinc() filter function below.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
% j1(x) = x*J1(x), where J1() is a rational approximation to j1(x)/x;
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% double BesselOrderOne(double x)
%
% A description of each parameter follows:
%
% o x: double value.
%
*/
#undef I0
static double I0(double x)
{
  /*
    Modified Bessel function of the first kind, order zero, via its power
    series: sum_{k>=0} (x^2/4)^k/(k!)^2.  Terms are accumulated until they
    fall to MagickEpsilon or below.
  */
  double
    quarter_square,
    sum,
    term;

  register ssize_t
    k;

  quarter_square=x*x/4.0;
  sum=1.0;
  term=quarter_square;
  for (k=2; term > MagickEpsilon; k++)
  {
    sum+=term;
    term*=quarter_square/((double) k*k);
  }
  return(sum);
}
#undef J1
static double J1(double x)
{
  /*
    Rational (8,8)-degree approximation to j1(x)/x for x in (0,8],
    evaluated with Horner's rule in powers of x^2.
  */
  static const double
    Pone[] =
    {
      0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
      0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
      0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
      0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
      0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  double
    denominator,
    numerator,
    x_squared;

  register ssize_t
    k;

  x_squared=x*x;
  numerator=Pone[8];
  denominator=Qone[8];
  for (k=7; k >= 0; k--)
  {
    numerator=numerator*x_squared+Pone[k];
    denominator=denominator*x_squared+Qone[k];
  }
  return(numerator/denominator);
}
#undef P1
static double P1(double x)
{
  /*
    Rational approximation to the asymptotic p1() factor for x in (8,inf),
    evaluated with Horner's rule in powers of (8/x)^2.
  */
  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  double
    denominator,
    numerator,
    z;

  register ssize_t
    k;

  z=(8.0/x)*(8.0/x);
  numerator=Pone[5];
  denominator=Qone[5];
  for (k=4; k >= 0; k--)
  {
    numerator=numerator*z+Pone[k];
    denominator=denominator*z+Qone[k];
  }
  return(numerator/denominator);
}
#undef Q1
static double Q1(double x)
{
  /*
    Rational approximation to the asymptotic q1() factor for x in (8,inf),
    evaluated with Horner's rule in powers of (8/x)^2.
  */
  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  double
    denominator,
    numerator,
    z;

  register ssize_t
    k;

  z=(8.0/x)*(8.0/x);
  numerator=Pone[5];
  denominator=Qone[5];
  for (k=4; k >= 0; k--)
  {
    numerator=numerator*z+Pone[k];
    denominator=denominator*z+Qone[k];
  }
  return(numerator/denominator);
}
static double BesselOrderOne(double x)
{
  /*
    Bessel function of the first kind of order one.  Antisymmetric in x:
    j1(-x) == -j1(x).  Uses the rational approximation J1() on (0,8) and
    the asymptotic P1()/Q1() expansion for larger magnitudes.
  */
  double
    magnitude,
    value;

  if (x == 0.0)
    return(0.0);
  magnitude=fabs(x);
  if (magnitude < 8.0)
    value=magnitude*J1(magnitude);
  else
    value=sqrt((double) (2.0/(MagickPI*magnitude)))*(P1(magnitude)*
      (1.0/sqrt(2.0)*(sin((double) magnitude)-cos((double) magnitude)))-
      8.0/magnitude*Q1(magnitude)*(-1.0/sqrt(2.0)*(sin((double) magnitude)+
      cos((double) magnitude))));
  return(x < 0.0 ? -value : value);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroy the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  /*
    Invalidate the signature, release the filter, and return NULL so the
    caller can clear its pointer in one assignment.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  resize_filter->signature=(~MagickCoreSignature);
  return((ResizeFilter *) RelinquishMagickMemory(resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() return the current support window size for this
% filter. Note that this may have been enlarged by filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o filter: Image filter to use.
%
*/
/* Return the filter's expert coefficient array (e.g. cubic B,C settings). */
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}
/* Return the filter's blur (support-scaling) factor. */
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}
/* Return the filter's scale factor (applied to window-function input). */
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}
/* Return the support of the windowing function, unscaled by blur. */
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}
/* Return the enum identifying the filter's weighting function. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}
/* Return the enum identifying the filter's windowing function. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}
/* Return the practical support: filter support enlarged by the blur factor. */
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight evaluates the specified resize filter at the point x
% which usually lies between zero and the filters current 'support' and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% double GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const double x)
%
% A description of each parameter follows:
%
% o filter: the filter type.
%
% o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  /*
    Evaluate the filter at offset x (blur-scaled), modulated by its
    windowing function.
  */
  double
    blurred_x,
    window_weight;

  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  blurred_x=fabs((double) x)/resize_filter->blur;
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    window_weight=1.0;  /* Point or Box filter -- avoid division by zero */
  else
    window_weight=resize_filter->window(blurred_x*resize_filter->scale,
      resize_filter);
  return(window_weight*resize_filter->filter(blurred_x,resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolativeResizeImage() resizes an image using the specified
% interpolation method.
%
% The format of the InterpolativeResizeImage method is:
%
% Image *InterpolativeResizeImage(const Image *image,const size_t columns,
% const size_t rows,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag "Resize/Image"
  CacheView
    *image_view,
    *resize_view;
  Image
    *resize_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PointInfo
    scale;
  ssize_t
    y;
  /*
    Interpolatively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    PointInfo
      offset;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      continue;
    offset.y=((double) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      register ssize_t
        i;
      /*
        The sample point is invariant across channels; compute it once per
        destination pixel instead of once per channel.
      */
      offset.x=((double) x+0.5)*scale.x-0.5;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;
        PixelTrait
          resize_traits,
          traits;
        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        status=InterpolatePixelChannels(image,image_view,resize_image,method,
          offset.x,offset.y,q,exception);
        if (status == MagickFalse)
          break;
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /*
          progress is shared by all threads of the parallel loop; serialize
          the increment to avoid a data race.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_InterpolativeResizeImage)
#endif
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,const size_t columns,
% const size_t rows,const double delta_x,const double rigidity,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"
  CacheView
    *image_view,
    *rescale_view;
  gfloat
    *packet,
    *pixels;
  Image
    *rescale_image;
  int
    x_offset,
    y_offset;
  LqrCarver
    *carver;
  LqrRetVal
    lqr_status;
  MagickBooleanType
    status;
  MemoryInfo
    *pixel_info;
  register gfloat
    *q;
  ssize_t
    y;
  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  /*
    Copy the source pixels into a normalized float buffer for the carver.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_carver_set_preserve_input_image(carver);
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      lqr_carver_destroy(carver);  /* was leaked on this error path */
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      lqr_carver_destroy(carver);  /* was leaked on this error path */
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  /*
    Scan the carved result back into the rescaled image, pixel by pixel.
  */
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    register Quantum
      *magick_restrict p;
    register ssize_t
      i;
    p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (p == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;
      PixelTrait
        rescale_traits,
        traits;
      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),p);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
/*
  Fallback when ImageMagick is built without the liblqr delegate: raise a
  missing-delegate exception and return no image.
*/
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() doubles the size of the image with a pixel art scaling
% algorithm.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag "Magnify/Image"
  CacheView
    *image_view,
    *magnify_view;
  Image
    *magnify_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Initialize magnified image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue,
    exception);
  if (magnify_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Magnify image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,magnify_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Magnify this row of pixels.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity[9];
      register const Quantum
        *magick_restrict p;
      register Quantum
        *magick_restrict r;
      register ssize_t
        i;
      size_t
        channels;
      /*
        Sample the 3x3 neighborhood centered on the source pixel.
      */
      p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      channels=GetPixelChannels(image);
      for (i=0; i < 9; i++)
        intensity[i]=GetPixelIntensity(image,p+i*channels);
      r=q;
      if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
          (fabs(intensity[3]-intensity[5]) < MagickEpsilon))
        {
          /*
            Clone center pixel.
          */
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
        }
      else
        {
          /*
            Selectively clone pixel: each destination quadrant copies the
            matching neighbor when its intensity equals the opposite edge.
          */
          if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[3*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[5*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
          if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[3*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[5*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
        }
      q+=2*GetPixelChannels(magnify_image);
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /*
          progress is shared by all threads of the parallel loop; serialize
          the increment to avoid a data race.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MagnifyImage)
#endif
        proceed=SetImageProgress(image,MagnifyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally to
% half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  /*
    Convenience method: resize the image to half its dimensions using a
    spline filter.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  return(ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resize image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size in terms of
% real world units as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(Image *image,const double x_resolution,
% const double y_resolution,const FilterType filter,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag "Resample/Image"
  double
    x_density,
    y_density;
  Image
    *resample_image;
  size_t
    height,
    width;
  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Undefined source resolution defaults to 72 DPI; round to nearest pixel.
  */
  x_density=image->resolution.x == 0.0 ? 72.0 : image->resolution.x;
  y_density=image->resolution.y == 0.0 ? 72.0 : image->resolution.y;
  width=(size_t) (x_resolution*image->columns/x_density+0.5);
  height=(size_t) (y_resolution*image->rows/y_density+0.5);
  resample_image=ResizeImage(image,width,height,filter,exception);
  if (resample_image != (Image *) NULL)
    {
      resample_image->resolution.x=x_resolution;
      resample_image->resolution.y=y_resolution;
    }
  return(resample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged. Otherwise the filter defaults to a Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
% const FilterType filter,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  One source pixel's contribution to a destination pixel: the filter weight
  and the source pixel index (column for HorizontalFilter, row for
  VerticalFilter).
*/
typedef struct _ContributionInfo
{
  double
    weight;
  ssize_t
    pixel;
} ContributionInfo;
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  /*
    Release each per-thread contribution buffer, then the pointer table
    itself; returns NULL for convenient pointer clearing.
  */
  register ssize_t
    thread;

  assert(contribution != (ContributionInfo **) NULL);
  for (thread=0; thread < (ssize_t) GetMagickResourceLimit(ThreadResource);
       thread++)
    if (contribution[thread] != (ContributionInfo *) NULL)
      contribution[thread]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[thread]);
  return((ContributionInfo **) RelinquishMagickMemory(contribution));
}
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  /*
    Allocate one aligned buffer of 'count' contribution entries per worker
    thread; on any failure, tear down whatever was allocated and return NULL.
  */
  ContributionInfo
    **contribution;

  register ssize_t
    thread;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  (void) memset(contribution,0,number_threads*sizeof(*contribution));
  for (thread=0; thread < (ssize_t) number_threads; thread++)
  {
    contribution[thread]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[thread] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const double x_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag "Resize/Image"
  CacheView
    *image_view,
    *resize_view;
  ClassType
    storage_class;
  ContributionInfo
    **magick_restrict contributions;
  MagickBooleanType
    status;
  double
    scale,
    support;
  ssize_t
    x;
  /*
    Apply filter to resize horizontally from image to resize image.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    const int
      id = GetOpenMPThreadId();
    double
      bisect,
      density;
    register const Quantum
      *magick_restrict p;
    register ContributionInfo
      *magick_restrict contribution;
    register Quantum
      *magick_restrict q;
    register ssize_t
      y;
    ssize_t
      n,
      start,
      stop;
    if (status == MagickFalse)
      continue;
    /*
      Locate the window of source columns that contribute to column x and
      compute each column's filter weight.
    */
    bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;
        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;
        PixelChannel
          channel;
        PixelTrait
          resize_traits,
          traits;
        register ssize_t
          j;
        ssize_t
          k;
        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Copy channel: take the single source pixel nearest the bisect.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[j-start].pixel-contribution[0].pixel);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                (contribution[j].pixel-contribution[0].pixel);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[j].pixel-contribution[0].pixel);
          alpha=contribution[j].weight*QuantumScale*
            GetPixelAlpha(image,p+k*GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        /*
          The offset counter is shared by all threads of the parallel loop;
          serialize the increment to avoid a data race.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HorizontalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
/*
  VerticalFilter() resamples the rows of 'image' into 'resize_image' using
  the given resize filter (one half of a two-pass resize).  For each
  destination row it computes the window of contributing source rows and
  their normalized filter weights, then blends every pixel channel of the
  scanline; alpha-enabled channels are blended in premultiplied form and
  un-premultiplied via the accumulated weight 'gamma'.

  Returns MagickTrue on success; MagickFalse on pixel-cache or allocation
  failure (an exception is recorded in 'exception').  'offset' and 'span'
  drive the caller's progress monitor across both filter passes.
*/
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const double y_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  double
    scale,
    support;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  /* One per-thread contribution buffer; size covers the widest filter window. */
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    register const Quantum
      *magick_restrict p;

    register ContributionInfo
      *magick_restrict contribution;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      'bisect' is the source-row coordinate this destination row maps to;
      [start,stop) is the window of source rows within the filter support.
    */
    bisect=(double) (y+0.5)/y_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /* Fetch all contributing source rows in one cache-view request. */
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        register ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Copy-only channel (or masked pixel): take the single source row
              nearest to 'bisect' instead of filtering.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)*
              image->columns+x);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
                image->columns+x);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each contribution by the source pixel's
          alpha, then divide by the accumulated weight (gamma).
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
            image->columns+x);
          alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k*
            GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
/*
  ResizeImage() scales the image to the requested dimensions with the given
  filter.  When 'filter' is UndefinedFilter a filter is chosen automatically:
  Point for a no-op geometry, Mitchell for enlargements or colormapped/alpha
  images, Lanczos otherwise.  The resize is performed as two one-dimensional
  filter passes (horizontal and vertical), ordered so the smaller
  intermediate image is produced first.

  Returns the resized image, or NULL on failure (exception describes why).
*/
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterType filter,ExceptionInfo *exception)
{
  double
    x_factor,
    y_factor;

  FilterType
    filter_type;

  Image
    *filter_image,
    *resize_image;

  MagickOffsetType
    offset;

  MagickSizeType
    span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  /*
    Acquire resize image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Acquire resize filter.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  filter_type=LanczosFilter;
  if (filter != UndefinedFilter)
    filter_type=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      filter_type=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->alpha_trait != UndefinedPixelTrait) ||
          ((x_factor*y_factor) > 1.0))
        filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the GPU-accelerated path first; fall through to CPU on failure. */
  resize_image=AccelerateResizeImage(image,columns,rows,resize_filter,
    exception);
  if (resize_image != (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
#endif
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
  /* Intermediate image: resize the longer-relative axis first. */
  if (x_factor > y_factor)
    filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
  else
    filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(DestroyImage(resize_image));
    }
  /*
    Resize image.
  */
  offset=0;
  if (x_factor > y_factor)
    {
      /* 'span' is the total progress-monitor row count across both passes. */
      span=(MagickSizeType) (filter_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
        &offset,exception);
      status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
        span,&offset,exception);
    }
  else
    {
      span=(MagickSizeType) (filter_image->rows+columns);
      status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
        &offset,exception);
      status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
        span,&offset,exception);
    }
  /*
    Free resources.
  */
  filter_image=DestroyImage(filter_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  resize_image->type=image->type;
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SampleImage() scales the image to columns x rows with point sampling:
  each destination pixel is copied from the nearest source pixel, so no
  colors not present in the source are introduced.  The image artifact
  "sample:offset" (a percentage of each sample region, default 50, with an
  optional second value for the vertical axis) shifts the sampling point
  within each region.

  Returns the sampled image, or NULL on failure (exception describes why).
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag  "Sample/Image"

  CacheView
    *image_view,
    *sample_view;

  Image
    *sample_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    x1;

  ssize_t
    *x_offset,
    y;

  PointInfo
    sample_offset;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Set the sampling offset, default is in the mid-point of sample regions.
  */
  sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
  {
    const char
      *value;

    value=GetImageArtifact(image,"sample:offset");
    if (value != (char *) NULL)
      {
        GeometryInfo
          geometry_info;

        MagickStatusType
          flags;

        /*
          Fix: parse the geometry once only; the previous code called
          ParseGeometry() twice back-to-back with identical arguments.
        */
        flags=ParseGeometry(value,&geometry_info);
        sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
        if ((flags & SigmaValue) != 0)
          sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
      }
  }
  /*
    Allocate scan line buffer and column offset buffers.
  */
  x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
    sizeof(*x_offset));
  if (x_offset == (ssize_t *) NULL)
    {
      sample_image=DestroyImage(sample_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Precompute the source column for every destination column. */
  for (x1=0; x1 < (ssize_t) sample_image->columns; x1++)
    x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/
      sample_image->columns);
  /*
    Sample each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,sample_image,sample_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sample_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      y_offset;

    if (status == MagickFalse)
      continue;
    /* Nearest source row for this destination row. */
    y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
      sample_image->rows);
    p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Sample each column.
    */
    for (x=0; x < (ssize_t) sample_image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(sample_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          image_traits,
          traits;

        channel=GetPixelChannelChannel(sample_image,i);
        traits=GetPixelChannelTraits(sample_image,channel);
        image_traits=GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (image_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels(
          image)+i],q);
      }
      q+=GetPixelChannels(sample_image);
    }
    if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sample_view=DestroyCacheView(sample_view);
  x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
  sample_image->type=image->type;
  if (status == MagickFalse)
    sample_image=DestroyImage(sample_image);
  return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ScaleImage() changes the size of an image to the given dimensions using a
  box-average ("pixel mixing") algorithm: scanlines are accumulated in the
  Y direction into 'y_vector'/'scanline', then averaged in the X direction
  into 'scale_scanline'.  Alpha-enabled channels are accumulated
  premultiplied and un-premultiplied on output.

  Returns the scaled image, or NULL on failure (exception describes why).
  NOTE(review): this routine is deliberately single-threaded; 'span'/'scale'
  carry fractional coverage state from one row/column to the next.
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag  "Scale/Image"

  CacheView
    *image_view,
    *scale_view;

  double
    alpha,
    pixel[CompositePixelChannel],
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector;

  Image
    *scale_image;

  MagickBooleanType
    next_column,
    next_row,
    proceed,
    status;

  PixelTrait
    scale_traits;

  PointInfo
    scale,
    span;

  register ssize_t
    i;

  ssize_t
    n,
    number_rows,
    y;

  /*
    Initialize scaled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse)
    {
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.
  */
  x_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*x_vector));
  /* When row counts match, the Y pass is a no-op: scanline aliases x_vector. */
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(double *) AcquireQuantumMemory((size_t) image->columns,
      MaxPixelChannels*sizeof(*scanline));
  scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns,
    MaxPixelChannels*sizeof(*scale_scanline));
  y_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*y_vector));
  if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) ||
      (x_vector == (double *) NULL) || (y_vector == (double *) NULL))
    {
      if ((image->rows != scale_image->rows) && (scanline != (double *) NULL))
        scanline=(double *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (double *) NULL)
        scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
      if (x_vector != (double *) NULL)
        x_vector=(double *) RelinquishMagickMemory(x_vector);
      if (y_vector != (double *) NULL)
        y_vector=(double *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.
  */
  number_rows=0;
  next_row=MagickTrue;
  span.y=1.0;
  scale.y=(double) scale_image->rows/(double) image->rows;
  (void) memset(y_vector,0,(size_t) MaxPixelChannels*image->columns*
    sizeof(*y_vector));
  n=0;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  scale_view=AcquireAuthenticCacheView(scale_image,exception);
  for (y=0; y < (ssize_t) scale_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      break;
    q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        break;
      }
    alpha=1.0;
    if (scale_image->rows == image->rows)
      {
        /*
          Read a new scanline.
        */
        p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
          exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            break;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
            {
              p+=GetPixelChannels(image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            alpha=QuantumScale*GetPixelAlpha(image,p);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & BlendPixelTrait) == 0)
              {
                x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                continue;
              }
            /* Premultiply blendable channels by alpha. */
            x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
          }
          p+=GetPixelChannels(image);
        }
      }
    else
      {
        /*
          Scale Y direction.
        */
        while (scale.y < span.y)
        {
          if ((next_row != MagickFalse) &&
              (number_rows < (ssize_t) image->rows))
            {
              /*
                Read a new scanline.
              */
              p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
                exception);
              if (p == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  break;
                }
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                  {
                    p+=GetPixelChannels(image);
                    continue;
                  }
                if (image->alpha_trait != UndefinedPixelTrait)
                  alpha=QuantumScale*GetPixelAlpha(image,p);
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                {
                  PixelChannel channel = GetPixelChannelChannel(image,i);
                  PixelTrait traits = GetPixelChannelTraits(image,channel);
                  if ((traits & BlendPixelTrait) == 0)
                    {
                      x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                      continue;
                    }
                  x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
                }
                p+=GetPixelChannels(image);
              }
              number_rows++;
            }
          /* Accumulate this source row's full coverage into y_vector. */
          for (x=0; x < (ssize_t) image->columns; x++)
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              y_vector[x*GetPixelChannels(image)+i]+=scale.y*
                x_vector[x*GetPixelChannels(image)+i];
          span.y-=scale.y;
          scale.y=(double) scale_image->rows/(double) image->rows;
          next_row=MagickTrue;
        }
        if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
          {
            /*
              Read a new scanline.
            */
            p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
              exception);
            if (p == (const Quantum *) NULL)
              {
                status=MagickFalse;
                break;
              }
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
                {
                  p+=GetPixelChannels(image);
                  continue;
                }
              if (image->alpha_trait != UndefinedPixelTrait)
                alpha=QuantumScale*GetPixelAlpha(image,p);
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                if ((traits & BlendPixelTrait) == 0)
                  {
                    x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
                    continue;
                  }
                x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
              }
              p+=GetPixelChannels(image);
            }
            number_rows++;
            next_row=MagickFalse;
          }
        /* Emit the accumulated row plus the partial coverage of the last one. */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y*
              x_vector[x*GetPixelChannels(image)+i];
            scanline[x*GetPixelChannels(image)+i]=pixel[i];
            y_vector[x*GetPixelChannels(image)+i]=0.0;
          }
        }
        scale.y-=span.y;
        if (scale.y <= 0)
          {
            scale.y=(double) scale_image->rows/(double) image->rows;
            next_row=MagickTrue;
          }
        span.y=1.0;
      }
    if (scale_image->columns == image->columns)
      {
        /*
          Transfer scanline to scaled image.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              /* Un-premultiply with the (guarded) reciprocal of alpha. */
              alpha=QuantumScale*scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[
              x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    else
      {
        ssize_t
          t;

        /*
          Scale X direction.
        */
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          pixel[i]=0.0;
        next_column=MagickFalse;
        span.x=1.0;
        t=0;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          scale.x=(double) scale_image->columns/(double) image->columns;
          while (scale.x >= span.x)
          {
            if (next_column != MagickFalse)
              {
                for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                  pixel[i]=0.0;
                t++;
              }
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(image,channel);
              if (traits == UndefinedPixelTrait)
                continue;
              pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i];
              scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
            }
            scale.x-=span.x;
            span.x=1.0;
            next_column=MagickTrue;
          }
          if (scale.x > 0)
            {
              if (next_column != MagickFalse)
                {
                  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                    pixel[i]=0.0;
                  next_column=MagickFalse;
                  t++;
                }
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i];
              span.x-=scale.x;
            }
        }
        /* Flush any remaining partial column coverage. */
        if (span.x > 0)
          {
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i];
          }
        if ((next_column == MagickFalse) &&
            (t < (ssize_t) scale_image->columns))
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
        /*
          Transfer scanline to scaled image.
        */
        for (x=0; x < (ssize_t) scale_image->columns; x++)
        {
          if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(scale_image);
              continue;
            }
          if (image->alpha_trait != UndefinedPixelTrait)
            {
              alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+
                GetPixelChannelOffset(image,AlphaPixelChannel)];
              alpha=PerceptibleReciprocal(alpha);
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            scale_traits=GetPixelChannelTraits(scale_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (scale_traits == UndefinedPixelTrait))
              continue;
            if ((traits & BlendPixelTrait) == 0)
              {
                SetPixelChannel(scale_image,channel,ClampToQuantum(
                  scale_scanline[x*GetPixelChannels(image)+i]),q);
                continue;
              }
            SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*
              scale_scanline[x*GetPixelChannels(image)+i]),q);
          }
          q+=GetPixelChannels(scale_image);
        }
      }
    if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
    proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      {
        status=MagickFalse;
        break;
      }
  }
  scale_view=DestroyCacheView(scale_view);
  image_view=DestroyCacheView(image_view);
  /*
    Free allocated memory.
  */
  y_vector=(double *) RelinquishMagickMemory(y_vector);
  scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
  if (scale_image->rows != image->rows)
    scanline=(double *) RelinquishMagickMemory(scanline);
  x_vector=(double *) RelinquishMagickMemory(x_vector);
  scale_image->type=image->type;
  if (status == MagickFalse)
    scale_image=DestroyImage(scale_image);
  return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small low cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ThumbnailImage() creates a fast, low-cost thumbnail of the given
  dimensions.  Large reductions are first point-sampled at SampleFactor
  times the target size, then filtered down with ResizeImage().  All
  profiles except ICC/ICM color profiles are stripped, and freedesktop.org
  "Thumb::*" properties (URI, MTime, Size, Mimetype, dimensions, pages)
  are attached.

  Returns the thumbnail, or NULL on failure (exception describes why).
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor  5

  char
    filename[MagickPathExtent],
    value[MagickPathExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  double
    x_factor,
    y_factor;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
    else
      {
        Image
          *sample_image;

        /* Big reduction: cheap point-sample first, then filter down. */
        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        /* Deleting invalidates the iterator; restart it. */
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  GetPathComponent(image->magick_filename,TailPath,filename);
  (void) CopyMagickString(value,filename,MagickPathExtent);
  if ( GetPathAttributes(image->filename,&attributes) != MagickFalse )
    {
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value,exception);
    }
  /*
    Fix: a stray duplicate FormatLocaleString() here formatted
    attributes.st_mtime unconditionally -- reading an uninitialized
    'struct stat' whenever GetPathAttributes() failed -- and its result was
    immediately overwritten by FormatMagickSize() below.  It is removed.
  */
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  (void) SetImageProperty(thumbnail_image,"software",MagickAuthoritativeURL,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_rows);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value,
    exception);
  return(thumbnail_image);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.